Commit

fix memtrace bug
DmitriyMusatkin committed Oct 3, 2023
1 parent a83b3da commit 4101201
Showing 3 changed files with 53 additions and 20 deletions.
13 changes: 10 additions & 3 deletions source/memtrace.c
@@ -438,11 +438,18 @@ static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) {
 static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
     struct alloc_tracer *tracer = allocator->impl;
     void *new_ptr = old_ptr;
-    if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) {
-        return NULL;
-    }
 
+    /*
+     * Careful with the ordering of state cleanup here.
+     * The tracer keeps a hash table (alloc ptr as key) of meta info about each allocation.
+     * To avoid race conditions, the state update during realloc needs to be done in the
+     * following order:
+     * - remove meta info (other threads can't reuse that key, because the ptr is still valid)
+     * - realloc (can't fail, ptr might remain the same)
+     * - add meta info for the reallocated mem
+     */
     s_alloc_tracer_untrack(tracer, old_ptr);
+    aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size);
     s_alloc_tracer_track(tracer, new_ptr, new_size);
 
     return new_ptr;
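
The reordering above is what the commit changes in memtrace.c. To make the race concrete, here is a minimal, hypothetical sketch (not the aws-c-common implementation; toy_tracer, toy_track, toy_untrack and the flat table are invented for illustration) of a traced realloc that follows the same untrack -> realloc -> track order:

/*
 * Hypothetical sketch only. A toy tracer over a mutex-guarded table keyed by
 * allocation address, mirroring the ordering in the fix above.
 * Callers are assumed to initialize `lock` (e.g. with PTHREAD_MUTEX_INITIALIZER).
 */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

#define MAX_TRACKED 1024

struct toy_tracer {
    pthread_mutex_t lock;
    void *keys[MAX_TRACKED];   /* allocation address; NULL means the slot is free */
    size_t sizes[MAX_TRACKED]; /* size recorded for that address */
};

static void toy_track(struct toy_tracer *t, void *ptr, size_t size) {
    pthread_mutex_lock(&t->lock);
    for (size_t i = 0; i < MAX_TRACKED; ++i) {
        if (t->keys[i] == NULL) {
            t->keys[i] = ptr;
            t->sizes[i] = size;
            pthread_mutex_unlock(&t->lock);
            return;
        }
    }
    assert(0 && "tracking table full");
}

static void toy_untrack(struct toy_tracer *t, void *ptr) {
    pthread_mutex_lock(&t->lock);
    for (size_t i = 0; i < MAX_TRACKED; ++i) {
        if (t->keys[i] == ptr) {
            t->keys[i] = NULL;
            break;
        }
    }
    pthread_mutex_unlock(&t->lock);
}

/*
 * If realloc ran first, it could free old_ptr; another thread could then be
 * handed that same address by a fresh allocation and call toy_track() with it
 * while the stale entry was still in the table. Dropping the metadata before
 * the realloc closes that window.
 */
static void *toy_traced_realloc(struct toy_tracer *t, void *old_ptr, size_t new_size) {
    toy_untrack(t, old_ptr);                    /* 1. remove metadata while old_ptr is still live */
    void *new_ptr = realloc(old_ptr, new_size); /* 2. resize; the address may or may not change */
    toy_track(t, new_ptr, new_size);            /* 3. record metadata for the resulting block */
    return new_ptr;
}

The real tracer uses a hash table rather than a flat array, but the ordering argument in the comment above is the same.
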
2 changes: 2 additions & 0 deletions tests/CMakeLists.txt
@@ -317,6 +317,8 @@ add_test_case(sba_threaded_allocs_and_frees)
 add_test_case(sba_threaded_reallocs)
 add_test_case(sba_churn)
 add_test_case(sba_metrics)
+add_test_case(default_threaded_reallocs)
+add_test_case(default_threaded_allocs_and_frees)
 
 add_test_case(test_memtrace_none)
 add_test_case(test_memtrace_count)
58 changes: 41 additions & 17 deletions tests/alloc_test.c
@@ -126,39 +126,39 @@ static int s_sba_random_reallocs(struct aws_allocator *allocator, void *ctx) {
 }
 AWS_TEST_CASE(sba_random_reallocs, s_sba_random_reallocs)
 
-struct sba_thread_test_data {
-    struct aws_allocator *sba;
+struct allocator_thread_test_data {
+    struct aws_allocator *test_allocator;
     uint32_t thread_idx;
 };
 
-static void s_sba_threaded_alloc_worker(void *user_data) {
-    struct aws_allocator *sba = ((struct sba_thread_test_data *)user_data)->sba;
+static void s_threaded_alloc_worker(void *user_data) {
+    struct aws_allocator *test_allocator = ((struct allocator_thread_test_data *)user_data)->test_allocator;
 
     void *allocs[NUM_TEST_ALLOCS];
     for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) {
         size_t size = aws_max_size(rand() % 512, 1);
-        void *alloc = aws_mem_acquire(sba, size);
+        void *alloc = aws_mem_acquire(test_allocator, size);
         AWS_FATAL_ASSERT(alloc);
         allocs[count] = alloc;
     }
 
     for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) {
         void *alloc = allocs[count];
-        aws_mem_release(sba, alloc);
+        aws_mem_release(test_allocator, alloc);
     }
 }
 
-static void s_sba_thread_test(struct aws_allocator *allocator, void (*thread_fn)(void *), struct aws_allocator *sba) {
+static void s_thread_test(struct aws_allocator *allocator, void (*thread_fn)(void *), struct aws_allocator *test_allocator) {
     const struct aws_thread_options *thread_options = aws_default_thread_options();
     struct aws_thread threads[NUM_TEST_THREADS];
-    struct sba_thread_test_data thread_data[NUM_TEST_THREADS];
+    struct allocator_thread_test_data thread_data[NUM_TEST_THREADS];
     AWS_ZERO_ARRAY(threads);
     AWS_ZERO_ARRAY(thread_data);
     for (size_t thread_idx = 0; thread_idx < AWS_ARRAY_SIZE(threads); ++thread_idx) {
         struct aws_thread *thread = &threads[thread_idx];
         aws_thread_init(thread, allocator);
-        struct sba_thread_test_data *data = &thread_data[thread_idx];
-        data->sba = sba;
+        struct allocator_thread_test_data *data = &thread_data[thread_idx];
+        data->test_allocator = test_allocator;
         data->thread_idx = (uint32_t)thread_idx;
         aws_thread_launch(thread, thread_fn, data, thread_options);
     }
@@ -175,7 +175,7 @@ static int s_sba_threaded_allocs_and_frees(struct aws_allocator *allocator, void
 
     struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true);
 
-    s_sba_thread_test(allocator, s_sba_threaded_alloc_worker, sba);
+    s_thread_test(allocator, s_threaded_alloc_worker, sba);
 
     aws_small_block_allocator_destroy(sba);
 
@@ -184,8 +184,8 @@ static int s_sba_threaded_allocs_and_frees(struct aws_allocator *allocator, void
 AWS_TEST_CASE(sba_threaded_allocs_and_frees, s_sba_threaded_allocs_and_frees)
 
 static void s_sba_threaded_realloc_worker(void *user_data) {
-    struct sba_thread_test_data *thread_data = user_data;
-    struct aws_allocator *sba = thread_data->sba;
+    struct allocator_thread_test_data *thread_data = user_data;
+    struct aws_allocator *test_allocator = thread_data->test_allocator;
     void *alloc = NULL;
     size_t size = 0;
     for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) {
@@ -194,7 +194,7 @@ static void s_sba_threaded_realloc_worker(void *user_data) {
         if (old_size) {
             AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1));
         }
-        AWS_FATAL_ASSERT(0 == aws_mem_realloc(sba, &alloc, old_size, size));
+        AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, old_size, size));
         /* If there was a value, make sure it's still there */
         if (old_size && size) {
             AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1));
@@ -203,7 +203,7 @@ static void s_sba_threaded_realloc_worker(void *user_data) {
             memset(alloc, (int)thread_data->thread_idx, size);
         }
     }
-    AWS_FATAL_ASSERT(0 == aws_mem_realloc(sba, &alloc, size, 0));
+    AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, size, 0));
 }
 
 static int s_sba_threaded_reallocs(struct aws_allocator *allocator, void *ctx) {
@@ -212,7 +212,7 @@ static int s_sba_threaded_reallocs(struct aws_allocator *allocator, void *ctx) {
 
     struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true);
 
-    s_sba_thread_test(allocator, s_sba_threaded_realloc_worker, sba);
+    s_thread_test(allocator, s_sba_threaded_realloc_worker, sba);
 
     aws_small_block_allocator_destroy(sba);
 
@@ -313,10 +313,34 @@ static int s_sba_metrics_test(struct aws_allocator *allocator, void *ctx) {
 
     ASSERT_INT_EQUALS(0, aws_small_block_allocator_bytes_active(sba));
 
-    /* after freeing everything, we should have reliniquished all but one page in each bin */
+    /* after freeing everything, we should have relinquished all but one page in each bin */
     ASSERT_INT_EQUALS(5 * aws_small_block_allocator_page_size(sba), aws_small_block_allocator_bytes_reserved(sba));
 
     aws_small_block_allocator_destroy(sba);
     return 0;
 }
 AWS_TEST_CASE(sba_metrics, s_sba_metrics_test)
+
+/*
+ * Default allocator tests.
+ */
+static int s_default_threaded_reallocs(struct aws_allocator *allocator, void *ctx) {
+    (void)ctx;
+    srand(15);
+
+    s_thread_test(allocator, s_sba_threaded_realloc_worker, allocator);
+
+    return 0;
+}
+AWS_TEST_CASE(default_threaded_reallocs, s_default_threaded_reallocs)
+
+static int s_default_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) {
+    (void)ctx;
+    srand(99);
+
+    s_thread_test(allocator, s_threaded_alloc_worker, allocator);
+
+    return 0;
+}
+AWS_TEST_CASE(default_threaded_allocs_and_frees, s_default_threaded_allocs_and_frees)
