diff --git a/CMakeLists.txt b/CMakeLists.txt index 8375ea518..0ff1d9e7e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -70,6 +70,7 @@ if (WIN32) file(GLOB AWS_COMMON_OS_SRC "source/windows/*.c" + "source/platform_fallback_stubs/system_info.c" ) if (MSVC) @@ -108,19 +109,26 @@ else () # Don't add the exact path to CoreFoundation as this would hardcode the SDK version list(APPEND PLATFORM_LIBS dl Threads::Threads "-framework CoreFoundation") list (APPEND AWS_COMMON_OS_SRC "source/darwin/*.c") # OS specific includes + list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") # Android does not link to libpthread nor librt, so this is fine list(APPEND PLATFORM_LIBS dl m Threads::Threads rt) list (APPEND AWS_COMMON_OS_SRC "source/linux/*.c") # OS specific includes elseif(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") list(APPEND PLATFORM_LIBS dl m thr execinfo) + list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif(CMAKE_SYSTEM_NAME STREQUAL "NetBSD") list(APPEND PLATFORM_LIBS dl m Threads::Threads execinfo) + list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif(CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") list(APPEND PLATFORM_LIBS m Threads::Threads execinfo) + list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif(CMAKE_SYSTEM_NAME STREQUAL "Android") list(APPEND PLATFORM_LIBS log) file(GLOB ANDROID_SRC "source/android/*.c") list(APPEND AWS_COMMON_OS_SRC "${ANDROID_SRC}") + list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") + else() + list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") endif() endif() @@ -298,6 +306,7 @@ configure_file(${CONFIG_HEADER_TEMPLATE} if (ALLOW_CROSS_COMPILED_TESTS OR NOT CMAKE_CROSSCOMPILING) if (BUILD_TESTING) add_subdirectory(tests) + add_subdirectory(bin/system_info) endif() endif() diff --git a/bin/system_info/CMakeLists.txt 
b/bin/system_info/CMakeLists.txt new file mode 100644 index 000000000..cb4ca0081 --- /dev/null +++ b/bin/system_info/CMakeLists.txt @@ -0,0 +1,18 @@ +project(print-sys-info C) + +list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") + +file(GLOB SI_SRC + "*.c" + ) + +set(SI_PROJECT_NAME print-sys-info) +add_executable(${SI_PROJECT_NAME} ${SI_SRC}) +aws_set_common_properties(${SI_PROJECT_NAME}) + + +target_include_directories(${SI_PROJECT_NAME} PUBLIC + $ + $) + +target_link_libraries(${SI_PROJECT_NAME} PRIVATE aws-c-common) diff --git a/bin/system_info/print_system_info.c b/bin/system_info/print_system_info.c new file mode 100644 index 000000000..c29877086 --- /dev/null +++ b/bin/system_info/print_system_info.c @@ -0,0 +1,48 @@ + + +#include +#include +#include + +int main(void) { + struct aws_allocator *allocator = aws_default_allocator(); + aws_common_library_init(allocator); + struct aws_logger_standard_options options = { + .file = stderr, + .level = AWS_LOG_LEVEL_TRACE, + }; + + struct aws_logger logger; + aws_logger_init_standard(&logger, allocator, &options); + aws_logger_set(&logger); + + struct aws_system_environment *env = aws_system_environment_load(allocator); + + fprintf(stdout, "crt-detected env: {\n"); + + struct aws_byte_cursor virtualization_vendor = aws_system_environment_get_virtualization_vendor(env); + fprintf( + stdout, + " 'virtualization vendor': '" PRInSTR "',\n", + (int)virtualization_vendor.len, + virtualization_vendor.ptr); + struct aws_byte_cursor product_name = aws_system_environment_get_virtualization_product_name(env); + fprintf(stdout, " 'product name': '" PRInSTR "',\n", (int)product_name.len, product_name.ptr); + fprintf( + stdout, " 'number of processors': '%lu',\n", (unsigned long)aws_system_environment_get_processor_count(env)); + size_t numa_nodes = aws_system_environment_get_cpu_group_count(env); + + if (numa_nodes > 1) { + fprintf(stdout, " 'numa architecture': 'true',\n"); + fprintf(stdout, " 'number of numa 
nodes': '%lu'\n", (unsigned long)numa_nodes); + } else { + fprintf(stdout, " 'numa architecture': 'false'\n"); + } + + fprintf(stdout, "}\n"); + aws_system_environment_release(env); + aws_logger_clean_up(&logger); + + aws_common_library_clean_up(); + return 0; +} diff --git a/include/aws/common/byte_buf.h b/include/aws/common/byte_buf.h index 17b0ae59b..6fc5c3ff9 100644 --- a/include/aws/common/byte_buf.h +++ b/include/aws/common/byte_buf.h @@ -135,6 +135,18 @@ AWS_COMMON_API int aws_byte_buf_init_copy( AWS_COMMON_API int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename); +/** + * Same as aws_byte_buf_init_from_file(), but for reading "special files" like /proc/cpuinfo. + * These files don't accurately report their size, so size_hint is used as initial buffer size, + * and the buffer grows until the whole file is read. + */ +AWS_COMMON_API +int aws_byte_buf_init_from_file_with_size_hint( + struct aws_byte_buf *out_buf, + struct aws_allocator *alloc, + const char *filename, + size_t size_hint); + +/** + * Evaluates the set of properties that define the shape of all valid aws_byte_buf structures. + * It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion). diff --git a/include/aws/common/private/system_info_priv.h b/include/aws/common/private/system_info_priv.h new file mode 100644 index 000000000..27b1d4ad1 --- /dev/null +++ b/include/aws/common/private/system_info_priv.h @@ -0,0 +1,37 @@ +#ifndef AWS_COMMON_PRIVATE_SYSTEM_INFO_PRIV_H +#define AWS_COMMON_PRIVATE_SYSTEM_INFO_PRIV_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include +#include +#include +#include + +struct aws_system_environment { + struct aws_allocator *allocator; + struct aws_ref_count ref_count; + struct aws_byte_buf virtualization_vendor; + struct aws_byte_buf product_name; + enum aws_platform_os os; + size_t cpu_count; + size_t cpu_group_count; + void *impl; +}; + +/** + * For internal implementors. Fill in info in env that you're able to grab, such as dmi info, os version strings etc... + * in here. The default just returns AWS_OP_SUCCESS. This is currently only implemented for linux. + * + * Returns AWS_OP_ERR if the implementation wasn't able to fill in required information for the platform. + */ +int aws_system_environment_load_platform_impl(struct aws_system_environment *env); + +/** + * For internal implementors. Cleans up anything allocated in aws_system_environment_load_platform_impl, + * but does not release the memory for env. + */ +void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env); + +#endif // AWS_COMMON_PRIVATE_SYSTEM_INFO_PRIV_H diff --git a/include/aws/common/system_info.h b/include/aws/common/system_info.h index fe7604120..91da41f9d 100644 --- a/include/aws/common/system_info.h +++ b/include/aws/common/system_info.h @@ -6,6 +6,7 @@ * SPDX-License-Identifier: Apache-2.0. */ +#include #include AWS_PUSH_SANE_WARNING_LEVEL @@ -21,8 +22,54 @@ struct aws_cpu_info { bool suspected_hyper_thread; }; +struct aws_system_environment; + AWS_EXTERN_C_BEGIN +/** + * Allocates and initializes information about the system the current process is executing on. + * If successful returns an instance of aws_system_environment. If it fails, it will return NULL. + * + * Note: This api is used internally and is still early in its evolution. + * It may change in incompatible ways in the future. 
+ */ +AWS_COMMON_API +struct aws_system_environment *aws_system_environment_load(struct aws_allocator *allocator); + +AWS_COMMON_API +struct aws_system_environment *aws_system_environment_acquire(struct aws_system_environment *env); + +AWS_COMMON_API +void aws_system_environment_release(struct aws_system_environment *env); + +/** + * Returns the virtualization vendor for the specified compute environment, e.g. "Xen, Amazon EC2, etc..." + * + * The return value may be empty and in that case no vendor was detected. + */ +AWS_COMMON_API +struct aws_byte_cursor aws_system_environment_get_virtualization_vendor(const struct aws_system_environment *env); + +/** + * Returns the product name for the specified compute environment. For example, the Amazon EC2 Instance type. + * + * The return value may be empty and in that case no vendor was detected. + */ +AWS_COMMON_API +struct aws_byte_cursor aws_system_environment_get_virtualization_product_name(const struct aws_system_environment *env); + +/** + * Returns the number of processors for the specified compute environment. + */ +AWS_COMMON_API +size_t aws_system_environment_get_processor_count(struct aws_system_environment *env); + +/** + * Returns the number of separate cpu groupings (multi-socket configurations or NUMA). 
+ */ +AWS_COMMON_API +size_t aws_system_environment_get_cpu_group_count(const struct aws_system_environment *env); + /* Returns the OS this was built under */ AWS_COMMON_API enum aws_platform_os aws_get_platform_build_os(void); diff --git a/include/aws/testing/aws_test_harness.h b/include/aws/testing/aws_test_harness.h index 2f743be54..f9bf631e4 100644 --- a/include/aws/testing/aws_test_harness.h +++ b/include/aws/testing/aws_test_harness.h @@ -474,10 +474,11 @@ static inline int s_aws_run_test_case(struct aws_test_harness *harness) { * but aws_mem_tracer_dump() needs a valid logger to be active */ aws_logger_set(&err_logger); - const size_t leaked_bytes = aws_mem_tracer_count(allocator); + const size_t leaked_allocations = aws_mem_tracer_count(allocator); + const size_t leaked_bytes = aws_mem_tracer_bytes(allocator); if (leaked_bytes) { aws_mem_tracer_dump(allocator); - PRINT_FAIL_INTERNAL0("Test leaked memory: %zu bytes", leaked_bytes); + PRINT_FAIL_INTERNAL0("Test leaked memory: %zu bytes %zu allocations", leaked_bytes, leaked_allocations); goto fail; } diff --git a/source/file.c b/source/file.c index 00723555f..504e547f5 100644 --- a/source/file.c +++ b/source/file.c @@ -11,6 +11,16 @@ #include +/* For "special files", the OS often lies about size. + * For example, on Amazon Linux 2: + * /proc/cpuinfo: size is 0, but contents are several KB of data. + * /sys/devices/virtual/dmi/id/product_name: size is 4096, but contents are "c5.2xlarge" + * + * Therefore, we may need to grow the buffer as we read until EOF. + * This is the min/max step size for growth. */ +#define MIN_BUFFER_GROWTH_READING_FILES 32 +#define MAX_BUFFER_GROWTH_READING_FILES 4096 + FILE *aws_fopen(const char *file_path, const char *mode) { if (!file_path || strlen(file_path) == 0) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. 
path is empty"); @@ -34,7 +44,13 @@ FILE *aws_fopen(const char *file_path, const char *mode) { return file; } -int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) { +/* Helper function used by aws_byte_buf_init_from_file() and aws_byte_buf_init_from_file_with_size_hint() */ +static int s_byte_buf_init_from_file_impl( + struct aws_byte_buf *out_buf, + struct aws_allocator *alloc, + const char *filename, + bool use_file_size_as_hint, + size_t size_hint) { AWS_ZERO_STRUCT(*out_buf); FILE *fp = aws_fopen(filename, "rb"); @@ -42,46 +58,78 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat goto error; } - int64_t len64 = 0; - if (aws_file_get_length(fp, &len64)) { - AWS_LOGF_ERROR( - AWS_LS_COMMON_IO, - "static: Failed to get file length. file:'%s' error:%s", - filename, - aws_error_name(aws_last_error())); - goto error; - } + if (use_file_size_as_hint) { + int64_t len64 = 0; + if (aws_file_get_length(fp, &len64)) { + AWS_LOGF_ERROR( + AWS_LS_COMMON_IO, + "static: Failed to get file length. file:'%s' error:%s", + filename, + aws_error_name(aws_last_error())); + goto error; + } - if (len64 >= SIZE_MAX) { - aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); - AWS_LOGF_ERROR( - AWS_LS_COMMON_IO, - "static: File too large to read into memory. file:'%s' error:%s", - filename, - aws_error_name(aws_last_error())); - goto error; + if (len64 >= SIZE_MAX) { + aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); + AWS_LOGF_ERROR( + AWS_LS_COMMON_IO, + "static: File too large to read into memory. 
file:'%s' error:%s", + filename, + aws_error_name(aws_last_error())); + goto error; + } + + /* Leave space for null terminator at end of buffer */ + size_hint = (size_t)len64 + 1; } - size_t allocation_size = (size_t)len64 + 1; - aws_byte_buf_init(out_buf, alloc, allocation_size); + aws_byte_buf_init(out_buf, alloc, size_hint); + + /* Read in a loop until we hit EOF */ + while (true) { + /* Expand buffer if necessary (at a reasonable rate) */ + if (out_buf->len == out_buf->capacity) { + size_t additional_capacity = out_buf->capacity; + additional_capacity = aws_max_size(MIN_BUFFER_GROWTH_READING_FILES, additional_capacity); + additional_capacity = aws_min_size(MAX_BUFFER_GROWTH_READING_FILES, additional_capacity); + if (aws_byte_buf_reserve_relative(out_buf, additional_capacity)) { + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename); + goto error; + } + } - /* Ensure compatibility with null-terminated APIs, but don't consider - * the null terminator part of the length of the payload */ - out_buf->len = out_buf->capacity - 1; - out_buf->buffer[out_buf->len] = 0; + size_t space_available = out_buf->capacity - out_buf->len; + size_t bytes_read = fread(out_buf->buffer + out_buf->len, 1, space_available, fp); + out_buf->len += bytes_read; - size_t read = fread(out_buf->buffer, 1, out_buf->len, fp); - if (read < out_buf->len) { - int errno_value = ferror(fp) ? errno : 0; /* Always cache errno before potential side-effect */ - aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_READ_FAILURE); - AWS_LOGF_ERROR( - AWS_LS_COMMON_IO, - "static: Failed reading file:'%s' errno:%d aws-error:%s", - filename, - errno_value, - aws_error_name(aws_last_error())); - goto error; + /* If EOF, we're done! */ + if (feof(fp)) { + break; + } + + /* If no EOF but we read 0 bytes, there's been an error or at least we need + * to treat it like one because we can't just infinitely loop. 
*/ + if (bytes_read == 0) { + int errno_value = ferror(fp) ? errno : 0; /* Always cache errno before potential side-effect */ + aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_READ_FAILURE); + AWS_LOGF_ERROR( + AWS_LS_COMMON_IO, + "static: Failed reading file:'%s' errno:%d aws-error:%s", + filename, + errno_value, + aws_error_name(aws_last_error())); + goto error; + } + } + + /* A null terminator is appended, but is not included as part of the length field. */ + if (out_buf->len == out_buf->capacity) { + if (aws_byte_buf_reserve_relative(out_buf, 1)) { + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename); + goto error; + } } + out_buf->buffer[out_buf->len] = 0; fclose(fp); return AWS_OP_SUCCESS; @@ -94,6 +142,19 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat return AWS_OP_ERR; } +int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) { + return s_byte_buf_init_from_file_impl(out_buf, alloc, filename, true /*use_file_size_as_hint*/, 0 /*size_hint*/); +} + +int aws_byte_buf_init_from_file_with_size_hint( + struct aws_byte_buf *out_buf, + struct aws_allocator *alloc, + const char *filename, + size_t size_hint) { + + return s_byte_buf_init_from_file_impl(out_buf, alloc, filename, false /*use_file_size_as_hint*/, size_hint); +} + bool aws_is_any_directory_separator(char value) { return value == '\\' || value == '/'; } diff --git a/source/linux/system_info.c b/source/linux/system_info.c new file mode 100644 index 000000000..2d9c5a120 --- /dev/null +++ b/source/linux/system_info.c @@ -0,0 +1,24 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include +#include + +int aws_system_environment_load_platform_impl(struct aws_system_environment *env) { + /* provide size_hint when reading "special files", since some platforms mis-report these files' size as 4KB */ + aws_byte_buf_init_from_file_with_size_hint( + &env->virtualization_vendor, env->allocator, "/sys/devices/virtual/dmi/id/sys_vendor", 32 /*size_hint*/); + + /* whether this one works depends on if this is a sysfs filesystem. If it fails, it will just be empty + * and these APIs are a best effort at the moment. We can add fallbacks as the loaders get more complicated. */ + aws_byte_buf_init_from_file_with_size_hint( + &env->product_name, env->allocator, "/sys/devices/virtual/dmi/id/product_name", 32 /*size_hint*/); + + return AWS_OP_SUCCESS; +} + +void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env) { + aws_byte_buf_clean_up(&env->virtualization_vendor); + aws_byte_buf_clean_up(&env->product_name); +} diff --git a/source/memtrace.c b/source/memtrace.c index eea342ad1..9c5bff60c 100644 --- a/source/memtrace.c +++ b/source/memtrace.c @@ -438,11 +438,18 @@ static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) { static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) { struct alloc_tracer *tracer = allocator->impl; void *new_ptr = old_ptr; - if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) { - return NULL; - } + /* + * Careful with the ordering of state clean up here. + * Tracer keeps a hash table (alloc ptr as key) of meta info about each allocation. 
+ * To avoid race conditions during realloc state update needs to be done in + * following order to avoid race conditions: + * - remove meta info (other threads cant reuse that key, cause ptr is still valid ) + * - realloc (cant fail, ptr might remain the same) + * - add meta info for reallocated mem + */ s_alloc_tracer_untrack(tracer, old_ptr); + aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size); s_alloc_tracer_track(tracer, new_ptr, new_size); return new_ptr; diff --git a/source/platform_fallback_stubs/system_info.c b/source/platform_fallback_stubs/system_info.c new file mode 100644 index 000000000..2b81469a8 --- /dev/null +++ b/source/platform_fallback_stubs/system_info.c @@ -0,0 +1,21 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include + +#include + +int aws_system_environment_load_platform_impl(struct aws_system_environment *env) { + (void)env; + AWS_LOGF_DEBUG( + AWS_LS_COMMON_GENERAL, + "id=%p: platform specific environment loading is not implemented for this platform.", + (void *)env); + + return AWS_OP_SUCCESS; +} + +void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env) { + (void)env; +} diff --git a/source/system_info.c b/source/system_info.c new file mode 100644 index 000000000..4b721f63a --- /dev/null +++ b/source/system_info.c @@ -0,0 +1,80 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
 */ +#include + +#include + +void s_destroy_env(void *arg) { + struct aws_system_environment *env = arg; + + if (env) { + aws_system_environment_destroy_platform_impl(env); + aws_mem_release(env->allocator, env); + } +} + +struct aws_system_environment *aws_system_environment_load(struct aws_allocator *allocator) { + struct aws_system_environment *env = aws_mem_calloc(allocator, 1, sizeof(struct aws_system_environment)); + env->allocator = allocator; + aws_ref_count_init(&env->ref_count, env, s_destroy_env); + + if (aws_system_environment_load_platform_impl(env)) { + AWS_LOGF_ERROR( + AWS_LS_COMMON_GENERAL, + "id=%p: failed to load system environment with error %s.", + (void *)env, + aws_error_debug_str(aws_last_error())); + goto error; + } + + AWS_LOGF_TRACE( + AWS_LS_COMMON_GENERAL, + "id=%p: virtualization vendor detected as \"" PRInSTR "\"", + (void *)env, + AWS_BYTE_CURSOR_PRI(aws_system_environment_get_virtualization_vendor(env))); + AWS_LOGF_TRACE( + AWS_LS_COMMON_GENERAL, + "id=%p: virtualization product name detected as \"" PRInSTR "\"", + (void *)env, + AWS_BYTE_CURSOR_PRI(aws_system_environment_get_virtualization_product_name(env))); + + env->os = aws_get_platform_build_os(); + env->cpu_count = aws_system_info_processor_count(); + env->cpu_group_count = aws_get_cpu_group_count(); + + return env; +error: + s_destroy_env(env); + return NULL; +} + +struct aws_system_environment *aws_system_environment_acquire(struct aws_system_environment *env) { + aws_ref_count_acquire(&env->ref_count); + return env; +} + +void aws_system_environment_release(struct aws_system_environment *env) { + aws_ref_count_release(&env->ref_count); +} + +struct aws_byte_cursor aws_system_environment_get_virtualization_vendor(const struct aws_system_environment *env) { + struct aws_byte_cursor vendor_string = aws_byte_cursor_from_buf(&env->virtualization_vendor); + return aws_byte_cursor_trim_pred(&vendor_string, aws_char_is_space); +} + +struct aws_byte_cursor 
aws_system_environment_get_virtualization_product_name( + const struct aws_system_environment *env) { + struct aws_byte_cursor product_name_str = aws_byte_cursor_from_buf(&env->product_name); + return aws_byte_cursor_trim_pred(&product_name_str, aws_char_is_space); +} + +size_t aws_system_environment_get_processor_count(struct aws_system_environment *env) { + return env->cpu_count; +} + +AWS_COMMON_API +size_t aws_system_environment_get_cpu_group_count(const struct aws_system_environment *env) { + return env->cpu_group_count; +} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f8d9a8d44..4681e04c3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -304,6 +304,7 @@ add_test_case(test_cpu_count_at_least_works_superficially) add_test_case(test_stack_trace_decoding) add_test_case(test_platform_build_os) add_test_case(test_sanity_check_numa_discovery) +add_test_case(test_sanity_check_environment_loader) add_test_case(test_realloc_fallback) add_test_case(test_realloc_passthrough) @@ -317,6 +318,8 @@ add_test_case(sba_threaded_allocs_and_frees) add_test_case(sba_threaded_reallocs) add_test_case(sba_churn) add_test_case(sba_metrics) +add_test_case(default_threaded_reallocs) +add_test_case(default_threaded_allocs_and_frees) add_test_case(test_memtrace_none) add_test_case(test_memtrace_count) @@ -478,6 +481,7 @@ add_test_case(directory_move_src_non_existent_test) add_test_case(test_home_directory_not_null) add_test_case(test_normalize_posix_directory_separator) add_test_case(test_normalize_windows_directory_separator) +add_test_case(test_byte_buf_init_from_file) add_test_case(promise_test_wait_forever) add_test_case(promise_test_wait_for_a_bit) diff --git a/tests/alloc_test.c b/tests/alloc_test.c index 0891e89ec..66ff95999 100644 --- a/tests/alloc_test.c +++ b/tests/alloc_test.c @@ -126,39 +126,42 @@ static int s_sba_random_reallocs(struct aws_allocator *allocator, void *ctx) { } AWS_TEST_CASE(sba_random_reallocs, s_sba_random_reallocs) -struct 
sba_thread_test_data { - struct aws_allocator *sba; +struct allocator_thread_test_data { + struct aws_allocator *test_allocator; uint32_t thread_idx; }; -static void s_sba_threaded_alloc_worker(void *user_data) { - struct aws_allocator *sba = ((struct sba_thread_test_data *)user_data)->sba; +static void s_threaded_alloc_worker(void *user_data) { + struct aws_allocator *test_allocator = ((struct allocator_thread_test_data *)user_data)->test_allocator; void *allocs[NUM_TEST_ALLOCS]; for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { size_t size = aws_max_size(rand() % 512, 1); - void *alloc = aws_mem_acquire(sba, size); + void *alloc = aws_mem_acquire(test_allocator, size); AWS_FATAL_ASSERT(alloc); allocs[count] = alloc; } for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { void *alloc = allocs[count]; - aws_mem_release(sba, alloc); + aws_mem_release(test_allocator, alloc); } } -static void s_sba_thread_test(struct aws_allocator *allocator, void (*thread_fn)(void *), struct aws_allocator *sba) { +static void s_thread_test( + struct aws_allocator *allocator, + void (*thread_fn)(void *), + struct aws_allocator *test_allocator) { const struct aws_thread_options *thread_options = aws_default_thread_options(); struct aws_thread threads[NUM_TEST_THREADS]; - struct sba_thread_test_data thread_data[NUM_TEST_THREADS]; + struct allocator_thread_test_data thread_data[NUM_TEST_THREADS]; AWS_ZERO_ARRAY(threads); AWS_ZERO_ARRAY(thread_data); for (size_t thread_idx = 0; thread_idx < AWS_ARRAY_SIZE(threads); ++thread_idx) { struct aws_thread *thread = &threads[thread_idx]; aws_thread_init(thread, allocator); - struct sba_thread_test_data *data = &thread_data[thread_idx]; - data->sba = sba; + struct allocator_thread_test_data *data = &thread_data[thread_idx]; + data->test_allocator = test_allocator; data->thread_idx = (uint32_t)thread_idx; aws_thread_launch(thread, thread_fn, data, thread_options); } @@ -175,7 +178,7 @@ static int 
s_sba_threaded_allocs_and_frees(struct aws_allocator *allocator, void struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true); - s_sba_thread_test(allocator, s_sba_threaded_alloc_worker, sba); + s_thread_test(allocator, s_threaded_alloc_worker, sba); aws_small_block_allocator_destroy(sba); @@ -183,9 +186,9 @@ static int s_sba_threaded_allocs_and_frees(struct aws_allocator *allocator, void } AWS_TEST_CASE(sba_threaded_allocs_and_frees, s_sba_threaded_allocs_and_frees) -static void s_sba_threaded_realloc_worker(void *user_data) { - struct sba_thread_test_data *thread_data = user_data; - struct aws_allocator *sba = thread_data->sba; +static void s_threaded_realloc_worker(void *user_data) { + struct allocator_thread_test_data *thread_data = user_data; + struct aws_allocator *test_allocator = thread_data->test_allocator; void *alloc = NULL; size_t size = 0; for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { @@ -194,7 +197,7 @@ static void s_sba_threaded_realloc_worker(void *user_data) { if (old_size) { AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1)); } - AWS_FATAL_ASSERT(0 == aws_mem_realloc(sba, &alloc, old_size, size)); + AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, old_size, size)); /* If there was a value, make sure it's still there */ if (old_size && size) { AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1)); @@ -203,7 +206,7 @@ static void s_sba_threaded_realloc_worker(void *user_data) { memset(alloc, (int)thread_data->thread_idx, size); } } - AWS_FATAL_ASSERT(0 == aws_mem_realloc(sba, &alloc, size, 0)); + AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, size, 0)); } static int s_sba_threaded_reallocs(struct aws_allocator *allocator, void *ctx) { @@ -212,7 +215,7 @@ static int s_sba_threaded_reallocs(struct aws_allocator *allocator, void *ctx) { struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true); - s_sba_thread_test(allocator, 
s_sba_threaded_realloc_worker, sba); + s_thread_test(allocator, s_threaded_realloc_worker, sba); aws_small_block_allocator_destroy(sba); @@ -313,10 +316,33 @@ static int s_sba_metrics_test(struct aws_allocator *allocator, void *ctx) { ASSERT_INT_EQUALS(0, aws_small_block_allocator_bytes_active(sba)); - /* after freeing everything, we should have reliniquished all but one page in each bin */ + /* after freeing everything, we should have relinquished all but one page in each bin */ ASSERT_INT_EQUALS(5 * aws_small_block_allocator_page_size(sba), aws_small_block_allocator_bytes_reserved(sba)); aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_metrics, s_sba_metrics_test) + +/* + * Default allocator tests. + */ +static int s_default_threaded_reallocs(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + srand(15); + + s_thread_test(allocator, s_threaded_realloc_worker, allocator); + + return 0; +} +AWS_TEST_CASE(default_threaded_reallocs, s_default_threaded_reallocs) + +static int s_default_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + srand(99); + + s_thread_test(allocator, s_threaded_alloc_worker, allocator); + + return 0; +} +AWS_TEST_CASE(default_threaded_allocs_and_frees, s_default_threaded_allocs_and_frees) diff --git a/tests/file_test.c b/tests/file_test.c index 6eedd264e..6a3f4fe8b 100644 --- a/tests/file_test.c +++ b/tests/file_test.c @@ -2,6 +2,7 @@ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ +#include #include #include @@ -439,3 +440,105 @@ static int s_test_normalize_windows_directory_separator(struct aws_allocator *al } AWS_TEST_CASE(test_normalize_windows_directory_separator, s_test_normalize_windows_directory_separator); + +static int s_check_byte_buf_from_file(const struct aws_byte_buf *buf, struct aws_byte_cursor expected_contents) { + ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&expected_contents, buf), "Contents should match"); + ASSERT_TRUE(buf->capacity > buf->len, "Buffer should end with null-terminator"); + ASSERT_UINT_EQUALS(0, buf->buffer[buf->len], "Buffer should end with null-terminator"); + return AWS_OP_SUCCESS; +} + +static int s_create_file_then_read_it(struct aws_allocator *allocator, struct aws_byte_cursor contents) { + /* create file */ + const char *filename = "testy"; + FILE *f = aws_fopen(filename, "wb"); + ASSERT_UINT_EQUALS(contents.len, fwrite(contents.ptr, 1, contents.len, f)); + ASSERT_INT_EQUALS(0, fclose(f)); + + struct aws_byte_buf buf; + + /* check aws_byte_buf_init_from_file() */ + ASSERT_SUCCESS(aws_byte_buf_init_from_file(&buf, allocator, filename)); + ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); + aws_byte_buf_clean_up(&buf); + + /* now check aws_byte_buf_init_from_file_with_size_hint() ... 
 */ + + /* size_hint more than big enough */ + size_t size_hint = contents.len * 2; + ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); + ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); + aws_byte_buf_clean_up(&buf); + + /* size_hint not big enough for null-terminator */ + size_hint = contents.len; + ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); + ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); + aws_byte_buf_clean_up(&buf); + + /* size_hint 0 */ + size_hint = 0; + ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); + ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); + aws_byte_buf_clean_up(&buf); + + /* size_hint 1 */ + size_hint = 1; + ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); + ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); + aws_byte_buf_clean_up(&buf); + + remove(filename); + return AWS_OP_SUCCESS; +} + +/* Read an actual "special file" (if it exists on this machine) */ +static int s_read_special_file(struct aws_allocator *allocator, const char *filename) { + struct aws_string *filename_str = aws_string_new_from_c_str(allocator, filename); + bool exists = aws_path_exists(filename_str); + aws_string_destroy(filename_str); + if (!exists) { + return AWS_OP_SUCCESS; + } + + struct aws_byte_buf buf; + ASSERT_SUCCESS(aws_byte_buf_init_from_file(&buf, allocator, filename)); + ASSERT_TRUE(buf.capacity > buf.len, "Buffer should end with null-terminator"); + ASSERT_UINT_EQUALS(0, buf.buffer[buf.len], "Buffer should end with null-terminator"); + + if (strcmp("/dev/null", filename) == 0) { + ASSERT_UINT_EQUALS(0, buf.len, "expected /dev/null to be empty"); + } else { + ASSERT_TRUE(buf.len > 0, "expected special file to have data"); + } + + aws_byte_buf_clean_up(&buf); + return AWS_OP_SUCCESS; +} + +static int 
s_test_byte_buf_init_from_file(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + /* simple text file */ + ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_c_str("asdf"))); + + /* empty file */ + ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_c_str(""))); + + /* large 3MB+1byte binary file */ + struct aws_byte_buf big_rando; + aws_byte_buf_init(&big_rando, allocator, (1024 * 1024 * 3) + 1); + ASSERT_SUCCESS(aws_device_random_buffer(&big_rando)); + ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_buf(&big_rando))); + aws_byte_buf_clean_up(&big_rando); + + /* test some "special files" (if they exist) */ + ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/cpuinfo")); + ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/net/tcp")); + ASSERT_SUCCESS(s_read_special_file(allocator, "/sys/devices/virtual/dmi/id/sys_vendor")); + ASSERT_SUCCESS(s_read_special_file(allocator, "/dev/null")); + + return AWS_OP_SUCCESS; +} + +AWS_TEST_CASE(test_byte_buf_init_from_file, s_test_byte_buf_init_from_file) diff --git a/tests/system_info_tests.c b/tests/system_info_tests.c index 8bd01ca4f..7a4324797 100644 --- a/tests/system_info_tests.c +++ b/tests/system_info_tests.c @@ -166,3 +166,22 @@ static int s_test_sanity_check_numa_discovery(struct aws_allocator *allocator, v } AWS_TEST_CASE(test_sanity_check_numa_discovery, s_test_sanity_check_numa_discovery) + +static int s_test_sanity_check_environment_loader(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + aws_common_library_init(allocator); + struct aws_system_environment *env = aws_system_environment_load(allocator); + ASSERT_NOT_NULL(env); + struct aws_byte_cursor virt_vendor = aws_system_environment_get_virtualization_vendor(env); + ASSERT_TRUE(aws_byte_cursor_is_valid(&virt_vendor)); + struct aws_byte_cursor virt_product = aws_system_environment_get_virtualization_product_name(env); + 
ASSERT_TRUE(aws_byte_cursor_is_valid(&virt_product)); + + aws_system_environment_release(env); + + aws_common_library_clean_up(); + return 0; +} + +AWS_TEST_CASE(test_sanity_check_environment_loader, s_test_sanity_check_environment_loader)