Skip to content

Commit

Permalink
Create session error code (#392)
Browse files Browse the repository at this point in the history
  • Loading branch information
TingDaoK authored Dec 7, 2023
1 parent 998e1cb commit fcd7a10
Show file tree
Hide file tree
Showing 14 changed files with 97 additions and 33 deletions.
14 changes: 7 additions & 7 deletions docs/memory_aware_request_execution.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@ scales resource usage, such as number of parallel requests in flight, to achieve
target throughput. The client creates buffers to hold data it is sending or
receiving for each request and scaling requests in flight has direct impact on
memory used. In practice, setting high target throughput or larger part size can
lead to high observed memory usage.
lead to high observed memory usage.

To mitigate high memory usages, memory reuse improvements were recently added to
the client along with options to limit max memory used. The following sections
will go into more detail on aspects of those changes and how they affect the
client.
client.

### Memory Reuse
At the basic level, CRT S3 client starts with a meta request for operation like
Expand All @@ -18,7 +18,7 @@ requests and release it right after the request was done. That approach,
resulted in a lot of very short lived allocations and allocator thrashing,
overall leading to memory use spikes considerably higher than what's needed. To
address that, the client is switching to a pooled buffer approach, discussed
below.
below.

Note: approach described below is work in progress and concentrates on improving
the common cases (default 8mb part sizes and part sizes smaller than 64mb).
Expand All @@ -29,24 +29,24 @@ Several observations about the client usage of buffers:
- Get operations always use either the configured part size or default of 8mb.
Part size for get is not adjusted, since there is no 10,000 part limitation.
- Both Put and Get operations go through fill and drain phases. Ex. for Put, the
client first schedules a number of reads to 'fil' the buffers from the source
client first schedules a number of reads to 'fill' the buffers from the source
and as those reads complete, the buffers are sent over to the networking layer
and are 'drained'
- Individual UploadPart or ranged GET operations typically have a similar
lifespan (with some caveats). In practice, part buffers are acquired/released
in bulk at the same time

The buffer pooling takes advantage of some of those allocation patterns and
works as follows.
works as follows.
The memory is split into primary and secondary areas. Secondary area is used for
requests with part size bigger than a predefined value (currently 4 times part size)
allocations from it go directly to the allocator and are effectively the old way of
doing things.
doing things.

Primary memory area is split into blocks of fixed size (16 times the part size
if a part size is defined, or 16 times 8mb otherwise). Blocks are allocated on demand. Each block is logically
subdivided into part sized chunks. Pool allocates and releases in chunk sizes
only, and supports acquiring several chunks (up to 4) at once.
only, and supports acquiring several chunks (up to 4) at once.

Blocks are kept around while there are ongoing requests and are released async,
when there is low pressure on memory.
Expand Down
8 changes: 0 additions & 8 deletions include/aws/s3/private/s3_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,14 +35,6 @@ struct aws_http_headers;
struct aws_http_message;
struct aws_s3_client;

enum aws_s3_response_status {
AWS_S3_RESPONSE_STATUS_SUCCESS = 200,
AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS = 204,
AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS = 206,
AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR = 500,
AWS_S3_RESPONSE_STATUS_SLOW_DOWN = 503,
};

struct aws_cached_signing_config_aws {
struct aws_allocator *allocator;
struct aws_string *service;
Expand Down
1 change: 1 addition & 0 deletions include/aws/s3/s3.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ enum aws_s3_errors {
AWS_ERROR_S3_FILE_MODIFIED,
AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT,
AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG,
AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED,
AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID)
};

Expand Down
1 change: 1 addition & 0 deletions source/s3.c
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ static struct aws_error_info s_errors[] = {
AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, "Request was not created due to used memory exceeding memory limit."),
AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, "Specified memory configuration is invalid for the system. "
"Memory limit should be at least 1GiB. Part size and max part size should be smaller than memory limit."),
AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, "CreateSession call failed when signing with S3 Express."),
};
/* clang-format on */

Expand Down
4 changes: 2 additions & 2 deletions source/s3_auto_ranged_get.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,10 @@ static int s_s3_auto_ranged_get_success_status(struct aws_s3_meta_request *meta_
AWS_PRECONDITION(auto_ranged_get);

if (auto_ranged_get->initial_message_has_range_header) {
return AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS;
return AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT;
}

return AWS_S3_RESPONSE_STATUS_SUCCESS;
return AWS_HTTP_STATUS_CODE_200_OK;
}

/* Allocate a new auto-ranged-get meta request. */
Expand Down
4 changes: 2 additions & 2 deletions source/s3_auto_ranged_put.c
Original file line number Diff line number Diff line change
Expand Up @@ -689,7 +689,7 @@ static bool s_s3_auto_ranged_put_update(
}

if (!work_remaining) {
aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK);
}

aws_s3_meta_request_unlock_synced_data(meta_request);
Expand Down Expand Up @@ -1412,7 +1412,7 @@ static void s_s3_auto_ranged_put_request_finished(
"(request finished prior to being paused?)",
(void *)meta_request);

aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK);
} else {
aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
}
Expand Down
2 changes: 1 addition & 1 deletion source/s3_copy_object.c
Original file line number Diff line number Diff line change
Expand Up @@ -324,7 +324,7 @@ static bool s_s3_copy_object_update(
}

if (!work_remaining) {
aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK);
}

aws_s3_meta_request_unlock_synced_data(meta_request);
Expand Down
11 changes: 6 additions & 5 deletions source/s3_meta_request.c
Original file line number Diff line number Diff line change
Expand Up @@ -1088,15 +1088,16 @@ static int s_s3_meta_request_error_code_from_response_status(int response_status
int error_code = AWS_ERROR_UNKNOWN;

switch (response_status) {
case AWS_S3_RESPONSE_STATUS_SUCCESS:
case AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS:
case AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS:
case AWS_HTTP_STATUS_CODE_200_OK:
case AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT:
case AWS_HTTP_STATUS_CODE_204_NO_CONTENT:
error_code = AWS_ERROR_SUCCESS;
break;
case AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR:
case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR:
error_code = AWS_ERROR_S3_INTERNAL_ERROR;
break;
case AWS_S3_RESPONSE_STATUS_SLOW_DOWN:
case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE:
/* S3 response 503 for throttling, slow down the sending */
error_code = AWS_ERROR_S3_SLOW_DOWN;
break;
default:
Expand Down
23 changes: 17 additions & 6 deletions source/s3express_credentials_provider.c
Original file line number Diff line number Diff line change
Expand Up @@ -314,24 +314,35 @@ static void s_on_request_finished(
struct aws_credentials *credentials = NULL;
int error_code = meta_request_result->error_code;

if (error_code == AWS_ERROR_SUCCESS && meta_request_result->response_status != 200) {
error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE;
}

AWS_LOGF_DEBUG(
AWS_LS_AUTH_CREDENTIALS_PROVIDER,
"CreateSession call completed with http status %d and error code %s",
"(id=%p): CreateSession call completed with http status: %d and error code %s",
(void *)session_creator->provider,
meta_request_result->response_status,
aws_error_debug_str(error_code));

if (error_code && meta_request_result->error_response_body && meta_request_result->error_response_body->len > 0) {
/* The Create Session failed with an error response from S3, provide a specific error code for user. */
error_code = AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED;
AWS_LOGF_ERROR(
AWS_LS_AUTH_CREDENTIALS_PROVIDER,
"(id=%p): CreateSession call failed with http status: %d, and error response body is: %.*s",
(void *)session_creator->provider,
meta_request_result->response_status,
(int)meta_request_result->error_response_body->len,
meta_request_result->error_response_body->buffer);
}

if (error_code == AWS_ERROR_SUCCESS) {
credentials = s_parse_s3express_xml(
session_creator->allocator, aws_byte_cursor_from_buf(&session_creator->response_buf), session_creator);

if (!credentials) {
error_code = AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE;
AWS_LOGF_ERROR(
AWS_LS_AUTH_CREDENTIALS_PROVIDER, "failed to read credentials from document, treating as an error.");
AWS_LS_AUTH_CREDENTIALS_PROVIDER,
"(id=%p): failed to read credentials from document, treating as an error.",
(void *)session_creator->provider);
}
}
{ /* BEGIN CRITICAL SECTION */
Expand Down
1 change: 1 addition & 0 deletions tests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -302,6 +302,7 @@ add_net_test_case(s3express_client_put_object_multipart_multiple)
add_net_test_case(s3express_client_put_object_long_running_session_refresh)
add_net_test_case(s3express_client_get_object)
add_net_test_case(s3express_client_get_object_multiple)
add_net_test_case(s3express_client_get_object_create_session_error)

add_net_test_case(meta_request_auto_ranged_get_new_error_handling)
add_net_test_case(meta_request_auto_ranged_put_new_error_handling)
Expand Down
2 changes: 2 additions & 0 deletions tests/s3_mock_server_s3express_provider_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -298,10 +298,12 @@ TEST_CASE(s3express_provider_get_credentials_cancel_mock_server) {

ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials(
provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester));
/* Release the provider right after we fetch the credentials, which will cancel the create session call. */
aws_s3express_credentials_provider_release(provider);
s_aws_wait_for_provider_shutdown_callback();

s_aws_wait_for_credentials_result(1);
/* The error code will be AWS_ERROR_S3_CANCELED. */
ASSERT_UINT_EQUALS(AWS_ERROR_S3_CANCELED, s_s3express_tester.error_code);
ASSERT_SUCCESS(s_s3express_tester_cleanup());
aws_s3_tester_clean_up(&tester);
Expand Down
2 changes: 1 addition & 1 deletion tests/s3_mock_server_tests.c
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,7 @@ TEST_CASE(async_access_denied_from_complete_multipart_mock_server) {
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results));

ASSERT_UINT_EQUALS(AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, out_results.finished_error_code);
ASSERT_UINT_EQUALS(AWS_S3_RESPONSE_STATUS_SUCCESS, out_results.finished_response_status);
ASSERT_UINT_EQUALS(AWS_HTTP_STATUS_CODE_200_OK, out_results.finished_response_status);
ASSERT_TRUE(out_results.error_response_body.len != 0);
ASSERT_STR_EQUALS("CompleteMultipartUpload", aws_string_c_str(out_results.error_response_operation_name));

Expand Down
2 changes: 1 addition & 1 deletion tests/s3_retry_tests.c
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,7 @@ static void s_s3_meta_request_send_request_finish_fail_first(
if (aws_s3_tester_inc_counter2(tester) == 1) {
AWS_ASSERT(connection->request->send_data.response_status == 404);

connection->request->send_data.response_status = AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR;
connection->request->send_data.response_status = AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR;
}

struct aws_s3_meta_request_vtable *original_meta_request_vtable =
Expand Down
55 changes: 55 additions & 0 deletions tests/s3_s3express_client_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -590,3 +590,58 @@ TEST_CASE(s3express_client_get_object_multiple) {
aws_s3_tester_clean_up(&tester);
return AWS_OP_SUCCESS;
}

/* Verify that a GetObject meta request against a non-existent S3 Express bucket
 * surfaces AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED to the caller, i.e. the
 * failed CreateSession call maps to the dedicated error code rather than a
 * generic error. */
TEST_CASE(s3express_client_get_object_create_session_error) {
    (void)ctx;

    struct aws_s3_tester tester;
    ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));

    struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1");

    struct aws_s3_client_config client_config = {
        .part_size = MB_TO_BYTES(5),
        .enable_s3express = true,
        .region = region_cursor,
    };

    ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING));

    struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);

    /* Bucket does not exist, so the S3 Express CreateSession call made during
     * signing is expected to fail. */
    struct aws_byte_cursor my_dummy_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
        "non-exist-bucket-test--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com");

    struct aws_http_message *message =
        aws_s3_test_get_object_request_new(allocator, my_dummy_endpoint, g_pre_existing_object_10MB);

    struct aws_s3_meta_request_options options;
    AWS_ZERO_STRUCT(options);
    options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT;
    options.message = message;
    struct aws_signing_config_aws s3express_signing_config = {
        .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS,
        .service = g_s3express_service_name,
    };
    options.signing_config = &s3express_signing_config;
    struct aws_s3_meta_request_test_results meta_request_test_results;
    aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator);

    ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results));

    struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options);
    ASSERT_TRUE(meta_request != NULL);
    /* Wait for the request to finish. */
    aws_s3_tester_wait_for_meta_request_finish(&tester);
    /* ASSERT_UINT_EQUALS takes (expected, got); keep expected first so a
     * failure message reads correctly, consistent with the other tests. */
    ASSERT_UINT_EQUALS(AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, meta_request_test_results.finished_error_code);

    meta_request = aws_s3_meta_request_release(meta_request);
    aws_s3_tester_wait_for_meta_request_shutdown(&tester);

    aws_s3_meta_request_test_results_clean_up(&meta_request_test_results);

    aws_http_message_release(message);
    aws_s3_client_release(client);
    aws_s3_tester_clean_up(&tester);
    return AWS_OP_SUCCESS;
}

0 comments on commit fcd7a10

Please sign in to comment.