Merge pull request #634 from hmaheswa/GetObjectAttributesAutomation
adding support for testing s3api GetObjectAttributes
mergify[bot] authored Oct 1, 2024
2 parents 3016c3d + 0a82bf2 commit d20fbac
Showing 5 changed files with 226 additions and 25 deletions.
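
For orientation, the API this commit starts exercising looks roughly like the boto3 sketch below (illustrative only; the endpoint, credentials, bucket, and key are placeholders, not values from this commit):

import boto3

# Client wired to an RGW endpoint; all connection values here are made up.
s3 = boto3.client(
    "s3",
    endpoint_url="http://rgw.example.com:8080",
    aws_access_key_id="ACCESS_KEY",
    aws_secret_access_key="SECRET_KEY",
)
# Ask only for the attributes of interest; the response carries just those.
resp = s3.get_object_attributes(
    Bucket="my-bucket",
    Key="my-object",
    ObjectAttributes=["ETag", "StorageClass", "ObjectSize", "ObjectParts", "Checksum"],
)
print(resp["ObjectSize"])      # size in bytes
print(resp["StorageClass"])    # e.g. "STANDARD"
print(resp.get("Checksum"))    # e.g. {"ChecksumSHA256": "..."} if uploaded with a checksum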
22 changes: 22 additions & 0 deletions test_Mbuckets_with_Nobjects_get_object_attributes.yaml
@@ -0,0 +1,22 @@
# upload type: non multipart
# script: test_Mbuckets_with_Nobjects.py
# polarion: CEPH-83595849
config:
  user_count: 1
  bucket_count: 2
  objects_count: 25
  objects_size_range:
    min: 5
    max: 15
  test_ops:
    create_bucket: true
    create_object: true
    download_object: true
    delete_bucket_object: true
    test_get_object_attributes: true
  sharding:
    enable: false
    max_shards: 0
  compression:
    enable: false
    type: zlib
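
A config like the one above is consumed by the script named in its header comment. A minimal sketch of reading the new flag (the loading code is illustrative, not how the harness actually parses configs):

import yaml

with open("test_Mbuckets_with_Nobjects_get_object_attributes.yaml") as fh:
    test_ops = yaml.safe_load(fh)["config"]["test_ops"]

if test_ops.get("test_get_object_attributes"):
    print("GetObjectAttributes verification enabled for this run")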
24 changes: 24 additions & 0 deletions test_Mbuckets_with_Nobjects_get_object_attributes_checksum_sha256.yaml
@@ -0,0 +1,24 @@
# upload type: non multipart
# script: test_Mbuckets_with_Nobjects.py
# polarion: CEPH-83595849
config:
  user_count: 1
  bucket_count: 2
  objects_count: 25
  objects_size_range:
    min: 5
    max: 15
  test_ops:
    create_bucket: true
    create_object: true
    download_object: true
    delete_bucket_object: true
    test_get_object_attributes: true
    test_checksum: true
    checksum_algorithm: SHA256
  sharding:
    enable: false
    max_shards: 0
  compression:
    enable: false
    type: zlib
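
For this variant, the expected ChecksumSHA256 of a whole (non-multipart) object can be derived locally: S3-style additional checksums are the base64-encoded raw digest of the object bytes. A sketch (the file name is a placeholder):

import base64
import hashlib

def sha256_checksum(path):
    # Stream the file so large objects need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return base64.b64encode(digest.digest()).decode()

print(sha256_checksum("object.bin"))  # compare against resp["Checksum"]["ChecksumSHA256"]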
23 changes: 23 additions & 0 deletions test_Mbuckets_with_Nobjects_get_object_attributes_multipart.yaml
@@ -0,0 +1,23 @@
# upload type: multipart
# script: test_Mbuckets_with_Nobjects.py
# polarion: CEPH-83595849
config:
  user_count: 1
  bucket_count: 2
  objects_count: 20
  objects_size_range:
    min: 30M
    max: 50M
  test_ops:
    create_bucket: true
    create_object: true
    upload_type: multipart
    download_object: true
    delete_bucket_object: true
    test_get_object_attributes: true
  sharding:
    enable: false
    max_shards: 0
  compression:
    enable: false
    type: zlib
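
With multipart uploads, GetObjectAttributes can also report per-part metadata, which is what this config exercises. The ObjectParts attribute has roughly the following shape (values invented; real part sizes depend on how the harness splits each 30M-50M object):

object_parts = {
    "TotalPartsCount": 4,
    "Parts": [
        {"PartNumber": 1, "Size": 13107200},
        {"PartNumber": 2, "Size": 13107200},
        {"PartNumber": 3, "Size": 13107200},
        {"PartNumber": 4, "Size": 5242880},
    ],
}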
107 changes: 105 additions & 2 deletions rgw/v2/tests/s3_swift/reusable.py
@@ -242,6 +242,11 @@ def upload_object(
"SSEKMSKeyId": config.test_ops.get("encrypt_decrypt_key", "testKey01"),
}
args.append(extra_args)
if config.test_ops.get("test_checksum") is True:
checksum_algorithm = config.test_ops.get("checksum_algorithm")
log.info(f"ChecksumAlgorithm used is {checksum_algorithm}")
extra_args = {"ChecksumAlgorithm": checksum_algorithm}
args.append(extra_args)
object_uploaded_status = s3lib.resource_op(
{
"obj": s3_obj,
@@ -554,6 +559,8 @@ def upload_mutipart_object(
    )
    part_number = 1
    parts_info = {"Parts": []}
    if config.test_ops.get("test_get_object_attributes"):
        object_parts_info = {"TotalPartsCount": len(parts_list), "Parts": []}
    log.info("no of parts: %s" % len(parts_list))
    abort_part_no = random.randint(1, len(parts_list) - 1)
    """if randomly selected abort-part-no is less than 1 then we will increment it by 1 to make sure at least one part is uploaded
@@ -588,6 +595,10 @@ def upload_mutipart_object(
        if abort_multipart and part_number == abort_part_no:
            log.info(f"aborting multi part {part_number}")
            return
        if config.test_ops.get("test_get_object_attributes"):
            part_info_get_obj_attr = part_info.copy()
            part_info_get_obj_attr["Size"] = os.stat(each_part).st_size
            object_parts_info["Parts"].append(part_info_get_obj_attr)

        if config.local_file_delete is True:
            log.info("deleting local file part")
@@ -597,6 +608,8 @@ def upload_mutipart_object(
log.info("all parts upload completed")
mpu.complete(MultipartUpload=parts_info)
log.info("multipart upload complete for key: %s" % s3_object_name)
if config.test_ops.get("test_get_object_attributes"):
return object_parts_info


def upload_part(
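
Taken together, these changes make upload_mutipart_object hand back bookkeeping for the later GetObjectAttributes comparison. A plausible shape of the returned dict (values invented; ETag comes from each completed part upload and Size from os.stat on the local part file):

object_parts_info = {
    "TotalPartsCount": 3,
    "Parts": [
        {"PartNumber": 1, "ETag": '"9b2cf535f27731c974343645a3985328"', "Size": 15728640},
        {"PartNumber": 2, "ETag": '"4f2a1d7cd0a8b31a1b6ca8f5a73f0e21"', "Size": 15728640},
        {"PartNumber": 3, "ETag": '"1c8e0ab5d94e2f27a1a9c3f4e8b7d6c5"', "Size": 8388608},
    ],
}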
@@ -2110,7 +2123,7 @@ def verify_object_sync_on_other_site(rgw_ssh_con, bucket, config, bucket_object=
    bkt_objects = bucket_stats["usage"]["rgw.main"]["num_objects"]
    if bkt_objects != config.objects_count:
        raise TestExecError(
-            f"Did not find {config.objects_count} in bucket {bkt.name}, but found {bkt_objects}"
+            f"Did not find {config.objects_count} in bucket {bucket.name}, but found {bkt_objects}"
        )
    else:
        if (
@@ -2553,7 +2566,7 @@ def test_bucket_stats_colocated_archive_zone(bucket_name_to_create, each_user, c
    arc_bucket_versioning = arc_bkt_stat_output["versioning"]
    if arc_bucket_versioning == "off":
        raise TestExecError(
-            f" bucket versioning is not enabled for archive zone when colocated with active zone for {bucket_name}"
+            f" bucket versioning is not enabled for archive zone when colocated with active zone for {bucket_name_to_create}"
        )
    else:
        log.info(
@@ -2639,3 +2652,93 @@ def get_placement_and_storageclass_from_cluster():
    storage_classes = out["placement_pools"][0]["val"]["storage_classes"]
    storage_class_list = list(storage_classes.keys())
    return placement_id, storage_class_list


def get_object_attributes(
    rgw_s3_client,
    bucket_name,
    s3_object_name,
    object_attributes=None,
    object_parts_info=None,
):
    log.info("Verifying GetObjectAttributes")
    if object_attributes is None:
        object_attributes = [
            "ETag",
            "StorageClass",
            "ObjectSize",
            "ObjectParts",
            "Checksum",
        ]
    get_obj_attr_resp = rgw_s3_client.get_object_attributes(
        Bucket=bucket_name, Key=s3_object_name, ObjectAttributes=object_attributes
    )
    log.info(f"get_object_attributes resp: {get_obj_attr_resp}")

    if "Checksum" in object_attributes:
        log.info("Verifying Checksum")
        out = utils.exec_shell_cmd(
            f"radosgw-admin object stat --bucket {bucket_name} --object {s3_object_name}"
        )
        obj_stat = json.loads(out)
        checksum_expected = {}
        for key, val in obj_stat["attrs"].items():
            if key.startswith("user.rgw.x-amz-checksum-"):
                checksum_key = f"Checksum{key.split('-')[-1].upper()}"
                checksum_expected[checksum_key] = val
        log.info(f"checksum expected: {checksum_expected}")
        if checksum_expected != get_obj_attr_resp["Checksum"]:
            raise TestExecError("incorrect Checksum in GetObjectAttributes")
        else:
            log.info("Checksum verified successfully")
        object_attributes.remove("Checksum")

    if "ObjectParts" in object_attributes:
        if object_parts_info is not None:
            log.info("Verifying ObjectParts")
            log.info(f"expected ObjectParts: {object_parts_info}")
            object_parts_info_actual = get_obj_attr_resp["ObjectParts"]
            if (
                object_parts_info["TotalPartsCount"]
                != object_parts_info_actual["TotalPartsCount"]
            ):
                raise TestExecError(
                    "incorrect data for TotalPartsCount in ObjectParts"
                )
            parts_actual = object_parts_info_actual["Parts"]
            parts_expected = object_parts_info["Parts"]
            for index in range(0, len(parts_actual)):
                if (
                    parts_expected[index]["PartNumber"]
                    != parts_actual[index]["PartNumber"]
                ):
                    raise TestExecError(f"incorrect data for PartNumber in part{index}")
                if parts_expected[index]["Size"] != parts_actual[index]["Size"]:
                    raise TestExecError(f"incorrect data for Size in part{index}")
            log.info("ObjectParts verified successfully")
        object_attributes.remove("ObjectParts")

    out = utils.exec_shell_cmd(f"radosgw-admin bucket list --bucket {bucket_name}")
    bkt_list = json.loads(out)
    object_dict = {}
    for dict in bkt_list:
        if dict["name"] == s3_object_name:
            object_dict = dict
            break
    for attr in object_attributes:
        if attr == "StorageClass":
            expected = object_dict["meta"]["storage_class"]
            if expected == "":
                expected = "STANDARD"
        if attr == "ObjectSize":
            expected = object_dict["meta"]["size"]
        if attr == "ETag":
            expected = object_dict["meta"]["etag"]
        actual = get_obj_attr_resp[attr]
        if expected != actual:
            raise TestExecError(
                f"incorrect data for {attr} in GetObjectAttributes. expected {expected}, but returned {actual}"
            )
        else:
            log.info(f"{attr} verified successfully")
    log.info("GetObjectAttributes verified successfully")
75 changes: 52 additions & 23 deletions rgw/v2/tests/s3_swift/test_Mbuckets_with_Nobjects.py
@@ -19,6 +19,9 @@
    test_Mbuckets_with_Nobjects_etag.yaml
    test_changing_data_log_num_shards_cause_no_crash.yaml
    test_bi_put_with_incomplete_multipart_upload.yaml
    test_Mbuckets_with_Nobjects_get_object_attributes.yaml
    test_Mbuckets_with_Nobjects_get_object_attributes_checksum_sha256.yaml
    test_Mbuckets_with_Nobjects_get_object_attributes_multipart.yaml
Operation:
    Creates M bucket and N objects
@@ -326,30 +329,48 @@ def test_exec(config, ssh_con):
log.info("upload type: multipart")
abort_multipart = config.abort_multipart
log.info(f"value of abort_multipart {abort_multipart}")
reusable.upload_mutipart_object(
s3_object_name,
bucket,
TEST_DATA_PATH,
config,
each_user,
abort_multipart=abort_multipart,
)
if abort_multipart:
log.info(f"verifying abort multipart")
bkt_stat_output = json.loads(
utils.exec_shell_cmd(
f"radosgw-admin bucket stats --bucket {bucket_name_to_create}"
)
if config.test_ops.get("test_get_object_attributes"):
object_parts_info = reusable.upload_mutipart_object(
s3_object_name,
bucket,
TEST_DATA_PATH,
config,
each_user,
abort_multipart=abort_multipart,
)
if (
bkt_stat_output["usage"]["rgw.multimeta"][
"num_objects"
]
> 0
):
log.info(f"In complete multipart found")
else:
raise AssertionError("Abort multipart failed")
log.info(f"sleeping for 3 seconds")
time.sleep(3)
reusable.get_object_attributes(
rgw_s3_client=rgw_conn2,
bucket_name=bucket_name_to_create,
s3_object_name=s3_object_name,
object_parts_info=object_parts_info,
)
else:
reusable.upload_mutipart_object(
s3_object_name,
bucket,
TEST_DATA_PATH,
config,
each_user,
abort_multipart=abort_multipart,
)
if abort_multipart:
log.info(f"verifying abort multipart")
bkt_stat_output = json.loads(
utils.exec_shell_cmd(
f"radosgw-admin bucket stats --bucket {bucket_name_to_create}"
)
)
if (
bkt_stat_output["usage"]["rgw.multimeta"][
"num_objects"
]
> 0
):
log.info(f"In complete multipart found")
else:
raise AssertionError("Abort multipart failed")

else:
if config.test_ops.get("enable_version", False):
@@ -371,6 +392,14 @@ def test_exec(config, ssh_con):
                                    config,
                                    each_user,
                                )
                            if config.test_ops.get("test_get_object_attributes"):
                                log.info(f"sleeping for 3 seconds")
                                time.sleep(3)
                                reusable.get_object_attributes(
                                    rgw_s3_client=rgw_conn2,
                                    bucket_name=bucket_name_to_create,
                                    s3_object_name=s3_object_name,
                                )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
