From 8280b29d77bb2fe0de2bc860f66ea699d10a938a Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Thu, 18 Dec 2025 20:37:16 +0100 Subject: [PATCH 1/7] HDDS-14209. Reduce parameter count in ObjectEndpoint --- .../ozone/s3/endpoint/ObjectEndpoint.java | 45 ++-- .../s3/endpoint/TestAbortMultipartUpload.java | 8 +- .../ozone/s3/endpoint/TestListParts.java | 31 +-- .../endpoint/TestMultipartUploadComplete.java | 7 +- .../endpoint/TestMultipartUploadWithCopy.java | 13 +- .../ozone/s3/endpoint/TestObjectDelete.java | 2 +- .../ozone/s3/endpoint/TestObjectGet.java | 22 +- .../ozone/s3/endpoint/TestObjectPut.java | 68 +++--- .../s3/endpoint/TestObjectTaggingDelete.java | 13 +- .../s3/endpoint/TestObjectTaggingGet.java | 11 +- .../s3/endpoint/TestObjectTaggingPut.java | 22 +- .../ozone/s3/endpoint/TestPartUpload.java | 21 +- .../s3/endpoint/TestPartUploadWithStream.java | 13 +- .../s3/endpoint/TestPermissionCheck.java | 15 +- .../s3/endpoint/TestUploadWithStream.java | 4 +- .../s3/metrics/TestS3GatewayMetrics.java | 200 ++++++++---------- 16 files changed, 254 insertions(+), 241 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index c6a2b6539098..7316aa2a1b99 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -168,7 +168,7 @@ public class ObjectEndpoint extends EndpointBase { /*FOR the feature Overriding Response Header https://docs.aws.amazon.com/de_de/AmazonS3/latest/API/API_GetObject.html */ - private Map overrideQueryParameter; + private final Map overrideQueryParameter; private int bufferSize; private int chunkSize; private boolean datastreamEnabled; @@ -209,17 +209,18 @@ public void init() { * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for * more details. */ - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) + @SuppressWarnings("checkstyle:MethodLength") @PUT public Response put( @PathParam(BUCKET) String bucketName, @PathParam(PATH) String keyPath, @HeaderParam(HttpHeaders.CONTENT_LENGTH) long length, - @QueryParam(QueryParams.PART_NUMBER) int partNumber, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, - @QueryParam(QueryParams.TAGGING) String taggingMarker, - @QueryParam(QueryParams.ACL) String aclMarker, - final InputStream body) throws IOException, OS3Exception { + @QueryParam(QueryParams.PART_NUMBER) int partNumber, + final InputStream body + ) throws IOException, OS3Exception { + final String aclMarker = getQueryParam(QueryParams.ACL); + final String taggingMarker = getQueryParam(QueryParams.TAGGING); + final String uploadID = getQueryParam(QueryParams.UPLOAD_ID); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; @@ -403,17 +404,17 @@ public Response put( * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html * for more details. 
*/ - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) + @SuppressWarnings("checkstyle:MethodLength") @GET public Response get( @PathParam(BUCKET) String bucketName, @PathParam(PATH) String keyPath, @QueryParam(QueryParams.PART_NUMBER) int partNumber, - @QueryParam(QueryParams.UPLOAD_ID) String uploadId, - @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts, - @QueryParam(QueryParams.PART_NUMBER_MARKER) String partNumberMarker, - @QueryParam(QueryParams.TAGGING) String taggingMarker) - throws IOException, OS3Exception { + @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts + ) throws IOException, OS3Exception { + final String uploadId = getQueryParam(QueryParams.UPLOAD_ID); + final String partNumberMarker = getQueryParam(QueryParams.PART_NUMBER_MARKER); + final String taggingMarker = getQueryParam(QueryParams.TAGGING); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; PerformanceStringBuilder perf = new PerformanceStringBuilder(); @@ -720,10 +721,11 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, @SuppressWarnings("emptyblock") public Response delete( @PathParam(BUCKET) String bucketName, - @PathParam(PATH) String keyPath, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadId, - @QueryParam(QueryParams.TAGGING) String taggingMarker) throws - IOException, OS3Exception { + @PathParam(PATH) String keyPath + ) throws IOException, OS3Exception { + final String taggingMarker = getQueryParam(QueryParams.TAGGING); + final String uploadId = getQueryParam(QueryParams.UPLOAD_ID); + long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; @@ -798,8 +800,7 @@ public Response delete( public Response initializeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key - ) - throws IOException, OS3Exception { + ) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD; @@ -863,9 +864,9 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, public Response completeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, - CompleteMultipartUploadRequest multipartUploadRequest) - throws IOException, OS3Exception { + CompleteMultipartUploadRequest multipartUploadRequest + ) throws IOException, OS3Exception { + final String uploadID = getQueryParam(QueryParams.UPLOAD_ID); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD; OzoneVolume volume = getVolume(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 9c46a718508f..f775ac69fa49 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.Test; /** @@ -63,15 +64,16 @@ public void testAbortMultipartUpload() throws Exception { 
assertNotNull(multipartUploadInitiateResponse.getUploadID()); String uploadID = multipartUploadInitiateResponse.getUploadID(); - // Abort multipart upload - response = rest.delete(bucket, key, uploadID, null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); + response = rest.delete(bucket, key); assertEquals(204, response.getStatus()); // test with unknown upload Id. try { - rest.delete(bucket, key, "random", null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); + rest.delete(bucket, key); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 30be715b5305..013b33e5a855 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -44,7 +45,6 @@ public class TestListParts { private ObjectEndpoint rest; - private String uploadID; @BeforeEach public void setUp() throws Exception { @@ -67,8 +67,9 @@ public void setUp() throws Exception { OzoneConsts.KEY); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - uploadID = multipartUploadInitiateResponse.getUploadID(); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + assertNotNull(uploadID); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); assertEquals(200, response.getStatus()); @@ -76,25 +77,25 @@ public void setUp() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 2, uploadID, null, null, body); + content.length(), 2, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 3, uploadID, null, null, body); + content.length(), 3, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test public void testListParts() throws Exception { - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 3, "0", null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 3); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -106,8 +107,8 @@ public void testListParts() throws Exception { @Test public void testListPartsContinuation() throws Exception { - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0", null); + 
rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -115,8 +116,9 @@ public void testListPartsContinuation() throws Exception { assertEquals(2, listPartsResponse.getPartList().size()); // Continue - response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2, - Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, + Integer.toString(listPartsResponse.getNextPartNumberMarker())); + response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); listPartsResponse = (ListPartsResponse) response.getEntity(); assertFalse(listPartsResponse.getTruncated()); @@ -126,9 +128,10 @@ public void testListPartsContinuation() throws Exception { @Test public void testListPartsWithUnknownUploadID() throws Exception { + rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "no-such-upload"); try { - rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0", null); + rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index fde336f48079..b2f4dea063ce 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -106,8 +107,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = rest.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, null, null, body); + partNumber, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -120,8 +122,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = rest.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - uploadID, completeMultipartUploadRequest); + completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 702c32d1abab..0da2e241a5ec 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -330,8 +331,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String setHeaders(); ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, null, null, body); + partNumber, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -375,8 +377,9 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, setHeaders(additionalHeaders); ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, - uploadID, null, null, body); + body); assertEquals(200, response.getStatus()); CopyPartResult result = (CopyPartResult) response.getEntity(); @@ -403,7 +406,8 @@ public void testUploadWithRangeCopyContentLength() OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); + endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, body); OzoneMultipartUploadPartListParts parts = client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); @@ -415,8 +419,9 @@ private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { setHeaders(); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - uploadID, completeMultipartUploadRequest); + completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 3974cfcf9666..3b382c9bc4f1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -47,7 +47,7 @@ public void delete() throws IOException, OS3Exception { .build(); //WHEN - rest.delete("b1", "key1", null, null); + rest.delete("b1", "key1"); //THEN assertFalse(bucket.listKeys("").hasNext(), diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index a9fd7da4200e..f56e3b6abc2c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -90,17 +90,17 @@ public void init() throws OS3Exception, IOException { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); // Create a key with object tags when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + 1, body); } @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); //THEN OzoneInputStream ozoneInputStream = @@ -122,7 +122,7 @@ public void get() throws IOException, OS3Exception { @Test public void getKeyWithTag() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); //THEN OzoneInputStream ozoneInputStream = @@ -144,7 +144,7 @@ public void getKeyWithTag() throws IOException, OS3Exception { public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -174,7 +174,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { CONTENT_DISPOSITION2); queryParameter.putSingle("response-content-encoding", CONTENT_ENCODING2); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -195,13 +195,13 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( @@ -214,7 +214,7 @@ public void getRangeHeader() throws IOException, OS3Exception { @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -222,7 +222,7 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully 
fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); assertNull(response.getHeaderString(TAG_COUNT_HEADER)); @@ -256,7 +256,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null)); + () -> rest.get(BUCKET_NAME, keyPath, 0, 0)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index e5c34fb4e465..476b91020f2c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -158,7 +158,7 @@ void testPutObject(int length, ReplicationConfig replication) throws IOException bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, null, null, body); + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, body); //THEN assertEquals(200, response.getStatus()); @@ -185,7 +185,7 @@ void testPutObjectContentLength() throws IOException, OS3Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); long dataSize = CONTENT.length(); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, null, null, body); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, body); assertEquals(dataSize, getKeyDataSize()); } @@ -202,8 +202,8 @@ void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, null, - null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, + new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertEquals(15, getKeyDataSize()); } @@ -218,7 +218,7 @@ public void testPutObjectWithTags() throws IOException, OS3Exception { objectEndpoint.setHeaders(headersWithTags); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); assertEquals(200, response.getStatus()); @@ -242,7 +242,7 @@ public void testPutObjectWithOnlyTagKey() throws Exception { try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with invalid query param should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -261,7 +261,7 @@ public void testPutObjectWithDuplicateTagKey() throws Exception { objectEndpoint.setHeaders(headersWithDuplicateTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with duplicate tag key should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -281,7 +281,7 @@ public void testPutObjectWithLongTagKey() throws Exception { objectEndpoint.setHeaders(headersWithLongTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, 
null, null, null, body); + 1, body); fail("request with tag key exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -301,7 +301,7 @@ public void testPutObjectWithLongTagValue() throws Exception { when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with tag value exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -327,7 +327,7 @@ public void testPutObjectWithTooManyTags() throws Exception { objectEndpoint.setHeaders(headersWithTooManyTags); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with number of tags exceeding limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -356,7 +356,7 @@ void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //WHEN Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - chunkedContent.length(), 1, null, null, null, + chunkedContent.length(), 1, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN @@ -386,7 +386,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, null, null, body); + .length(), 1, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -411,7 +411,7 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, null, null, body); + CONTENT.length(), 1, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -437,7 +437,7 @@ void testCopyObject() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(KEY_NAME)); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); + body); // Check destination key and response ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) @@ -467,7 +467,7 @@ void testCopyObject() throws IOException, OS3Exception { metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); + body); ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) .readKey(DEST_KEY); @@ -494,7 +494,7 @@ void testCopyObject() throws IOException, OS3Exception { // wrong copy metadata directive when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, null, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getHttpCode()).isEqualTo(400); assertThat(e.getCode()).isEqualTo("InvalidArgument"); @@ -504,7 +504,7 @@ void testCopyObject() throws IOException, OS3Exception { // source and dest same e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - 
BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body), + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); @@ -512,28 +512,28 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", KEY_NAME, CONTENT.length(), 1, null, null, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); } @@ -545,7 +545,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, null, null, body); + CONTENT.length(), 1, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -573,7 +573,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException try { objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); + body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -596,7 +596,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { String sourceKeyName = "sourceKey"; Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName, - CONTENT.length(), 1, null, null, null, body); + CONTENT.length(), 1, body); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName); @@ -614,7 +614,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(sourceKeyName)); objectEndpoint.setHeaders(headersForCopy); - Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); + Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body); OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() 
.getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); @@ -633,7 +633,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // With x-amz-tagging-directive = COPY with a different x-amz-tagging when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -648,7 +648,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // Copy object with x-amz-tagging-directive = REPLACE when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -670,7 +670,7 @@ public void testCopyObjectWithInvalidTagCopyDirective() throws Exception { HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID"); try { - objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, null, null, body); + objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, body); } catch (OS3Exception ex) { assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode()); assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid"); @@ -685,7 +685,7 @@ void testInvalidStorageType() { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, body)); assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); @@ -698,7 +698,7 @@ void testEmptyStorageType() throws IOException, OS3Exception { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, null, null, body); + .length(), 1, body); OzoneKeyDetails key = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) .getKey(KEY_NAME); @@ -717,7 +717,7 @@ void testDirectoryCreation() throws IOException, // WHEN try (Response response = objectEndpoint.put(fsoBucket.getName(), path, - 0L, 0, "", null, null, null)) { + 0L, 0, null)) { assertEquals(HttpStatus.SC_OK, response.getStatus()); } @@ -732,12 +732,12 @@ void testDirectoryCreationOverFile() throws IOException, OS3Exception { final String path = "key"; final ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", null, null, body); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, body); // WHEN final OS3Exception exception = assertThrows(OS3Exception.class, () -> objectEndpoint - .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null, null, null) + .put(FSO_BUCKET_NAME, path + "/", 0, 0, null) .close()); // THEN @@ -753,7 +753,7 @@ public void testPutEmptyObject() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(emptyString.getBytes(UTF_8)); 
objectEndpoint.setHeaders(headersWithTags); - Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, null, null, body); + Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, body); assertEquals(200, putResponse.getStatus()); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(0, keyDetails.getDataSize()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java index 488474e30390..8b292ed1db7b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -82,12 +83,13 @@ public void init() throws OS3Exception, IOException { Mockito.when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) .thenReturn("mockSignature"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + 1, body); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); } @Test public void testDeleteTagging() throws IOException, OS3Exception { - Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, ""); + Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG); assertEquals(HTTP_NO_CONTENT, response.getStatus()); assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME) @@ -97,7 +99,7 @@ public void testDeleteTagging() throws IOException, OS3Exception { @Test public void testDeleteTaggingNoKeyFound() throws Exception { try { - rest.delete(BUCKET_NAME, "nonexistent", null, ""); + rest.delete(BUCKET_NAME, "nonexistent"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -108,7 +110,7 @@ public void testDeleteTaggingNoKeyFound() throws Exception { @Test public void testDeleteTaggingNoBucketFound() throws Exception { try { - rest.delete("nonexistent", "nonexistent", null, ""); + rest.delete("nonexistent", "nonexistent"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -135,7 +137,8 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception { ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); try { - endpoint.delete("fsoBucket", "dir/", null, ""); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + endpoint.delete("fsoBucket", "dir/"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java index 1885e7d0cf6f..f7b039e66de9 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -72,13 +73,15 @@ public void init() throws OS3Exception, IOException { // Create a key with object tags Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + 1, body); + + rest.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); } @Test public void testGetTagging() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, ""); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); assertEquals(HTTP_OK, response.getStatus()); S3Tagging s3Tagging = (S3Tagging) response.getEntity(); @@ -99,7 +102,7 @@ public void testGetTagging() throws IOException, OS3Exception { @Test public void testGetTaggingNoKeyFound() throws Exception { try { - rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, ""); + rest.get(BUCKET_NAME, "nonexistent", 0, 0); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -110,7 +113,7 @@ public void testGetTaggingNoKeyFound() throws Exception { @Test public void testGetTaggingNoBucketFound() throws Exception { try { - rest.get("nonexistent", "nonexistent", 0, null, 0, null, ""); + rest.get("nonexistent", "nonexistent", 0, 0); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java index d1651d6b59c0..9c42dd510d6d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -90,13 +90,14 @@ void setup() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, null, body); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, body); } @Test public void testPutObjectTaggingWithEmptyBody() throws Exception { try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null); fail(); } catch (OS3Exception ex) { @@ -107,8 +108,8 @@ public void testPutObjectTaggingWithEmptyBody() throws Exception { @Test public void testPutValidObjectTagging() throws Exception { - assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, - "", null, twoTags()).getStatus()); + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, twoTags()).getStatus()); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(2, 
keyDetails.getTags().size()); @@ -129,7 +130,8 @@ public void testPutInvalidObjectTagging() throws Exception { private void testInvalidObjectTagging(Supplier inputStream, int expectedHttpCode, String expectedErrorCode) throws Exception { try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, inputStream.get()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { @@ -141,8 +143,9 @@ private void testInvalidObjectTagging(Supplier inputStream, @Test public void testPutObjectTaggingNoKeyFound() throws Exception { try { + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1, - null, "", null, twoTags()); + twoTags()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -153,8 +156,9 @@ public void testPutObjectTaggingNoKeyFound() throws Exception { @Test public void testPutObjectTaggingNoBucketFound() throws Exception { try { + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); objectEndpoint.put("nonexistent", "nonexistent", 0, 1, - null, "", null, twoTags()); + twoTags()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -185,9 +189,9 @@ public void testPutObjectTaggingNotImplemented() throws Exception { doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); try { - endpoint.put("fsoBucket", "dir/", 0, 1, null, "", - null, twoTags()); + endpoint.put("fsoBucket", "dir/", 0, 1, twoTags()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 4981069528a8..57fa0264b509 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; @@ -96,8 +97,9 @@ public void testPartUpload() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -118,8 +120,9 @@ public void testPartUploadWithOverride() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + 
rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -128,7 +131,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -140,8 +143,9 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, - "random", null, null, body); + body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -176,8 +180,9 @@ public void testPartUploadStreamContentLength() String uploadID = multipartUploadInitiateResponse.getUploadID(); long contentLength = chunkedContent.length(); + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, - uploadID, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertContentLength(uploadID, keyName, 15); } @@ -199,8 +204,9 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); rest.put(OzoneConsts.S3_BUCKET, keyName, - contentLength, 1, uploadID, null, null, body); + contentLength, 1, body); assertContentLength(uploadID, keyName, content.length()); } @@ -244,8 +250,9 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); try { + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 736660073d57..1656fa35d438 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; @@ -93,8 +94,9 @@ public void testPartUpload() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -114,8 +116,9 @@ public void testPartUploadWithOverride() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -123,8 +126,9 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -136,8 +140,9 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); rest.put(S3BUCKET, S3KEY, content.length(), 1, - "random", null, null, body); + body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 9872a711c639..8bf809c0a57e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -261,8 +261,9 @@ public void testGetKey() throws IOException { .setConfig(conf) .build(); + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "marker"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, null, 1000, "marker", null)); + "bucketName", "keyPath", 0, 1000)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -279,7 +280,7 @@ public void testPutKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "bucketName", "keyPath", 1024, 0, null, null, null, + "bucketName", "keyPath", 1024, 0, new ByteArrayInputStream(new byte[]{}))); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -297,7 +298,7 @@ public void testDeleteKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", null, null)); + objectEndpoint.delete("bucketName", "keyPath")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -341,18 +342,18 @@ public void testObjectTagging() throws Exception { InputStream tagInput = new 
ByteArrayInputStream(xml.getBytes(UTF_8)); + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put("bucketName", "keyPath", 0, 1, - null, "", null, tagInput)); + tagInput)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", "", "")); + objectEndpoint.delete("bucketName", "keyPath")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); e = assertThrows(OS3Exception.class, () -> - objectEndpoint.get("bucketName", "keyPath", 0, null, - 0, null, "")); + objectEndpoint.get("bucketName", "keyPath", 0, 0)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index dbe21601dbd3..4586d477f734 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -92,7 +92,7 @@ public void testUpload() throws Exception { byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8); ByteArrayInputStream body = new ByteArrayInputStream(keyContent); - Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, body); + Response response = rest.put(S3BUCKET, S3KEY, 0, 0, body); assertEquals(200, response.getStatus()); } @@ -126,7 +126,7 @@ public void testUploadWithCopy() throws Exception { .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); rest.setHeaders(headers); - Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, null); + Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 018ad0f1f5e2..39baae58584b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -20,6 +20,8 @@ import static java.net.HttpURLConnection.HTTP_CONFLICT; import static java.net.HttpURLConnection.HTTP_OK; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.BUCKET; +import static org.apache.hadoop.ozone.OzoneConsts.KEY; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; @@ -36,10 +38,10 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; @@ -62,8 +64,6 @@ */ public class TestS3GatewayMetrics { - private String bucketName = OzoneConsts.BUCKET; - private String keyName = OzoneConsts.KEY; private 
   private OzoneClient clientStub;
   private BucketEndpoint bucketEndpoint;
   private RootEndpoint rootEndpoint;
@@ -72,13 +72,15 @@ public class TestS3GatewayMetrics {
   private HttpHeaders headers;
   private static final String ACL_MARKER = "acl";
   private static final String CONTENT = "0123456789";
+  private static final int LENGTH = CONTENT.length();
+  private static final byte[] BYTES = CONTENT.getBytes(StandardCharsets.UTF_8);
   private S3GatewayMetrics metrics;
 
   @BeforeEach
   public void setup() throws Exception {
     clientStub = new OzoneClientStub();
-    clientStub.getObjectStore().createS3Bucket(bucketName);
-    bucket = clientStub.getObjectStore().getS3Bucket(bucketName);
+    clientStub.getObjectStore().createS3Bucket(BUCKET);
+    bucket = clientStub.getObjectStore().getS3Bucket(BUCKET);
     bucket.createKey("file1", 0).close();
 
     headers = mock(HttpHeaders.class);
@@ -113,7 +115,7 @@ public void testHeadBucketSuccess() throws Exception {
 
     long oriMetric = metrics.getHeadBucketSuccess();
 
-    bucketEndpoint.head(bucketName);
+    bucketEndpoint.head(BUCKET);
 
     long curMetric = metrics.getHeadBucketSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -134,7 +136,7 @@ public void testListBucketSuccess() throws Exception {
   public void testGetBucketSuccess() throws Exception {
     long oriMetric = metrics.getGetBucketSuccess();
 
-    bucketEndpoint.get(bucketName, 1000, 0).getEntity();
+    bucketEndpoint.get(BUCKET, 1000, 0).getEntity();
 
     long curMetric = metrics.getGetBucketSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -169,7 +171,7 @@ public void testCreateBucketFailure() throws Exception {
 
     // Creating an error by trying to create a bucket that already exists
     OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.put(
-        bucketName, null));
+        BUCKET, null));
     assertEquals(HTTP_CONFLICT, e.getHttpCode());
     assertEquals(BUCKET_ALREADY_EXISTS.getCode(), e.getCode());
 
@@ -181,7 +183,7 @@ public void testCreateBucketFailure() throws Exception {
   public void testDeleteBucketSuccess() throws Exception {
     long oriMetric = metrics.getDeleteBucketSuccess();
 
-    bucketEndpoint.delete(bucketName);
+    bucketEndpoint.delete(BUCKET);
 
     long curMetric = metrics.getDeleteBucketSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -190,11 +192,11 @@ public void testDeleteBucketSuccess() throws Exception {
   @Test
   public void testDeleteBucketFailure() throws Exception {
     long oriMetric = metrics.getDeleteBucketFailure();
-    bucketEndpoint.delete(bucketName);
+    bucketEndpoint.delete(BUCKET);
 
     // Deleting a bucket that does not exist will result in delete failure
     OS3Exception e = assertThrows(OS3Exception.class, () ->
-        bucketEndpoint.delete(bucketName));
+        bucketEndpoint.delete(BUCKET));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(),
         e.getErrorMessage());
@@ -209,7 +211,7 @@ public void testGetAclSuccess() throws Exception {
     bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER);
     Response response =
-        bucketEndpoint.get(bucketName, 0, 0);
+        bucketEndpoint.get(BUCKET, 0, 0);
     long curMetric = metrics.getGetAclSuccess();
     assertEquals(HTTP_OK, response.getStatus());
     assertEquals(1L, curMetric - oriMetric);
@@ -268,11 +270,11 @@ public void testPutAclFailure() throws Exception {
 
   @Test
   public void testHeadKeySuccess() throws Exception {
-    bucket.createKey(keyName, 0).close();
+    bucket.createKey(KEY, 0).close();
 
     long oriMetric = metrics.getHeadKeySuccess();
 
-    keyEndpoint.head(bucketName, keyName);
+    keyEndpoint.head(BUCKET, KEY);
 
     long curMetric = metrics.getHeadKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -282,7 +284,7 @@ public void testHeadKeySuccess() throws Exception {
   public void testHeadKeyFailure() throws Exception {
     long oriMetric = metrics.getHeadKeyFailure();
 
-    keyEndpoint.head(bucketName, "unknownKey");
+    keyEndpoint.head(BUCKET, "unknownKey");
 
     long curMetric = metrics.getHeadKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -292,13 +294,7 @@ public void testHeadKeyFailure() throws Exception {
   public void testCreateKeySuccess() throws Exception {
     long oriMetric = metrics.getCreateKeySuccess();
 
-    // Create an input stream
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    // Create the file
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);
     long curMetric = metrics.getCreateKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -308,9 +304,8 @@ public void testCreateKeyFailure() throws Exception {
     long oriMetric = metrics.getCreateKeyFailure();
 
     // Create the file in a bucket that does not exist
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put(
-        "unknownBucket", keyName, CONTENT.length(), 1, null, null,
-        null, null));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> putObject("unknownBucket", KEY));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     long curMetric = metrics.getCreateKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -320,8 +315,8 @@ public void testDeleteKeySuccess() throws Exception {
 
     long oriMetric = metrics.getDeleteKeySuccess();
 
-    bucket.createKey(keyName, 0).close();
-    keyEndpoint.delete(bucketName, keyName, null, null);
+    bucket.createKey(KEY, 0).close();
+    keyEndpoint.delete(BUCKET, KEY);
     long curMetric = metrics.getDeleteKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -329,8 +324,8 @@ public void testDeleteKeySuccess() throws Exception {
   @Test
   public void testDeleteKeyFailure() throws Exception {
     long oriMetric = metrics.getDeleteKeyFailure();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete(
-        "unknownBucket", keyName, null, null));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.delete("unknownBucket", KEY));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     long curMetric = metrics.getDeleteKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -339,15 +334,10 @@ public void testDeleteKeyFailure() throws Exception {
   @Test
   public void testGetKeySuccess() throws Exception {
     long oriMetric = metrics.getGetKeySuccess();
+    putObject(BUCKET, KEY);
 
-    // Create an input stream
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    // Create the file
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
     // GET the key from the bucket
-    Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null);
+    Response response = keyEndpoint.get(BUCKET, KEY, 0, 0);
     StreamingOutput stream = (StreamingOutput) response.getEntity();
     stream.write(new ByteArrayOutputStream());
     long curMetric = metrics.getGetKeySuccess();
@@ -359,8 +349,8 @@ public void testGetKeyFailure() throws Exception {
     long oriMetric = metrics.getGetKeyFailure();
 
     // Fetching a non-existent key
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get(
-        bucketName, "unknownKey", 0, null, 0, null, null));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.get(BUCKET, "unknownKey", 0, 0));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode());
     long curMetric = metrics.getGetKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -368,9 +358,8 @@ public void testGetKeyFailure() throws Exception {
 
   @Test
   public void testInitMultiPartUploadSuccess() throws Exception {
-
     long oriMetric = metrics.getInitMultiPartUploadSuccess();
-    keyEndpoint.initializeMultipartUpload(bucketName, keyName);
+    keyEndpoint.initializeMultipartUpload(BUCKET, KEY);
     long curMetric = metrics.getInitMultiPartUploadSuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -378,8 +367,8 @@ public void testInitMultiPartUploadSuccess() throws Exception {
   @Test
   public void testInitMultiPartUploadFailure() throws Exception {
     long oriMetric = metrics.getInitMultiPartUploadFailure();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint
-        .initializeMultipartUpload("unknownBucket", keyName));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.initializeMultipartUpload("unknownBucket", KEY));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     long curMetric = metrics.getInitMultiPartUploadFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -389,12 +378,13 @@ public void testInitMultiPartUploadFailure() throws Exception {
   public void testAbortMultiPartUploadSuccess() throws Exception {
 
     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);
 
     long oriMetric = metrics.getAbortMultiPartUploadSuccess();
 
     // Abort the Upload Successfully by deleting the key using the Upload-Id
-    keyEndpoint.delete(bucketName, keyName, uploadID, null);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    keyEndpoint.delete(BUCKET, KEY);
 
     long curMetric = metrics.getAbortMultiPartUploadSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -405,8 +395,9 @@ public void testAbortMultiPartUploadFailure() throws Exception {
     long oriMetric = metrics.getAbortMultiPartUploadFailure();
 
     // Fail the Abort Method by providing wrong uploadID
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete(
-        bucketName, keyName, "wrongId", null));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "wrongId");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.delete(BUCKET, KEY));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getAbortMultiPartUploadFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -416,14 +407,13 @@ public void testAbortMultiPartUploadFailure() throws Exception {
   public void testCompleteMultiPartUploadSuccess() throws Exception {
 
     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);
 
     long oriMetric = metrics.getCompleteMultiPartUploadSuccess();
 
     // complete multipart upload
-    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
-        CompleteMultipartUploadRequest();
-    Response response = keyEndpoint.completeMultipartUpload(bucketName, keyName,
-        uploadID, completeMultipartUploadRequest);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    Response response = keyEndpoint.completeMultipartUpload(BUCKET, KEY,
+        new CompleteMultipartUploadRequest());
     long curMetric = metrics.getCompleteMultiPartUploadSuccess();
     assertEquals(200, response.getStatus());
     assertEquals(1L, curMetric - oriMetric);
@@ -432,11 +422,9 @@ public void testCompleteMultiPartUploadSuccess() throws Exception {
   @Test
   public void testCompleteMultiPartUploadFailure() throws Exception {
     long oriMetric = metrics.getCompleteMultiPartUploadFailure();
-    CompleteMultipartUploadRequest completeMultipartUploadRequestNew = new
-        CompleteMultipartUploadRequest();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint
-        .completeMultipartUpload(bucketName, "key2", "random",
-            completeMultipartUploadRequestNew));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.completeMultipartUpload(BUCKET, "key2", new CompleteMultipartUploadRequest()));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getCompleteMultiPartUploadFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -446,13 +434,11 @@ public void testCompleteMultiPartUploadFailure() throws Exception {
   public void testCreateMultipartKeySuccess() throws Exception {
 
     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);
 
     long oriMetric = metrics.getCreateMultipartKeySuccess();
 
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    keyEndpoint.put(bucketName, keyName, CONTENT.length(),
-        1, uploadID, null, null, body);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    putObject(BUCKET, KEY);
     long curMetric = metrics.getCreateMultipartKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -460,8 +446,9 @@ public void testCreateMultipartKeySuccess() throws Exception {
   @Test
   public void testCreateMultipartKeyFailure() throws Exception {
     long oriMetric = metrics.getCreateMultipartKeyFailure();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put(
-        bucketName, keyName, CONTENT.length(), 1, "randomId", null, null, null));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "randomId");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.put(BUCKET, KEY, LENGTH, 1, null));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getCreateMultipartKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -472,11 +459,11 @@ public void testListPartsSuccess() throws Exception {
     long oriMetric = metrics.getListPartsSuccess();
 
     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);
 
     // Listing out the parts by providing the uploadID
-    keyEndpoint.get(bucketName, keyName, 0,
-        uploadID, 3, null, null);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    keyEndpoint.get(BUCKET, KEY, 0, 3);
     long curMetric = metrics.getListPartsSuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -486,8 +473,9 @@ public void testListPartsFailure() throws Exception {
     long oriMetric = metrics.getListPartsFailure();
 
     // Listing out the parts by providing the uploadID after aborting
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get(
-        bucketName, keyName, 0, "wrong_id", 3, null, null));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "wrong_id");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.get(BUCKET, KEY, 0, 3));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getListPartsFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -504,18 +492,13 @@ public void testCopyObject() throws Exception {
     // Test for Success of CopyObjectSuccess Metric
     long oriMetric = metrics.getCopyObjectSuccess();
 
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-
-    keyEndpoint.put(bucketName, keyName,
-        CONTENT.length(), 1, null, null, null, body);
+    putObject(BUCKET, KEY);
 
     // Add copy header, and then call put
     when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
-        bucketName + "/" + urlEncode(keyName));
+        BUCKET + "/" + urlEncode(KEY));
+    putObject(destBucket, destKey);
 
-    keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1,
-        null, null, null, body);
     long curMetric = metrics.getCopyObjectSuccess();
     assertEquals(1L, curMetric - oriMetric);
 
@@ -523,9 +506,7 @@ public void testCopyObject() throws Exception {
     oriMetric = metrics.getCopyObjectFailure();
     // source and dest same
     when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("");
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put(
-        bucketName, keyName, CONTENT.length(), 1, null, null, null, body),
-        "Test for CopyObjectMetric failed");
+    OS3Exception e = assertThrows(OS3Exception.class, () -> putObject(BUCKET, KEY));
     assertThat(e.getErrorMessage()).contains("This copy request is illegal");
     curMetric = metrics.getCopyObjectFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -535,15 +516,11 @@ public void testCopyObject() throws Exception {
   public void testPutObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getPutObjectTaggingSuccess();
 
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    // Create the file
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);
 
     // Put object tagging
-    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody());
 
     long curMetric = metrics.getPutObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -554,10 +531,9 @@ public void testPutObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getPutObjectTaggingFailure();
 
     // Put object tagging for nonexistent key
-    OS3Exception ex = assertThrows(OS3Exception.class, () ->
-        keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "",
-            null, getPutTaggingBody())
-    );
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    OS3Exception ex = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.put(BUCKET, "nonexistent", 0, 1, getPutTaggingBody()));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());
 
     long curMetric = metrics.getPutObjectTaggingFailure();
@@ -569,18 +545,14 @@ public void testGetObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getGetObjectTaggingSuccess();
 
     // Create the file
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);
 
     // Put object tagging
-    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody());
 
     // Get object tagging
-    keyEndpoint.get(bucketName, keyName, 0,
-        null, 0, null, "");
+    keyEndpoint.get(BUCKET, KEY, 0, 0);
 
     long curMetric = metrics.getGetObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -591,9 +563,9 @@ public void testGetObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getGetObjectTaggingFailure();
 
     // Get object tagging for nonexistent key
-    OS3Exception ex = assertThrows(OS3Exception.class, () ->
-        keyEndpoint.get(bucketName, "nonexistent", 0, null,
-            0, null, ""));
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    OS3Exception ex = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.get(BUCKET, "nonexistent", 0, 0));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());
     long curMetric = metrics.getGetObjectTaggingFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -604,17 +576,14 @@ public void testDeleteObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingSuccess();
 
     // Create the file
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);
 
     // Put object tagging
-    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody());
 
     // Delete object tagging
-    keyEndpoint.delete(bucketName, keyName, null, "");
+    keyEndpoint.delete(BUCKET, KEY);
 
     long curMetric = metrics.getDeleteObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -625,19 +594,26 @@ public void testDeleteObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingFailure();
 
     // Delete object tagging for nonexistent key
-    OS3Exception ex = assertThrows(OS3Exception.class, () ->
-        keyEndpoint.delete(bucketName, "nonexistent", null, ""));
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    OS3Exception ex = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.delete(BUCKET, "nonexistent"));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());
     long curMetric = metrics.getDeleteObjectTaggingFailure();
     assertEquals(1L, curMetric - oriMetric);
   }
 
-  private String initiateMultipartUpload(String bktName, String key)
+  private void putObject(String bucketName, String key) throws IOException, OS3Exception {
+    try (InputStream body = new ByteArrayInputStream(BYTES)) {
+      keyEndpoint.put(bucketName, key, LENGTH, 1, body);
+    }
+  }
+
+  private String initiateMultipartUpload(String bucketName, String key)
       throws IOException, OS3Exception {
     // Initiate the Upload
     Response response =
-        keyEndpoint.initializeMultipartUpload(bktName, key);
+        keyEndpoint.initializeMultipartUpload(bucketName, key);
 
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     if (response.getStatus() == 200) {

From 6269ead19009be4672adee2792faae3708b0c4fd Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Sat, 20 Dec 2025 11:38:46 +0100
Subject: [PATCH 2/7] set default value for upload ID in
 completeMultipartUpload

---
 .../org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java   | 5 +++++
 .../org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 99d7adc3042f..86437f329b98 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -118,6 +118,11 @@ protected String getQueryParam(String key) {
     return getQueryParameters().getFirst(key);
   }
 
+  protected String getQueryParam(String key, String defaultValue) {
+    final String value = getQueryParam(key);
+    return value != null ? value : defaultValue;
+  }
+
   public MultivaluedMap getQueryParameters() {
     return context.getUriInfo().getQueryParameters();
   }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 7316aa2a1b99..6308c98853c7 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -866,7 +866,7 @@ public Response completeMultipartUpload(
       @PathParam(PATH) String key,
       CompleteMultipartUploadRequest multipartUploadRequest
   ) throws IOException, OS3Exception {
-    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID);
+    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID, "");
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
     OzoneVolume volume = getVolume();

From 5fce605fe1f48086367294be40a117ef58e7c6ee Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Tue, 23 Dec 2025 11:52:49 +0100
Subject: [PATCH 3/7] Revert "set default value for upload ID in
 completeMultipartUpload"

This reverts commit 6269ead19009be4672adee2792faae3708b0c4fd.

---
 .../org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java   | 5 -----
 .../org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 2 +-
 2 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 86437f329b98..99d7adc3042f 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -118,11 +118,6 @@ protected String getQueryParam(String key) {
     return getQueryParameters().getFirst(key);
   }
 
-  protected String getQueryParam(String key, String defaultValue) {
-    final String value = getQueryParam(key);
-    return value != null ? value : defaultValue;
-  }
-
   public MultivaluedMap getQueryParameters() {
     return context.getUriInfo().getQueryParameters();
   }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 6308c98853c7..7316aa2a1b99 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -866,7 +866,7 @@ public Response completeMultipartUpload(
       @PathParam(PATH) String key,
       CompleteMultipartUploadRequest multipartUploadRequest
   ) throws IOException, OS3Exception {
-    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID, "");
+    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID);
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
     OzoneVolume volume = getVolume();

From 704f619963bbea9bb43830d0426b7b96dd03772f Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Tue, 23 Dec 2025 11:52:49 +0100
Subject: [PATCH 4/7] Revert "HDDS-14209. Reduce parameter count in
 ObjectEndpoint"

This reverts commit 8280b29d77bb2fe0de2bc860f66ea699d10a938a.

---
 .../ozone/s3/endpoint/ObjectEndpoint.java     |  45 ++--
 .../s3/endpoint/TestAbortMultipartUpload.java |   8 +-
 .../ozone/s3/endpoint/TestListParts.java      |  31 ++-
 .../endpoint/TestMultipartUploadComplete.java |   7 +-
 .../endpoint/TestMultipartUploadWithCopy.java |  13 +-
 .../ozone/s3/endpoint/TestObjectDelete.java   |   2 +-
 .../ozone/s3/endpoint/TestObjectGet.java      |  22 +-
 .../ozone/s3/endpoint/TestObjectPut.java      |  68 +++---
 .../s3/endpoint/TestObjectTaggingDelete.java  |  13 +-
 .../s3/endpoint/TestObjectTaggingGet.java     |  11 +-
 .../s3/endpoint/TestObjectTaggingPut.java     |  22 +-
 .../ozone/s3/endpoint/TestPartUpload.java     |  21 +-
 .../s3/endpoint/TestPartUploadWithStream.java |  13 +-
 .../s3/endpoint/TestPermissionCheck.java      |  15 +-
 .../s3/endpoint/TestUploadWithStream.java     |   4 +-
 .../s3/metrics/TestS3GatewayMetrics.java      | 200 ++++++++++--------
 16 files changed, 241 insertions(+), 254 deletions(-)

diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 7316aa2a1b99..c6a2b6539098 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -168,7 +168,7 @@ public class ObjectEndpoint extends EndpointBase {
   /*FOR the feature Overriding Response Header
   https://docs.aws.amazon.com/de_de/AmazonS3/latest/API/API_GetObject.html */
-  private final Map overrideQueryParameter;
+  private Map overrideQueryParameter;
   private int bufferSize;
   private int chunkSize;
   private boolean datastreamEnabled;
@@ -209,18 +209,17 @@ public void init() {
    * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
    * more details.
    */
-  @SuppressWarnings("checkstyle:MethodLength")
+  @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"})
   @PUT
   public Response put(
       @PathParam(BUCKET) String bucketName,
       @PathParam(PATH) String keyPath,
       @HeaderParam(HttpHeaders.CONTENT_LENGTH) long length,
-      @QueryParam(QueryParams.PART_NUMBER) int partNumber,
-      final InputStream body
-  ) throws IOException, OS3Exception {
-    final String aclMarker = getQueryParam(QueryParams.ACL);
-    final String taggingMarker = getQueryParam(QueryParams.TAGGING);
-    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID);
+      @QueryParam(QueryParams.PART_NUMBER) int partNumber,
+      @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID,
+      @QueryParam(QueryParams.TAGGING) String taggingMarker,
+      @QueryParam(QueryParams.ACL) String aclMarker,
+      final InputStream body) throws IOException, OS3Exception {
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.CREATE_KEY;
     boolean auditSuccess = true;
@@ -404,17 +403,17 @@ public Response put(
    * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
    * for more details.
    */
-  @SuppressWarnings("checkstyle:MethodLength")
+  @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"})
   @GET
   public Response get(
       @PathParam(BUCKET) String bucketName,
       @PathParam(PATH) String keyPath,
       @QueryParam(QueryParams.PART_NUMBER) int partNumber,
-      @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts
-  ) throws IOException, OS3Exception {
-    final String uploadId = getQueryParam(QueryParams.UPLOAD_ID);
-    final String partNumberMarker = getQueryParam(QueryParams.PART_NUMBER_MARKER);
-    final String taggingMarker = getQueryParam(QueryParams.TAGGING);
+      @QueryParam(QueryParams.UPLOAD_ID) String uploadId,
+      @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts,
+      @QueryParam(QueryParams.PART_NUMBER_MARKER) String partNumberMarker,
+      @QueryParam(QueryParams.TAGGING) String taggingMarker)
+      throws IOException, OS3Exception {
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.GET_KEY;
     PerformanceStringBuilder perf = new PerformanceStringBuilder();
@@ -721,11 +720,10 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket,
   @SuppressWarnings("emptyblock")
   public Response delete(
       @PathParam(BUCKET) String bucketName,
-      @PathParam(PATH) String keyPath
-  ) throws IOException, OS3Exception {
-    final String taggingMarker = getQueryParam(QueryParams.TAGGING);
-    final String uploadId = getQueryParam(QueryParams.UPLOAD_ID);
-
+      @PathParam(PATH) String keyPath,
+      @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadId,
+      @QueryParam(QueryParams.TAGGING) String taggingMarker) throws
+      IOException, OS3Exception {
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.DELETE_KEY;
 
@@ -800,7 +798,8 @@ public Response delete(
   public Response initializeMultipartUpload(
       @PathParam(BUCKET) String bucket,
       @PathParam(PATH) String key
-  ) throws IOException, OS3Exception {
+  )
+      throws IOException, OS3Exception {
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD;
 
@@ -864,9 +863,9 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket,
   public Response completeMultipartUpload(
       @PathParam(BUCKET) String bucket,
       @PathParam(PATH) String key,
-      CompleteMultipartUploadRequest multipartUploadRequest
-  ) throws IOException, OS3Exception {
-    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID);
+      @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID,
+      CompleteMultipartUploadRequest multipartUploadRequest)
+      throws IOException, OS3Exception {
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
     OzoneVolume volume = getVolume();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
index f775ac69fa49..9c46a718508f 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.ozone.s3.util.S3Consts;
 import org.junit.jupiter.api.Test;
 
 /**
@@ -64,16 +63,15 @@ public void testAbortMultipartUpload() throws Exception {
     assertNotNull(multipartUploadInitiateResponse.getUploadID());
     String uploadID = multipartUploadInitiateResponse.getUploadID();
 
+
     // Abort multipart upload
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
-    response = rest.delete(bucket, key);
+    response = rest.delete(bucket, key, uploadID, null);
 
     assertEquals(204, response.getStatus());
 
     // test with unknown upload Id.
     try {
-      rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random");
-      rest.delete(bucket, key);
+      rest.delete(bucket, key, "random", null);
     } catch (OS3Exception ex) {
       assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode());
       assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(),
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
index 013b33e5a855..30be715b5305 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
@@ -35,7 +35,6 @@
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.ozone.s3.util.S3Consts;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
@@ -45,6 +44,7 @@
 public class TestListParts {
 
   private ObjectEndpoint rest;
+  private String uploadID;
 
   @BeforeEach
   public void setUp() throws Exception {
@@ -67,9 +67,8 @@ public void setUp() throws Exception {
         OzoneConsts.KEY);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
-    String uploadID = multipartUploadInitiateResponse.getUploadID();
-    assertNotNull(uploadID);
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    assertNotNull(multipartUploadInitiateResponse.getUploadID());
+    uploadID = multipartUploadInitiateResponse.getUploadID();
 
     assertEquals(200, response.getStatus());
 
@@ -77,25 +76,25 @@ public void setUp() throws Exception {
     ByteArrayInputStream body =
         new ByteArrayInputStream(content.getBytes(UTF_8));
     response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
-        content.length(), 1, body);
+        content.length(), 1, uploadID, null, null, body);
     assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
     response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
-        content.length(), 2, body);
+        content.length(), 2, uploadID, null, null, body);
     assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
 
     response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
-        content.length(), 3, body);
+        content.length(), 3, uploadID, null, null, body);
     assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
   }
 
   @Test
   public void testListParts() throws Exception {
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0");
-    Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 3);
+    Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0,
+        uploadID, 3, "0", null);
 
     ListPartsResponse listPartsResponse =
         (ListPartsResponse) response.getEntity();
@@ -107,8 +106,8 @@ public void testListParts() throws Exception {
 
   @Test
   public void testListPartsContinuation() throws Exception {
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0");
-    Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2);
+    Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0,
+        uploadID, 2, "0", null);
     ListPartsResponse listPartsResponse =
         (ListPartsResponse) response.getEntity();
 
@@ -116,9 +115,8 @@ public void testListPartsContinuation() throws Exception {
     assertEquals(2, listPartsResponse.getPartList().size());
 
     // Continue
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER,
-        Integer.toString(listPartsResponse.getNextPartNumberMarker()));
-    response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2);
+    response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2,
+        Integer.toString(listPartsResponse.getNextPartNumberMarker()), null);
     listPartsResponse = (ListPartsResponse) response.getEntity();
 
     assertFalse(listPartsResponse.getTruncated());
@@ -128,10 +126,9 @@ public void testListPartsContinuation() throws Exception {
 
   @Test
   public void testListPartsWithUnknownUploadID() throws Exception {
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0");
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "no-such-upload");
     try {
-      rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2);
+      rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0,
+          uploadID, 2, "0", null);
     } catch (OS3Exception ex) {
       assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(),
           ex.getErrorMessage());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
index b2f4dea063ce..fde336f48079 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
@@ -45,7 +45,6 @@
 import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.ozone.s3.util.S3Consts;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
@@ -107,9 +106,8 @@ private Part uploadPart(String key, String uploadID, int partNumber, String
       content) throws IOException, OS3Exception {
     ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8));
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
     Response response = rest.put(OzoneConsts.S3_BUCKET, key, content.length(),
-        partNumber, body);
+        partNumber, uploadID, null, null, body);
     assertEquals(200, response.getStatus());
     assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
     Part part = new Part();
@@ -122,9 +120,8 @@ private Part uploadPart(String key, String uploadID, int partNumber, String
   private void completeMultipartUpload(String key,
       CompleteMultipartUploadRequest completeMultipartUploadRequest,
       String uploadID) throws IOException, OS3Exception {
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
     Response response = rest.completeMultipartUpload(OzoneConsts.S3_BUCKET, key,
-        completeMultipartUploadRequest);
+        uploadID, completeMultipartUploadRequest);
 
     assertEquals(200, response.getStatus());
 
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
index 0da2e241a5ec..702c32d1abab 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
@@ -53,7 +53,6 @@
 import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
-import org.apache.hadoop.ozone.s3.util.S3Consts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
@@ -331,9 +330,8 @@ private Part uploadPart(String key, String uploadID, int partNumber, String
     setHeaders();
     ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8));
-    endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
     Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, content.length(),
-        partNumber, body);
+        partNumber, uploadID, null, null, body);
     assertEquals(200, response.getStatus());
     assertNotNull(response.getHeaderString(OzoneConsts.ETAG));
     Part part = new Part();
@@ -377,9 +375,8 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber,
     setHeaders(additionalHeaders);
 
     ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8));
-    endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
     Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, 0, partNumber,
-        body);
+        uploadID, null, null, body);
     assertEquals(200, response.getStatus());
 
     CopyPartResult result = (CopyPartResult) response.getEntity();
@@ -406,8 +403,7 @@ public void testUploadWithRangeCopyContentLength()
         OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY);
     additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3");
     setHeaders(additionalHeaders);
-    endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
-    endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, body);
+    endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body);
     OzoneMultipartUploadPartListParts parts =
         client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET)
             .listParts(KEY, uploadID, 0, 100);
@@ -419,9 +415,8 @@ private void completeMultipartUpload(String key,
       CompleteMultipartUploadRequest completeMultipartUploadRequest,
       String uploadID) throws IOException, OS3Exception {
     setHeaders();
-    endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
     Response response = endpoint.completeMultipartUpload(OzoneConsts.S3_BUCKET, key,
-        completeMultipartUploadRequest);
+        uploadID, completeMultipartUploadRequest);
 
     assertEquals(200, response.getStatus());
 
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
index 3b382c9bc4f1..3974cfcf9666 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
@@ -47,7 +47,7 @@ public void delete() throws IOException, OS3Exception {
         .build();
 
     //WHEN
-    rest.delete("b1", "key1");
+    rest.delete("b1", "key1", null, null);
 
     //THEN
     assertFalse(bucket.listKeys("").hasNext(),
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
index f56e3b6abc2c..a9fd7da4200e 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
@@ -90,17 +90,17 @@ public void init() throws OS3Exception, IOException {
     ByteArrayInputStream body =
         new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
     rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(),
-        1, body);
+        1, null, null, null, body);
     // Create a key with object tags
     when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2");
     rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(),
-        1, body);
+        1, null, null, null, body);
   }
 
   @Test
   public void get() throws IOException, OS3Exception {
     //WHEN
-    Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0);
+    Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null);
 
     //THEN
     OzoneInputStream ozoneInputStream =
@@ -122,7 +122,7 @@ public void get() throws IOException, OS3Exception {
   @Test
   public void getKeyWithTag() throws IOException, OS3Exception {
     //WHEN
-    Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0);
+    Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null);
 
     //THEN
     OzoneInputStream ozoneInputStream =
@@ -144,7 +144,7 @@ public void getKeyWithTag() throws IOException, OS3Exception {
   public void inheritRequestHeader() throws IOException, OS3Exception {
     setDefaultHeader();
 
-    Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0);
+    Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null);
 
     assertEquals(CONTENT_TYPE1,
         response.getHeaderString("Content-Type"));
@@ -174,7 +174,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception {
         CONTENT_DISPOSITION2);
     queryParameter.putSingle("response-content-encoding", CONTENT_ENCODING2);
 
-    Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0);
+    Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null);
 
     assertEquals(CONTENT_TYPE2,
         response.getHeaderString("Content-Type"));
@@ -195,13 +195,13 @@ public void getRangeHeader() throws IOException, OS3Exception {
     Response response;
 
     when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0");
-    response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0);
+    response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null);
     assertEquals("1", response.getHeaderString("Content-Length"));
     assertEquals(String.format("bytes 0-0/%s", CONTENT.length()),
         response.getHeaderString("Content-Range"));
 
     when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-");
-    response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0);
+    response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null);
     assertEquals(String.valueOf(CONTENT.length()),
         response.getHeaderString("Content-Length"));
     assertEquals(
@@ -214,7 +214,7 @@ public void getRangeHeader() throws IOException, OS3Exception {
   @Test
   public void getStatusCode() throws IOException, OS3Exception {
     Response response;
-    response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0);
+    response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null);
     assertEquals(response.getStatus(),
         Response.Status.OK.getStatusCode());
 
@@ -222,7 +222,7 @@ public void getStatusCode() throws IOException, OS3Exception {
     // The 206 (Partial Content) status code indicates that the server is
     // successfully fulfilling a range request for the target resource
     when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1");
-    response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0);
+    response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null);
     assertEquals(response.getStatus(),
         Response.Status.PARTIAL_CONTENT.getStatusCode());
     assertNull(response.getHeaderString(TAG_COUNT_HEADER));
@@ -256,7 +256,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash()
 
     // WHEN
     final OS3Exception ex = assertThrows(OS3Exception.class,
-        () -> rest.get(BUCKET_NAME, keyPath, 0, 0));
+        () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null));
 
     // THEN
     assertEquals(NO_SUCH_KEY.getCode(), ex.getCode());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
index 476b91020f2c..e5c34fb4e465 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
@@ -158,7 +158,7 @@ void testPutObject(int length, ReplicationConfig replication) throws IOException
     bucket.setReplicationConfig(replication);
 
     //WHEN
-    Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, body);
+    Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, null, null, body);
 
     //THEN
     assertEquals(200, response.getStatus());
@@ -185,7 +185,7 @@ void testPutObjectContentLength() throws IOException, OS3Exception {
         new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
     long dataSize = CONTENT.length();
 
-    objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, body);
+    objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, null, null, body);
     assertEquals(dataSize, getKeyDataSize());
   }
 
@@ -202,8 +202,8 @@ void testPutObjectContentLengthForStreaming()
     when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER))
         .thenReturn("15");
 
-    objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0,
-        new ByteArrayInputStream(chunkedContent.getBytes(UTF_8)));
+    objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, null,
+        null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8)));
     assertEquals(15, getKeyDataSize());
   }
 
@@ -218,7 +218,7 @@ public void testPutObjectWithTags() throws IOException, OS3Exception {
     objectEndpoint.setHeaders(headersWithTags);
 
     Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(),
-        1, body);
+        1, null, null, null, body);
 
     assertEquals(200, response.getStatus());
 
@@ -242,7 +242,7 @@ public void testPutObjectWithOnlyTagKey() throws Exception {
 
     try {
       objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(),
-          1, body);
+          1, null, null, null, body);
       fail("request with invalid query param should fail");
     } catch (OS3Exception ex) {
       assertEquals(INVALID_TAG.getCode(), ex.getCode());
@@ -261,7 +261,7 @@ public void testPutObjectWithDuplicateTagKey() throws Exception {
     objectEndpoint.setHeaders(headersWithDuplicateTagKey);
     try {
       objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(),
-          1, body);
+          1, null, null, null, body);
       fail("request with duplicate tag key should fail");
     } catch (OS3Exception ex) {
       assertEquals(INVALID_TAG.getCode(), ex.getCode());
@@ -281,7 +281,7 @@ public void testPutObjectWithLongTagKey() throws Exception {
     objectEndpoint.setHeaders(headersWithLongTagKey);
     try {
       objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(),
-          1, body);
+          1, null, null, null, body);
       fail("request with tag key exceeding the length limit should fail");
     } catch (OS3Exception ex) {
       assertEquals(INVALID_TAG.getCode(), ex.getCode());
@@ -301,7 +301,7 @@ public void testPutObjectWithLongTagValue() throws Exception {
     when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue);
     try {
       objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(),
-          1, body);
+          1, null, null, null, body);
       fail("request with tag value exceeding the length limit should fail");
     } catch (OS3Exception ex) {
       assertEquals(INVALID_TAG.getCode(), ex.getCode());
@@ -327,7 +327,7 @@ public void testPutObjectWithTooManyTags() throws Exception {
     objectEndpoint.setHeaders(headersWithTooManyTags);
     try {
       objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(),
-          1, body);
+          1, null, null, null, body);
       fail("request with number of tags exceeding limit should fail");
     } catch (OS3Exception ex) {
       assertEquals(INVALID_TAG.getCode(), ex.getCode());
@@ -356,7 +356,7 @@ void testPutObjectWithSignedChunks() throws IOException, OS3Exception {
 
     //WHEN
     Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME,
-        chunkedContent.length(), 1,
+        chunkedContent.length(), 1, null, null, null,
         new ByteArrayInputStream(chunkedContent.getBytes(UTF_8)));
 
     //THEN
@@ -386,7 +386,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception
         new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
     try {
       objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT
-          .length(), 1, body);
+          .length(), 1, null, null, null, body);
       fail("Should throw IOException");
     } catch (IOException ignored) {
       // Verify that the message digest is reset so that the instance can be reused for the
@@ -411,7 +411,7 @@ void testCopyObject() throws IOException, OS3Exception {
     when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY");
 
     Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME,
-        CONTENT.length(), 1, body);
+        CONTENT.length(), 1, null, null, null, body);
 
     OzoneInputStream ozoneInputStream = clientStub.getObjectStore()
         .getS3Bucket(BUCKET_NAME)
@@ -437,7 +437,7 @@ void testCopyObject() throws IOException, OS3Exception {
         BUCKET_NAME + "/" + urlEncode(KEY_NAME));
 
     response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1,
-        body);
+        null, null, null, body);
 
     // Check destination key and response
     ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME)
        .readKey(DEST_KEY);
@@ -467,7 +467,7 @@ void testCopyObject() throws IOException, OS3Exception {
     metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2");
 
     response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1,
-        body);
+        null, null, null, body);
 
     ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME)
        .readKey(DEST_KEY);
@@ -494,7 +494,7 @@ void testCopyObject() throws IOException, OS3Exception {
     // wrong copy metadata directive
     when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID");
     OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
-        DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, body),
+        DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, null, null, body),
         "test copy object failed");
     assertThat(e.getHttpCode()).isEqualTo(400);
     assertThat(e.getCode()).isEqualTo("InvalidArgument");
@@ -504,7 +504,7 @@ void testCopyObject() throws IOException, OS3Exception {
 
     // source and dest same
     e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
-        BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, body),
+        BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body),
         "test copy object failed");
     assertThat(e.getErrorMessage()).contains("This copy request is illegal");
 
@@ -512,28 +512,28 @@ void testCopyObject() throws IOException, OS3Exception {
     when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
         NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME));
     e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME,
-        DEST_KEY, CONTENT.length(), 1, body), "test copy object failed");
+        DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed");
     assertThat(e.getCode()).contains("NoSuchBucket");
 
     // dest bucket not found
     when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
         BUCKET_NAME + "/" + urlEncode(KEY_NAME));
     e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET,
-        DEST_KEY, CONTENT.length(), 1, body), "test copy object failed");
+        DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed");
     assertThat(e.getCode()).contains("NoSuchBucket");
 
     //Both source and dest bucket not found
     when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
         NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME));
     e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET,
-        DEST_KEY, CONTENT.length(), 1, body), "test copy object failed");
+        DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed");
     assertThat(e.getCode()).contains("NoSuchBucket");
 
     // source key not found
     when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
         BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET));
     e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
-        "nonexistent", KEY_NAME, CONTENT.length(), 1, body),
+        "nonexistent", KEY_NAME, CONTENT.length(), 1, null, null, null, body),
         "test copy object failed");
     assertThat(e.getCode()).contains("NoSuchBucket");
   }
@@ -545,7 +545,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException
         new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
 
     Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME,
-        CONTENT.length(), 1, body);
+        CONTENT.length(), 1, null, null, null, body);
 
     OzoneInputStream ozoneInputStream = clientStub.getObjectStore()
         .getS3Bucket(BUCKET_NAME)
@@ -573,7 +573,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException
     try {
       objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1,
-          body);
+          null, null, null, body);
       fail("Should throw IOException");
     } catch (IOException ignored) {
       // Verify that the message digest is reset so that the instance can be reused for the
@@ -596,7 +596,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception {
     String sourceKeyName = "sourceKey";
 
     Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName,
-        CONTENT.length(), 1, body);
+        CONTENT.length(), 1, null, null, null, body);
 
     OzoneKeyDetails keyDetails =
         clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName);
@@ -614,7 +614,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception {
         BUCKET_NAME + "/" + urlEncode(sourceKeyName));
     objectEndpoint.setHeaders(headersForCopy);
 
-    Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body);
+    Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body);
 
     OzoneKeyDetails destKeyDetails = clientStub.getObjectStore()
         .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey);
@@ -633,7 +633,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception {
     // With x-amz-tagging-directive = COPY with a different x-amz-tagging
     when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3");
 
-    copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body);
+    copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body);
     assertEquals(200, copyResponse.getStatus());
 
     destKeyDetails = clientStub.getObjectStore()
@@ -648,7 +648,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception {
 
     // Copy object with x-amz-tagging-directive = REPLACE
     when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE");
-    copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body);
+    copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body);
     assertEquals(200, copyResponse.getStatus());
 
     destKeyDetails = clientStub.getObjectStore()
@@ -670,7 +670,7 @@ public void testCopyObjectWithInvalidTagCopyDirective() throws Exception {
     HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class);
     when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID");
     try {
-      objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, body);
+      objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, null, null, body);
     } catch (OS3Exception ex) {
       assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode());
       assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid");
@@ -685,7 +685,7 @@ void testInvalidStorageType() {
     when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random");
 
     OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
-        BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, body));
+        BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body));
     assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getErrorMessage(),
         e.getErrorMessage());
     assertEquals("random", e.getResource());
@@ -698,7 +698,7 @@ void testEmptyStorageType() throws IOException, OS3Exception {
     when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("");
 
     objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT
-        .length(), 1, body);
+        .length(), 1, null, null, null, body);
     OzoneKeyDetails key =
         clientStub.getObjectStore().getS3Bucket(BUCKET_NAME)
             .getKey(KEY_NAME);
@@ -717,7 +717,7 @@ void testDirectoryCreation() throws IOException,
 
     // WHEN
     try (Response response = objectEndpoint.put(fsoBucket.getName(), path,
-        0L, 0, null)) {
+        0L, 0, "", null, null, null)) {
       assertEquals(HttpStatus.SC_OK, response.getStatus());
     }
 
@@ -732,12 +732,12 @@ void testDirectoryCreationOverFile() throws IOException, OS3Exception {
     final String path = "key";
     final ByteArrayInputStream body =
         new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, body);
+    objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", null, null, body);
 
     // WHEN
     final OS3Exception exception = assertThrows(OS3Exception.class,
         () -> objectEndpoint
-            .put(FSO_BUCKET_NAME, path + "/", 0, 0, null)
+            .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null, null, null)
             .close());
 
     // THEN
@@ -753,7 +753,7 @@ public void testPutEmptyObject() throws IOException, OS3Exception {
     ByteArrayInputStream body = new ByteArrayInputStream(emptyString.getBytes(UTF_8));
     objectEndpoint.setHeaders(headersWithTags);
 
-    Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, body);
+    Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, null, null, body);
     assertEquals(200, putResponse.getStatus());
     OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME);
     assertEquals(0, keyDetails.getDataSize());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java
index 8b292ed1db7b..488474e30390 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java
@@ -47,7 +47,6 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.util.S3Consts;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
@@ -83,13 +82,12 @@ public void init() throws OS3Exception, IOException {
     Mockito.when(headers.getHeaderString(X_AMZ_CONTENT_SHA256))
         .thenReturn("mockSignature");
     rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(),
-        1, body);
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
+        1, null, null, null, body);
   }
 
   @Test
   public void testDeleteTagging() throws IOException, OS3Exception {
-    Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG);
+    Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, "");
     assertEquals(HTTP_NO_CONTENT, response.getStatus());
 
     assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME)
@@ -99,7 +97,7 @@ public void testDeleteTagging() throws IOException, OS3Exception {
   @Test
   public void testDeleteTaggingNoKeyFound() throws Exception {
     try {
-      rest.delete(BUCKET_NAME, "nonexistent");
+      rest.delete(BUCKET_NAME, "nonexistent", null, "");
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
@@ -110,7 +108,7 @@ public void testDeleteTaggingNoKeyFound() throws Exception {
   @Test
   public void testDeleteTaggingNoBucketFound() throws Exception {
     try {
-      rest.delete("nonexistent", "nonexistent");
+      rest.delete("nonexistent", "nonexistent", null, "");
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
@@ -137,8 +135,7 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception {
         ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/");
 
     try {
-      endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
-      endpoint.delete("fsoBucket", "dir/");
+      endpoint.delete("fsoBucket", "dir/", null, "");
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java
index f7b039e66de9..1885e7d0cf6f 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java
@@ -37,7 +37,6 @@
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.ozone.s3.util.S3Consts;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
@@ -73,15 +72,13 @@ public void init() throws OS3Exception, IOException {
     // Create a key with object tags
     Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2");
     rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(),
-        1, body);
-
-    rest.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
+        1, null, null, null, body);
   }
 
   @Test
   public void testGetTagging() throws IOException, OS3Exception {
     //WHEN
-    Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0);
+    Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, "");
 
     assertEquals(HTTP_OK, response.getStatus());
     S3Tagging s3Tagging = (S3Tagging) response.getEntity();
@@ -102,7 +99,7 @@ public void testGetTagging() throws IOException, OS3Exception {
   @Test
   public void testGetTaggingNoKeyFound() throws Exception {
     try {
-      rest.get(BUCKET_NAME, "nonexistent", 0, 0);
+      rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, "");
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
@@ -113,7 +110,7 @@ public void testGetTaggingNoKeyFound() throws Exception {
   @Test
   public void testGetTaggingNoBucketFound() throws Exception {
     try {
-      rest.get("nonexistent", "nonexistent", 0, 0);
+      rest.get("nonexistent", "nonexistent", 0, null, 0, null, "");
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java
index 9c42dd510d6d..d1651d6b59c0 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java
@@ -90,14 +90,13 @@ void setup() throws IOException, OS3Exception {
 
     ByteArrayInputStream body =
         new ByteArrayInputStream("".getBytes(UTF_8));
-    objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, body);
+    objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, null, body);
   }
 
   @Test
   public void testPutObjectTaggingWithEmptyBody() throws Exception {
     try {
-      objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
-      objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1,
+      objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "",
           null, null);
       fail();
     } catch (OS3Exception ex) {
@@ -108,8 +107,8 @@ public void testPutObjectTaggingWithEmptyBody() throws Exception {
 
   @Test
   public void testPutValidObjectTagging() throws Exception {
-    objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
-    assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, twoTags()).getStatus());
+    assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null,
+        "", null, twoTags()).getStatus());
     OzoneKeyDetails keyDetails =
         clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME);
     assertEquals(2, keyDetails.getTags().size());
@@ -130,8 +129,7 @@ public void testPutInvalidObjectTagging() throws Exception {
   private void testInvalidObjectTagging(Supplier inputStream,
       int expectedHttpCode, String expectedErrorCode) throws Exception {
     try {
-      objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
-      objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1,
+      objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null,
           inputStream.get());
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
@@ -143,9 +141,8 @@ private void testInvalidObjectTagging(Supplier inputStream,
   @Test
   public void testPutObjectTaggingNoKeyFound() throws Exception {
     try {
-      objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
       objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1,
-          twoTags());
+          null, "", null, twoTags());
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
@@ -156,9 +153,8 @@ public void testPutObjectTaggingNoKeyFound() throws Exception {
   @Test
   public void testPutObjectTaggingNoBucketFound() throws Exception {
     try {
-      objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
       objectEndpoint.put("nonexistent", "nonexistent", 0, 1,
-          twoTags());
+          null, "", null, twoTags());
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
@@ -189,9 +185,9 @@ public void testPutObjectTaggingNotImplemented() throws Exception {
     doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory",
         ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap);
 
-    endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
     try {
-      endpoint.put("fsoBucket", "dir/", 0, 1, twoTags());
+      endpoint.put("fsoBucket", "dir/", 0, 1, null, "",
+          null, twoTags());
       fail("Expected an OS3Exception to be thrown");
     } catch (OS3Exception ex) {
       assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode());
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
index 57fa0264b509..4981069528a8 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
@@ -50,7 +50,6 @@
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import
org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; @@ -97,9 +96,8 @@ public void testPartUpload() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -120,9 +118,8 @@ public void testPartUploadWithOverride() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -131,7 +128,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -143,9 +140,8 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, - body); + "random", null, null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -180,9 +176,8 @@ public void testPartUploadStreamContentLength() String uploadID = multipartUploadInitiateResponse.getUploadID(); long contentLength = chunkedContent.length(); - objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, - new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + uploadID, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertContentLength(uploadID, keyName, 15); } @@ -204,9 +199,8 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); rest.put(OzoneConsts.S3_BUCKET, keyName, - contentLength, 1, body); + contentLength, 1, uploadID, null, null, body); assertContentLength(uploadID, keyName, content.length()); } @@ -250,9 +244,8 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); try { - objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, body); + content.length(), 1, 
uploadID, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 1656fa35d438..736660073d57 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -38,7 +38,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -94,9 +93,8 @@ public void testPartUpload() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -116,9 +114,8 @@ public void testPartUploadWithOverride() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -126,9 +123,8 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. 
content = "Multipart Upload Changed"; - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -140,9 +136,8 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); rest.put(S3BUCKET, S3KEY, content.length(), 1, - body); + "random", null, null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 8bf809c0a57e..9872a711c639 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -261,9 +261,8 @@ public void testGetKey() throws IOException { .setConfig(conf) .build(); - objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "marker"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, 1000)); + "bucketName", "keyPath", 0, null, 1000, "marker", null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -280,7 +279,7 @@ public void testPutKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "bucketName", "keyPath", 1024, 0, + "bucketName", "keyPath", 1024, 0, null, null, null, new ByteArrayInputStream(new byte[]{}))); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -298,7 +297,7 @@ public void testDeleteKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath")); + objectEndpoint.delete("bucketName", "keyPath", null, null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -342,18 +341,18 @@ public void testObjectTagging() throws Exception { InputStream tagInput = new ByteArrayInputStream(xml.getBytes(UTF_8)); - objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put("bucketName", "keyPath", 0, 1, - tagInput)); + null, "", null, tagInput)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath")); + objectEndpoint.delete("bucketName", "keyPath", "", "")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); e = assertThrows(OS3Exception.class, () -> - objectEndpoint.get("bucketName", "keyPath", 0, 0)); + objectEndpoint.get("bucketName", "keyPath", 0, null, + 0, null, "")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index 4586d477f734..dbe21601dbd3 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -92,7 +92,7 @@ public void testUpload() throws Exception { byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8); ByteArrayInputStream body = new ByteArrayInputStream(keyContent); - Response response = rest.put(S3BUCKET, S3KEY, 0, 0, body); + Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, body); assertEquals(200, response.getStatus()); } @@ -126,7 +126,7 @@ public void testUploadWithCopy() throws Exception { .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); rest.setHeaders(headers); - Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null); + Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, null); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 39baae58584b..018ad0f1f5e2 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -20,8 +20,6 @@ import static java.net.HttpURLConnection.HTTP_CONFLICT; import static java.net.HttpURLConnection.HTTP_OK; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.OzoneConsts.BUCKET; -import static org.apache.hadoop.ozone.OzoneConsts.KEY; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; @@ -38,10 +36,10 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.charset.StandardCharsets; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; @@ -64,6 +62,8 @@ */ public class TestS3GatewayMetrics { + private String bucketName = OzoneConsts.BUCKET; + private String keyName = OzoneConsts.KEY; private OzoneClient clientStub; private BucketEndpoint bucketEndpoint; private RootEndpoint rootEndpoint; @@ -72,15 +72,13 @@ public class TestS3GatewayMetrics { private HttpHeaders headers; private static final String ACL_MARKER = "acl"; private static final String CONTENT = "0123456789"; - private static final int LENGTH = CONTENT.length(); - private static final byte[] BYTES = CONTENT.getBytes(StandardCharsets.UTF_8); private S3GatewayMetrics metrics; @BeforeEach public void setup() throws Exception { clientStub = new OzoneClientStub(); - clientStub.getObjectStore().createS3Bucket(BUCKET); - bucket = clientStub.getObjectStore().getS3Bucket(BUCKET); + clientStub.getObjectStore().createS3Bucket(bucketName); + bucket = clientStub.getObjectStore().getS3Bucket(bucketName); bucket.createKey("file1", 0).close(); headers = mock(HttpHeaders.class); @@ -115,7 +113,7 @@ public void testHeadBucketSuccess() throws Exception { long oriMetric = metrics.getHeadBucketSuccess(); - bucketEndpoint.head(BUCKET); 
+ bucketEndpoint.head(bucketName); long curMetric = metrics.getHeadBucketSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -136,7 +134,7 @@ public void testListBucketSuccess() throws Exception { public void testGetBucketSuccess() throws Exception { long oriMetric = metrics.getGetBucketSuccess(); - bucketEndpoint.get(BUCKET, 1000, 0).getEntity(); + bucketEndpoint.get(bucketName, 1000, 0).getEntity(); long curMetric = metrics.getGetBucketSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -171,7 +169,7 @@ public void testCreateBucketFailure() throws Exception { // Creating an error by trying to create a bucket that already exists OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.put( - BUCKET, null)); + bucketName, null)); assertEquals(HTTP_CONFLICT, e.getHttpCode()); assertEquals(BUCKET_ALREADY_EXISTS.getCode(), e.getCode()); @@ -183,7 +181,7 @@ public void testCreateBucketFailure() throws Exception { public void testDeleteBucketSuccess() throws Exception { long oriMetric = metrics.getDeleteBucketSuccess(); - bucketEndpoint.delete(BUCKET); + bucketEndpoint.delete(bucketName); long curMetric = metrics.getDeleteBucketSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -192,11 +190,11 @@ public void testDeleteBucketSuccess() throws Exception { @Test public void testDeleteBucketFailure() throws Exception { long oriMetric = metrics.getDeleteBucketFailure(); - bucketEndpoint.delete(BUCKET); + bucketEndpoint.delete(bucketName); // Deleting a bucket that does not exist will result in delete failure OS3Exception e = assertThrows(OS3Exception.class, () -> - bucketEndpoint.delete(BUCKET)); + bucketEndpoint.delete(bucketName)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), e.getErrorMessage()); @@ -211,7 +209,7 @@ public void testGetAclSuccess() throws Exception { bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); Response response = - bucketEndpoint.get(BUCKET, 0, 0); + bucketEndpoint.get(bucketName, 0, 0); long curMetric = metrics.getGetAclSuccess(); assertEquals(HTTP_OK, response.getStatus()); assertEquals(1L, curMetric - oriMetric); @@ -270,11 +268,11 @@ public void testPutAclFailure() throws Exception { @Test public void testHeadKeySuccess() throws Exception { - bucket.createKey(KEY, 0).close(); + bucket.createKey(keyName, 0).close(); long oriMetric = metrics.getHeadKeySuccess(); - keyEndpoint.head(BUCKET, KEY); + keyEndpoint.head(bucketName, keyName); long curMetric = metrics.getHeadKeySuccess(); assertEquals(1L, curMetric - oriMetric); @@ -284,7 +282,7 @@ public void testHeadKeySuccess() throws Exception { public void testHeadKeyFailure() throws Exception { long oriMetric = metrics.getHeadKeyFailure(); - keyEndpoint.head(BUCKET, "unknownKey"); + keyEndpoint.head(bucketName, "unknownKey"); long curMetric = metrics.getHeadKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -294,7 +292,13 @@ public void testHeadKeyFailure() throws Exception { public void testCreateKeySuccess() throws Exception { long oriMetric = metrics.getCreateKeySuccess(); - putObject(BUCKET, KEY); + // Create an input stream + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); long curMetric = metrics.getCreateKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -304,8 +308,9 @@ public void testCreateKeyFailure() throws Exception { 
long oriMetric = metrics.getCreateKeyFailure(); // Create the file in a bucket that does not exist - OS3Exception e = assertThrows(OS3Exception.class, - () -> putObject("unknownBucket", KEY)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( + "unknownBucket", keyName, CONTENT.length(), 1, null, null, + null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getCreateKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -315,8 +320,8 @@ public void testCreateKeyFailure() throws Exception { public void testDeleteKeySuccess() throws Exception { long oriMetric = metrics.getDeleteKeySuccess(); - bucket.createKey(KEY, 0).close(); - keyEndpoint.delete(BUCKET, KEY); + bucket.createKey(keyName, 0).close(); + keyEndpoint.delete(bucketName, keyName, null, null); long curMetric = metrics.getDeleteKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -324,8 +329,8 @@ public void testDeleteKeySuccess() throws Exception { @Test public void testDeleteKeyFailure() throws Exception { long oriMetric = metrics.getDeleteKeyFailure(); - OS3Exception e = assertThrows(OS3Exception.class, - () -> keyEndpoint.delete("unknownBucket", KEY)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( + "unknownBucket", keyName, null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getDeleteKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -334,10 +339,15 @@ public void testDeleteKeyFailure() throws Exception { @Test public void testGetKeySuccess() throws Exception { long oriMetric = metrics.getGetKeySuccess(); - putObject(BUCKET, KEY); + // Create an input stream + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); // GET the key from the bucket - Response response = keyEndpoint.get(BUCKET, KEY, 0, 0); + Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null); StreamingOutput stream = (StreamingOutput) response.getEntity(); stream.write(new ByteArrayOutputStream()); long curMetric = metrics.getGetKeySuccess(); @@ -349,8 +359,8 @@ public void testGetKeyFailure() throws Exception { long oriMetric = metrics.getGetKeyFailure(); // Fetching a non-existent key - OS3Exception e = assertThrows(OS3Exception.class, - () -> keyEndpoint.get(BUCKET, "unknownKey", 0, 0)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( + bucketName, "unknownKey", 0, null, 0, null, null)); assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode()); long curMetric = metrics.getGetKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -358,8 +368,9 @@ public void testGetKeyFailure() throws Exception { @Test public void testInitMultiPartUploadSuccess() throws Exception { + long oriMetric = metrics.getInitMultiPartUploadSuccess(); - keyEndpoint.initializeMultipartUpload(BUCKET, KEY); + keyEndpoint.initializeMultipartUpload(bucketName, keyName); long curMetric = metrics.getInitMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -367,8 +378,8 @@ public void testInitMultiPartUploadSuccess() throws Exception { @Test public void testInitMultiPartUploadFailure() throws Exception { long oriMetric = metrics.getInitMultiPartUploadFailure(); - OS3Exception e = assertThrows(OS3Exception.class, - () -> keyEndpoint.initializeMultipartUpload("unknownBucket", KEY)); + OS3Exception e = 
assertThrows(OS3Exception.class, () -> keyEndpoint + .initializeMultipartUpload("unknownBucket", keyName)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getInitMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); @@ -378,13 +389,12 @@ public void testInitMultiPartUploadFailure() throws Exception { public void testAbortMultiPartUploadSuccess() throws Exception { // Initiate the Upload and fetch the upload ID - String uploadID = initiateMultipartUpload(BUCKET, KEY); + String uploadID = initiateMultipartUpload(bucketName, keyName); long oriMetric = metrics.getAbortMultiPartUploadSuccess(); // Abort the Upload Successfully by deleting the key using the Upload-Id - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); - keyEndpoint.delete(BUCKET, KEY); + keyEndpoint.delete(bucketName, keyName, uploadID, null); long curMetric = metrics.getAbortMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -395,9 +405,8 @@ public void testAbortMultiPartUploadFailure() throws Exception { long oriMetric = metrics.getAbortMultiPartUploadFailure(); // Fail the Abort Method by providing wrong uploadID - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "wrongId"); - OS3Exception e = assertThrows(OS3Exception.class, - () -> keyEndpoint.delete(BUCKET, KEY)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( + bucketName, keyName, "wrongId", null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getAbortMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); @@ -407,13 +416,14 @@ public void testAbortMultiPartUploadFailure() throws Exception { public void testCompleteMultiPartUploadSuccess() throws Exception { // Initiate the Upload and fetch the upload ID - String uploadID = initiateMultipartUpload(BUCKET, KEY); + String uploadID = initiateMultipartUpload(bucketName, keyName); long oriMetric = metrics.getCompleteMultiPartUploadSuccess(); // complete multipart upload - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); - Response response = keyEndpoint.completeMultipartUpload(BUCKET, KEY, - new CompleteMultipartUploadRequest()); + CompleteMultipartUploadRequest completeMultipartUploadRequest = new + CompleteMultipartUploadRequest(); + Response response = keyEndpoint.completeMultipartUpload(bucketName, keyName, + uploadID, completeMultipartUploadRequest); long curMetric = metrics.getCompleteMultiPartUploadSuccess(); assertEquals(200, response.getStatus()); assertEquals(1L, curMetric - oriMetric); @@ -422,9 +432,11 @@ public void testCompleteMultiPartUploadSuccess() throws Exception { @Test public void testCompleteMultiPartUploadFailure() throws Exception { long oriMetric = metrics.getCompleteMultiPartUploadFailure(); - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); - OS3Exception e = assertThrows(OS3Exception.class, - () -> keyEndpoint.completeMultipartUpload(BUCKET, "key2", new CompleteMultipartUploadRequest())); + CompleteMultipartUploadRequest completeMultipartUploadRequestNew = new + CompleteMultipartUploadRequest(); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint + .completeMultipartUpload(bucketName, "key2", "random", + completeMultipartUploadRequestNew)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getCompleteMultiPartUploadFailure(); assertEquals(1L, 
curMetric - oriMetric); @@ -434,11 +446,13 @@ public void testCompleteMultiPartUploadFailure() throws Exception { public void testCreateMultipartKeySuccess() throws Exception { // Initiate the Upload and fetch the upload ID - String uploadID = initiateMultipartUpload(BUCKET, KEY); + String uploadID = initiateMultipartUpload(bucketName, keyName); long oriMetric = metrics.getCreateMultipartKeySuccess(); - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); - putObject(BUCKET, KEY); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT.length(), + 1, uploadID, null, null, body); long curMetric = metrics.getCreateMultipartKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -446,9 +460,8 @@ public void testCreateMultipartKeySuccess() throws Exception { @Test public void testCreateMultipartKeyFailure() throws Exception { long oriMetric = metrics.getCreateMultipartKeyFailure(); - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "randomId"); - OS3Exception e = assertThrows(OS3Exception.class, - () -> keyEndpoint.put(BUCKET, KEY, LENGTH, 1, null)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( + bucketName, keyName, CONTENT.length(), 1, "randomId", null, null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getCreateMultipartKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -459,11 +472,11 @@ public void testListPartsSuccess() throws Exception { long oriMetric = metrics.getListPartsSuccess(); // Initiate the Upload and fetch the upload ID - String uploadID = initiateMultipartUpload(BUCKET, KEY); + String uploadID = initiateMultipartUpload(bucketName, keyName); // Listing out the parts by providing the uploadID - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); - keyEndpoint.get(BUCKET, KEY, 0, 3); + keyEndpoint.get(bucketName, keyName, 0, + uploadID, 3, null, null); long curMetric = metrics.getListPartsSuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -473,9 +486,8 @@ public void testListPartsFailure() throws Exception { long oriMetric = metrics.getListPartsFailure(); // Listing out the parts by providing the uploadID after aborting - keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "wrong_id"); - OS3Exception e = assertThrows(OS3Exception.class, - () -> keyEndpoint.get(BUCKET, KEY, 0, 3)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( + bucketName, keyName, 0, "wrong_id", 3, null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getListPartsFailure(); assertEquals(1L, curMetric - oriMetric); @@ -492,13 +504,18 @@ public void testCopyObject() throws Exception { // Test for Success of CopyObjectSuccess Metric long oriMetric = metrics.getCopyObjectSuccess(); - putObject(BUCKET, KEY); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + + keyEndpoint.put(bucketName, keyName, + CONTENT.length(), 1, null, null, null, body); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - BUCKET + "/" + urlEncode(KEY)); - putObject(destBucket, destKey); + bucketName + "/" + urlEncode(keyName)); + keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1, + null, null, null, body); long curMetric = metrics.getCopyObjectSuccess(); assertEquals(1L, curMetric - 
oriMetric); @@ -506,7 +523,9 @@ public void testCopyObject() throws Exception { oriMetric = metrics.getCopyObjectFailure(); // source and dest same when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - OS3Exception e = assertThrows(OS3Exception.class, () -> putObject(BUCKET, KEY)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( + bucketName, keyName, CONTENT.length(), 1, null, null, null, body), + "Test for CopyObjectMetric failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); curMetric = metrics.getCopyObjectFailure(); assertEquals(1L, curMetric - oriMetric); @@ -516,11 +535,15 @@ public void testCopyObject() throws Exception { public void testPutObjectTaggingSuccess() throws Exception { long oriMetric = metrics.getPutObjectTaggingSuccess(); - putObject(BUCKET, KEY); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); // Put object tagging - keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, ""); - keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody()); + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); long curMetric = metrics.getPutObjectTaggingSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -531,9 +554,10 @@ public void testPutObjectTaggingFailure() throws Exception { long oriMetric = metrics.getPutObjectTaggingFailure(); // Put object tagging for nonexistent key - keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, ""); - OS3Exception ex = assertThrows(OS3Exception.class, - () -> keyEndpoint.put(BUCKET, "nonexistent", 0, 1, getPutTaggingBody())); + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "", + null, getPutTaggingBody()) + ); assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); long curMetric = metrics.getPutObjectTaggingFailure(); @@ -545,14 +569,18 @@ public void testGetObjectTaggingSuccess() throws Exception { long oriMetric = metrics.getGetObjectTaggingSuccess(); // Create the file - putObject(BUCKET, KEY); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); // Put object tagging - keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, ""); - keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody()); + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); // Get object tagging - keyEndpoint.get(BUCKET, KEY, 0, 0); + keyEndpoint.get(bucketName, keyName, 0, + null, 0, null, ""); long curMetric = metrics.getGetObjectTaggingSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -563,9 +591,9 @@ public void testGetObjectTaggingFailure() throws Exception { long oriMetric = metrics.getGetObjectTaggingFailure(); // Get object tagging for nonexistent key - keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, ""); - OS3Exception ex = assertThrows(OS3Exception.class, - () -> keyEndpoint.get(BUCKET, "nonexistent", 0, 0)); + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.get(bucketName, "nonexistent", 0, null, + 0, null, "")); assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); long curMetric = metrics.getGetObjectTaggingFailure(); assertEquals(1L, curMetric - oriMetric); @@ -576,14 +604,17 @@ public void 
testDeleteObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingSuccess();
 
     // Create the file
-    putObject(BUCKET, KEY);
+    ByteArrayInputStream body =
+        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
+    keyEndpoint.put(bucketName, keyName, CONTENT
+        .length(), 1, null, null, null, body);
+    body.close();
 
     // Put object tagging
-    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
-    keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody());
+    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
 
     // Delete object tagging
-    keyEndpoint.delete(BUCKET, KEY);
+    keyEndpoint.delete(bucketName, keyName, null, "");
 
     long curMetric = metrics.getDeleteObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -594,26 +625,19 @@ public void testDeleteObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingFailure();
 
     // Delete object tagging for nonexistent key
-    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
-    OS3Exception ex = assertThrows(OS3Exception.class,
-        () -> keyEndpoint.delete(BUCKET, "nonexistent"));
+    OS3Exception ex = assertThrows(OS3Exception.class, () ->
+        keyEndpoint.delete(bucketName, "nonexistent", null, ""));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());
     long curMetric = metrics.getDeleteObjectTaggingFailure();
     assertEquals(1L, curMetric - oriMetric);
   }
 
-  private void putObject(String bucketName, String key) throws IOException, OS3Exception {
-    try (InputStream body = new ByteArrayInputStream(BYTES)) {
-      keyEndpoint.put(bucketName, key, LENGTH, 1, body);
-    }
-  }
-
-  private String initiateMultipartUpload(String bucketName, String key)
+  private String initiateMultipartUpload(String bktName, String key)
       throws IOException, OS3Exception {
     // Initiate the Upload
     Response response =
-        keyEndpoint.initializeMultipartUpload(bucketName, key);
+        keyEndpoint.initializeMultipartUpload(bktName, key);
 
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     if (response.getStatus() == 200) {

From ea8c924a62b9e81c67a8d30ebece6cc1a2426770 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Thu, 18 Dec 2025 20:37:16 +0100
Subject: [PATCH 5/7] HDDS-14209. Look up query params in ObjectEndpoint

---
 .../ozone/s3/endpoint/ObjectEndpoint.java     |  54 ++--
 .../ozone/s3/endpoint/EndpointTestUtils.java  |  10 +-
 .../s3/endpoint/TestAbortMultipartUpload.java |   8 +-
 .../ozone/s3/endpoint/TestListParts.java      |  48 ++--
 .../endpoint/TestMultipartUploadComplete.java |  12 +-
 .../endpoint/TestMultipartUploadWithCopy.java |  39 +--
 .../ozone/s3/endpoint/TestObjectDelete.java   |   2 +-
 .../ozone/s3/endpoint/TestObjectGet.java      |  28 +-
 .../s3/endpoint/TestObjectTaggingDelete.java  |  18 +-
 .../s3/endpoint/TestObjectTaggingGet.java     |  16 +-
 .../s3/endpoint/TestObjectTaggingPut.java     | 136 +++-------
 .../ozone/s3/endpoint/TestPartUpload.java     | 131 +++-------
 .../s3/endpoint/TestPartUploadWithStream.java |  58 +----
 .../s3/endpoint/TestPermissionCheck.java      |  35 +--
 .../s3/endpoint/TestUploadWithStream.java     |  15 +-
 .../s3/metrics/TestS3GatewayMetrics.java      | 239 +++++++-----------
 16 files changed, 309 insertions(+), 540 deletions(-)

diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index c6a2b6539098..fdef787c7830 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -168,7 +168,7 @@ public class ObjectEndpoint extends EndpointBase {
   /*FOR the feature Overriding Response Header
   https://docs.aws.amazon.com/de_de/AmazonS3/latest/API/API_GetObject.html
   */
-  private Map overrideQueryParameter;
+  private final Map overrideQueryParameter;
   private int bufferSize;
   private int chunkSize;
   private boolean datastreamEnabled;
@@ -209,17 +209,18 @@ public void init() {
   * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
   * more details.
   */
-  @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"})
+  @SuppressWarnings("checkstyle:MethodLength")
   @PUT
   public Response put(
       @PathParam(BUCKET) String bucketName,
       @PathParam(PATH) String keyPath,
       @HeaderParam(HttpHeaders.CONTENT_LENGTH) long length,
-      @QueryParam(QueryParams.PART_NUMBER) int partNumber,
-      @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID,
-      @QueryParam(QueryParams.TAGGING) String taggingMarker,
-      @QueryParam(QueryParams.ACL) String aclMarker,
-      final InputStream body) throws IOException, OS3Exception {
+      final InputStream body
+  ) throws IOException, OS3Exception {
+    final String aclMarker = queryParams().get(QueryParams.ACL);
+    final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0);
+    final String taggingMarker = queryParams().get(QueryParams.TAGGING);
+    final String uploadID = queryParams().get(QueryParams.UPLOAD_ID);
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.CREATE_KEY;
     boolean auditSuccess = true;
@@ -403,17 +404,26 @@ public Response put(
   * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
   * for more details.
   */
-  @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"})
+  @SuppressWarnings("checkstyle:MethodLength")
   @GET
+  public Response get(
+      @PathParam(BUCKET) String bucketName,
+      @PathParam(PATH) String keyPath
+  ) throws IOException, OS3Exception {
+    final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0);
+    final int maxParts = queryParams().getInt(QueryParams.MAX_PARTS, 1000);
+    return get(bucketName, keyPath, partNumber, maxParts);
+  }
+
   public Response get(
       @PathParam(BUCKET) String bucketName,
       @PathParam(PATH) String keyPath,
       @QueryParam(QueryParams.PART_NUMBER) int partNumber,
-      @QueryParam(QueryParams.UPLOAD_ID) String uploadId,
-      @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts,
-      @QueryParam(QueryParams.PART_NUMBER_MARKER) String partNumberMarker,
-      @QueryParam(QueryParams.TAGGING) String taggingMarker)
-      throws IOException, OS3Exception {
+      @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts
+  ) throws IOException, OS3Exception {
+    final String uploadId = queryParams().get(QueryParams.UPLOAD_ID);
+    final String partNumberMarker = queryParams().get(QueryParams.PART_NUMBER_MARKER);
+    final String taggingMarker = queryParams().get(QueryParams.TAGGING);
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.GET_KEY;
     PerformanceStringBuilder perf = new PerformanceStringBuilder();
@@ -720,10 +730,11 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket,
   @SuppressWarnings("emptyblock")
   public Response delete(
       @PathParam(BUCKET) String bucketName,
-      @PathParam(PATH) String keyPath,
-      @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadId,
-      @QueryParam(QueryParams.TAGGING) String taggingMarker) throws
-      IOException, OS3Exception {
+      @PathParam(PATH) String keyPath
+  ) throws IOException, OS3Exception {
+    final String taggingMarker = queryParams().get(QueryParams.TAGGING);
+    final String uploadId = queryParams().get(QueryParams.UPLOAD_ID);
+
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.DELETE_KEY;
 
@@ -798,8 +809,7 @@ public Response delete(
   public Response initializeMultipartUpload(
       @PathParam(BUCKET) String bucket,
       @PathParam(PATH) String key
-  )
-      throws IOException, OS3Exception {
+  ) throws IOException, OS3Exception {
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD;
 
@@ -863,9 +873,9 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket,
   public Response completeMultipartUpload(
       @PathParam(BUCKET) String bucket,
       @PathParam(PATH) String key,
-      @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID,
-      CompleteMultipartUploadRequest multipartUploadRequest)
-      throws IOException, OS3Exception {
+      CompleteMultipartUploadRequest multipartUploadRequest
+  ) throws IOException, OS3Exception {
+    final String uploadID = queryParams().get(QueryParams.UPLOAD_ID, "");
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
     OzoneVolume volume = getVolume();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java
index c6eff8066cd5..2f67e4fcb18e 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java
@@ -25,6 +25,7
@@ import java.io.IOException; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.http.HttpStatus; import org.apache.ratis.util.function.CheckedSupplier; @@ -59,12 +60,17 @@ public static Response put( String uploadID, String content ) throws IOException, OS3Exception { + if (uploadID != null) { + subject.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + } + subject.queryParamsForTest().setInt(S3Consts.QueryParams.PART_NUMBER, partNumber); + if (content == null) { - return subject.put(bucket, key, 0, partNumber, uploadID, null, null, null); + return subject.put(bucket, key, 0, null); } else { final long length = content.length(); try (ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8))) { - return subject.put(bucket, key, length, partNumber, uploadID, null, null, body); + return subject.put(bucket, key, length, body); } } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 9c46a718508f..7ec978c95635 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.Test; /** @@ -63,15 +64,16 @@ public void testAbortMultipartUpload() throws Exception { assertNotNull(multipartUploadInitiateResponse.getUploadID()); String uploadID = multipartUploadInitiateResponse.getUploadID(); - // Abort multipart upload - response = rest.delete(bucket, key, uploadID, null); + rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + response = rest.delete(bucket, key); assertEquals(204, response.getStatus()); // test with unknown upload Id. 
try { - rest.delete(bucket, key, "random", null); + rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "random"); + rest.delete(bucket, key); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 30be715b5305..e573c8393582 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -27,7 +27,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.OzoneConsts; @@ -35,6 +34,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -44,7 +44,6 @@ public class TestListParts { private ObjectEndpoint rest; - private String uploadID; @BeforeEach public void setUp() throws Exception { @@ -67,34 +66,21 @@ public void setUp() throws Exception { OzoneConsts.KEY); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - uploadID = multipartUploadInitiateResponse.getUploadID(); - + String uploadID = multipartUploadInitiateResponse.getUploadID(); + assertNotNull(uploadID); assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); - - assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - - response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 2, uploadID, null, null, body); - - assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - - response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 3, uploadID, null, null, body); - - assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + for (int i = 1; i <= 3; i++) { + response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, i, uploadID, content); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + } } @Test public void testListParts() throws Exception { - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 3, "0", null); + rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 3); ListPartsResponse listPartsResponse = (ListPartsResponse) 
response.getEntity(); @@ -106,8 +92,8 @@ public void testListParts() throws Exception { @Test public void testListPartsContinuation() throws Exception { - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0", null); + rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -115,8 +101,9 @@ public void testListPartsContinuation() throws Exception { assertEquals(2, listPartsResponse.getPartList().size()); // Continue - response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2, - Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); + rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, + Integer.toString(listPartsResponse.getNextPartNumberMarker())); + response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); listPartsResponse = (ListPartsResponse) response.getEntity(); assertFalse(listPartsResponse.getTruncated()); @@ -126,9 +113,10 @@ public void testListPartsContinuation() throws Exception { @Test public void testListPartsWithUnknownUploadID() throws Exception { + rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "no-such-upload"); try { - rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0", null); + rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index fde336f48079..bf0d5654ab6a 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; @@ -27,7 +27,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -45,6 +44,7 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -104,10 +104,7 @@ private String initiateMultipartUpload(String key, Map metadata) private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - Response response = rest.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, 
null, null, body); + Response response = put(rest, OzoneConsts.S3_BUCKET, key, partNumber, uploadID, content); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -120,8 +117,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { + rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = rest.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - uploadID, completeMultipartUploadRequest); + completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 702c32d1abab..5e9f070dbe54 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -18,6 +18,9 @@ package org.apache.hadoop.ozone.s3.endpoint; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static org.apache.hadoop.ozone.s3.endpoint.TestPartUpload.initiateUpload; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_MODIFIED_SINCE; @@ -30,7 +33,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -53,7 +55,9 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; +import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -312,27 +316,14 @@ public void testMultipartTSHeaders() throws Exception { private String initiateMultipartUpload(String key) throws IOException, OS3Exception { setHeaders(); - Response response = endpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, - key); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(200, response.getStatus()); - - return uploadID; - + return initiateUpload(endpoint, OzoneConsts.S3_BUCKET, key); } private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { setHeaders(); - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, null, null, body); - assertEquals(200, response.getStatus()); + 
Response response = put(endpoint, OzoneConsts.S3_BUCKET, key, partNumber, uploadID, content); + assertEquals(HttpStatus.SC_OK, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); part.setETag(response.getHeaderString(OzoneConsts.ETAG)); @@ -374,10 +365,8 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, } setHeaders(additionalHeaders); - ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); - Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, - uploadID, null, null, body); - assertEquals(200, response.getStatus()); + Response response = put(endpoint, OzoneConsts.S3_BUCKET, key, partNumber, uploadID, ""); + assertEquals(HttpStatus.SC_OK, response.getStatus()); CopyPartResult result = (CopyPartResult) response.getEntity(); assertNotNull(result.getETag()); @@ -391,19 +380,18 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, @Test public void testUploadWithRangeCopyContentLength() - throws IOException, OS3Exception { + throws Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. String uploadID = initiateMultipartUpload(KEY); - ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); Map additionalHeaders = new HashMap<>(); additionalHeaders.put(COPY_SOURCE_HEADER, OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); + assertSucceeds(() -> put(endpoint, OzoneConsts.S3_BUCKET, KEY, 1, uploadID, "")); OzoneMultipartUploadPartListParts parts = client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); @@ -415,8 +403,9 @@ private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { setHeaders(); + endpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - uploadID, completeMultipartUploadRequest); + completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 3974cfcf9666..3b382c9bc4f1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -47,7 +47,7 @@ public void delete() throws IOException, OS3Exception { .build(); //WHEN - rest.delete("b1", "key1", null, null); + rest.delete("b1", "key1"); //THEN assertFalse(bucket.listKeys("").hasNext(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index a9fd7da4200e..bb9703d97c03 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -19,6 +19,7 @@ import static 
java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_COUNT_HEADER; @@ -31,7 +32,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.IOException; import java.time.format.DateTimeFormatter; import javax.ws.rs.core.HttpHeaders; @@ -88,19 +88,17 @@ public void init() throws OS3Exception, IOException { .setHeaders(headers) .build(); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + put(rest, BUCKET_NAME, KEY_NAME, CONTENT); + // Create a key with object tags when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); - rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + put(rest, BUCKET_NAME, KEY_WITH_TAG, CONTENT); } @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); //THEN OzoneInputStream ozoneInputStream = @@ -122,7 +120,7 @@ public void get() throws IOException, OS3Exception { @Test public void getKeyWithTag() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); //THEN OzoneInputStream ozoneInputStream = @@ -144,7 +142,7 @@ public void getKeyWithTag() throws IOException, OS3Exception { public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -174,7 +172,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { CONTENT_DISPOSITION2); queryParameter.putSingle("response-content-encoding", CONTENT_ENCODING2); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -195,13 +193,13 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( @@ -214,7 +212,7 @@ public void getRangeHeader() throws IOException, OS3Exception { @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - 
response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -222,7 +220,7 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); assertNull(response.getHeaderString(TAG_COUNT_HEADER)); @@ -256,7 +254,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null)); + () -> rest.get(BUCKET_NAME, keyPath, 0, 0)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java index 488474e30390..435af5a90336 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -20,7 +20,7 @@ import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; import static java.net.HttpURLConnection.HTTP_NO_CONTENT; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; @@ -33,7 +33,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.IOException; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; @@ -47,6 +46,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -76,18 +76,17 @@ public void init() throws OS3Exception, IOException { .setHeaders(headers) .build(); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create a key with object tags Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); Mockito.when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) .thenReturn("mockSignature"); - rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + put(rest, BUCKET_NAME, KEY_WITH_TAG, CONTENT); + rest.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); } @Test public void testDeleteTagging() throws IOException, OS3Exception { - Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, ""); + Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG); assertEquals(HTTP_NO_CONTENT, 
response.getStatus()); assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME) @@ -97,7 +96,7 @@ public void testDeleteTagging() throws IOException, OS3Exception { @Test public void testDeleteTaggingNoKeyFound() throws Exception { try { - rest.delete(BUCKET_NAME, "nonexistent", null, ""); + rest.delete(BUCKET_NAME, "nonexistent"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -108,7 +107,7 @@ public void testDeleteTaggingNoKeyFound() throws Exception { @Test public void testDeleteTaggingNoBucketFound() throws Exception { try { - rest.delete("nonexistent", "nonexistent", null, ""); + rest.delete("nonexistent", "nonexistent"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -135,7 +134,8 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception { ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); try { - endpoint.delete("fsoBucket", "dir/", null, ""); + endpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + endpoint.delete("fsoBucket", "dir/"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java index 1885e7d0cf6f..5595d6d5b12e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -19,7 +19,7 @@ import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_OK; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; @@ -28,7 +28,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; -import java.io.ByteArrayInputStream; import java.io.IOException; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; @@ -37,6 +36,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -68,17 +68,17 @@ public void init() throws OS3Exception, IOException { .setHeaders(headers) .build(); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create a key with object tags Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); - rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + put(rest, BUCKET_NAME, KEY_WITH_TAG, CONTENT); + + rest.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); } @Test public void testGetTagging() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, ""); + Response 
response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); assertEquals(HTTP_OK, response.getStatus()); S3Tagging s3Tagging = (S3Tagging) response.getEntity(); @@ -99,7 +99,7 @@ public void testGetTagging() throws IOException, OS3Exception { @Test public void testGetTaggingNoKeyFound() throws Exception { try { - rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, ""); + rest.get(BUCKET_NAME, "nonexistent", 0, 0); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -110,7 +110,7 @@ public void testGetTaggingNoKeyFound() throws Exception { @Test public void testGetTaggingNoBucketFound() throws Exception { try { - rest.get("nonexistent", "nonexistent", 0, null, 0, null, ""); + rest.get("nonexistent", "nonexistent", 0, 0); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java index d1651d6b59c0..b001526f3146 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -17,28 +17,21 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; -import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; -import static java.net.HttpURLConnection.HTTP_OK; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_XML; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; import java.util.HashMap; import java.util.Map; -import java.util.function.Supplier; import javax.ws.rs.core.HttpHeaders; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.ObjectStore; @@ -50,7 +43,6 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -67,7 +59,7 @@ public class TestObjectTaggingPut { private static final String KEY_NAME = "key=value/1"; @BeforeEach - void setup() throws IOException, OS3Exception { + void setup() throws Exception { 
OzoneConfiguration config = new OzoneConfiguration(); //Create client stub and object store stub. @@ -86,29 +78,19 @@ void setup() throws IOException, OS3Exception { .setHeaders(headers) .build(); - - ByteArrayInputStream body = - new ByteArrayInputStream("".getBytes(UTF_8)); - - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, null, body); + assertSucceeds(() -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, "")); } @Test - public void testPutObjectTaggingWithEmptyBody() throws Exception { - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, - null); - fail(); - } catch (OS3Exception ex) { - assertEquals(HTTP_BAD_REQUEST, ex.getHttpCode()); - assertEquals(MALFORMED_XML.getCode(), ex.getCode()); - } + public void testPutObjectTaggingWithEmptyBody() { + objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, "")); } @Test public void testPutValidObjectTagging() throws Exception { - assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, - "", null, twoTags()).getStatus()); + objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + assertSucceeds(() -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, twoTags())); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(2, keyDetails.getTags().size()); @@ -117,49 +99,26 @@ public void testPutValidObjectTagging() throws Exception { } @Test - public void testPutInvalidObjectTagging() throws Exception { - testInvalidObjectTagging(this::emptyBody, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); - testInvalidObjectTagging(this::invalidXmlStructure, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); - testInvalidObjectTagging(this::noTagSet, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); - testInvalidObjectTagging(this::emptyTags, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); - testInvalidObjectTagging(this::tagKeyNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); - testInvalidObjectTagging(this::tagValueNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); - } - - private void testInvalidObjectTagging(Supplier inputStream, - int expectedHttpCode, String expectedErrorCode) throws Exception { - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, - inputStream.get()); - fail("Expected an OS3Exception to be thrown"); - } catch (OS3Exception ex) { - assertEquals(expectedHttpCode, ex.getHttpCode()); - assertEquals(expectedErrorCode, ex.getCode()); - } + public void testPutInvalidObjectTagging() { + objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, emptyBody())); + assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, invalidXmlStructure())); + assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, noTagSet())); + assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, emptyTags())); + assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, tagKeyNotSpecified())); + assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, tagValueNotSpecified())); } @Test - public void testPutObjectTaggingNoKeyFound() throws Exception { - try { - objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1, - null, "", null, twoTags()); - fail("Expected an OS3Exception to be thrown"); - } catch 
(OS3Exception ex) { - assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); - assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); - } + public void testPutObjectTaggingNoKeyFound() { + objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + assertErrorResponse(NO_SUCH_KEY, () -> put(objectEndpoint, BUCKET_NAME, "nonexistent", tagValueNotSpecified())); } @Test - public void testPutObjectTaggingNoBucketFound() throws Exception { - try { - objectEndpoint.put("nonexistent", "nonexistent", 0, 1, - null, "", null, twoTags()); - fail("Expected an OS3Exception to be thrown"); - } catch (OS3Exception ex) { - assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); - assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); - } + public void testPutObjectTaggingNoBucketFound() { + objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + assertErrorResponse(NO_SUCH_BUCKET, () -> put(objectEndpoint, "nonexistent", "any", twoTags())); } @Test @@ -185,32 +144,24 @@ public void testPutObjectTaggingNotImplemented() throws Exception { doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); - try { - endpoint.put("fsoBucket", "dir/", 0, 1, null, "", - null, twoTags()); - fail("Expected an OS3Exception to be thrown"); - } catch (OS3Exception ex) { - assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); - assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); - } + endpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + assertErrorResponse(NOT_IMPLEMENTED, () -> put(endpoint, "fsoBucket", "dir/", twoTags())); } - private InputStream emptyBody() { + private String emptyBody() { return null; } - private InputStream invalidXmlStructure() { - String xml = + private String invalidXmlStructure() { + return "" + " " + " "; - - return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - private InputStream twoTags() { - String xml = + private String twoTags() { + return "" + " " + " " + @@ -223,29 +174,24 @@ private InputStream twoTags() { " " + " " + ""; - - return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - private InputStream noTagSet() { - String xml = + private String noTagSet() { + return "" + ""; - return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - private InputStream emptyTags() { - String xml = + private String emptyTags() { + return "" + " " + " " + ""; - - return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - public InputStream tagKeyNotSpecified() { - String xml = + public String tagKeyNotSpecified() { + return "" + " " + " " + @@ -253,12 +199,10 @@ public InputStream tagKeyNotSpecified() { " " + " " + ""; - - return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - public InputStream tagValueNotSpecified() { - String xml = + public String tagValueNotSpecified() { + return "" + " " + " " + @@ -266,8 +210,6 @@ public InputStream tagValueNotSpecified() { " " + " " + ""; - - return new ByteArrayInputStream(xml.getBytes(UTF_8)); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 4981069528a8..981eb264cbe7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -17,8 +17,9 @@ package org.apache.hadoop.ozone.s3.endpoint; -import 
static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; @@ -26,7 +27,6 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; @@ -36,7 +36,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -50,6 +49,8 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; @@ -83,52 +84,27 @@ public void setUp() throws Exception { @Test public void testPartUpload() throws Exception { - - Response response = rest.initializeMultipartUpload(OzoneConsts.S3_BUCKET, - OzoneConsts.KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(200, response.getStatus()); - + String uploadID = initiateUpload(OzoneConsts.KEY); String content = "Multipart Upload"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); - assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + Response response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test public void testPartUploadWithOverride() throws Exception { - - Response response = rest.initializeMultipartUpload(OzoneConsts.S3_BUCKET, - OzoneConsts.KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(200, response.getStatus()); + String uploadID = initiateUpload(OzoneConsts.KEY); String content = "Multipart Upload"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); - - assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + Response response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, 
content); String eTag = response.getHeaderString(OzoneConsts.ETAG); + assertNotNull(eTag); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; - response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -136,20 +112,14 @@ public void testPartUploadWithOverride() throws Exception { @Test public void testPartUploadWithIncorrectUploadID() throws Exception { - OS3Exception ex = assertThrows(OS3Exception.class, () -> { - String content = "Multipart Upload With Incorrect uploadID"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, - "random", null, null, body); - }); - assertEquals("NoSuchUpload", ex.getCode()); - assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + String content = "Multipart Upload With Incorrect uploadID"; + assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, + () -> put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, "random", content)); } @Test public void testPartUploadStreamContentLength() - throws IOException, OS3Exception { + throws Exception { HttpHeaders headers = mock(HttpHeaders.class); when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) .thenReturn("mockSignature"); @@ -168,39 +138,24 @@ public void testPartUploadStreamContentLength() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - Response response = objectEndpoint.initializeMultipartUpload( - OzoneConsts.S3_BUCKET, keyName); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - long contentLength = chunkedContent.length(); + String uploadID = initiateUpload(keyName); - objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, - uploadID, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + assertSucceeds(() -> put(objectEndpoint, OzoneConsts.S3_BUCKET, keyName, 1, uploadID, chunkedContent)); assertContentLength(uploadID, keyName, 15); } @Test - public void testPartUploadContentLength() throws IOException, OS3Exception { + public void testPartUploadContentLength() throws Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. 
String keyName = UUID.randomUUID().toString(); - Response response = rest.initializeMultipartUpload(OzoneConsts.S3_BUCKET, - keyName); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); + String uploadID = initiateUpload(keyName); String content = "Multipart Upload"; - long contentLength = content.length(); - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.put(OzoneConsts.S3_BUCKET, keyName, - contentLength, 1, uploadID, null, null, body); + assertSucceeds(() -> put(rest, OzoneConsts.S3_BUCKET, keyName, 1, uploadID, content)); + assertContentLength(uploadID, keyName, content.length()); } @@ -216,21 +171,12 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( "STANDARD"); - ObjectEndpoint objectEndpoint = EndpointBuilder.newObjectEndpointBuilder() + ObjectEndpoint objectEndpoint = spy(EndpointBuilder.newObjectEndpointBuilder() .setHeaders(headers) .setClient(clientStub) - .build(); - - objectEndpoint = spy(objectEndpoint); + .build()); - Response response = objectEndpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, - OzoneConsts.KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(200, response.getStatus()); + String uploadID = initiateUpload(OzoneConsts.KEY); MessageDigest messageDigest = mock(MessageDigest.class); try (MockedStatic mocked = mockStatic(IOUtils.class)) { @@ -241,17 +187,8 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException .thenThrow(IOException.class); String content = "Multipart Upload"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - try { - objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); - fail("Should throw IOException"); - } catch (IOException ignored) { - // Verify that the message digest is reset so that the instance can be reused for the - // next request in the same thread - verify(messageDigest, times(1)).reset(); - } + assertThrows(IOException.class, () -> put(objectEndpoint, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content)); + verify(messageDigest, times(1)).reset(); } } @@ -264,4 +201,18 @@ private void assertContentLength(String uploadID, String key, assertEquals(contentLength, parts.getPartInfoList().get(0).getSize()); } + + private String initiateUpload(String key) throws IOException, OS3Exception { + return initiateUpload(rest, OzoneConsts.S3_BUCKET, key); + } + + static String initiateUpload(ObjectEndpoint subject, String bucket, String key) throws IOException, OS3Exception { + try (Response response = subject.initializeMultipartUpload(bucket, key)) { + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + assertEquals(HttpStatus.SC_OK, response.getStatus()); + return multipartUploadInitiateResponse.getUploadID(); + } + } } diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 736660073d57..2cab6a6797fb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -17,19 +17,17 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static org.apache.hadoop.ozone.s3.endpoint.TestPartUpload.initiateUpload; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; -import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -37,7 +35,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -81,65 +79,35 @@ public void testEnableStream() { @Test public void testPartUpload() throws Exception { - - Response response = rest.initializeMultipartUpload(S3BUCKET, S3KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(200, response.getStatus()); + String uploadID = initiateUpload(rest, S3BUCKET, S3KEY); String content = "Multipart Upload"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + Response response = put(rest, S3BUCKET, S3KEY, 1, uploadID, content); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - } @Test public void testPartUploadWithOverride() throws Exception { - - Response response = rest.initializeMultipartUpload(S3BUCKET, S3KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(200, response.getStatus()); + String uploadID = initiateUpload(rest, S3BUCKET, S3KEY); String content = "Multipart Upload"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, 
body); - - assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + Response response = put(rest, S3BUCKET, S3KEY, 1, uploadID, content); String eTag = response.getHeaderString(OzoneConsts.ETAG); + assertNotNull(eTag); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; - response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + response = put(rest, S3BUCKET, S3KEY, 1, uploadID, content); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } @Test - public void testPartUploadWithIncorrectUploadID() throws Exception { - OS3Exception ex = assertThrows(OS3Exception.class, () -> { - String content = "Multipart Upload With Incorrect uploadID"; - ByteArrayInputStream body = - new ByteArrayInputStream(content.getBytes(UTF_8)); - rest.put(S3BUCKET, S3KEY, content.length(), 1, - "random", null, null, body); - }); - assertEquals("NoSuchUpload", ex.getCode()); - assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + public void testPartUploadWithIncorrectUploadID() { + String content = "Multipart Upload With Incorrect uploadID"; + assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> put(rest, S3BUCKET, S3KEY, 1, "random", content)); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 8e9eef2d974d..043e891be751 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -18,7 +18,8 @@ package org.apache.hadoop.ozone.s3.endpoint; import static java.net.HttpURLConnection.HTTP_FORBIDDEN; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -34,9 +35,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -53,6 +52,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; @@ -259,8 +259,9 @@ public void testGetKey() throws IOException { .setConfig(conf) .build(); + objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "marker"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, null, 1000, "marker", null)); + "bucketName", "keyPath", 0, 1000)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -276,10 +277,7 @@ public void testPutKey() throws IOException { .setConfig(conf) .build(); - OS3Exception e = assertThrows(OS3Exception.class, () 
-> objectEndpoint.put( - "bucketName", "keyPath", 1024, 0, null, null, null, - new ByteArrayInputStream(new byte[]{}))); - assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> put(objectEndpoint, "bucketName", "keyPath", "")); } @Test @@ -295,7 +293,7 @@ public void testDeleteKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", null, null)); + objectEndpoint.delete("bucketName", "keyPath")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -337,20 +335,9 @@ public void testObjectTagging() throws Exception { " " + ""; - InputStream tagInput = new ByteArrayInputStream(xml.getBytes(UTF_8)); - - OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.put("bucketName", "keyPath", 0, 1, - null, "", null, tagInput)); - assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); - - e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", "", "")); - assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); - - e = assertThrows(OS3Exception.class, () -> - objectEndpoint.get("bucketName", "keyPath", 0, null, - 0, null, "")); - assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> put(objectEndpoint, "bucketName", "keyPath", xml)); + assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> objectEndpoint.delete("bucketName", "keyPath")); + assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> objectEndpoint.get("bucketName", "keyPath", 0, 0)); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index dbe21601dbd3..4df8b3270d37 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -19,6 +19,8 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; @@ -27,12 +29,10 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.OutputStream; import java.util.HashMap; import java.util.Map; import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -89,12 +89,7 @@ public void testEnableStream() { @Test public void testUpload() throws Exception { - byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8); - ByteArrayInputStream body = - new ByteArrayInputStream(keyContent); - Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, body); - - assertEquals(200, response.getStatus()); + assertSucceeds(() -> put(rest, S3BUCKET, S3KEY, 
S3_COPY_EXISTING_KEY_CONTENT)); } @Test @@ -126,9 +121,7 @@ public void testUploadWithCopy() throws Exception { .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); rest.setHeaders(headers); - Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, null); - - assertEquals(200, response.getStatus()); + assertSucceeds(() -> put(rest, S3BUCKET, S3KEY, null)); final long newDataSize = bucket.getKey(S3KEY).getDataSize(); assertEquals(dataSize, newDataSize); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index bfc471e22d5d..5e997e81d941 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -19,7 +19,9 @@ import static java.net.HttpURLConnection.HTTP_CONFLICT; import static java.net.HttpURLConnection.HTTP_OK; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; @@ -32,7 +34,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; @@ -62,6 +63,16 @@ */ public class TestS3GatewayMetrics { + private static final String PUT_TAGGING = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + ""; + private String bucketName = OzoneConsts.BUCKET; private String keyName = OzoneConsts.KEY; private OzoneClient clientStub; @@ -141,7 +152,7 @@ public void testGetBucketSuccess() throws Exception { } @Test - public void testGetBucketFailure() throws Exception { + public void testGetBucketFailure() { long oriMetric = metrics.getGetBucketFailure(); // Searching for a bucket that does not exist @@ -154,7 +165,7 @@ public void testGetBucketFailure() throws Exception { } @Test - public void testCreateBucketSuccess() throws Exception { + public void testCreateBucketSuccess() { long oriMetric = metrics.getCreateBucketSuccess(); assertDoesNotThrow(() -> bucketEndpoint.put("newBucket", null)); @@ -163,7 +174,7 @@ public void testCreateBucketSuccess() throws Exception { } @Test - public void testCreateBucketFailure() throws Exception { + public void testCreateBucketFailure() { long oriMetric = metrics.getCreateBucketFailure(); // Creating an error by trying to create a bucket that already exists @@ -214,7 +225,7 @@ public void testGetAclSuccess() throws Exception { } @Test - public void testGetAclFailure() throws Exception { + public void testGetAclFailure() { long oriMetric = metrics.getGetAclFailure(); bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, ACL_MARKER); @@ -288,28 +299,18 @@ public void testHeadKeyFailure() throws Exception { @Test public void testCreateKeySuccess() throws Exception { - long oriMetric = metrics.getCreateKeySuccess(); - // Create an input stream - ByteArrayInputStream body = - new 
ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - // Create the file - keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, null, null, body); - body.close(); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); long curMetric = metrics.getCreateKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testCreateKeyFailure() throws Exception { + public void testCreateKeyFailure() { long oriMetric = metrics.getCreateKeyFailure(); // Create the file in a bucket that does not exist - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - "unknownBucket", keyName, CONTENT.length(), 1, null, null, - null, null)); - assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); + assertErrorResponse(S3ErrorTable.NO_SUCH_BUCKET, () -> put(keyEndpoint, "unknownBucket", keyName, CONTENT)); long curMetric = metrics.getCreateKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -319,17 +320,15 @@ public void testDeleteKeySuccess() throws Exception { long oriMetric = metrics.getDeleteKeySuccess(); bucket.createKey(keyName, 0).close(); - keyEndpoint.delete(bucketName, keyName, null, null); + keyEndpoint.delete(bucketName, keyName); long curMetric = metrics.getDeleteKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testDeleteKeyFailure() throws Exception { + public void testDeleteKeyFailure() { long oriMetric = metrics.getDeleteKeyFailure(); - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - "unknownBucket", keyName, null, null)); - assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); + assertErrorResponse(S3ErrorTable.NO_SUCH_BUCKET, () -> keyEndpoint.delete("unknownBucket", keyName)); long curMetric = metrics.getDeleteKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -338,14 +337,10 @@ public void testDeleteKeyFailure() throws Exception { public void testGetKeySuccess() throws Exception { long oriMetric = metrics.getGetKeySuccess(); - // Create an input stream - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file - keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, null, null, body); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); // GET the key from the bucket - Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null); + Response response = keyEndpoint.get(bucketName, keyName, 0, 0); StreamingOutput stream = (StreamingOutput) response.getEntity(); stream.write(new ByteArrayOutputStream()); long curMetric = metrics.getGetKeySuccess(); @@ -353,13 +348,11 @@ public void testGetKeySuccess() throws Exception { } @Test - public void testGetKeyFailure() throws Exception { + public void testGetKeyFailure() { long oriMetric = metrics.getGetKeyFailure(); // Fetching a non-existent key - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, "unknownKey", 0, null, 0, null, null)); - assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode()); + assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> keyEndpoint.get(bucketName, "unknownKey", 0, 0)); long curMetric = metrics.getGetKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -374,11 +367,10 @@ public void testInitMultiPartUploadSuccess() throws Exception { } @Test - public void testInitMultiPartUploadFailure() throws Exception { + public void testInitMultiPartUploadFailure() { long oriMetric = metrics.getInitMultiPartUploadFailure(); - OS3Exception e = 
assertThrows(OS3Exception.class, () -> keyEndpoint - .initializeMultipartUpload("unknownBucket", keyName)); - assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); + assertErrorResponse(S3ErrorTable.NO_SUCH_BUCKET, + () -> keyEndpoint.initializeMultipartUpload("unknownBucket", keyName)); long curMetric = metrics.getInitMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -392,101 +384,88 @@ public void testAbortMultiPartUploadSuccess() throws Exception { long oriMetric = metrics.getAbortMultiPartUploadSuccess(); // Abort the Upload Successfully by deleting the key using the Upload-Id - keyEndpoint.delete(bucketName, keyName, uploadID, null); + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + keyEndpoint.delete(bucketName, keyName); long curMetric = metrics.getAbortMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testAbortMultiPartUploadFailure() throws Exception { + public void testAbortMultiPartUploadFailure() { long oriMetric = metrics.getAbortMultiPartUploadFailure(); // Fail the Abort Method by providing wrong uploadID - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - bucketName, keyName, "wrongId", null)); - assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "wrongId"); + assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> keyEndpoint.delete(bucketName, keyName)); long curMetric = metrics.getAbortMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); } @Test public void testCompleteMultiPartUploadSuccess() throws Exception { - - // Initiate the Upload and fetch the upload ID String uploadID = initiateMultipartUpload(bucketName, keyName); - long oriMetric = metrics.getCompleteMultiPartUploadSuccess(); - // complete multipart upload - CompleteMultipartUploadRequest completeMultipartUploadRequest = new - CompleteMultipartUploadRequest(); - Response response = keyEndpoint.completeMultipartUpload(bucketName, keyName, - uploadID, completeMultipartUploadRequest); + + CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(); + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + assertSucceeds(() -> keyEndpoint.completeMultipartUpload(bucketName, keyName, completeMultipartUploadRequest)); + long curMetric = metrics.getCompleteMultiPartUploadSuccess(); - assertEquals(200, response.getStatus()); assertEquals(1L, curMetric - oriMetric); } @Test - public void testCompleteMultiPartUploadFailure() throws Exception { + public void testCompleteMultiPartUploadFailure() { long oriMetric = metrics.getCompleteMultiPartUploadFailure(); - CompleteMultipartUploadRequest completeMultipartUploadRequestNew = new - CompleteMultipartUploadRequest(); - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint - .completeMultipartUpload(bucketName, "key2", "random", - completeMultipartUploadRequestNew)); - assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "random"); + CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(); + assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, + () -> keyEndpoint.completeMultipartUpload(bucketName, "key2", completeMultipartUploadRequest)); long curMetric = metrics.getCompleteMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); } 
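    // Illustrative sketch, not part of the patch: the multipart metric tests in this class
    // all follow the same shape after the refactoring. Instead of passing uploadId as a
    // method argument, a test stages it through queryParamsForTest() and then calls the
    // endpoint via the EndpointTestUtils helpers (put, assertSucceeds, assertErrorResponse)
    // used throughout this series. The test name below is hypothetical; every field and
    // helper it uses appears elsewhere in this class or in this patch.
    @Test
    public void exampleMultipartMetricPattern() throws Exception {
      long before = metrics.getCreateMultipartKeySuccess();
      // Initiate the upload, then stage the uploadId query parameter the endpoint will look up.
      String uploadID = initiateMultipartUpload(bucketName, keyName);
      keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID);
      // put(...) is the EndpointTestUtils helper used by the surrounding tests;
      // its definition lives elsewhere in this patch series.
      assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT));
      assertEquals(1L, metrics.getCreateMultipartKeySuccess() - before);
    }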
@Test public void testCreateMultipartKeySuccess() throws Exception { - - // Initiate the Upload and fetch the upload ID String uploadID = initiateMultipartUpload(bucketName, keyName); - long oriMetric = metrics.getCreateMultipartKeySuccess(); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - keyEndpoint.put(bucketName, keyName, CONTENT.length(), - 1, uploadID, null, null, body); + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); long curMetric = metrics.getCreateMultipartKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testCreateMultipartKeyFailure() throws Exception { + public void testCreateMultipartKeyFailure() { long oriMetric = metrics.getCreateMultipartKeyFailure(); - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, "randomId", null, null, null)); - assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); + + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "random"); + assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> put(keyEndpoint, bucketName, keyName, CONTENT)); + long curMetric = metrics.getCreateMultipartKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @Test public void testListPartsSuccess() throws Exception { - long oriMetric = metrics.getListPartsSuccess(); - // Initiate the Upload and fetch the upload ID String uploadID = initiateMultipartUpload(bucketName, keyName); - // Listing out the parts by providing the uploadID - keyEndpoint.get(bucketName, keyName, 0, - uploadID, 3, null, null); + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + keyEndpoint.get(bucketName, keyName, 0, 3); + long curMetric = metrics.getListPartsSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testListPartsFailure() throws Exception { - + public void testListPartsFailure() { long oriMetric = metrics.getListPartsFailure(); - // Listing out the parts by providing the uploadID after aborting - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, keyName, 0, "wrong_id", 3, null, null)); - assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); + + keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "wrong_id"); + assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> keyEndpoint.get(bucketName, keyName, 0, 3)); + long curMetric = metrics.getListPartsFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -502,28 +481,24 @@ public void testCopyObject() throws Exception { // Test for Success of CopyObjectSuccess Metric long oriMetric = metrics.getCopyObjectSuccess(); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - keyEndpoint.put(bucketName, keyName, - CONTENT.length(), 1, null, null, null, body); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( bucketName + "/" + urlEncode(keyName)); - keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1, - null, null, null, body); + assertSucceeds(() -> put(keyEndpoint, destBucket, destKey, CONTENT)); + long curMetric = metrics.getCopyObjectSuccess(); assertEquals(1L, curMetric - oriMetric); // Test for Failure of CopyObjectFailure Metric oriMetric = metrics.getCopyObjectFailure(); - // source and dest same + 
when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, null, null, body), - "Test for CopyObjectMetric failed"); + OS3Exception e = assertErrorResponse(S3ErrorTable.INVALID_REQUEST, + () -> put(keyEndpoint, bucketName, keyName, CONTENT)); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); curMetric = metrics.getCopyObjectFailure(); assertEquals(1L, curMetric - oriMetric); @@ -532,31 +507,22 @@ public void testCopyObject() throws Exception { @Test public void testPutObjectTaggingSuccess() throws Exception { long oriMetric = metrics.getPutObjectTaggingSuccess(); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - // Create the file - keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, null, null, body); - body.close(); - - // Put object tagging - keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, PUT_TAGGING)); long curMetric = metrics.getPutObjectTaggingSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testPutObjectTaggingFailure() throws Exception { + public void testPutObjectTaggingFailure() { long oriMetric = metrics.getPutObjectTaggingFailure(); // Put object tagging for nonexistent key - OS3Exception ex = assertThrows(OS3Exception.class, () -> - keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "", - null, getPutTaggingBody()) - ); - assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); + assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> put(keyEndpoint, bucketName, "nonexistent", PUT_TAGGING)); long curMetric = metrics.getPutObjectTaggingFailure(); assertEquals(1L, curMetric - oriMetric); @@ -565,34 +531,26 @@ null, getPutTaggingBody()) @Test public void testGetObjectTaggingSuccess() throws Exception { long oriMetric = metrics.getGetObjectTaggingSuccess(); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); - // Create the file - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, null, null, body); - body.close(); - - // Put object tagging - keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, PUT_TAGGING)); // Get object tagging - keyEndpoint.get(bucketName, keyName, 0, - null, 0, null, ""); + keyEndpoint.get(bucketName, keyName, 0, 0); long curMetric = metrics.getGetObjectTaggingSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testGetObjectTaggingFailure() throws Exception { + public void testGetObjectTaggingFailure() { long oriMetric = metrics.getGetObjectTaggingFailure(); // Get object tagging for nonexistent key - OS3Exception ex = assertThrows(OS3Exception.class, () -> - keyEndpoint.get(bucketName, "nonexistent", 0, null, - 0, null, "")); - assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); + assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> keyEndpoint.get(bucketName, 
"nonexistent", 0, 0)); + long curMetric = metrics.getGetObjectTaggingFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -600,32 +558,25 @@ public void testGetObjectTaggingFailure() throws Exception { @Test public void testDeleteObjectTaggingSuccess() throws Exception { long oriMetric = metrics.getDeleteObjectTaggingSuccess(); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); - // Create the file - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, null, null, body); - body.close(); - - // Put object tagging - keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); + assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, PUT_TAGGING)); // Delete object tagging - keyEndpoint.delete(bucketName, keyName, null, ""); + keyEndpoint.delete(bucketName, keyName); long curMetric = metrics.getDeleteObjectTaggingSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testDeleteObjectTaggingFailure() throws Exception { + public void testDeleteObjectTaggingFailure() { long oriMetric = metrics.getDeleteObjectTaggingFailure(); - // Delete object tagging for nonexistent key - OS3Exception ex = assertThrows(OS3Exception.class, () -> - keyEndpoint.delete(bucketName, "nonexistent", null, "")); - assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); + assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> keyEndpoint.delete(bucketName, "nonexistent")); + long curMetric = metrics.getDeleteObjectTaggingFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -645,18 +596,4 @@ private String initiateMultipartUpload(String bktName, String key) } return "Invalid-Id"; } - - private static InputStream getPutTaggingBody() { - String xml = - "" + - " " + - " " + - " tag1" + - " val1" + - " " + - " " + - ""; - - return new ByteArrayInputStream(xml.getBytes(UTF_8)); - } } From 2cb854494806bff2b310fbd8df982fcabca69a54 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Wed, 7 Jan 2026 06:52:16 +0100 Subject: [PATCH 6/7] Revert "HDDS-14209. Look up query params in ObjectEndpoint" This reverts commit ea8c924a62b9e81c67a8d30ebece6cc1a2426770. 
--- .../ozone/s3/endpoint/ObjectEndpoint.java | 54 ++-- .../ozone/s3/endpoint/EndpointTestUtils.java | 10 +- .../s3/endpoint/TestAbortMultipartUpload.java | 8 +- .../ozone/s3/endpoint/TestListParts.java | 48 ++-- .../endpoint/TestMultipartUploadComplete.java | 12 +- .../endpoint/TestMultipartUploadWithCopy.java | 39 ++- .../ozone/s3/endpoint/TestObjectDelete.java | 2 +- .../ozone/s3/endpoint/TestObjectGet.java | 28 +- .../s3/endpoint/TestObjectTaggingDelete.java | 18 +- .../s3/endpoint/TestObjectTaggingGet.java | 16 +- .../s3/endpoint/TestObjectTaggingPut.java | 136 +++++++--- .../ozone/s3/endpoint/TestPartUpload.java | 131 +++++++--- .../s3/endpoint/TestPartUploadWithStream.java | 58 ++++- .../s3/endpoint/TestPermissionCheck.java | 35 ++- .../s3/endpoint/TestUploadWithStream.java | 15 +- .../s3/metrics/TestS3GatewayMetrics.java | 239 +++++++++++------- 16 files changed, 540 insertions(+), 309 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index fdef787c7830..c6a2b6539098 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -168,7 +168,7 @@ public class ObjectEndpoint extends EndpointBase { /*FOR the feature Overriding Response Header https://docs.aws.amazon.com/de_de/AmazonS3/latest/API/API_GetObject.html */ - private final Map overrideQueryParameter; + private Map overrideQueryParameter; private int bufferSize; private int chunkSize; private boolean datastreamEnabled; @@ -209,18 +209,17 @@ public void init() { * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for * more details. */ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @PUT public Response put( @PathParam(BUCKET) String bucketName, @PathParam(PATH) String keyPath, @HeaderParam(HttpHeaders.CONTENT_LENGTH) long length, - final InputStream body - ) throws IOException, OS3Exception { - final String aclMarker = queryParams().get(QueryParams.ACL); - final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0); - final String taggingMarker = queryParams().get(QueryParams.TAGGING); - final String uploadID = queryParams().get(QueryParams.UPLOAD_ID); + @QueryParam(QueryParams.PART_NUMBER) int partNumber, + @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, + @QueryParam(QueryParams.TAGGING) String taggingMarker, + @QueryParam(QueryParams.ACL) String aclMarker, + final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; @@ -404,26 +403,17 @@ public Response put( * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html * for more details. 
*/ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @GET - public Response get( - @PathParam(BUCKET) String bucketName, - @PathParam(PATH) String keyPath - ) throws IOException, OS3Exception { - final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0); - final int maxParts = queryParams().getInt(QueryParams.MAX_PARTS, 1000); - return get(bucketName, keyPath, partNumber, maxParts); - } - public Response get( @PathParam(BUCKET) String bucketName, @PathParam(PATH) String keyPath, @QueryParam(QueryParams.PART_NUMBER) int partNumber, - @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts - ) throws IOException, OS3Exception { - final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); - final String partNumberMarker = queryParams().get(QueryParams.PART_NUMBER_MARKER); - final String taggingMarker = queryParams().get(QueryParams.TAGGING); + @QueryParam(QueryParams.UPLOAD_ID) String uploadId, + @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts, + @QueryParam(QueryParams.PART_NUMBER_MARKER) String partNumberMarker, + @QueryParam(QueryParams.TAGGING) String taggingMarker) + throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; PerformanceStringBuilder perf = new PerformanceStringBuilder(); @@ -730,11 +720,10 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, @SuppressWarnings("emptyblock") public Response delete( @PathParam(BUCKET) String bucketName, - @PathParam(PATH) String keyPath - ) throws IOException, OS3Exception { - final String taggingMarker = queryParams().get(QueryParams.TAGGING); - final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); - + @PathParam(PATH) String keyPath, + @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadId, + @QueryParam(QueryParams.TAGGING) String taggingMarker) throws + IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; @@ -809,7 +798,8 @@ public Response delete( public Response initializeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key - ) throws IOException, OS3Exception { + ) + throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD; @@ -873,9 +863,9 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, public Response completeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key, - CompleteMultipartUploadRequest multipartUploadRequest - ) throws IOException, OS3Exception { - final String uploadID = queryParams().get(QueryParams.UPLOAD_ID, ""); + @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, + CompleteMultipartUploadRequest multipartUploadRequest) + throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD; OzoneVolume volume = getVolume(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java index 2f67e4fcb18e..c6eff8066cd5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java @@ -25,7 +25,6 
@@ import java.io.IOException; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.http.HttpStatus; import org.apache.ratis.util.function.CheckedSupplier; @@ -60,17 +59,12 @@ public static Response put( String uploadID, String content ) throws IOException, OS3Exception { - if (uploadID != null) { - subject.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); - } - subject.queryParamsForTest().setInt(S3Consts.QueryParams.PART_NUMBER, partNumber); - if (content == null) { - return subject.put(bucket, key, 0, null); + return subject.put(bucket, key, 0, partNumber, uploadID, null, null, null); } else { final long length = content.length(); try (ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8))) { - return subject.put(bucket, key, length, body); + return subject.put(bucket, key, length, partNumber, uploadID, null, null, body); } } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 7ec978c95635..9c46a718508f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.Test; /** @@ -64,16 +63,15 @@ public void testAbortMultipartUpload() throws Exception { assertNotNull(multipartUploadInitiateResponse.getUploadID()); String uploadID = multipartUploadInitiateResponse.getUploadID(); + // Abort multipart upload - rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); - response = rest.delete(bucket, key); + response = rest.delete(bucket, key, uploadID, null); assertEquals(204, response.getStatus()); // test with unknown upload Id. 
try { - rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "random"); - rest.delete(bucket, key); + rest.delete(bucket, key, "random", null); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index e573c8393582..30be715b5305 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -27,6 +27,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.OzoneConsts; @@ -34,7 +35,6 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -44,6 +44,7 @@ public class TestListParts { private ObjectEndpoint rest; + private String uploadID; @BeforeEach public void setUp() throws Exception { @@ -66,21 +67,34 @@ public void setUp() throws Exception { OzoneConsts.KEY); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - assertNotNull(uploadID); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + uploadID = multipartUploadInitiateResponse.getUploadID(); + assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - for (int i = 1; i <= 3; i++) { - response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, i, uploadID, content); - assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - } + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, null, null, body); + + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + + response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 2, uploadID, null, null, body); + + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + + response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 3, uploadID, null, null, body); + + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test public void testListParts() throws Exception { - rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 3); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, + uploadID, 3, "0", null); ListPartsResponse listPartsResponse = (ListPartsResponse) 
response.getEntity(); @@ -92,8 +106,8 @@ public void testListParts() throws Exception { @Test public void testListPartsContinuation() throws Exception { - rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, + uploadID, 2, "0", null); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -101,9 +115,8 @@ public void testListPartsContinuation() throws Exception { assertEquals(2, listPartsResponse.getPartList().size()); // Continue - rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, - Integer.toString(listPartsResponse.getNextPartNumberMarker())); - response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); + response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2, + Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); listPartsResponse = (ListPartsResponse) response.getEntity(); assertFalse(listPartsResponse.getTruncated()); @@ -113,10 +126,9 @@ public void testListPartsContinuation() throws Exception { @Test public void testListPartsWithUnknownUploadID() throws Exception { - rest.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); - rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "no-such-upload"); try { - rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); + rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, + uploadID, 2, "0", null); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index bf0d5654ab6a..fde336f48079 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; @@ -27,6 +27,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -44,7 +45,6 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -104,7 +104,10 @@ private String initiateMultipartUpload(String key, Map metadata) private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { - Response response = put(rest, OzoneConsts.S3_BUCKET, key, partNumber, uploadID, content); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + Response 
response = rest.put(OzoneConsts.S3_BUCKET, key, content.length(), + partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -117,9 +120,8 @@ private Part uploadPart(String key, String uploadID, int partNumber, String private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { - rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = rest.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - completeMultipartUploadRequest); + uploadID, completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 5e9f070dbe54..702c32d1abab 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -18,9 +18,6 @@ package org.apache.hadoop.ozone.s3.endpoint; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; -import static org.apache.hadoop.ozone.s3.endpoint.TestPartUpload.initiateUpload; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_MODIFIED_SINCE; @@ -33,6 +30,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -55,9 +53,7 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -316,14 +312,27 @@ public void testMultipartTSHeaders() throws Exception { private String initiateMultipartUpload(String key) throws IOException, OS3Exception { setHeaders(); - return initiateUpload(endpoint, OzoneConsts.S3_BUCKET, key); + Response response = endpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + key); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); + + return uploadID; + } private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { setHeaders(); - Response response = put(endpoint, OzoneConsts.S3_BUCKET, key, partNumber, uploadID, content); - assertEquals(HttpStatus.SC_OK, response.getStatus()); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + Response response = 
endpoint.put(OzoneConsts.S3_BUCKET, key, content.length(), + partNumber, uploadID, null, null, body); + assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); part.setETag(response.getHeaderString(OzoneConsts.ETAG)); @@ -365,8 +374,10 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, } setHeaders(additionalHeaders); - Response response = put(endpoint, OzoneConsts.S3_BUCKET, key, partNumber, uploadID, ""); - assertEquals(HttpStatus.SC_OK, response.getStatus()); + ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); + Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, + uploadID, null, null, body); + assertEquals(200, response.getStatus()); CopyPartResult result = (CopyPartResult) response.getEntity(); assertNotNull(result.getETag()); @@ -380,18 +391,19 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, @Test public void testUploadWithRangeCopyContentLength() - throws Exception { + throws IOException, OS3Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. String uploadID = initiateMultipartUpload(KEY); + ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); Map additionalHeaders = new HashMap<>(); additionalHeaders.put(COPY_SOURCE_HEADER, OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - assertSucceeds(() -> put(endpoint, OzoneConsts.S3_BUCKET, KEY, 1, uploadID, "")); + endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); OzoneMultipartUploadPartListParts parts = client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); @@ -403,9 +415,8 @@ private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { setHeaders(); - endpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - completeMultipartUploadRequest); + uploadID, completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 3b382c9bc4f1..3974cfcf9666 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -47,7 +47,7 @@ public void delete() throws IOException, OS3Exception { .build(); //WHEN - rest.delete("b1", "key1"); + rest.delete("b1", "key1", null, null); //THEN assertFalse(bucket.listKeys("").hasNext(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index bb9703d97c03..a9fd7da4200e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -19,7 +19,6 @@ import static 
java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_COUNT_HEADER; @@ -32,6 +31,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.time.format.DateTimeFormatter; import javax.ws.rs.core.HttpHeaders; @@ -88,17 +88,19 @@ public void init() throws OS3Exception, IOException { .setHeaders(headers) .build(); - put(rest, BUCKET_NAME, KEY_NAME, CONTENT); - + ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, null, null, body); // Create a key with object tags when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); - put(rest, BUCKET_NAME, KEY_WITH_TAG, CONTENT); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, null, body); } @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -120,7 +122,7 @@ public void get() throws IOException, OS3Exception { @Test public void getKeyWithTag() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -142,7 +144,7 @@ public void getKeyWithTag() throws IOException, OS3Exception { public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -172,7 +174,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { CONTENT_DISPOSITION2); queryParameter.putSingle("response-content-encoding", CONTENT_ENCODING2); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -193,13 +195,13 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( @@ -212,7 +214,7 @@ public void getRangeHeader() throws IOException, OS3Exception { @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - 
response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -220,7 +222,7 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); assertNull(response.getHeaderString(TAG_COUNT_HEADER)); @@ -254,7 +256,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(BUCKET_NAME, keyPath, 0, 0)); + () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java index 435af5a90336..488474e30390 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -20,7 +20,7 @@ import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; import static java.net.HttpURLConnection.HTTP_NO_CONTENT; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; @@ -33,6 +33,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.IOException; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; @@ -46,7 +47,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -76,17 +76,18 @@ public void init() throws OS3Exception, IOException { .setHeaders(headers) .build(); + ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create a key with object tags Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); Mockito.when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) .thenReturn("mockSignature"); - put(rest, BUCKET_NAME, KEY_WITH_TAG, CONTENT); - rest.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, null, body); } @Test public void testDeleteTagging() throws IOException, OS3Exception { - Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG); + Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, ""); assertEquals(HTTP_NO_CONTENT, 
response.getStatus()); assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME) @@ -96,7 +97,7 @@ public void testDeleteTagging() throws IOException, OS3Exception { @Test public void testDeleteTaggingNoKeyFound() throws Exception { try { - rest.delete(BUCKET_NAME, "nonexistent"); + rest.delete(BUCKET_NAME, "nonexistent", null, ""); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -107,7 +108,7 @@ public void testDeleteTaggingNoKeyFound() throws Exception { @Test public void testDeleteTaggingNoBucketFound() throws Exception { try { - rest.delete("nonexistent", "nonexistent"); + rest.delete("nonexistent", "nonexistent", null, ""); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -134,8 +135,7 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception { ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); try { - endpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - endpoint.delete("fsoBucket", "dir/"); + endpoint.delete("fsoBucket", "dir/", null, ""); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java index 5595d6d5b12e..1885e7d0cf6f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -19,7 +19,7 @@ import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; @@ -28,6 +28,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; +import java.io.ByteArrayInputStream; import java.io.IOException; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; @@ -36,7 +37,6 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -68,17 +68,17 @@ public void init() throws OS3Exception, IOException { .setHeaders(headers) .build(); + ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create a key with object tags Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); - put(rest, BUCKET_NAME, KEY_WITH_TAG, CONTENT); - - rest.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, null, body); } @Test public void testGetTagging() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); + Response response = 
rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, ""); assertEquals(HTTP_OK, response.getStatus()); S3Tagging s3Tagging = (S3Tagging) response.getEntity(); @@ -99,7 +99,7 @@ public void testGetTagging() throws IOException, OS3Exception { @Test public void testGetTaggingNoKeyFound() throws Exception { try { - rest.get(BUCKET_NAME, "nonexistent", 0, 0); + rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, ""); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -110,7 +110,7 @@ public void testGetTaggingNoKeyFound() throws Exception { @Test public void testGetTaggingNoBucketFound() throws Exception { try { - rest.get("nonexistent", "nonexistent", 0, 0); + rest.get("nonexistent", "nonexistent", 0, null, 0, null, ""); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java index b001526f3146..d1651d6b59c0 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -17,21 +17,28 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_XML; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; import java.util.HashMap; import java.util.Map; +import java.util.function.Supplier; import javax.ws.rs.core.HttpHeaders; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.ObjectStore; @@ -43,6 +50,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -59,7 +67,7 @@ public class TestObjectTaggingPut { private static final String KEY_NAME = "key=value/1"; @BeforeEach - void setup() throws Exception { + void setup() throws IOException, OS3Exception { 
OzoneConfiguration config = new OzoneConfiguration(); //Create client stub and object store stub. @@ -78,19 +86,29 @@ void setup() throws Exception { .setHeaders(headers) .build(); - assertSucceeds(() -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, "")); + + ByteArrayInputStream body = + new ByteArrayInputStream("".getBytes(UTF_8)); + + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, null, body); } @Test - public void testPutObjectTaggingWithEmptyBody() { - objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, "")); + public void testPutObjectTaggingWithEmptyBody() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + null); + fail(); + } catch (OS3Exception ex) { + assertEquals(HTTP_BAD_REQUEST, ex.getHttpCode()); + assertEquals(MALFORMED_XML.getCode(), ex.getCode()); + } } @Test public void testPutValidObjectTagging() throws Exception { - objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - assertSucceeds(() -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, twoTags())); + assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, + "", null, twoTags()).getStatus()); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(2, keyDetails.getTags().size()); @@ -99,26 +117,49 @@ public void testPutValidObjectTagging() throws Exception { } @Test - public void testPutInvalidObjectTagging() { - objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, emptyBody())); - assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, invalidXmlStructure())); - assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, noTagSet())); - assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, emptyTags())); - assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, tagKeyNotSpecified())); - assertErrorResponse(MALFORMED_XML, () -> put(objectEndpoint, BUCKET_NAME, KEY_NAME, tagValueNotSpecified())); + public void testPutInvalidObjectTagging() throws Exception { + testInvalidObjectTagging(this::emptyBody, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::invalidXmlStructure, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::noTagSet, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::emptyTags, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagKeyNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagValueNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + } + + private void testInvalidObjectTagging(Supplier inputStream, + int expectedHttpCode, String expectedErrorCode) throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + inputStream.get()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(expectedHttpCode, ex.getHttpCode()); + assertEquals(expectedErrorCode, ex.getCode()); + } } @Test - public void testPutObjectTaggingNoKeyFound() { - objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - assertErrorResponse(NO_SUCH_KEY, () -> put(objectEndpoint, BUCKET_NAME, "nonexistent", 
tagValueNotSpecified())); + public void testPutObjectTaggingNoKeyFound() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1, + null, "", null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } } @Test - public void testPutObjectTaggingNoBucketFound() { - objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - assertErrorResponse(NO_SUCH_BUCKET, () -> put(objectEndpoint, "nonexistent", "any", twoTags())); + public void testPutObjectTaggingNoBucketFound() throws Exception { + try { + objectEndpoint.put("nonexistent", "nonexistent", 0, 1, + null, "", null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } } @Test @@ -144,24 +185,32 @@ public void testPutObjectTaggingNotImplemented() throws Exception { doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); - endpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - assertErrorResponse(NOT_IMPLEMENTED, () -> put(endpoint, "fsoBucket", "dir/", twoTags())); + try { + endpoint.put("fsoBucket", "dir/", 0, 1, null, "", + null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); + assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); + } } - private String emptyBody() { + private InputStream emptyBody() { return null; } - private String invalidXmlStructure() { - return + private InputStream invalidXmlStructure() { + String xml = "" + " " + " "; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - private String twoTags() { - return + private InputStream twoTags() { + String xml = "" + " " + " " + @@ -174,24 +223,29 @@ private String twoTags() { " " + " " + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - private String noTagSet() { - return + private InputStream noTagSet() { + String xml = "" + ""; + return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - private String emptyTags() { - return + private InputStream emptyTags() { + String xml = "" + " " + " " + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - public String tagKeyNotSpecified() { - return + public InputStream tagKeyNotSpecified() { + String xml = "" + " " + " " + @@ -199,10 +253,12 @@ public String tagKeyNotSpecified() { " " + " " + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); } - public String tagValueNotSpecified() { - return + public InputStream tagValueNotSpecified() { + String xml = "" + " " + " " + @@ -210,6 +266,8 @@ public String tagValueNotSpecified() { " " + " " + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 981eb264cbe7..4981069528a8 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -17,9 +17,8 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static 
org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; @@ -27,6 +26,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; @@ -36,6 +36,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -49,8 +50,6 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; @@ -84,27 +83,52 @@ public void setUp() throws Exception { @Test public void testPartUpload() throws Exception { - String uploadID = initiateUpload(OzoneConsts.KEY); - String content = "Multipart Upload"; - Response response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content); + Response response = rest.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + OzoneConsts.KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); + + String content = "Multipart Upload"; + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + } @Test public void testPartUploadWithOverride() throws Exception { - String uploadID = initiateUpload(OzoneConsts.KEY); + + Response response = rest.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + OzoneConsts.KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - Response response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, null, null, body); + + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); String eTag = 
response.getHeaderString(OzoneConsts.ETAG); - assertNotNull(eTag); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; - response = put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content); + response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -112,14 +136,20 @@ public void testPartUploadWithOverride() throws Exception { @Test public void testPartUploadWithIncorrectUploadID() throws Exception { - String content = "Multipart Upload With Incorrect uploadID"; - assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, - () -> put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, "random", content)); + OS3Exception ex = assertThrows(OS3Exception.class, () -> { + String content = "Multipart Upload With Incorrect uploadID"; + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, + "random", null, null, body); + }); + assertEquals("NoSuchUpload", ex.getCode()); + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); } @Test public void testPartUploadStreamContentLength() - throws Exception { + throws IOException, OS3Exception { HttpHeaders headers = mock(HttpHeaders.class); when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) .thenReturn("mockSignature"); @@ -138,24 +168,39 @@ public void testPartUploadStreamContentLength() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - String uploadID = initiateUpload(keyName); + Response response = objectEndpoint.initializeMultipartUpload( + OzoneConsts.S3_BUCKET, keyName); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + long contentLength = chunkedContent.length(); - assertSucceeds(() -> put(objectEndpoint, OzoneConsts.S3_BUCKET, keyName, 1, uploadID, chunkedContent)); + objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, + uploadID, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertContentLength(uploadID, keyName, 15); } @Test - public void testPartUploadContentLength() throws Exception { + public void testPartUploadContentLength() throws IOException, OS3Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. 
String keyName = UUID.randomUUID().toString(); - String uploadID = initiateUpload(keyName); + Response response = rest.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + keyName); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); String content = "Multipart Upload"; + long contentLength = content.length(); - assertSucceeds(() -> put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content)); - + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.put(OzoneConsts.S3_BUCKET, keyName, + contentLength, 1, uploadID, null, null, body); assertContentLength(uploadID, keyName, content.length()); } @@ -171,12 +216,21 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( "STANDARD"); - ObjectEndpoint objectEndpoint = spy(EndpointBuilder.newObjectEndpointBuilder() + ObjectEndpoint objectEndpoint = EndpointBuilder.newObjectEndpointBuilder() .setHeaders(headers) .setClient(clientStub) - .build()); + .build(); + + objectEndpoint = spy(objectEndpoint); - String uploadID = initiateUpload(OzoneConsts.KEY); + Response response = objectEndpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + OzoneConsts.KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); MessageDigest messageDigest = mock(MessageDigest.class); try (MockedStatic mocked = mockStatic(IOUtils.class)) { @@ -187,8 +241,17 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException .thenThrow(IOException.class); String content = "Multipart Upload"; - assertThrows(IOException.class, () -> put(objectEndpoint, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content)); - verify(messageDigest, times(1)).reset(); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + try { + objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, null, null, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } } } @@ -201,18 +264,4 @@ private void assertContentLength(String uploadID, String key, assertEquals(contentLength, parts.getPartInfoList().get(0).getSize()); } - - private String initiateUpload(String key) throws IOException, OS3Exception { - return initiateUpload(rest, OzoneConsts.S3_BUCKET, key); - } - - static String initiateUpload(ObjectEndpoint subject, String bucket, String key) throws IOException, OS3Exception { - try (Response response = subject.initializeMultipartUpload(bucket, key)) { - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - assertEquals(HttpStatus.SC_OK, response.getStatus()); - return multipartUploadInitiateResponse.getUploadID(); - } - } } diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 2cab6a6797fb..736660073d57 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -17,17 +17,19 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; -import static org.apache.hadoop.ozone.s3.endpoint.TestPartUpload.initiateUpload; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -35,7 +37,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -79,35 +81,65 @@ public void testEnableStream() { @Test public void testPartUpload() throws Exception { - String uploadID = initiateUpload(rest, S3BUCKET, S3KEY); + + Response response = rest.initializeMultipartUpload(S3BUCKET, S3KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - Response response = put(rest, S3BUCKET, S3KEY, 1, uploadID, content); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + response = rest.put(S3BUCKET, S3KEY, + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + } @Test public void testPartUploadWithOverride() throws Exception { - String uploadID = initiateUpload(rest, S3BUCKET, S3KEY); + + Response response = rest.initializeMultipartUpload(S3BUCKET, S3KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - Response response = put(rest, S3BUCKET, S3KEY, 1, uploadID, content); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + response = 
rest.put(S3BUCKET, S3KEY, + content.length(), 1, uploadID, null, null, body); + + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); String eTag = response.getHeaderString(OzoneConsts.ETAG); - assertNotNull(eTag); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; - response = put(rest, S3BUCKET, S3KEY, 1, uploadID, content); + response = rest.put(S3BUCKET, S3KEY, + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } @Test - public void testPartUploadWithIncorrectUploadID() { - String content = "Multipart Upload With Incorrect uploadID"; - assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> put(rest, S3BUCKET, S3KEY, 1, "random", content)); + public void testPartUploadWithIncorrectUploadID() throws Exception { + OS3Exception ex = assertThrows(OS3Exception.class, () -> { + String content = "Multipart Upload With Incorrect uploadID"; + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.put(S3BUCKET, S3KEY, content.length(), 1, + "random", null, null, body); + }); + assertEquals("NoSuchUpload", ex.getCode()); + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 043e891be751..8e9eef2d974d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -18,8 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; import static java.net.HttpURLConnection.HTTP_FORBIDDEN; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -35,7 +34,9 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -52,7 +53,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; @@ -259,9 +259,8 @@ public void testGetKey() throws IOException { .setConfig(conf) .build(); - objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.PART_NUMBER_MARKER, "marker"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, 1000)); + "bucketName", "keyPath", 0, null, 1000, "marker", null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -277,7 +276,10 @@ public void testPutKey() throws IOException { .setConfig(conf) .build(); - assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> 
put(objectEndpoint, "bucketName", "keyPath", "")); + OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( + "bucketName", "keyPath", 1024, 0, null, null, null, + new ByteArrayInputStream(new byte[]{}))); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @Test @@ -293,7 +295,7 @@ public void testDeleteKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath")); + objectEndpoint.delete("bucketName", "keyPath", null, null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -335,9 +337,20 @@ public void testObjectTagging() throws Exception { " " + ""; - objectEndpoint.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); - assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> put(objectEndpoint, "bucketName", "keyPath", xml)); - assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> objectEndpoint.delete("bucketName", "keyPath")); - assertErrorResponse(S3ErrorTable.ACCESS_DENIED, () -> objectEndpoint.get("bucketName", "keyPath", 0, 0)); + InputStream tagInput = new ByteArrayInputStream(xml.getBytes(UTF_8)); + + OS3Exception e = assertThrows(OS3Exception.class, () -> + objectEndpoint.put("bucketName", "keyPath", 0, 1, + null, "", null, tagInput)); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + objectEndpoint.delete("bucketName", "keyPath", "", "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + objectEndpoint.get("bucketName", "keyPath", 0, null, + 0, null, "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index 4df8b3270d37..dbe21601dbd3 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -19,8 +19,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256; @@ -29,10 +27,12 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.OutputStream; import java.util.HashMap; import java.util.Map; import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -89,7 +89,12 @@ public void testEnableStream() { @Test public void testUpload() throws Exception { - assertSucceeds(() -> put(rest, S3BUCKET, S3KEY, S3_COPY_EXISTING_KEY_CONTENT)); + byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8); + ByteArrayInputStream body = + new ByteArrayInputStream(keyContent); + Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, body); + + assertEquals(200, 
response.getStatus()); } @Test @@ -121,7 +126,9 @@ public void testUploadWithCopy() throws Exception { .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); rest.setHeaders(headers); - assertSucceeds(() -> put(rest, S3BUCKET, S3KEY, null)); + Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, null); + + assertEquals(200, response.getStatus()); final long newDataSize = bucket.getKey(S3KEY).getDataSize(); assertEquals(dataSize, newDataSize); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 5e997e81d941..bfc471e22d5d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -19,9 +19,7 @@ import static java.net.HttpURLConnection.HTTP_CONFLICT; import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; -import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; @@ -34,6 +32,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; @@ -63,16 +62,6 @@ */ public class TestS3GatewayMetrics { - private static final String PUT_TAGGING = - "" + - " " + - " " + - " tag1" + - " val1" + - " " + - " " + - ""; - private String bucketName = OzoneConsts.BUCKET; private String keyName = OzoneConsts.KEY; private OzoneClient clientStub; @@ -152,7 +141,7 @@ public void testGetBucketSuccess() throws Exception { } @Test - public void testGetBucketFailure() { + public void testGetBucketFailure() throws Exception { long oriMetric = metrics.getGetBucketFailure(); // Searching for a bucket that does not exist @@ -165,7 +154,7 @@ public void testGetBucketFailure() { } @Test - public void testCreateBucketSuccess() { + public void testCreateBucketSuccess() throws Exception { long oriMetric = metrics.getCreateBucketSuccess(); assertDoesNotThrow(() -> bucketEndpoint.put("newBucket", null)); @@ -174,7 +163,7 @@ public void testCreateBucketSuccess() { } @Test - public void testCreateBucketFailure() { + public void testCreateBucketFailure() throws Exception { long oriMetric = metrics.getCreateBucketFailure(); // Creating an error by trying to create a bucket that already exists @@ -225,7 +214,7 @@ public void testGetAclSuccess() throws Exception { } @Test - public void testGetAclFailure() { + public void testGetAclFailure() throws Exception { long oriMetric = metrics.getGetAclFailure(); bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, ACL_MARKER); @@ -299,18 +288,28 @@ public void testHeadKeyFailure() throws Exception { @Test public void testCreateKeySuccess() throws Exception { + long oriMetric = metrics.getCreateKeySuccess(); - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); + // Create an input stream + 
ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); long curMetric = metrics.getCreateKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testCreateKeyFailure() { + public void testCreateKeyFailure() throws Exception { long oriMetric = metrics.getCreateKeyFailure(); // Create the file in a bucket that does not exist - assertErrorResponse(S3ErrorTable.NO_SUCH_BUCKET, () -> put(keyEndpoint, "unknownBucket", keyName, CONTENT)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( + "unknownBucket", keyName, CONTENT.length(), 1, null, null, + null, null)); + assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getCreateKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -320,15 +319,17 @@ public void testDeleteKeySuccess() throws Exception { long oriMetric = metrics.getDeleteKeySuccess(); bucket.createKey(keyName, 0).close(); - keyEndpoint.delete(bucketName, keyName); + keyEndpoint.delete(bucketName, keyName, null, null); long curMetric = metrics.getDeleteKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testDeleteKeyFailure() { + public void testDeleteKeyFailure() throws Exception { long oriMetric = metrics.getDeleteKeyFailure(); - assertErrorResponse(S3ErrorTable.NO_SUCH_BUCKET, () -> keyEndpoint.delete("unknownBucket", keyName)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( + "unknownBucket", keyName, null, null)); + assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getDeleteKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -337,10 +338,14 @@ public void testDeleteKeyFailure() { public void testGetKeySuccess() throws Exception { long oriMetric = metrics.getGetKeySuccess(); + // Create an input stream + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); // GET the key from the bucket - Response response = keyEndpoint.get(bucketName, keyName, 0, 0); + Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null); StreamingOutput stream = (StreamingOutput) response.getEntity(); stream.write(new ByteArrayOutputStream()); long curMetric = metrics.getGetKeySuccess(); @@ -348,11 +353,13 @@ public void testGetKeySuccess() throws Exception { } @Test - public void testGetKeyFailure() { + public void testGetKeyFailure() throws Exception { long oriMetric = metrics.getGetKeyFailure(); // Fetching a non-existent key - assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> keyEndpoint.get(bucketName, "unknownKey", 0, 0)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( + bucketName, "unknownKey", 0, null, 0, null, null)); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode()); long curMetric = metrics.getGetKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -367,10 +374,11 @@ public void testInitMultiPartUploadSuccess() throws Exception { } @Test - public void testInitMultiPartUploadFailure() { + public void testInitMultiPartUploadFailure() throws Exception { long oriMetric = metrics.getInitMultiPartUploadFailure(); - assertErrorResponse(S3ErrorTable.NO_SUCH_BUCKET, - () -> 
keyEndpoint.initializeMultipartUpload("unknownBucket", keyName)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint + .initializeMultipartUpload("unknownBucket", keyName)); + assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getInitMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -384,88 +392,101 @@ public void testAbortMultiPartUploadSuccess() throws Exception { long oriMetric = metrics.getAbortMultiPartUploadSuccess(); // Abort the Upload Successfully by deleting the key using the Upload-Id - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); - keyEndpoint.delete(bucketName, keyName); + keyEndpoint.delete(bucketName, keyName, uploadID, null); long curMetric = metrics.getAbortMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testAbortMultiPartUploadFailure() { + public void testAbortMultiPartUploadFailure() throws Exception { long oriMetric = metrics.getAbortMultiPartUploadFailure(); // Fail the Abort Method by providing wrong uploadID - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "wrongId"); - assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> keyEndpoint.delete(bucketName, keyName)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( + bucketName, keyName, "wrongId", null)); + assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getAbortMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); } @Test public void testCompleteMultiPartUploadSuccess() throws Exception { - String uploadID = initiateMultipartUpload(bucketName, keyName); - long oriMetric = metrics.getCompleteMultiPartUploadSuccess(); - CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(); - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); - assertSucceeds(() -> keyEndpoint.completeMultipartUpload(bucketName, keyName, completeMultipartUploadRequest)); + // Initiate the Upload and fetch the upload ID + String uploadID = initiateMultipartUpload(bucketName, keyName); + long oriMetric = metrics.getCompleteMultiPartUploadSuccess(); + // complete multipart upload + CompleteMultipartUploadRequest completeMultipartUploadRequest = new + CompleteMultipartUploadRequest(); + Response response = keyEndpoint.completeMultipartUpload(bucketName, keyName, + uploadID, completeMultipartUploadRequest); long curMetric = metrics.getCompleteMultiPartUploadSuccess(); + assertEquals(200, response.getStatus()); assertEquals(1L, curMetric - oriMetric); } @Test - public void testCompleteMultiPartUploadFailure() { + public void testCompleteMultiPartUploadFailure() throws Exception { long oriMetric = metrics.getCompleteMultiPartUploadFailure(); - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "random"); - CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(); - assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, - () -> keyEndpoint.completeMultipartUpload(bucketName, "key2", completeMultipartUploadRequest)); + CompleteMultipartUploadRequest completeMultipartUploadRequestNew = new + CompleteMultipartUploadRequest(); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint + .completeMultipartUpload(bucketName, "key2", "random", + completeMultipartUploadRequestNew)); + assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = 
metrics.getCompleteMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); } @Test public void testCreateMultipartKeySuccess() throws Exception { + + // Initiate the Upload and fetch the upload ID String uploadID = initiateMultipartUpload(bucketName, keyName); + long oriMetric = metrics.getCreateMultipartKeySuccess(); - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT.length(), + 1, uploadID, null, null, body); long curMetric = metrics.getCreateMultipartKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testCreateMultipartKeyFailure() { + public void testCreateMultipartKeyFailure() throws Exception { long oriMetric = metrics.getCreateMultipartKeyFailure(); - - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "random"); - assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> put(keyEndpoint, bucketName, keyName, CONTENT)); - + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( + bucketName, keyName, CONTENT.length(), 1, "randomId", null, null, null)); + assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getCreateMultipartKeyFailure(); assertEquals(1L, curMetric - oriMetric); } @Test public void testListPartsSuccess() throws Exception { + long oriMetric = metrics.getListPartsSuccess(); + // Initiate the Upload and fetch the upload ID String uploadID = initiateMultipartUpload(bucketName, keyName); - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); - keyEndpoint.get(bucketName, keyName, 0, 3); - + // Listing out the parts by providing the uploadID + keyEndpoint.get(bucketName, keyName, 0, + uploadID, 3, null, null); long curMetric = metrics.getListPartsSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testListPartsFailure() { - long oriMetric = metrics.getListPartsFailure(); - - keyEndpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, "wrong_id"); - assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, () -> keyEndpoint.get(bucketName, keyName, 0, 3)); + public void testListPartsFailure() throws Exception { + long oriMetric = metrics.getListPartsFailure(); + // Listing out the parts by providing the uploadID after aborting + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( + bucketName, keyName, 0, "wrong_id", 3, null, null)); + assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getListPartsFailure(); assertEquals(1L, curMetric - oriMetric); } @@ -481,24 +502,28 @@ public void testCopyObject() throws Exception { // Test for Success of CopyObjectSuccess Metric long oriMetric = metrics.getCopyObjectSuccess(); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); + keyEndpoint.put(bucketName, keyName, + CONTENT.length(), 1, null, null, null, body); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( bucketName + "/" + urlEncode(keyName)); - assertSucceeds(() -> put(keyEndpoint, destBucket, destKey, CONTENT)); - + keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1, + null, null, null, body); long curMetric = metrics.getCopyObjectSuccess(); assertEquals(1L, curMetric - 
oriMetric); // Test for Failure of CopyObjectFailure Metric oriMetric = metrics.getCopyObjectFailure(); - + // source and dest same when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - OS3Exception e = assertErrorResponse(S3ErrorTable.INVALID_REQUEST, - () -> put(keyEndpoint, bucketName, keyName, CONTENT)); + OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( + bucketName, keyName, CONTENT.length(), 1, null, null, null, body), + "Test for CopyObjectMetric failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); curMetric = metrics.getCopyObjectFailure(); assertEquals(1L, curMetric - oriMetric); @@ -507,22 +532,31 @@ public void testCopyObject() throws Exception { @Test public void testPutObjectTaggingSuccess() throws Exception { long oriMetric = metrics.getPutObjectTaggingSuccess(); - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); - keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, PUT_TAGGING)); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); long curMetric = metrics.getPutObjectTaggingSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testPutObjectTaggingFailure() { + public void testPutObjectTaggingFailure() throws Exception { long oriMetric = metrics.getPutObjectTaggingFailure(); // Put object tagging for nonexistent key - keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); - assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> put(keyEndpoint, bucketName, "nonexistent", PUT_TAGGING)); + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "", + null, getPutTaggingBody()) + ); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); long curMetric = metrics.getPutObjectTaggingFailure(); assertEquals(1L, curMetric - oriMetric); @@ -531,26 +565,34 @@ public void testPutObjectTaggingFailure() { @Test public void testGetObjectTaggingSuccess() throws Exception { long oriMetric = metrics.getGetObjectTaggingSuccess(); - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT)); - keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); - assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, PUT_TAGGING)); + // Create the file + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); // Get object tagging - keyEndpoint.get(bucketName, keyName, 0, 0); + keyEndpoint.get(bucketName, keyName, 0, + null, 0, null, ""); long curMetric = metrics.getGetObjectTaggingSuccess(); assertEquals(1L, curMetric - oriMetric); } @Test - public void testGetObjectTaggingFailure() { + public void testGetObjectTaggingFailure() throws Exception { long oriMetric = metrics.getGetObjectTaggingFailure(); // Get object tagging for nonexistent key - keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, ""); - assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> keyEndpoint.get(bucketName, "nonexistent", 0, 0)); - + OS3Exception ex = 
assertThrows(OS3Exception.class, () ->
+        keyEndpoint.get(bucketName, "nonexistent", 0, null,
+            0, null, ""));
+    assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());
     long curMetric = metrics.getGetObjectTaggingFailure();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -558,25 +600,32 @@ public void testGetObjectTaggingFailure() {
   @Test
   public void testDeleteObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingSuccess();
-    assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, CONTENT));
-    keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, "");
-    assertSucceeds(() -> put(keyEndpoint, bucketName, keyName, PUT_TAGGING));
+    // Create the file
+    ByteArrayInputStream body =
+        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
+    keyEndpoint.put(bucketName, keyName, CONTENT
+        .length(), 1, null, null, null, body);
+    body.close();
+
+    // Put object tagging
+    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
 
     // Delete object tagging
-    keyEndpoint.delete(bucketName, keyName);
+    keyEndpoint.delete(bucketName, keyName, null, "");
     long curMetric = metrics.getDeleteObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
 
   @Test
-  public void testDeleteObjectTaggingFailure() {
+  public void testDeleteObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingFailure();
-    keyEndpoint.queryParamsForTest().set(QueryParams.TAGGING, "");
-    assertErrorResponse(S3ErrorTable.NO_SUCH_KEY, () -> keyEndpoint.delete(bucketName, "nonexistent"));
-
+    // Delete object tagging for nonexistent key
+    OS3Exception ex = assertThrows(OS3Exception.class, () ->
+        keyEndpoint.delete(bucketName, "nonexistent", null, ""));
+    assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());
     long curMetric = metrics.getDeleteObjectTaggingFailure();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -596,4 +645,18 @@ private String initiateMultipartUpload(String bktName, String key)
     }
     return "Invalid-Id";
   }
+
+  private static InputStream getPutTaggingBody() {
+    String xml =
+        "<Tagging xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" +
+        "   <TagSet>" +
+        "      <Tag>" +
+        "         <Key>tag1</Key>" +
+        "         <Value>val1</Value>" +
+        "      </Tag>" +
+        "   </TagSet>" +
+        "</Tagging>";
+
+    return new ByteArrayInputStream(xml.getBytes(UTF_8));
+  }
 }

From 7152020e781733373ee7c43daa696dce92868a2c Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Wed, 7 Jan 2026 07:34:21 +0100
Subject: [PATCH 7/7] HDDS-14209.
Reduce parameter count in ObjectEndpoint --- .../ozone/s3/endpoint/ObjectEndpoint.java | 91 +++++++++---------- .../ozone/s3/endpoint/EndpointTestUtils.java | 48 +++++++--- .../s3/metrics/TestS3GatewayMetrics.java | 9 +- 3 files changed, 82 insertions(+), 66 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 45e20230337b..d33b79761f70 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -83,7 +83,6 @@ import javax.annotation.PostConstruct; import javax.ws.rs.Consumes; import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; import javax.ws.rs.GET; import javax.ws.rs.HEAD; import javax.ws.rs.HeaderParam; @@ -92,7 +91,6 @@ import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MultivaluedMap; @@ -222,23 +220,23 @@ public void init() { * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for * more details. */ - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) + @SuppressWarnings("checkstyle:MethodLength") @PUT public Response put( @PathParam(BUCKET) String bucketName, @PathParam(PATH) String keyPath, @HeaderParam(HttpHeaders.CONTENT_LENGTH) long length, - @QueryParam(QueryParams.PART_NUMBER) int partNumber, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, - @QueryParam(QueryParams.TAGGING) String taggingMarker, - @QueryParam(QueryParams.ACL) String aclMarker, - final InputStream body) throws IOException, OS3Exception { + final InputStream body + ) throws IOException, OS3Exception { + final String aclMarker = queryParams().get(QueryParams.ACL); + final String taggingMarker = queryParams().get(QueryParams.TAGGING); + final String uploadID = queryParams().get(QueryParams.UPLOAD_ID); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; PerformanceStringBuilder perf = new PerformanceStringBuilder(); - String copyHeader = null, storageType = null, storageConfig = null; + String copyHeader = null; MultiDigestInputStream multiDigestInputStream = null; try { if (aclMarker != null) { @@ -261,17 +259,13 @@ public Response put( } // If uploadID is specified, it is a request for upload part return createMultipartKey(volume, bucket, keyPath, length, - partNumber, uploadID, body, perf); + body, perf); } copyHeader = getHeaders().getHeaderString(COPY_SOURCE_HEADER); - storageType = getHeaders().getHeaderString(STORAGE_CLASS_HEADER); - storageConfig = getHeaders().getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); - boolean storageTypeDefault = StringUtils.isEmpty(storageType); // Normal put object - ReplicationConfig replicationConfig = - getReplicationConfig(bucket, storageType, storageConfig); + ReplicationConfig replicationConfig = getReplicationConfig(bucket); boolean enableEC = false; if ((replicationConfig != null && @@ -284,8 +278,7 @@ public Response put( //Copy object, as copy source available. 
s3GAction = S3GAction.COPY_OBJECT; CopyObjectResponse copyObjectResponse = copyObject(volume, - copyHeader, bucketName, keyPath, replicationConfig, - storageTypeDefault, perf); + bucketName, keyPath, replicationConfig, perf); return Response.status(Status.OK).entity(copyObjectResponse).header( "Connection", "close").build(); } @@ -431,17 +424,18 @@ public Response put( * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html * for more details. */ - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) + @SuppressWarnings("checkstyle:MethodLength") @GET public Response get( @PathParam(BUCKET) String bucketName, - @PathParam(PATH) String keyPath, - @QueryParam(QueryParams.PART_NUMBER) int partNumber, - @QueryParam(QueryParams.UPLOAD_ID) String uploadId, - @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts, - @QueryParam(QueryParams.PART_NUMBER_MARKER) String partNumberMarker, - @QueryParam(QueryParams.TAGGING) String taggingMarker) - throws IOException, OS3Exception { + @PathParam(PATH) String keyPath + ) throws IOException, OS3Exception { + final int maxParts = queryParams().getInt(QueryParams.MAX_PARTS, 1000); + final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0); + final String partNumberMarker = queryParams().get(QueryParams.PART_NUMBER_MARKER); + final String taggingMarker = queryParams().get(QueryParams.TAGGING); + final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); + long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; PerformanceStringBuilder perf = new PerformanceStringBuilder(); @@ -748,10 +742,11 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, @SuppressWarnings("emptyblock") public Response delete( @PathParam(BUCKET) String bucketName, - @PathParam(PATH) String keyPath, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadId, - @QueryParam(QueryParams.TAGGING) String taggingMarker) throws - IOException, OS3Exception { + @PathParam(PATH) String keyPath + ) throws IOException, OS3Exception { + final String taggingMarker = queryParams().get(QueryParams.TAGGING); + final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); + long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; @@ -826,24 +821,20 @@ public Response delete( public Response initializeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key - ) - throws IOException, OS3Exception { + ) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD; try { OzoneBucket ozoneBucket = getBucket(bucket); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucket, ozoneBucket.getOwner()); - String storageType = getHeaders().getHeaderString(STORAGE_CLASS_HEADER); - String storageConfig = getHeaders().getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); Map customMetadata = getCustomMetadataFromHeaders(getHeaders().getRequestHeaders()); Map tags = getTaggingFromHeaders(getHeaders()); - ReplicationConfig replicationConfig = - getReplicationConfig(ozoneBucket, storageType, storageConfig); + ReplicationConfig replicationConfig = getReplicationConfig(ozoneBucket); OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationConfig, customMetadata, tags); @@ -873,8 +864,9 @@ public Response initializeMultipartUpload( } } - private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, - String 
storageType, String storageConfig) throws OS3Exception { + private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket) throws OS3Exception { + String storageType = getHeaders().getHeaderString(STORAGE_CLASS_HEADER); + String storageConfig = getHeaders().getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); ReplicationConfig clientConfiguredReplicationConfig = OzoneClientUtils.getClientConfiguredReplicationConfig(getOzoneConfiguration()); @@ -891,9 +883,9 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, public Response completeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, - CompleteMultipartUploadRequest multipartUploadRequest) - throws IOException, OS3Exception { + CompleteMultipartUploadRequest multipartUploadRequest + ) throws IOException, OS3Exception { + final String uploadID = queryParams().get(QueryParams.UPLOAD_ID, ""); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD; OzoneVolume volume = getVolume(); @@ -962,12 +954,14 @@ public Response completeMultipartUpload( } } - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) + @SuppressWarnings("checkstyle:MethodLength") private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, - String key, long length, int partNumber, String uploadID, + String key, long length, final InputStream body, PerformanceStringBuilder perf) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); + final String uploadID = queryParams().get(QueryParams.UPLOAD_ID); + final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0); String copyHeader = null; MultiDigestInputStream multiDigestInputStream = null; final String bucketName = ozoneBucket.getName(); @@ -979,10 +973,7 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, length = chunkInputStreamInfo.getEffectiveLength(); copyHeader = getHeaders().getHeaderString(COPY_SOURCE_HEADER); - String storageType = getHeaders().getHeaderString(STORAGE_CLASS_HEADER); - String storageConfig = getHeaders().getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); - ReplicationConfig replicationConfig = - getReplicationConfig(ozoneBucket, storageType, storageConfig); + ReplicationConfig replicationConfig = getReplicationConfig(ozoneBucket); boolean enableEC = false; if ((replicationConfig != null && @@ -1227,12 +1218,14 @@ void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, perf.appendSizeBytes(copyLength); } - @SuppressWarnings("checkstyle:ParameterNumber") private CopyObjectResponse copyObject(OzoneVolume volume, - String copyHeader, String destBucket, String destkey, - ReplicationConfig replicationConfig, boolean storageTypeDefault, + String destBucket, String destkey, ReplicationConfig replicationConfig, PerformanceStringBuilder perf) throws OS3Exception, IOException { + String copyHeader = getHeaders().getHeaderString(COPY_SOURCE_HEADER); + String storageType = getHeaders().getHeaderString(STORAGE_CLASS_HEADER); + boolean storageTypeDefault = StringUtils.isEmpty(storageType); + long startNanos = Time.monotonicNowNanos(); Pair result = parseSourceHeader(copyHeader); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java index ae776993555d..c82a0772c931 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java @@ -28,7 +28,9 @@ import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.http.HttpStatus; +import org.apache.ratis.util.function.CheckedRunnable; import org.apache.ratis.util.function.CheckedSupplier; /** Utilities for unit-testing S3 endpoints. */ @@ -40,7 +42,7 @@ public static Response get( String bucket, String key ) throws IOException, OS3Exception { - return subject.get(bucket, key, 0, null, 0, null, null); + return subject.get(bucket, key); } /** Get key tags. */ @@ -49,7 +51,8 @@ public static Response getTagging( String bucket, String key ) throws IOException, OS3Exception { - return subject.get(bucket, key, 0, null, 0, null, ""); + subject.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + return subject.get(bucket, key); } /** List parts of MPU. */ @@ -61,7 +64,10 @@ public static Response listParts( int maxParts, int nextPart ) throws IOException, OS3Exception { - return subject.get(bucket, key, 0, uploadID, maxParts, String.valueOf(nextPart), null); + subject.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + subject.queryParamsForTest().setInt(S3Consts.QueryParams.MAX_PARTS, maxParts); + subject.queryParamsForTest().setInt(S3Consts.QueryParams.PART_NUMBER_MARKER, nextPart); + return subject.get(bucket, key); } /** Put without content. */ @@ -90,12 +96,13 @@ public static Response putTagging( String key, String content ) throws IOException, OS3Exception { + subject.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); if (content == null) { - return subject.put(bucket, key, 0, 0, null, "", null, null); + return subject.put(bucket, key, 0, null); } else { final long length = content.length(); try (ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8))) { - return subject.put(bucket, key, length, 0, null, "", null, body); + return subject.put(bucket, key, length, body); } } } @@ -109,12 +116,17 @@ public static Response put( String uploadID, String content ) throws IOException, OS3Exception { + if (uploadID != null) { + subject.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + } + subject.queryParamsForTest().setInt(S3Consts.QueryParams.PART_NUMBER, partNumber); + if (content == null) { - return subject.put(bucket, key, 0, partNumber, uploadID, null, null, null); + return subject.put(bucket, key, 0, null); } else { final long length = content.length(); try (ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8))) { - return subject.put(bucket, key, length, partNumber, uploadID, null, null, body); + return subject.put(bucket, key, length, body); } } } @@ -125,7 +137,7 @@ public static Response delete( String bucket, String key ) throws IOException, OS3Exception { - return subject.delete(bucket, key, null, null); + return subject.delete(bucket, key); } /** Delete key tags. 
*/ @@ -134,7 +146,8 @@ public static Response deleteTagging( String bucket, String key ) throws IOException, OS3Exception { - return subject.delete(bucket, key, null, ""); + subject.queryParamsForTest().set(S3Consts.QueryParams.TAGGING, ""); + return subject.delete(bucket, key); } /** Initiate multipart upload. @@ -185,7 +198,9 @@ public static void completeMultipartUpload( CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(); completeMultipartUploadRequest.setPartList(parts); - try (Response response = subject.completeMultipartUpload(bucket, key, uploadID, completeMultipartUploadRequest)) { + subject.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + + try (Response response = subject.completeMultipartUpload(bucket, key, completeMultipartUploadRequest)) { assertEquals(HttpStatus.SC_OK, response.getStatus()); CompleteMultipartUploadResponse completeMultipartUploadResponse = @@ -205,7 +220,8 @@ public static Response abortMultipartUpload( String key, String uploadID ) throws IOException, OS3Exception { - return subject.delete(bucket, key, uploadID, null); + subject.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID); + return subject.delete(bucket, key); } /** Verify response is success for {@code request}. */ @@ -220,7 +236,15 @@ public static void assertStatus(int status, CheckedSupplie } } - /** Verify error response for {@code request} matching {@code expected} {@link OS3Exception}. */ + /** Verify error response for {@code request} matches {@code expected} {@link OS3Exception}. */ + public static OS3Exception assertErrorResponse(OS3Exception expected, CheckedRunnable request) { + OS3Exception actual = assertThrows(OS3Exception.class, request::run); + assertEquals(expected.getCode(), actual.getCode()); + assertEquals(expected.getHttpCode(), actual.getHttpCode()); + return actual; + } + + /** Verify error response for {@code request} matches {@code expected} {@link OS3Exception}. 
*/ public static OS3Exception assertErrorResponse(OS3Exception expected, CheckedSupplier request) { OS3Exception actual = assertThrows(OS3Exception.class, () -> request.get().close()); assertEquals(expected.getCode(), actual.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 8df792be49b3..ccc8521a44fc 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -19,10 +19,12 @@ import static java.net.HttpURLConnection.HTTP_CONFLICT; import static java.net.HttpURLConnection.HTTP_OK; +import static java.util.Collections.emptyList; import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.abortMultipartUpload; import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertStatus; import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.completeMultipartUpload; import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.delete; import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.deleteTagging; import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.get; @@ -54,7 +56,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.endpoint.BucketEndpoint; -import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest; import org.apache.hadoop.ozone.s3.endpoint.EndpointBuilder; import org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils; import org.apache.hadoop.ozone.s3.endpoint.ObjectEndpoint; @@ -406,9 +407,8 @@ public void testAbortMultiPartUploadFailure() { public void testCompleteMultiPartUploadSuccess() throws Exception { long oriMetric = metrics.getCompleteMultiPartUploadSuccess(); String uploadID = initiateMultipartUpload(bucketName, keyName); - CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(); - assertSucceeds(() -> keyEndpoint.completeMultipartUpload(bucketName, keyName, uploadID, request)); + completeMultipartUpload(keyEndpoint, bucketName, keyName, uploadID, emptyList()); long curMetric = metrics.getCompleteMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -417,10 +417,9 @@ public void testCompleteMultiPartUploadSuccess() throws Exception { @Test public void testCompleteMultiPartUploadFailure() { long oriMetric = metrics.getCompleteMultiPartUploadFailure(); - CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(); assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD, - () -> keyEndpoint.completeMultipartUpload(bucketName, "key2", "random", request)); + () -> completeMultipartUpload(keyEndpoint, bucketName, "key2", "random", emptyList())); long curMetric = metrics.getCompleteMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric);
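
Illustrative sketch (not part of the patch): the net effect of the series on callers is that query parameters are staged on the endpoint via queryParamsForTest() and read server-side via queryParams(), while the EndpointTestUtils helpers wrap the slimmed-down put(bucket, key, length, body) signature. The snippet below uses only names that appear in the diffs above; the usual @BeforeEach wiring (OzoneClientStub, pre-created bucket, mocked HttpHeaders, imports as in TestPartUpload/TestS3GatewayMetrics) is assumed, so treat it as a sketch under those assumptions rather than code from the patch.

    // Sketch only: assumes the standard test setup (clientStub, bucket, mocked headers).
    ObjectEndpoint rest = EndpointBuilder.newObjectEndpointBuilder()
        .setClient(clientStub)
        .build();

    // Start a multipart upload; this endpoint signature is unchanged by the series.
    Response init = rest.initializeMultipartUpload(OzoneConsts.S3_BUCKET, OzoneConsts.KEY);
    String uploadID =
        ((MultipartUploadInitiateResponse) init.getEntity()).getUploadID();

    // Query parameters are staged up front instead of being passed as method arguments;
    // ObjectEndpoint reads them back with queryParams().get(...) / getInt(...).
    rest.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadID);
    rest.queryParamsForTest().setInt(S3Consts.QueryParams.PART_NUMBER, 1);

    String content = "Multipart Upload";
    try (ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8))) {
      // Slimmed-down JAX-RS signature: path params, Content-Length header, body.
      Response response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), body);
      assertEquals(HttpStatus.SC_OK, response.getStatus());
    }

    // The shared helpers hide the staging; error cases are checked against S3ErrorTable.
    assertSucceeds(() -> EndpointTestUtils.put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, uploadID, content));
    assertErrorResponse(S3ErrorTable.NO_SUCH_UPLOAD,
        () -> EndpointTestUtils.put(rest, OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 1, "random", content));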