diff --git a/.changes/next-release/feature-AWSSDKforJavav2-1a64dc3.json b/.changes/next-release/feature-AWSSDKforJavav2-1a64dc3.json new file mode 100644 index 000000000000..4dabb26bd0b1 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-1a64dc3.json @@ -0,0 +1,6 @@ +{ + "category": "AWS SDK for Java v2", + "contributor": "", + "type": "feature", + "description": "Add support for RequestCompression trait to GZIP compress requests." +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java index 2db612a5f6dd..79bb81470f5e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java @@ -165,6 +165,7 @@ public Map constructOperations() { operationModel.setEndpointTrait(op.getEndpoint()); operationModel.setHttpChecksumRequired(op.isHttpChecksumRequired()); operationModel.setHttpChecksum(op.getHttpChecksum()); + operationModel.setRequestCompression(op.getRequestCompression()); operationModel.setStaticContextParams(op.getStaticContextParams()); Input input = op.getInput(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java b/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java new file mode 100644 index 000000000000..69d53bc7e30f --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.compression; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Class to map the RequestCompression trait of an operation. 
+ */ +@SdkInternalApi +public class RequestCompression { + + private List encodings; + + public List getEncodings() { + return encodings; + } + + public void setEncodings(List encodings) { + this.encodings = encodings; + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java index 11dbe6794b8f..1ff197191126 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.codegen.checksum.HttpChecksum; +import software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.docs.ClientType; import software.amazon.awssdk.codegen.docs.DocConfiguration; import software.amazon.awssdk.codegen.docs.OperationDocs; @@ -71,6 +72,8 @@ public class OperationModel extends DocumentationModel { private HttpChecksum httpChecksum; + private RequestCompression requestCompression; + @JsonIgnore private Map staticContextParams; @@ -309,6 +312,14 @@ public void setHttpChecksum(HttpChecksum httpChecksum) { this.httpChecksum = httpChecksum; } + public RequestCompression getRequestCompression() { + return requestCompression; + } + + public void setRequestCompression(RequestCompression requestCompression) { + this.requestCompression = requestCompression; + } + public Map getStaticContextParams() { return staticContextParams; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java index 4f1d573b0133..e8a6826c17aa 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.codegen.checksum.HttpChecksum; +import software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.model.intermediate.EndpointDiscovery; public class Operation { @@ -52,6 +53,8 @@ public class Operation { private HttpChecksum httpChecksum; + private RequestCompression requestCompression; + private Map staticContextParams; public String getName() { @@ -189,6 +192,14 @@ public void setHttpChecksum(HttpChecksum httpChecksum) { this.httpChecksum = httpChecksum; } + public RequestCompression getRequestCompression() { + return requestCompression; + } + + public void setRequestCompression(RequestCompression requestCompression) { + this.requestCompression = requestCompression; + } + public Map getStaticContextParams() { return staticContextParams; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index 41361004b80f..44922d4e2b32 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -42,6 +42,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import 
software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; import software.amazon.awssdk.codegen.poet.model.EventStreamSpecHelper; import software.amazon.awssdk.core.SdkPojoBuilder; @@ -187,7 +188,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withMetricCollector(apiCallMetricCollector)") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)); if (opModel.hasStreamingInput()) { codeBlock.add(".withRequestBody(requestBody)") @@ -257,6 +259,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)) .add(".withInput($L)$L);", opModel.getInput().getVariableName(), asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index 74e15930c87e..daef19b9def3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -31,6 +31,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; @@ -116,7 +117,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withMetricCollector(apiCallMetricCollector)") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, intermediateModel)); if (opModel.hasStreamingInput()) { @@ -151,7 +153,8 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(".withMetricCollector(apiCallMetricCollector)\n") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, intermediateModel)); builder.add(hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L)$L);", diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java index 59769ff51d44..3f58b49edc7b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java 
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java @@ -37,6 +37,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; import software.amazon.awssdk.codegen.poet.model.EventStreamSpecHelper; import software.amazon.awssdk.core.SdkPojoBuilder; @@ -135,7 +136,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withInput($L)", opModel.getInput().getVariableName()) .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)); s3ArnableFields(opModel, model).ifPresent(codeBlock::add); @@ -213,7 +215,8 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(asyncRequestBody(opModel)) .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)); s3ArnableFields(opModel, model).ifPresent(builder::add); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java new file mode 100644 index 000000000000..9290e02a003a --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client.traits; + +import com.squareup.javapoet.CodeBlock; +import java.util.List; +import java.util.stream.Collectors; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.OperationModel; +import software.amazon.awssdk.core.client.handler.ClientExecutionParams; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; + +/** + * The logic for handling the "requestCompression" trait within the code generator. + */ +public class RequestCompressionTrait { + + private RequestCompressionTrait() { + } + + /** + * Generate a ".putExecutionAttribute(...)" code-block for the provided operation model. This should be used within the + * context of initializing {@link ClientExecutionParams}. 
If request compression is not required by the operation, this will + * return an empty code-block. + */ + public static CodeBlock create(OperationModel operationModel, IntermediateModel model) { + if (operationModel.getRequestCompression() == null) { + return CodeBlock.of(""); + } + + // TODO : remove once: + // 1) S3 checksum interceptors are moved to occur after CompressRequestStage + // 2) Transfer-Encoding:chunked is supported in S3 + if (model.getMetadata().getServiceName().equals("S3")) { + throw new IllegalStateException("Request compression for S3 is not yet supported in the AWS SDK for Java."); + } + + List encodings = operationModel.getRequestCompression().getEncodings(); + + return CodeBlock.of(".putExecutionAttribute($T.REQUEST_COMPRESSION, " + + "$T.builder().encodings($L).isStreaming($L).build())", + SdkInternalExecutionAttribute.class, RequestCompression.class, + encodings.stream().collect(Collectors.joining("\", \"", "\"", "\"")), + operationModel.hasStreamingInput()); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json index 05f73f8e6069..65d931001984 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json @@ -30,6 +30,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json index 5827a53a9a27..a3c379d189d6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json @@ -59,6 +59,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json index 66597cd7bd19..f003ba7d1e66 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json @@ -22,6 +22,16 @@ }, "httpChecksumRequired": true }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json index 267a48381fc9..451eb30d1e28 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json @@ -29,6 +29,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java index 783d45793ecb..05c476018466 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java @@ -29,6 +29,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -305,6 +307,33 @@ public CompletableFuture operationWithChe return invokeOperation(operationWithChecksumRequiredRequest, request -> delegate.operationWithChecksumRequired(request)); } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + return invokeOperation(operationWithRequestCompressionRequest, + request -> delegate.operationWithRequestCompression(request)); + } + /** * Some paginated operation with result_key in paginators.json file * @@ -468,7 +497,7 @@ public CompletableFuture streamingInputOutputOperation( StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, AsyncResponseTransformer asyncResponseTransformer) { return invokeOperation(streamingInputOutputOperationRequest, - request -> delegate.streamingInputOutputOperation(request, requestBody, asyncResponseTransformer)); + request -> delegate.streamingInputOutputOperation(request, requestBody, asyncResponseTransformer)); } /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java index cc067f5eab5b..8fc5e6c0adcd 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java @@ -23,6 +23,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -195,6 +197,30 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( return invokeOperation(operationWithChecksumRequiredRequest, request -> delegate.operationWithChecksumRequired(request)); } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + return invokeOperation(operationWithRequestCompressionRequest, + request -> delegate.operationWithRequestCompression(request)); + } + /** * Some paginated operation with result_key in paginators.json file * @@ -400,7 +426,6 @@ public ReturnT streamingOutputOperation(StreamingOutputOperationReques request -> delegate.streamingOutputOperation(request, responseTransformer)); } - /** * Creates an instance of {@link JsonUtilities} object with the configuration set on this client. */ diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java index b03bc8eb84d2..ae6973fafab0 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; @@ -75,6 +76,8 @@ import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.json.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -99,6 +102,7 @@ import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.StreamingInputOperationRequestMarshaller; @@ -679,6 +683,66 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. 
+ * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); + return executeFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java index 4c480ea950ee..81eb8e1aba4e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java @@ -43,6 +43,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; @@ -81,6 +82,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import 
software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -107,6 +110,7 @@ import software.amazon.awssdk.services.json.transform.InputEventMarshaller; import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PutOperationWithChecksumRequestMarshaller; @@ -757,6 +761,66 @@ public CompletableFuture operationWithChe } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); + return executeFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java index bbac65db60cd..9b044964447a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java @@ -34,6 +34,8 @@ import software.amazon.awssdk.services.json.model.InputEventStreamTwo; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -618,6 +620,64 @@ default CompletableFuture operationWithCh 
.applyMutation(operationWithChecksumRequiredRequest).build()); } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithRequestCompression operation asynchronously.
+ * <p>
+ * This is a convenience which creates an instance of the {@link OperationWithRequestCompressionRequest.Builder}
+ * avoiding the need to create one manually via {@link OperationWithRequestCompressionRequest#builder()}
+ * </p>
+ * + * @param operationWithRequestCompressionRequest + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest.Builder} to + * create a request. + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default CompletableFuture operationWithRequestCompression( + Consumer operationWithRequestCompressionRequest) { + return operationWithRequestCompression(OperationWithRequestCompressionRequest.builder() + .applyMutation(operationWithRequestCompressionRequest).build()); + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index 54019ade037a..a2a8905fe12a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -49,6 +50,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -67,6 +70,7 @@ import software.amazon.awssdk.services.json.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.json.transform.GetWithoutRequiredMembersRequestMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PutOperationWithChecksumRequestMarshaller; @@ -408,6 +412,57 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. 
Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java index f49a3a1d4b89..f2586f4fd1d4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java @@ -27,6 +27,8 @@ import software.amazon.awssdk.services.json.model.JsonException; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -397,6 +399,58 @@ default OperationWithChecksumRequiredResponse operationWithChecksumRequired( .applyMutation(operationWithChecksumRequiredRequest).build()); } + /** + * Invokes the OperationWithRequestCompression operation. 
+ * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithRequestCompression operation.
+ * <p>
+ * This is a convenience which creates an instance of the {@link OperationWithRequestCompressionRequest.Builder}
+ * avoiding the need to create one manually via {@link OperationWithRequestCompressionRequest#builder()}
+ * </p>
+ * + * @param operationWithRequestCompressionRequest + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest.Builder} to + * create a request. + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default OperationWithRequestCompressionResponse operationWithRequestCompression( + Consumer operationWithRequestCompressionRequest) + throws AwsServiceException, SdkClientException, JsonException { + return operationWithRequestCompression(OperationWithRequestCompressionRequest.builder() + .applyMutation(operationWithRequestCompressionRequest).build()); + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index a5e04e6abc24..b0ca9683c0c5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -52,6 +53,8 @@ import software.amazon.awssdk.services.query.model.OperationWithContextParamResponse; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsRequest; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsResponse; import software.amazon.awssdk.services.query.model.PutOperationWithChecksumRequest; @@ -69,6 +72,7 @@ import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithContextParamRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithStaticContextParamsRequestMarshaller; import 
software.amazon.awssdk.services.query.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; @@ -494,6 +498,63 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>QueryException Base class for all service exceptions. Unknown exceptions will be thrown as an
+ * instance of this type.</li>
+ * </ul>
+ * @sample QueryAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Invokes the OperationWithStaticContextParams operation asynchronously. 
* diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index d9fdd08fef61..0ca5d7837899 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -45,6 +46,8 @@ import software.amazon.awssdk.services.query.model.OperationWithContextParamResponse; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsRequest; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsResponse; import software.amazon.awssdk.services.query.model.PutOperationWithChecksumRequest; @@ -62,6 +65,7 @@ import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithContextParamRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithStaticContextParamsRequestMarshaller; import software.amazon.awssdk.services.query.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; @@ -422,6 +426,54 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws QueryException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample QueryClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, QueryException { + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Invokes the OperationWithStaticContextParams operation. * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java index c1dc8837dbbb..959bfd8618bf 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -35,6 +35,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -61,6 +62,8 @@ import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumRequest; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumResponse; import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; @@ -76,6 +79,7 @@ import 
software.amazon.awssdk.services.xml.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.xml.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; @@ -519,6 +523,62 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample XmlAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + return whenCompleteFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Invokes the PutOperationWithChecksum operation asynchronously. 
* diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java index 43e33d67c4dc..d52550654b17 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -45,6 +46,8 @@ import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumRequest; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumResponse; import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; @@ -59,6 +62,7 @@ import software.amazon.awssdk.services.xml.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.xml.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; @@ -361,6 +365,52 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample XmlClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, XmlException { + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withInput(operationWithRequestCompressionRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Invokes the PutOperationWithChecksum operation. * diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java index 636fad74f9fc..3174eb7c6caa 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java @@ -40,7 +40,6 @@ @SdkInternalApi public final class AwsSignedChunkedEncodingInputStream extends AwsChunkedEncodingInputStream { - private static final String CRLF = "\r\n"; private static final String CHUNK_SIGNATURE_HEADER = ";chunk-signature="; private static final String CHECKSUM_SIGNATURE_HEADER = "x-amz-trailer-signature:"; private String previousChunkSignature; diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java index 32804fbd44ea..3551f50c52f8 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java @@ -141,6 +141,18 @@ public final class ProfileProperty { public static final String EC2_METADATA_SERVICE_ENDPOINT = "ec2_metadata_service_endpoint"; + /** + * Whether request compression is disabled for operations marked with the RequestCompression trait. The default value is + * false, i.e., request compression is enabled. + */ + public static final String DISABLE_REQUEST_COMPRESSION = "disable_request_compression"; + + /** + * The minimum compression size in bytes, inclusive, for a request to be compressed. 
The default value is 10_240. + * The value must be non-negative and no greater than 10_485_760. + */ + public static final String REQUEST_MIN_COMPRESSION_SIZE_BYTES = "request_min_compression_size_bytes"; + private ProfileProperty() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/CompressionConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/CompressionConfiguration.java new file mode 100644 index 000000000000..60ea1b94472f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/CompressionConfiguration.java @@ -0,0 +1,141 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Configuration options for operations with the RequestCompression trait to disable request configuration and set the minimum + * compression threshold in bytes. + */ +@SdkPublicApi +public final class CompressionConfiguration implements ToCopyableBuilder { + + private final Boolean requestCompressionEnabled; + private final Integer minimumCompressionThresholdInBytes; + + private CompressionConfiguration(DefaultBuilder builder) { + this.requestCompressionEnabled = builder.requestCompressionEnabled; + this.minimumCompressionThresholdInBytes = builder.minimumCompressionThresholdInBytes; + } + + /** + * If set, returns true if request compression is enabled, else false if request compression is disabled. + */ + public Boolean requestCompressionEnabled() { + return requestCompressionEnabled; + } + + /** + * If set, returns the minimum compression threshold in bytes, inclusive, in order to trigger request compression. + */ + public Integer minimumCompressionThresholdInBytes() { + return minimumCompressionThresholdInBytes; + } + + /** + * Create a {@link CompressionConfiguration.Builder}, used to create a {@link CompressionConfiguration}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompressionConfiguration that = (CompressionConfiguration) o; + + if (!requestCompressionEnabled.equals(that.requestCompressionEnabled)) { + return false; + } + return Objects.equals(minimumCompressionThresholdInBytes, that.minimumCompressionThresholdInBytes); + } + + @Override + public int hashCode() { + int result = requestCompressionEnabled != null ? requestCompressionEnabled.hashCode() : 0; + result = 31 * result + (minimumCompressionThresholdInBytes != null ? 
minimumCompressionThresholdInBytes.hashCode() : 0); + return result; + } + + + public interface Builder extends CopyableBuilder { + + /** + * Configures whether request compression is enabled or not, for operations that the service has designated as + * supporting compression. The default value is true. + * + * @param requestCompressionEnabled + * @return This object for method chaining. + */ + Builder requestCompressionEnabled(Boolean requestCompressionEnabled); + + /** + * Configures the minimum compression threshold, inclusive, in bytes. A request whose size is less than the threshold + * will not be compressed, even if the compression trait is present. The default value is 10_240. The value must be + * non-negative and no greater than 10_485_760. + * + * @param minimumCompressionThresholdInBytes + * @return This object for method chaining. + */ + Builder minimumCompressionThresholdInBytes(Integer minimumCompressionThresholdInBytes); + } + + private static final class DefaultBuilder implements Builder { + private Boolean requestCompressionEnabled; + private Integer minimumCompressionThresholdInBytes; + + private DefaultBuilder() { + } + + private DefaultBuilder(CompressionConfiguration compressionConfiguration) { + this.requestCompressionEnabled = compressionConfiguration.requestCompressionEnabled; + this.minimumCompressionThresholdInBytes = compressionConfiguration.minimumCompressionThresholdInBytes; + } + + @Override + public Builder requestCompressionEnabled(Boolean requestCompressionEnabled) { + this.requestCompressionEnabled = requestCompressionEnabled; + return this; + } + + @Override + public Builder minimumCompressionThresholdInBytes(Integer minimumCompressionThresholdInBytes) { + this.minimumCompressionThresholdInBytes = minimumCompressionThresholdInBytes; + return this; + } + + @Override + public CompressionConfiguration build() { + return new CompressionConfiguration(this); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java index cb4daf65922a..9dc55c2ee910 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java @@ -51,8 +51,8 @@ public abstract class RequestOverrideConfiguration { private final Signer signer; private final List metricPublishers; private final ExecutionAttributes executionAttributes; - private final EndpointProvider endpointProvider; + private final CompressionConfiguration compressionConfiguration; protected RequestOverrideConfiguration(Builder builder) { this.headers = CollectionUtils.deepUnmodifiableMap(builder.headers(), () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); @@ -64,6 +64,7 @@ protected RequestOverrideConfiguration(Builder builder) { this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); this.executionAttributes = ExecutionAttributes.unmodifiableExecutionAttributes(builder.executionAttributes()); this.endpointProvider = builder.endpointProvider(); + this.compressionConfiguration = builder.compressionConfiguration(); } /** @@ -165,6 +166,15 @@ public Optional endpointProvider() { return Optional.ofNullable(endpointProvider); } + /** + * Returns the compression configuration object, if present, which includes options to enable/disable compression and set + * the minimum compression threshold. 
This compression config object supersedes the compression config object set on the + * client. + */ + public Optional compressionConfiguration() { + return Optional.ofNullable(compressionConfiguration); + } + @Override public boolean equals(Object o) { if (this == o) { @@ -182,7 +192,8 @@ public boolean equals(Object o) { Objects.equals(signer, that.signer) && Objects.equals(metricPublishers, that.metricPublishers) && Objects.equals(executionAttributes, that.executionAttributes) && - Objects.equals(endpointProvider, that.endpointProvider); + Objects.equals(endpointProvider, that.endpointProvider) && + Objects.equals(compressionConfiguration, that.compressionConfiguration); } @Override @@ -197,6 +208,7 @@ public int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(metricPublishers); hashCode = 31 * hashCode + Objects.hashCode(executionAttributes); hashCode = 31 * hashCode + Objects.hashCode(endpointProvider); + hashCode = 31 * hashCode + Objects.hashCode(compressionConfiguration); return hashCode; } @@ -438,6 +450,26 @@ default B putRawQueryParameter(String name, String value) { EndpointProvider endpointProvider(); + /** + * Sets the {@link CompressionConfiguration} for this request. The order of precedence, from highest to lowest, + * for this setting is: 1) Per request configuration 2) Client configuration 3) Environment variables 4) Profile setting. + * + * @param compressionConfiguration Request compression configuration object for this request. + */ + B compressionConfiguration(CompressionConfiguration compressionConfiguration); + + /** + * Sets the {@link CompressionConfiguration} for this request. The order of precedence, from highest to lowest, + * for this setting is: 1) Per request configuration 2) Client configuration 3) Environment variables 4) Profile setting. + * + * @param compressionConfigurationConsumer A {@link Consumer} that accepts a {@link CompressionConfiguration.Builder} + * + * @return This object for method chaining + */ + B compressionConfiguration(Consumer compressionConfigurationConsumer); + + CompressionConfiguration compressionConfiguration(); + /** * Create a new {@code SdkRequestOverrideConfiguration} with the properties set on this builder. 
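 * <p>A hypothetical per-request sketch using the new compression option (the threshold value is illustrative):
 * <pre>{@code
 * SdkRequestOverrideConfiguration overrideConfig = SdkRequestOverrideConfiguration.builder()
 *     .compressionConfiguration(c -> c.requestCompressionEnabled(true)
 *                                     .minimumCompressionThresholdInBytes(16_384))
 *     .build();
 * }</pre>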
* @@ -455,9 +487,8 @@ protected abstract static class BuilderImpl implements Builde private Signer signer; private List metricPublishers = new ArrayList<>(); private ExecutionAttributes.Builder executionAttributesBuilder = ExecutionAttributes.builder(); - private EndpointProvider endpointProvider; - + private CompressionConfiguration compressionConfiguration; protected BuilderImpl() { } @@ -472,6 +503,7 @@ protected BuilderImpl(RequestOverrideConfiguration sdkRequestOverrideConfig) { metricPublishers(sdkRequestOverrideConfig.metricPublishers()); executionAttributes(sdkRequestOverrideConfig.executionAttributes()); endpointProvider(sdkRequestOverrideConfig.endpointProvider); + compressionConfiguration(sdkRequestOverrideConfig.compressionConfiguration); } @Override @@ -626,7 +658,6 @@ public void setExecutionAttributes(ExecutionAttributes executionAttributes) { executionAttributes(executionAttributes); } - @Override public B endpointProvider(EndpointProvider endpointProvider) { this.endpointProvider = endpointProvider; @@ -641,5 +672,24 @@ public void setEndpointProvider(EndpointProvider endpointProvider) { public EndpointProvider endpointProvider() { return endpointProvider; } + + @Override + public B compressionConfiguration(CompressionConfiguration compressionConfiguration) { + this.compressionConfiguration = compressionConfiguration; + return (B) this; + } + + @Override + public B compressionConfiguration(Consumer compressionConfigurationConsumer) { + CompressionConfiguration.Builder b = CompressionConfiguration.builder(); + compressionConfigurationConsumer.accept(b); + compressionConfiguration(b.build()); + return (B) this; + } + + @Override + public CompressionConfiguration compressionConfiguration() { + return compressionConfiguration; + } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java index 1e5c400ca617..f04029a3b0fe 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java @@ -184,6 +184,18 @@ public enum SdkSystemSetting implements SystemSetting { */ AWS_USE_FIPS_ENDPOINT("aws.useFipsEndpoint", null), + /** + * Whether request compression is disabled for operations marked with the RequestCompression trait. The default value is + * false, i.e., request compression is enabled. + */ + AWS_DISABLE_REQUEST_COMPRESSION("aws.disableRequestCompression", null), + + /** + * Defines the minimum compression size in bytes, inclusive, for a request to be compressed. The default value is 10_240. + * The value must be non-negative and no greater than 10_485_760. 
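+ * <p>A hypothetical way to set these defaults at the JVM level through the system properties defined here (the
+ * values are illustrative; the profile properties {@code disable_request_compression} and
+ * {@code request_min_compression_size_bytes} serve the same purpose):
+ * <pre>{@code
+ * System.setProperty("aws.disableRequestCompression", "true");        // turn request compression off
+ * System.setProperty("aws.requestMinCompressionSizeBytes", "524288"); // raise the compression threshold
+ * }</pre>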
+ */ + AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES("aws.requestMinCompressionSizeBytes", null), + ; private final String systemProperty; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 898cfbbd4ea4..ecc7bfbe2d54 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -29,6 +29,7 @@ import static software.amazon.awssdk.core.client.config.SdkClientOption.ASYNC_HTTP_CLIENT; import static software.amazon.awssdk.core.client.config.SdkClientOption.CLIENT_TYPE; import static software.amazon.awssdk.core.client.config.SdkClientOption.CLIENT_USER_AGENT; +import static software.amazon.awssdk.core.client.config.SdkClientOption.COMPRESSION_CONFIGURATION; import static software.amazon.awssdk.core.client.config.SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED; import static software.amazon.awssdk.core.client.config.SdkClientOption.ENDPOINT_OVERRIDDEN; import static software.amazon.awssdk.core.client.config.SdkClientOption.EXECUTION_ATTRIBUTES; @@ -63,6 +64,8 @@ import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.core.CompressionConfiguration; +import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.client.config.ClientAsyncConfiguration; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -82,9 +85,11 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.profiles.Profile; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSupplier; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Either; import software.amazon.awssdk.utils.ScheduledExecutorUtils; @@ -237,6 +242,7 @@ private SdkClientConfiguration setOverrides(SdkClientConfiguration configuration builder.option(METRIC_PUBLISHERS, clientOverrideConfiguration.metricPublishers()); builder.option(EXECUTION_ATTRIBUTES, clientOverrideConfiguration.executionAttributes()); builder.option(TOKEN_SIGNER, clientOverrideConfiguration.advancedOption(TOKEN_SIGNER).orElse(null)); + builder.option(COMPRESSION_CONFIGURATION, clientOverrideConfiguration.compressionConfiguration().orElse(null)); clientOverrideConfiguration.advancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE).ifPresent(value -> { builder.option(ENDPOINT_OVERRIDDEN, value); @@ -266,14 +272,83 @@ private SdkClientConfiguration mergeGlobalDefaults(SdkClientConfiguration config Optional.ofNullable(configuration.option(PROFILE_FILE_SUPPLIER)) .orElseGet(() -> ProfileFileSupplier.fixedProfileFile(ProfileFile.defaultProfileFile())); - return configuration.merge(c -> c.option(EXECUTION_INTERCEPTORS, new ArrayList<>()) - .option(ADDITIONAL_HTTP_HEADERS, new LinkedHashMap<>()) - .option(PROFILE_FILE, profileFileSupplier.get()) - .option(PROFILE_FILE_SUPPLIER, 
profileFileSupplier) - .option(PROFILE_NAME, ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow()) - .option(USER_AGENT_PREFIX, SdkUserAgent.create().userAgent()) - .option(USER_AGENT_SUFFIX, "") - .option(CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + configuration = configuration.merge(c -> c.option(EXECUTION_INTERCEPTORS, new ArrayList<>()) + .option(ADDITIONAL_HTTP_HEADERS, new LinkedHashMap<>()) + .option(PROFILE_FILE, profileFileSupplier.get()) + .option(PROFILE_FILE_SUPPLIER, profileFileSupplier) + .option(PROFILE_NAME, + ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow()) + .option(USER_AGENT_PREFIX, SdkUserAgent.create().userAgent()) + .option(USER_AGENT_SUFFIX, "") + .option(CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + + return addCompressionConfigGlobalDefaults(configuration); + } + + private SdkClientConfiguration addCompressionConfigGlobalDefaults(SdkClientConfiguration configuration) { + Optional requestCompressionEnabled = getCompressionEnabled(configuration); + Optional minCompressionThreshold = getCompressionThreshold(configuration); + + if (requestCompressionEnabled.isPresent() && minCompressionThreshold.isPresent()) { + return configuration; + } + + Boolean compressionEnabled = requestCompressionEnabled.orElse(null); + Integer compressionThreshold = minCompressionThreshold.orElse(null); + + if (compressionEnabled == null) { + Optional systemSetting = SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue(); + if (systemSetting.isPresent()) { + compressionEnabled = !systemSetting.get(); + } else { + Profile profile = configuration.option(PROFILE_FILE_SUPPLIER).get() + .profile(configuration.option(PROFILE_NAME)).orElse(null); + if (profile != null) { + Optional profileSetting = profile.booleanProperty(ProfileProperty.DISABLE_REQUEST_COMPRESSION); + if (profileSetting.isPresent()) { + compressionEnabled = !profileSetting.get(); + } + } + } + } + + if (compressionThreshold == null) { + Optional systemSetting = SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue(); + if (systemSetting.isPresent()) { + compressionThreshold = systemSetting.get(); + } else { + Profile profile = configuration.option(PROFILE_FILE_SUPPLIER).get() + .profile(configuration.option(PROFILE_NAME)).orElse(null); + if (profile != null) { + Optional profileSetting = profile.property(ProfileProperty.REQUEST_MIN_COMPRESSION_SIZE_BYTES); + if (profileSetting.isPresent()) { + compressionThreshold = Integer.parseInt(profileSetting.get()); + } + } + } + } + + CompressionConfiguration compressionConfig = + CompressionConfiguration.builder() + .requestCompressionEnabled(compressionEnabled) + .minimumCompressionThresholdInBytes(compressionThreshold) + .build(); + + return configuration.toBuilder().option(COMPRESSION_CONFIGURATION, compressionConfig).build(); + } + + private Optional getCompressionEnabled(SdkClientConfiguration configuration) { + if (configuration.option(COMPRESSION_CONFIGURATION) == null) { + return Optional.empty(); + } + return Optional.ofNullable(configuration.option(COMPRESSION_CONFIGURATION).requestCompressionEnabled()); + } + + private Optional getCompressionThreshold(SdkClientConfiguration configuration) { + if (configuration.option(COMPRESSION_CONFIGURATION) == null) { + return Optional.empty(); + } + return Optional.ofNullable(configuration.option(COMPRESSION_CONFIGURATION).minimumCompressionThresholdInBytes()); } /** @@ -577,6 +652,4 @@ public void close() { // Do nothing, this client is managed by the customer. 
} } - - } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index 83cf2317038d..dc3ce704a4d9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -27,6 +27,7 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ToBuilderIgnoreField; +import software.amazon.awssdk.core.CompressionConfiguration; import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; @@ -64,6 +65,7 @@ public final class ClientOverrideConfiguration private final List metricPublishers; private final ExecutionAttributes executionAttributes; private final ScheduledExecutorService scheduledExecutorService; + private final CompressionConfiguration compressionConfiguration; /** * Initialize this configuration. Private to require use of {@link #builder()}. @@ -80,6 +82,7 @@ private ClientOverrideConfiguration(Builder builder) { this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); this.executionAttributes = ExecutionAttributes.unmodifiableExecutionAttributes(builder.executionAttributes()); this.scheduledExecutorService = builder.scheduledExecutorService(); + this.compressionConfiguration = builder.compressionConfiguration(); } @Override @@ -96,7 +99,8 @@ public Builder toBuilder() { .defaultProfileName(defaultProfileName) .executionAttributes(executionAttributes) .metricPublishers(metricPublishers) - .scheduledExecutorService(scheduledExecutorService); + .scheduledExecutorService(scheduledExecutorService) + .compressionConfiguration(compressionConfiguration); } /** @@ -230,19 +234,30 @@ public ExecutionAttributes executionAttributes() { return executionAttributes; } + /** + * The compression configuration object, which includes options to enable/disable compression and set the minimum + * compression threshold. 
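+ * <p>A hypothetical client-level sketch using the corresponding builder option (the threshold value is
+ * illustrative):
+ * <pre>{@code
+ * ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder()
+ *     .compressionConfiguration(c -> c.requestCompressionEnabled(true)
+ *                                     .minimumCompressionThresholdInBytes(20_000))
+ *     .build();
+ * }</pre>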
+ * + * @see Builder#compressionConfiguration(CompressionConfiguration) + */ + public Optional compressionConfiguration() { + return Optional.ofNullable(compressionConfiguration); + } + @Override public String toString() { return ToString.builder("ClientOverrideConfiguration") - .add("headers", headers) - .add("retryPolicy", retryPolicy) - .add("apiCallTimeout", apiCallTimeout) - .add("apiCallAttemptTimeout", apiCallAttemptTimeout) - .add("executionInterceptors", executionInterceptors) - .add("advancedOptions", advancedOptions) - .add("profileFile", defaultProfileFile) - .add("profileName", defaultProfileName) - .add("scheduledExecutorService", scheduledExecutorService) - .build(); + .add("headers", headers) + .add("retryPolicy", retryPolicy) + .add("apiCallTimeout", apiCallTimeout) + .add("apiCallAttemptTimeout", apiCallAttemptTimeout) + .add("executionInterceptors", executionInterceptors) + .add("advancedOptions", advancedOptions) + .add("profileFile", defaultProfileFile) + .add("profileName", defaultProfileName) + .add("scheduledExecutorService", scheduledExecutorService) + .add("compressionConfiguration", compressionConfiguration) + .build(); } /** @@ -513,6 +528,22 @@ default Builder retryPolicy(RetryMode retryMode) { Builder putExecutionAttribute(ExecutionAttribute attribute, T value); ExecutionAttributes executionAttributes(); + + /** + * Sets the {@link CompressionConfiguration} for this client. + */ + Builder compressionConfiguration(CompressionConfiguration compressionConfiguration); + + /** + * Sets the {@link CompressionConfiguration} for this client. + */ + default Builder compressionConfiguration(Consumer compressionConfiguration) { + return compressionConfiguration(CompressionConfiguration.builder() + .applyMutation(compressionConfiguration) + .build()); + } + + CompressionConfiguration compressionConfiguration(); } /** @@ -530,6 +561,7 @@ private static final class DefaultClientOverrideConfigurationBuilder implements private List metricPublishers = new ArrayList<>(); private ExecutionAttributes.Builder executionAttributes = ExecutionAttributes.builder(); private ScheduledExecutorService scheduledExecutorService; + private CompressionConfiguration compressionConfiguration; @Override public Builder headers(Map> headers) { @@ -724,6 +756,21 @@ public ExecutionAttributes executionAttributes() { return executionAttributes.build(); } + @Override + public Builder compressionConfiguration(CompressionConfiguration compressionConfiguration) { + this.compressionConfiguration = compressionConfiguration; + return this; + } + + public void setRequestCompressionEnabled(CompressionConfiguration compressionConfiguration) { + compressionConfiguration(compressionConfiguration); + } + + @Override + public CompressionConfiguration compressionConfiguration() { + return compressionConfiguration; + } + @Override public ClientOverrideConfiguration build() { return new ClientOverrideConfiguration(this); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java index 07361d75f23d..f93acab2487b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java @@ -23,6 +23,7 @@ import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.ClientType; +import 
software.amazon.awssdk.core.CompressionConfiguration; import software.amazon.awssdk.core.ServiceConfiguration; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -190,6 +191,12 @@ public final class SdkClientOption extends ClientOption { public static final SdkClientOption CLIENT_CONTEXT_PARAMS = new SdkClientOption<>(AttributeMap.class); + /** + * Option to specify the compression configuration settings. + */ + public static final SdkClientOption COMPRESSION_CONFIGURATION = + new SdkClientOption<>(CompressionConfiguration.class); + private SdkClientOption(Class valueClass) { super(valueClass); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java index 6e71448dc98f..4abbb390a60f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java @@ -109,7 +109,6 @@ public class SdkExecutionAttribute { public static final ExecutionAttribute HTTP_RESPONSE_CHECKSUM_VALIDATION = new ExecutionAttribute<>( "HttpResponseChecksumValidation"); - protected SdkExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java index 3080d0fd47b3..75e999bc1020 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java @@ -18,6 +18,7 @@ import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.SdkHttpExecutionAttributes; @@ -92,6 +93,12 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { public static final ExecutionAttribute IS_DISCOVERED_ENDPOINT = new ExecutionAttribute<>("IsDiscoveredEndpoint"); + /** + * The supported compression algorithms for an operation, and whether the operation is streaming or not. 
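+ * <p>Generated clients populate this attribute per operation, for example
+ * {@code RequestCompression.builder().encodings("gzip").isStreaming(false).build()}.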
+ */ + public static final ExecutionAttribute REQUEST_COMPRESSION = + new ExecutionAttribute<>("RequestCompression"); + private SdkInternalExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java index c171b0787678..ee0f20b64969 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java @@ -21,14 +21,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.utils.Logger; -import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; /** - * Class that will buffer incoming BufferBytes of totalBytes length to chunks of bufferSize* + * Class that will buffer incoming BufferBytes to chunks of bufferSize. + * If totalBytes is not provided, i.e. content-length is unknown, {@link #getBufferedData()} should be used in the Subscriber's + * {@code onComplete()} to check for a final chunk that is smaller than the chunk size, and send if present. */ @SdkInternalApi public final class ChunkBuffer { @@ -36,11 +38,9 @@ public final class ChunkBuffer { private final AtomicLong transferredBytes; private final ByteBuffer currentBuffer; private final int chunkSize; - private final long totalBytes; + private final Long totalBytes; private ChunkBuffer(Long totalBytes, Integer bufferSize) { - Validate.notNull(totalBytes, "The totalBytes must not be null"); - int chunkSize = bufferSize != null ? bufferSize : DEFAULT_ASYNC_CHUNK_SIZE; this.chunkSize = chunkSize; this.currentBuffer = ByteBuffer.allocate(chunkSize); @@ -52,14 +52,12 @@ public static Builder builder() { return new DefaultBuilder(); } - /** * Split the input {@link ByteBuffer} into multiple smaller {@link ByteBuffer}s, each of which contains {@link #chunkSize} * worth of bytes. If the last chunk of the input ByteBuffer contains less than {@link #chunkSize} data, the last chunk will * be buffered. 
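 * <p>A hypothetical usage sketch ({@code payload} is an illustrative byte array and 16 an illustrative chunk size):
 * <pre>{@code
 * ChunkBuffer buffer = ChunkBuffer.builder().bufferSize(16).build();
 * Iterable<ByteBuffer> chunks = buffer.split(ByteBuffer.wrap(payload));
 * // once the upstream signals completion, flush any partially filled final chunk
 * Optional<ByteBuffer> lastChunk = buffer.getBufferedData();
 * }</pre>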
*/ public synchronized Iterable split(ByteBuffer inputByteBuffer) { - if (!inputByteBuffer.hasRemaining()) { return Collections.singletonList(inputByteBuffer); } @@ -71,7 +69,7 @@ public synchronized Iterable split(ByteBuffer inputByteBuffer) { fillCurrentBuffer(inputByteBuffer); if (isCurrentBufferFull()) { - addCurrentBufferToIterable(byteBuffers, chunkSize); + addCurrentBufferToIterable(byteBuffers); } } @@ -82,8 +80,7 @@ public synchronized Iterable split(ByteBuffer inputByteBuffer) { // If this is the last chunk, add data buffered to the iterable if (isLastChunk()) { - int remainingBytesInBuffer = currentBuffer.position(); - addCurrentBufferToIterable(byteBuffers, remainingBytesInBuffer); + addCurrentBufferToIterable(byteBuffers); } return byteBuffers; } @@ -111,19 +108,38 @@ private void splitRemainingInputByteBuffer(ByteBuffer inputByteBuffer, List getBufferedData() { + int remainingBytesInBuffer = currentBuffer.position(); + + if (remainingBytesInBuffer == 0) { + return Optional.empty(); + } + + ByteBuffer bufferedChunk = ByteBuffer.allocate(remainingBytesInBuffer); + currentBuffer.flip(); + bufferedChunk.put(currentBuffer); + bufferedChunk.flip(); + return Optional.of(bufferedChunk); + } + private boolean isLastChunk() { + if (totalBytes == null) { + return false; + } long remainingBytes = totalBytes - transferredBytes.get(); return remainingBytes != 0 && remainingBytes == currentBuffer.position(); } - private void addCurrentBufferToIterable(List byteBuffers, int capacity) { - ByteBuffer bufferedChunk = ByteBuffer.allocate(capacity); - currentBuffer.flip(); - bufferedChunk.put(currentBuffer); - bufferedChunk.flip(); - byteBuffers.add(bufferedChunk); - transferredBytes.addAndGet(bufferedChunk.remaining()); - currentBuffer.clear(); + private void addCurrentBufferToIterable(List byteBuffers) { + Optional bufferedChunk = getBufferedData(); + if (bufferedChunk.isPresent()) { + byteBuffers.add(bufferedChunk.get()); + transferredBytes.addAndGet(bufferedChunk.get().remaining()); + currentBuffer.clear(); + } } private void fillCurrentBuffer(ByteBuffer inputByteBuffer) { @@ -151,8 +167,6 @@ public interface Builder extends SdkBuilder { Builder bufferSize(int bufferSize); Builder totalBytes(long totalBytes); - - } private static final class DefaultBuilder implements Builder { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java new file mode 100644 index 000000000000..82da601f0acc --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java @@ -0,0 +1,212 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static software.amazon.awssdk.core.internal.io.AwsChunkedInputStream.DEFAULT_CHUNK_SIZE; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.DelegatingSubscriber; +import software.amazon.awssdk.utils.async.FlatteningSubscriber; +import software.amazon.awssdk.utils.builder.SdkBuilder; + +/** + * Wrapper class to wrap an AsyncRequestBody. + * This will chunk and compress the payload with the provided {@link Compressor}. + */ +@SdkInternalApi +public class CompressionAsyncRequestBody implements AsyncRequestBody { + + private final AsyncRequestBody wrapped; + private final Compressor compressor; + private final int chunkSize; + + private CompressionAsyncRequestBody(DefaultBuilder builder) { + this.wrapped = Validate.paramNotNull(builder.asyncRequestBody, "asyncRequestBody"); + this.compressor = Validate.paramNotNull(builder.compressor, "compressor"); + this.chunkSize = builder.chunkSize != null ? builder.chunkSize : DEFAULT_CHUNK_SIZE; + } + + @Override + public void subscribe(Subscriber s) { + Validate.notNull(s, "Subscription MUST NOT be null."); + + SdkPublisher> split = split(wrapped); + SdkPublisher flattening = flattening(split); + flattening.map(compressor::compress).subscribe(s); + } + + @Override + public Optional contentLength() { + return wrapped.contentLength(); + } + + @Override + public String contentType() { + return wrapped.contentType(); + } + + private SdkPublisher> split(SdkPublisher source) { + return subscriber -> source.subscribe(new SplittingSubscriber(subscriber, chunkSize)); + } + + private SdkPublisher flattening(SdkPublisher> source) { + return subscriber -> source.subscribe(new FlatteningSubscriber<>(subscriber)); + } + + /** + * @return Builder instance to construct a {@link CompressionAsyncRequestBody}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + public interface Builder extends SdkBuilder { + + /** + * Sets the AsyncRequestBody that will be wrapped. + * @param asyncRequestBody + * @return This builder for method chaining. + */ + Builder asyncRequestBody(AsyncRequestBody asyncRequestBody); + + /** + * Sets the compressor to compress the request. + * @param compressor + * @return This builder for method chaining. + */ + Builder compressor(Compressor compressor); + + /** + * Sets the chunk size. Default size is 128 * 1024. + * @param chunkSize + * @return This builder for method chaining. 
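+ * <p>A hypothetical construction sketch (the wrapped body and chunk size are illustrative):
+ * <pre>{@code
+ * AsyncRequestBody compressed = CompressionAsyncRequestBody.builder()
+ *     .asyncRequestBody(AsyncRequestBody.fromString("payload"))
+ *     .compressor(CompressorType.GZIP.newCompressor())
+ *     .chunkSize(128 * 1024)
+ *     .build();
+ * }</pre>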
+ */ + Builder chunkSize(Integer chunkSize); + } + + private static final class DefaultBuilder implements Builder { + + private AsyncRequestBody asyncRequestBody; + private Compressor compressor; + private Integer chunkSize; + + @Override + public CompressionAsyncRequestBody build() { + return new CompressionAsyncRequestBody(this); + } + + @Override + public Builder asyncRequestBody(AsyncRequestBody asyncRequestBody) { + this.asyncRequestBody = asyncRequestBody; + return this; + } + + @Override + public Builder compressor(Compressor compressor) { + this.compressor = compressor; + return this; + } + + @Override + public Builder chunkSize(Integer chunkSize) { + this.chunkSize = chunkSize; + return this; + } + } + + private static final class SplittingSubscriber extends DelegatingSubscriber> { + private final ChunkBuffer chunkBuffer; + private final AtomicBoolean upstreamDone = new AtomicBoolean(false); + private final AtomicLong downstreamDemand = new AtomicLong(); + private final Object lock = new Object(); + private volatile boolean sentFinalChunk = false; + + protected SplittingSubscriber(Subscriber> subscriber, int chunkSize) { + super(subscriber); + this.chunkBuffer = ChunkBuffer.builder() + .bufferSize(chunkSize) + .build(); + } + + @Override + public void onSubscribe(Subscription s) { + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (n <= 0) { + throw new IllegalArgumentException("n > 0 required but it was " + n); + } + + downstreamDemand.getAndAdd(n); + + if (upstreamDone.get()) { + sendFinalChunk(); + } else { + s.request(n); + } + } + + @Override + public void cancel() { + s.cancel(); + } + }); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + downstreamDemand.decrementAndGet(); + Iterable buffers = chunkBuffer.split(byteBuffer); + subscriber.onNext(buffers); + } + + @Override + public void onComplete() { + upstreamDone.compareAndSet(false, true); + if (downstreamDemand.get() > 0) { + sendFinalChunk(); + } + } + + @Override + public void onError(Throwable t) { + upstreamDone.compareAndSet(false, true); + super.onError(t); + } + + private void sendFinalChunk() { + synchronized (lock) { + if (!sentFinalChunk) { + sentFinalChunk = true; + Optional byteBuffer = chunkBuffer.getBufferedData(); + byteBuffer.ifPresent(buffer -> subscriber.onNext(Collections.singletonList(buffer))); + subscriber.onComplete(); + } + } + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/Compressor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/Compressor.java new file mode 100644 index 000000000000..503752c26dab --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/Compressor.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.compression; + +import java.io.InputStream; +import java.nio.ByteBuffer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; + +/** + * Interface for compressors used by {@link CompressRequestStage} to compress requests. + */ +@SdkInternalApi +public interface Compressor { + + /** + * The compression algorithm type. + * + * @return The {@link String} compression algorithm type. + */ + String compressorType(); + + /** + * Compress a {@link SdkBytes} payload. + * + * @param content + * @return The compressed {@link SdkBytes}. + */ + SdkBytes compress(SdkBytes content); + + /** + * Compress a byte[] payload. + * + * @param content + * @return The compressed byte array. + */ + default byte[] compress(byte[] content) { + return compress(SdkBytes.fromByteArray(content)).asByteArray(); + } + + /** + * Compress an {@link InputStream} payload. + * + * @param content + * @return The compressed {@link InputStream}. + */ + default InputStream compress(InputStream content) { + return compress(SdkBytes.fromInputStream(content)).asInputStream(); + } + + /** + * Compress an {@link ByteBuffer} payload. + * + * @param content + * @return The compressed {@link ByteBuffer}. + */ + default ByteBuffer compress(ByteBuffer content) { + return compress(SdkBytes.fromByteBuffer(content)).asByteBuffer(); + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/CompressorType.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/CompressorType.java new file mode 100644 index 000000000000..6b9b1ae11085 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/CompressorType.java @@ -0,0 +1,115 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.compression; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +/** + * The supported compression algorithms for operations with the requestCompression trait. Each supported algorithm will have an + * {@link Compressor} implementation. + */ +@SdkInternalApi +public final class CompressorType { + + public static final CompressorType GZIP = CompressorType.of("gzip"); + + private static Map compressorMap = new HashMap() {{ + put("gzip", new GzipCompressor()); + }}; + + private final String id; + + private CompressorType(String id) { + this.id = id; + } + + /** + * Creates a new {@link CompressorType} of the given value. 
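+ * <p>For example (a hypothetical sketch):
+ * <pre>{@code
+ * Compressor gzip = CompressorType.of("gzip").newCompressor();
+ * SdkBytes compressed = gzip.compress(SdkBytes.fromUtf8String("payload"));
+ * }</pre>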
+ */ + public static CompressorType of(String value) { + Validate.paramNotBlank(value, "compressionType"); + return CompressorTypeCache.put(value); + } + + /** + * Returns the {@link Set} of {@link String}s of compressor types supported by the SDK. + */ + public static Set compressorTypes() { + return compressorMap.keySet(); + } + + /** + * Whether or not the compressor type is supported by the SDK. + */ + public static boolean isSupported(String compressionType) { + return compressorTypes().contains(compressionType); + } + + /** + * Maps the {@link CompressorType} to its corresponding {@link Compressor}. + */ + public Compressor newCompressor() { + Compressor compressor = compressorMap.getOrDefault(this.id, null); + if (compressor == null) { + throw new UnsupportedOperationException("The compression type " + id + " does not have an implementation of " + + "Compressor"); + } + return compressor; + } + + @Override + public String toString() { + return id; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompressorType that = (CompressorType) o; + return Objects.equals(id, that.id) + && Objects.equals(compressorMap, that.compressorMap); + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (compressorMap != null ? compressorMap.hashCode() : 0); + return result; + } + + private static class CompressorTypeCache { + private static final ConcurrentHashMap VALUES = new ConcurrentHashMap<>(); + + private CompressorTypeCache() { + } + + private static CompressorType put(String value) { + return VALUES.computeIfAbsent(value, v -> new CompressorType(value)); + } + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java new file mode 100644 index 000000000000..b849b81fe0ca --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.compression; + +import static software.amazon.awssdk.utils.IoUtils.closeQuietly; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.zip.GZIPOutputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; + +@SdkInternalApi +public final class GzipCompressor implements Compressor { + + private static final String COMPRESSOR_TYPE = "gzip"; + private static final Logger log = LoggerFactory.getLogger(GzipCompressor.class); + + @Override + public String compressorType() { + return COMPRESSOR_TYPE; + } + + @Override + public SdkBytes compress(SdkBytes content) { + GZIPOutputStream gzipOutputStream = null; + try { + ByteArrayOutputStream compressedOutputStream = new ByteArrayOutputStream(); + gzipOutputStream = new GZIPOutputStream(compressedOutputStream); + gzipOutputStream.write(content.asByteArray()); + gzipOutputStream.close(); + return SdkBytes.fromByteArray(compressedOutputStream.toByteArray()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + closeQuietly(gzipOutputStream, log); + } + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java index 766b998fa710..5f00eb4cfc71 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java @@ -38,6 +38,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncExecutionFailureExceptionReportingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncSigningStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HttpChecksumStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeAsyncHttpRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeRequestImmutableStage; @@ -171,6 +172,7 @@ public CompletableFuture execute( .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) + .then(() -> new CompressRequestStage(httpClientDependencies)) .then(() -> new HttpChecksumStage(ClientType.ASYNC)) .then(MakeRequestImmutableStage::new) .then(RequestPipelineBuilder diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java index 75cab29c6f51..aed81c4c0aed 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java @@ -36,6 +36,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyUserAgentStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.BeforeTransmissionExecutionInterceptorsStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.BeforeUnmarshallingExecutionInterceptorsStage; +import 
software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ExecutionFailureExceptionReportingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HandleResponseStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HttpChecksumStage; @@ -172,6 +173,7 @@ public OutputT execute(HttpResponseHandler> response .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) + .then(() -> new CompressRequestStage(httpClientDependencies)) .then(() -> new HttpChecksumStage(ClientType.SYNC)) .then(MakeRequestImmutableStage::new) // End of mutating request diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java new file mode 100644 index 000000000000..89920d916004 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java @@ -0,0 +1,208 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import static software.amazon.awssdk.core.client.config.SdkClientOption.COMPRESSION_CONFIGURATION; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.CompressionConfiguration; +import software.amazon.awssdk.core.RequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.async.CompressionAsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.CompressorType; +import software.amazon.awssdk.core.internal.http.HttpClientDependencies; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; +import software.amazon.awssdk.core.internal.sync.CompressionContentStreamProvider; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.utils.IoUtils; + +/** + * Compress requests whose operations are marked with the "requestCompression" C2J trait. 
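+ * <p>
+ * Summary of the stage below: a request is compressed only when the operation carries the trait, one of the trait's
+ * encodings is supported by the SDK (currently "gzip"), compression is enabled at the request or client level, and,
+ * for non-streaming payloads, the payload size meets the configured minimum threshold. Streaming payloads are
+ * compressed only when "Transfer-Encoding: chunked" is used.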
+ */ +@SdkInternalApi +public class CompressRequestStage implements MutableRequestToRequestPipeline { + private static final int DEFAULT_MIN_COMPRESSION_SIZE = 10_240; + private static final int MIN_COMPRESSION_SIZE_LIMIT = 10_485_760; + private final CompressionConfiguration compressionConfig; + + public CompressRequestStage(HttpClientDependencies dependencies) { + compressionConfig = dependencies.clientConfiguration().option(COMPRESSION_CONFIGURATION); + } + + @Override + public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, RequestExecutionContext context) + throws Exception { + + if (!shouldCompress(input, context)) { + return input; + } + + Compressor compressor = resolveCompressorType(context.executionAttributes()); + + if (!isStreaming(context)) { + compressEntirePayload(input, compressor); + updateContentEncodingHeader(input, compressor); + updateContentLengthHeader(input); + return input; + } + + if (!isTransferEncodingChunked(input)) { + return input; + } + + if (context.requestProvider() == null) { + input.contentStreamProvider(new CompressionContentStreamProvider(input.contentStreamProvider(), compressor)); + } else { + context.requestProvider(CompressionAsyncRequestBody.builder() + .asyncRequestBody(context.requestProvider()) + .compressor(compressor) + .build()); + } + + updateContentEncodingHeader(input, compressor); + return input; + } + + private boolean shouldCompress(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + if (context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION) == null) { + return false; + } + if (resolveCompressorType(context.executionAttributes()) == null) { + return false; + } + if (!resolveRequestCompressionEnabled(context)) { + return false; + } + if (isStreaming(context)) { + return true; + } + if (input.contentStreamProvider() == null) { + return false; + } + return isRequestSizeWithinThreshold(input, context); + } + + private boolean isStreaming(RequestExecutionContext context) { + return context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).isStreaming(); + } + + private void compressEntirePayload(SdkHttpFullRequest.Builder input, Compressor compressor) { + ContentStreamProvider wrappedProvider = input.contentStreamProvider(); + ContentStreamProvider compressedStreamProvider = () -> compressor.compress(wrappedProvider.newStream()); + input.contentStreamProvider(compressedStreamProvider); + } + + private void updateContentEncodingHeader(SdkHttpFullRequest.Builder input, + Compressor compressor) { + if (input.firstMatchingHeader("Content-encoding").isPresent()) { + input.appendHeader("Content-encoding", compressor.compressorType()); + } else { + input.putHeader("Content-encoding", compressor.compressorType()); + } + } + + private void updateContentLengthHeader(SdkHttpFullRequest.Builder input) { + InputStream inputStream = input.contentStreamProvider().newStream(); + try { + byte[] bytes = IoUtils.toByteArray(inputStream); + String length = String.valueOf(bytes.length); + input.putHeader("Content-Length", length); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private boolean isTransferEncodingChunked(SdkHttpFullRequest.Builder input) { + return input.firstMatchingHeader("Transfer-Encoding") + .map(headerValue -> headerValue.equals("chunked")) + .orElse(false); + } + + private Compressor resolveCompressorType(ExecutionAttributes executionAttributes) { + List encodings = + 
executionAttributes.getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).getEncodings(); + + for (String encoding: encodings) { + encoding = encoding.toLowerCase(Locale.ROOT); + if (CompressorType.isSupported(encoding)) { + return CompressorType.of(encoding).newCompressor(); + } + } + return null; + } + + private boolean resolveRequestCompressionEnabled(RequestExecutionContext context) { + + Optional requestCompressionEnabledRequestLevel = + context.originalRequest().overrideConfiguration() + .flatMap(RequestOverrideConfiguration::compressionConfiguration) + .map(CompressionConfiguration::requestCompressionEnabled); + if (requestCompressionEnabledRequestLevel.isPresent()) { + return requestCompressionEnabledRequestLevel.get(); + } + + Boolean isEnabled = compressionConfig.requestCompressionEnabled(); + if (isEnabled != null) { + return isEnabled; + } + + return true; + } + + private boolean isRequestSizeWithinThreshold(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + int minimumCompressionThreshold = resolveMinCompressionSize(context); + validateMinCompressionSizeInput(minimumCompressionThreshold); + int requestSize = SdkBytes.fromInputStream(input.contentStreamProvider().newStream()).asByteArray().length; + return requestSize >= minimumCompressionThreshold; + } + + private int resolveMinCompressionSize(RequestExecutionContext context) { + + Optional minimumCompressionSizeRequestLevel = + context.originalRequest().overrideConfiguration() + .flatMap(RequestOverrideConfiguration::compressionConfiguration) + .map(CompressionConfiguration::minimumCompressionThresholdInBytes); + if (minimumCompressionSizeRequestLevel.isPresent()) { + return minimumCompressionSizeRequestLevel.get(); + } + + Integer threshold = compressionConfig.minimumCompressionThresholdInBytes(); + if (threshold != null) { + return threshold; + } + + return DEFAULT_MIN_COMPRESSION_SIZE; + } + + private void validateMinCompressionSizeInput(int minCompressionSize) { + if (!(minCompressionSize >= 0 && minCompressionSize <= MIN_COMPRESSION_SIZE_LIMIT)) { + throw SdkClientException.create("The minimum compression size must be non-negative with a maximum value of " + + "10485760.", new IllegalArgumentException()); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java new file mode 100644 index 000000000000..5be35f0ae46f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.interceptor.trait; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class RequestCompression { + + private List encodings; + private boolean isStreaming; + + private RequestCompression(Builder builder) { + this.encodings = builder.encodings; + this.isStreaming = builder.isStreaming; + } + + public List getEncodings() { + return encodings; + } + + public boolean isStreaming() { + return isStreaming; + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + + private List encodings; + private boolean isStreaming; + + public Builder encodings(List encodings) { + this.encodings = encodings; + return this; + } + + public Builder encodings(String... encodings) { + if (encodings != null) { + this.encodings = Arrays.asList(encodings); + } + return this; + } + + public Builder isStreaming(boolean isStreaming) { + this.isStreaming = isStreaming; + return this; + } + + public RequestCompression build() { + return new RequestCompression(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RequestCompression that = (RequestCompression) o; + return isStreaming == that.isStreaming() + && Objects.equals(encodings, that.getEncodings()); + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + (isStreaming ? 1 : 0); + hashCode = 31 * hashCode + Objects.hashCode(encodings); + return hashCode; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java index f382bd5ced40..ec4870f5e686 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java @@ -22,8 +22,6 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.internal.chunked.AwsChunkedEncodingConfig; -import software.amazon.awssdk.core.io.SdkInputStream; -import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; /** @@ -37,37 +35,18 @@ * the wrapped stream. */ @SdkInternalApi -public abstract class AwsChunkedEncodingInputStream extends SdkInputStream { +public abstract class AwsChunkedEncodingInputStream extends AwsChunkedInputStream { - public static final int DEFAULT_CHUNK_SIZE = 128 * 1024; - protected static final int SKIP_BUFFER_SIZE = 256 * 1024; protected static final String CRLF = "\r\n"; protected static final byte[] FINAL_CHUNK = new byte[0]; protected static final String HEADER_COLON_SEPARATOR = ":"; - private static final Logger log = Logger.loggerFor(AwsChunkedEncodingInputStream.class); protected byte[] calculatedChecksum = null; protected final String checksumHeaderForTrailer; protected boolean isTrailingTerminated = true; - private InputStream is = null; private final int chunkSize; private final int maxBufferSize; private final SdkChecksum sdkChecksum; private boolean isLastTrailingCrlf; - /** - * Iterator on the current chunk. 
- */ - private ChunkContentIterator currentChunkIterator; - - /** - * Iterator on the buffer of the decoded stream, - * Null if the wrapped stream is marksupported, - * otherwise it will be initialized when this wrapper is marked. - */ - private DecodedStreamBuffer decodedStreamBuffer; - - private boolean isAtStart = true; - private boolean isTerminating = false; - /** * Creates a chunked encoding input stream initialized with the originating stream. The configuration allows @@ -89,10 +68,10 @@ protected AwsChunkedEncodingInputStream(InputStream in, AwsChunkedEncodingInputStream originalChunkedStream = (AwsChunkedEncodingInputStream) in; providedMaxBufferSize = Math.max(originalChunkedStream.maxBufferSize, providedMaxBufferSize); is = originalChunkedStream.is; - decodedStreamBuffer = originalChunkedStream.decodedStreamBuffer; + underlyingStreamBuffer = originalChunkedStream.underlyingStreamBuffer; } else { is = in; - decodedStreamBuffer = null; + underlyingStreamBuffer = null; } this.chunkSize = awsChunkedEncodingConfig.chunkSize(); this.maxBufferSize = providedMaxBufferSize; @@ -153,19 +132,6 @@ public T checksumHeaderForTrailer(String checksumHeaderForTrailer) { } - @Override - public int read() throws IOException { - byte[] tmp = new byte[1]; - int count = read(tmp, 0, 1); - if (count > 0) { - log.debug(() -> "One byte read from the stream."); - int unsignedByte = (int) tmp[0] & 0xFF; - return unsignedByte; - } else { - return count; - } - } - @Override public int read(byte[] b, int off, int len) throws IOException { abortIfNeeded(); @@ -211,32 +177,6 @@ private boolean setUpTrailingChunks() { return true; } - @Override - public long skip(long n) throws IOException { - if (n <= 0) { - return 0; - } - long remaining = n; - int toskip = (int) Math.min(SKIP_BUFFER_SIZE, n); - byte[] temp = new byte[toskip]; - while (remaining > 0) { - int count = read(temp, 0, toskip); - if (count < 0) { - break; - } - remaining -= count; - } - return n - remaining; - } - - /** - * @see java.io.InputStream#markSupported() - */ - @Override - public boolean markSupported() { - return true; - } - /** * The readlimit parameter is ignored. 
*/ @@ -256,7 +196,7 @@ public void mark(int readlimit) { } else { log.debug(() -> "AwsChunkedEncodingInputStream marked at the start of the stream " + "(initializing the buffer since the wrapped stream is not mark-supported)."); - decodedStreamBuffer = new DecodedStreamBuffer(maxBufferSize); + underlyingStreamBuffer = new UnderlyingStreamBuffer(maxBufferSize); } } @@ -280,8 +220,8 @@ public void reset() throws IOException { is.reset(); } else { log.debug(() -> "AwsChunkedEncodingInputStream reset (will use the buffer of the decoded stream)."); - Validate.notNull(decodedStreamBuffer, "Cannot reset the stream because the mark is not set."); - decodedStreamBuffer.startReadBuffer(); + Validate.notNull(underlyingStreamBuffer, "Cannot reset the stream because the mark is not set."); + underlyingStreamBuffer.startReadBuffer(); } isAtStart = true; isTerminating = false; @@ -298,14 +238,14 @@ private boolean setUpNextChunk() throws IOException { int chunkSizeInBytes = 0; while (chunkSizeInBytes < chunkSize) { /** Read from the buffer of the decoded stream */ - if (null != decodedStreamBuffer && decodedStreamBuffer.hasNext()) { - chunkData[chunkSizeInBytes++] = decodedStreamBuffer.next(); + if (null != underlyingStreamBuffer && underlyingStreamBuffer.hasNext()) { + chunkData[chunkSizeInBytes++] = underlyingStreamBuffer.next(); } else { /** Read from the wrapped stream */ int bytesToRead = chunkSize - chunkSizeInBytes; int count = is.read(chunkData, chunkSizeInBytes, bytesToRead); if (count != -1) { - if (null != decodedStreamBuffer) { - decodedStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); + if (null != underlyingStreamBuffer) { + underlyingStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); } chunkSizeInBytes += count; } else { @@ -333,13 +273,6 @@ private boolean setUpNextChunk() throws IOException { } } - - @Override - protected InputStream getWrappedInputStream() { - return is; - } - - /** * The final chunk. * @@ -361,5 +294,4 @@ protected InputStream getWrappedInputStream() { * @return ChecksumChunkHeader in bytes based on the Header name field. */ protected abstract byte[] createChecksumChunkHeader(); - -} \ No newline at end of file +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java new file mode 100644 index 000000000000..11beb216f16f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.io.SdkInputStream; +import software.amazon.awssdk.utils.Logger; + +/** + * A wrapper of InputStream that implements streaming in chunks. 
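+ * <p>
+ * Holds the state shared by the chunked-encoding and compression wrappers: the wrapped source stream, the iterator
+ * over the current chunk, and the buffer that backs mark/reset when the wrapped stream is not mark-supported.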
+ */ +@SdkInternalApi +public abstract class AwsChunkedInputStream extends SdkInputStream { + public static final int DEFAULT_CHUNK_SIZE = 128 * 1024; + protected static final int SKIP_BUFFER_SIZE = 256 * 1024; + protected static final Logger log = Logger.loggerFor(AwsChunkedInputStream.class); + protected InputStream is; + /** + * Iterator on the current chunk. + */ + protected ChunkContentIterator currentChunkIterator; + + /** + * Iterator on the buffer of the underlying stream, + * Null if the wrapped stream is marksupported, + * otherwise it will be initialized when this wrapper is marked. + */ + protected UnderlyingStreamBuffer underlyingStreamBuffer; + protected boolean isAtStart = true; + protected boolean isTerminating = false; + + @Override + public int read() throws IOException { + byte[] tmp = new byte[1]; + int count = read(tmp, 0, 1); + if (count > 0) { + log.debug(() -> "One byte read from the stream."); + int unsignedByte = (int) tmp[0] & 0xFF; + return unsignedByte; + } else { + return count; + } + } + + @Override + public long skip(long n) throws IOException { + if (n <= 0) { + return 0; + } + long remaining = n; + int toskip = (int) Math.min(SKIP_BUFFER_SIZE, n); + byte[] temp = new byte[toskip]; + while (remaining > 0) { + int count = read(temp, 0, toskip); + if (count < 0) { + break; + } + remaining -= count; + } + return n - remaining; + } + + /** + * @see InputStream#markSupported() + */ + @Override + public boolean markSupported() { + return true; + } + + @Override + protected InputStream getWrappedInputStream() { + return is; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java new file mode 100644 index 000000000000..93642bad8c47 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java @@ -0,0 +1,170 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.utils.Validate; + +/** + * A wrapper class of InputStream that implements compression in chunks. + */ +@SdkInternalApi +public final class AwsCompressionInputStream extends AwsChunkedInputStream { + private final Compressor compressor; + + private AwsCompressionInputStream(InputStream in, Compressor compressor) { + this.compressor = compressor; + if (in instanceof AwsCompressionInputStream) { + // This could happen when the request is retried. 
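+            // Unwrap and reuse the original source stream and its mark/reset buffer so that a retried
+            // request is not compressed a second time.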
+ AwsCompressionInputStream originalCompressionStream = (AwsCompressionInputStream) in; + this.is = originalCompressionStream.is; + this.underlyingStreamBuffer = originalCompressionStream.underlyingStreamBuffer; + } else { + this.is = in; + this.underlyingStreamBuffer = null; + } + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + abortIfNeeded(); + Validate.notNull(b, "buff"); + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + + if (currentChunkIterator == null || !currentChunkIterator.hasNext()) { + if (isTerminating) { + return -1; + } + isTerminating = setUpNextChunk(); + } + + int count = currentChunkIterator.read(b, off, len); + if (count > 0) { + isAtStart = false; + log.trace(() -> count + " byte read from the stream."); + } + return count; + } + + private boolean setUpNextChunk() throws IOException { + byte[] chunkData = new byte[DEFAULT_CHUNK_SIZE]; + int chunkSizeInBytes = 0; + while (chunkSizeInBytes < DEFAULT_CHUNK_SIZE) { + /** Read from the buffer of the uncompressed stream */ + if (underlyingStreamBuffer != null && underlyingStreamBuffer.hasNext()) { + chunkData[chunkSizeInBytes++] = underlyingStreamBuffer.next(); + } else { /** Read from the wrapped stream */ + int bytesToRead = DEFAULT_CHUNK_SIZE - chunkSizeInBytes; + int count = is.read(chunkData, chunkSizeInBytes, bytesToRead); + if (count != -1) { + if (underlyingStreamBuffer != null) { + underlyingStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); + } + chunkSizeInBytes += count; + } else { + break; + } + } + } + if (chunkSizeInBytes == 0) { + return true; + } + + if (chunkSizeInBytes < chunkData.length) { + chunkData = Arrays.copyOf(chunkData, chunkSizeInBytes); + } + // Compress the chunk + byte[] compressedChunkData = compressor.compress(chunkData); + currentChunkIterator = new ChunkContentIterator(compressedChunkData); + return false; + } + + /** + * The readlimit parameter is ignored. + */ + @Override + public void mark(int readlimit) { + abortIfNeeded(); + if (!isAtStart) { + throw new UnsupportedOperationException("Compression stream only supports mark() at the start of the stream."); + } + if (is.markSupported()) { + log.debug(() -> "AwsCompressionInputStream marked at the start of the stream " + + "(will directly mark the wrapped stream since it's mark-supported)."); + is.mark(readlimit); + } else { + log.debug(() -> "AwsCompressionInputStream marked at the start of the stream " + + "(initializing the buffer since the wrapped stream is not mark-supported)."); + underlyingStreamBuffer = new UnderlyingStreamBuffer(SKIP_BUFFER_SIZE); + } + } + + /** + * Reset the stream, either by resetting the wrapped stream or using the + * buffer created by this class. + */ + @Override + public void reset() throws IOException { + abortIfNeeded(); + // Clear up any encoded data + currentChunkIterator = null; + // Reset the wrapped stream if it is mark-supported, + // otherwise use our buffered data. 
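+        // Either way, the chunk iterator was cleared above and isAtStart/isTerminating are reset below,
+        // so the next read() re-compresses the payload from the first chunk.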
+ if (is.markSupported()) { + log.debug(() -> "AwsCompressionInputStream reset " + + "(will reset the wrapped stream because it is mark-supported)."); + is.reset(); + } else { + log.debug(() -> "AwsCompressionInputStream reset (will use the buffer of the decoded stream)."); + Validate.notNull(underlyingStreamBuffer, "Cannot reset the stream because the mark is not set."); + underlyingStreamBuffer.startReadBuffer(); + } + isAtStart = true; + isTerminating = false; + } + + public static final class Builder { + InputStream inputStream; + Compressor compressor; + + public AwsCompressionInputStream build() { + return new AwsCompressionInputStream( + this.inputStream, this.compressor); + } + + public Builder inputStream(InputStream inputStream) { + this.inputStream = inputStream; + return this; + } + + public Builder compressor(Compressor compressor) { + this.compressor = compressor; + return this; + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java similarity index 93% rename from core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java rename to core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java index f6d3c47c0c1e..6fc086983fda 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java @@ -20,8 +20,8 @@ import software.amazon.awssdk.utils.Logger; @SdkInternalApi -class DecodedStreamBuffer { - private static final Logger log = Logger.loggerFor(DecodedStreamBuffer.class); +class UnderlyingStreamBuffer { + private static final Logger log = Logger.loggerFor(UnderlyingStreamBuffer.class); private byte[] bufferArray; private int maxBufferSize; @@ -29,7 +29,7 @@ class DecodedStreamBuffer { private int pos = -1; private boolean bufferSizeOverflow; - DecodedStreamBuffer(int maxBufferSize) { + UnderlyingStreamBuffer(int maxBufferSize) { bufferArray = new byte[maxBufferSize]; this.maxBufferSize = maxBufferSize; } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java new file mode 100644 index 000000000000..52a222bc372c --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.sync; + +import java.io.InputStream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.io.AwsCompressionInputStream; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.utils.IoUtils; + +/** + * {@link ContentStreamProvider} implementation for compression. + */ +@SdkInternalApi +public class CompressionContentStreamProvider implements ContentStreamProvider { + private final ContentStreamProvider underlyingInputStreamProvider; + private InputStream currentStream; + private final Compressor compressor; + + public CompressionContentStreamProvider(ContentStreamProvider underlyingInputStreamProvider, Compressor compressor) { + this.underlyingInputStreamProvider = underlyingInputStreamProvider; + this.compressor = compressor; + } + + @Override + public InputStream newStream() { + closeCurrentStream(); + currentStream = AwsCompressionInputStream.builder() + .inputStream(underlyingInputStreamProvider.newStream()) + .compressor(compressor) + .build(); + return currentStream; + } + + private void closeCurrentStream() { + if (currentStream != null) { + IoUtils.closeQuietly(currentStream, null); + currentStream = null; + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/CompressionConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/CompressionConfigurationTest.java new file mode 100644 index 000000000000..dec9d8303f69 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/CompressionConfigurationTest.java @@ -0,0 +1,43 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class CompressionConfigurationTest { + + @Test + public void equalsHashcode() { + EqualsVerifier.forClass(CompressionConfiguration.class) + .withNonnullFields("requestCompressionEnabled", "minimumCompressionThresholdInBytes") + .verify(); + } + + @Test + public void toBuilder() { + CompressionConfiguration configuration = + CompressionConfiguration.builder() + .requestCompressionEnabled(true) + .minimumCompressionThresholdInBytes(99999) + .build(); + + CompressionConfiguration another = configuration.toBuilder().build(); + assertThat(configuration).isEqualTo(another); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java index a553a55a4536..136c28695511 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java @@ -16,7 +16,6 @@ package software.amazon.awssdk.core.async; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -24,6 +23,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -41,19 +41,53 @@ class ChunkBufferTest { - @Test - void builderWithNoTotalSize() { - assertThatThrownBy(() -> ChunkBuffer.builder().build()).isInstanceOf(NullPointerException.class); + @ParameterizedTest + @ValueSource(ints = {1, 6, 10, 23, 25}) + void numberOfChunk_Not_MultipleOfTotalBytes_KnownLength(int totalBytes) { + int bufferSize = 5; + + String inputString = RandomStringUtils.randomAscii(totalBytes); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length) + .build(); + Iterable byteBuffers = + chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); + + AtomicInteger index = new AtomicInteger(0); + int count = (int) Math.ceil(totalBytes / (double) bufferSize); + int remainder = totalBytes % bufferSize; + + byteBuffers.forEach(r -> { + int i = index.get(); + + try (ByteArrayInputStream inputStream = new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8))) { + byte[] expected; + if (i == count - 1 && remainder != 0) { + expected = new byte[remainder]; + } else { + expected = new byte[bufferSize]; + } + inputStream.skip(i * bufferSize); + inputStream.read(expected); + byte[] actualBytes = BinaryUtils.copyBytesFrom(r); + assertThat(actualBytes).isEqualTo(expected); + index.incrementAndGet(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); } @ParameterizedTest @ValueSource(ints = {1, 6, 10, 23, 25}) - void numberOfChunk_Not_MultipleOfTotalBytes(int totalBytes) { + void numberOfChunk_Not_MultipleOfTotalBytes_UnknownLength(int totalBytes) { int bufferSize = 5; String inputString = RandomStringUtils.randomAscii(totalBytes); - ChunkBuffer chunkBuffer = - 
ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .build(); Iterable byteBuffers = chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); @@ -83,10 +117,12 @@ void numberOfChunk_Not_MultipleOfTotalBytes(int totalBytes) { } @Test - void zeroTotalBytesAsInput_returnsZeroByte() { + void zeroTotalBytesAsInput_returnsZeroByte_KnownLength() { byte[] zeroByte = new byte[0]; - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(5).totalBytes(zeroByte.length).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(5) + .totalBytes(zeroByte.length) + .build(); Iterable byteBuffers = chunkBuffer.split(ByteBuffer.wrap(zeroByte)); @@ -98,13 +134,30 @@ void zeroTotalBytesAsInput_returnsZeroByte() { } @Test - void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { + void zeroTotalBytesAsInput_returnsZeroByte_UnknownLength() { + byte[] zeroByte = new byte[0]; + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(5) + .build(); + Iterable byteBuffers = + chunkBuffer.split(ByteBuffer.wrap(zeroByte)); + + AtomicInteger iteratedCounts = new AtomicInteger(); + byteBuffers.forEach(r -> { + iteratedCounts.getAndIncrement(); + }); + assertThat(iteratedCounts.get()).isEqualTo(1); + } + @Test + void emptyAllocatedBytes_returnSameNumberOfEmptyBytes_knownLength() { int totalBytes = 17; int bufferSize = 5; ByteBuffer wrap = ByteBuffer.allocate(totalBytes); - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(wrap.remaining()).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(wrap.remaining()) + .build(); Iterable byteBuffers = chunkBuffer.split(wrap); @@ -121,6 +174,34 @@ void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { assertThat(iteratedCounts.get()).isEqualTo(4); } + @Test + void emptyAllocatedBytes_returnSameNumberOfEmptyBytes_unknownLength() { + int totalBytes = 17; + int bufferSize = 5; + ByteBuffer wrap = ByteBuffer.allocate(totalBytes); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .build(); + Iterable byteBuffers = + chunkBuffer.split(wrap); + + AtomicInteger iteratedCounts = new AtomicInteger(); + byteBuffers.forEach(r -> { + iteratedCounts.getAndIncrement(); + if (iteratedCounts.get() * bufferSize < totalBytes) { + // array of empty bytes + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(bufferSize).array()); + } else { + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(totalBytes % bufferSize).array()); + } + }); + assertThat(iteratedCounts.get()).isEqualTo(3); + + Optional lastBuffer = chunkBuffer.getBufferedData(); + assertThat(lastBuffer).isPresent(); + assertThat(lastBuffer.get().remaining()).isEqualTo(2); + } + /** * * Total bytes 11(ChunkSize) 3 (threads) @@ -152,14 +233,16 @@ void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { * 111 is given as output since we consumed all the total bytes* */ @Test - void concurrentTreads_calling_bufferAndCreateChunks() throws ExecutionException, InterruptedException { + void concurrentTreads_calling_bufferAndCreateChunks_knownLength() throws ExecutionException, InterruptedException { int totalBytes = 17; int bufferSize = 5; int threads = 8; ByteBuffer wrap = ByteBuffer.allocate(totalBytes); - ChunkBuffer chunkBuffer = - 
ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(wrap.remaining() * threads).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(wrap.remaining() * threads) + .build(); ExecutorService service = Executors.newFixedThreadPool(threads); @@ -198,7 +281,4 @@ void concurrentTreads_calling_bufferAndCreateChunks() throws ExecutionException, assertThat(remainderBytesBuffers.get()).isOne(); assertThat(otherSizeBuffers.get()).isZero(); } - } - - diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java new file mode 100644 index 000000000000..54c74e1e97e9 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.async; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import io.reactivex.Flowable; +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.nio.ByteBuffer; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Optional; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.tck.PublisherVerification; +import org.reactivestreams.tck.TestEnvironment; +import software.amazon.awssdk.core.internal.async.CompressionAsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; + +public class CompressionAsyncRequestBodyTckTest extends PublisherVerification { + + private static final FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); + private static final Path rootDir = fs.getRootDirectories().iterator().next(); + private static final int MAX_ELEMENTS = 1000; + private static final int CHUNK_SIZE = 128 * 1024; + private static final Compressor compressor = new GzipCompressor(); + + public CompressionAsyncRequestBodyTckTest() { + super(new TestEnvironment()); + } + + @Override + public long maxElementsFromPublisher() { + return MAX_ELEMENTS; + } + + @Override + public Publisher createPublisher(long n) { + return CompressionAsyncRequestBody.builder() + .asyncRequestBody(customAsyncRequestBodyFromFileWithoutContentLength(n)) + .compressor(compressor) + .build(); + } + + @Override + public Publisher createFailedPublisher() { + return null; + } + + private static AsyncRequestBody customAsyncRequestBodyFromFileWithoutContentLength(long nChunks) { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + 
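+                // Delegate to a file-backed publisher while contentLength() above stays empty, so the
+                // compression publisher under test is exercised on the unknown-content-length path.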
Flowable.fromPublisher(AsyncRequestBody.fromFile(fileOfNChunks(nChunks))).subscribe(s); + } + }; + } + + private static Path fileOfNChunks(long nChunks) { + String name = String.format("%d-chunks-file.dat", nChunks); + Path p = rootDir.resolve(name); + if (!Files.exists(p)) { + try (OutputStream os = Files.newOutputStream(p)) { + os.write(createCompressibleArrayOfNChunks(nChunks)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return p; + } + + private static byte[] createCompressibleArrayOfNChunks(long nChunks) { + int size = Math.toIntExact(nChunks * CHUNK_SIZE); + ByteBuffer data = ByteBuffer.allocate(size); + + byte[] a = new byte[size / 4]; + byte[] b = new byte[size / 4]; + Arrays.fill(a, (byte) 'a'); + Arrays.fill(b, (byte) 'b'); + + data.put(a); + data.put(b); + data.put(a); + data.put(b); + + return data.array(); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java new file mode 100644 index 000000000000..f67315b8e5da --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.compression; + +import static org.assertj.core.api.Assertions.assertThat; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.internal.compression.CompressorType; + +public class CompressorTypeTest { + + @Test + public void equalsHashcode() { + EqualsVerifier.forClass(CompressorType.class) + .withNonnullFields("id") + .verify(); + } + + @Test + public void compressorType_gzip() { + CompressorType gzip = CompressorType.GZIP; + CompressorType gzipFromString = CompressorType.of("gzip"); + assertThat(gzip).isSameAs(gzipFromString); + assertThat(gzip).isEqualTo(gzipFromString); + } + + @Test + public void compressorType_usesSameInstance_when_sameCompressorTypeOfSameValue() { + CompressorType brotliFromString = CompressorType.of("brotli"); + CompressorType brotliFromStringDuplicate = CompressorType.of("brotli"); + assertThat(brotliFromString).isSameAs(brotliFromStringDuplicate); + assertThat(brotliFromString).isEqualTo(brotliFromStringDuplicate); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java new file mode 100644 index 000000000000..ffb15e282a13 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java @@ -0,0 +1,173 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.zip.GZIPInputStream; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.internal.util.Mimetype; +import software.amazon.awssdk.http.async.SimpleSubscriber; + +public final class CompressionAsyncRequestBodyTest { + private static final Compressor compressor = new GzipCompressor(); + + @ParameterizedTest + @ValueSource(ints = {80, 1000}) + public void hasCorrectContent(int bodySize) throws Exception { + String testString = createCompressibleStringOfGivenSize(bodySize); + byte[] testBytes = testString.getBytes(); + int chunkSize = 133; + AsyncRequestBody provider = CompressionAsyncRequestBody.builder() + .compressor(compressor) + .asyncRequestBody(customAsyncRequestBodyWithoutContentLength(testBytes)) + .chunkSize(chunkSize) + .build(); + + ByteBuffer byteBuffer = ByteBuffer.allocate(testString.length()); + CountDownLatch done = new CountDownLatch(1); + AtomicInteger pos = new AtomicInteger(); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + + // verify each chunk + byte[] chunkToVerify = new byte[chunkSize]; + System.arraycopy(testBytes, pos.get(), chunkToVerify, 0, chunkSize); + chunkToVerify = compressor.compress(chunkToVerify); + + assertThat(bytes).isEqualTo(chunkToVerify); + pos.addAndGet(chunkSize); + }) { + @Override + public void onError(Throwable t) { + super.onError(t); + done.countDown(); + } + + @Override + public void onComplete() { + super.onComplete(); + done.countDown(); + } + }; + + provider.subscribe(subscriber); + done.await(10, TimeUnit.SECONDS); + + byte[] retrieved = byteBuffer.array(); + byte[] uncompressed = decompress(retrieved); + assertThat(new String(uncompressed)).isEqualTo(testString); + } + + @Test + public void emptyBytesConstructor_hasEmptyContent() throws Exception { + AsyncRequestBody requestBody = CompressionAsyncRequestBody.builder() + .compressor(compressor) + .asyncRequestBody(AsyncRequestBody.empty()) + .build(); + + ByteBuffer byteBuffer = ByteBuffer.allocate(0); + CountDownLatch done = new CountDownLatch(1); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + }) { + 
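+            // Count the latch down when the stream terminates so the waiting test thread can proceed to its assertions.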
@Override + public void onError(Throwable t) { + super.onError(t); + done.countDown(); + } + + @Override + public void onComplete() { + super.onComplete(); + done.countDown(); + } + }; + + requestBody.subscribe(subscriber); + done.await(10, TimeUnit.SECONDS); + assertThat(byteBuffer.array()).isEmpty(); + assertThat(byteBuffer.array()).isEqualTo(new byte[0]); + assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + } + + private static String createCompressibleStringOfGivenSize(int size) { + ByteBuffer data = ByteBuffer.allocate(size); + + byte[] a = new byte[size / 4]; + byte[] b = new byte[size / 4]; + Arrays.fill(a, (byte) 'a'); + Arrays.fill(b, (byte) 'b'); + + data.put(a); + data.put(b); + data.put(a); + data.put(b); + + return new String(data.array()); + } + + private static byte[] decompress(byte[] compressedData) throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(compressedData); + GZIPInputStream gzipInputStream = new GZIPInputStream(bais); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = gzipInputStream.read(buffer)) != -1) { + baos.write(buffer, 0, bytesRead); + } + gzipInputStream.close(); + byte[] decompressedData = baos.toByteArray(); + return decompressedData; + } + + private static AsyncRequestBody customAsyncRequestBodyWithoutContentLength(byte[] content) { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(content)) + .subscribe(s); + } + }; + } +} \ No newline at end of file diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java new file mode 100644 index 000000000000..24fb71940f61 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.compression; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.core.Is.is; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.zip.GZIPInputStream; +import org.junit.Test; + +public class GzipCompressorTest { + private static final Compressor gzipCompressor = new GzipCompressor(); + private static final String COMPRESSABLE_STRING = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + + @Test + public void compressedData_decompressesCorrectly() throws IOException { + byte[] originalData = COMPRESSABLE_STRING.getBytes(StandardCharsets.UTF_8); + byte[] compressedData = gzipCompressor.compress(originalData); + + int uncompressedSize = originalData.length; + int compressedSize = compressedData.length; + assertThat(compressedSize, lessThan(uncompressedSize)); + + ByteArrayInputStream bais = new ByteArrayInputStream(compressedData); + GZIPInputStream gzipInputStream = new GZIPInputStream(bais); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = gzipInputStream.read(buffer)) != -1) { + baos.write(buffer, 0, bytesRead); + } + gzipInputStream.close(); + byte[] decompressedData = baos.toByteArray(); + + assertThat(decompressedData, is(originalData)); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java new file mode 100644 index 000000000000..99359dfcd58d --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.io; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static software.amazon.awssdk.core.util.FileUtils.generateRandomAsciiFile; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Random; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; + +public class AwsCompressionInputStreamTest { + private static Compressor compressor; + + @BeforeClass + public static void setup() throws IOException { + compressor = new GzipCompressor(); + } + + @Test + public void nonMarkSupportedInputStream_marksAndResetsCorrectly() throws IOException { + File file = generateRandomAsciiFile(100); + InputStream is = new FileInputStream(file); + assertFalse(is.markSupported()); + + AwsCompressionInputStream compressionInputStream = AwsCompressionInputStream.builder() + .inputStream(is) + .compressor(compressor) + .build(); + + compressionInputStream.mark(100); + compressionInputStream.reset(); + String read1 = readInputStream(compressionInputStream); + compressionInputStream.reset(); + String read2 = readInputStream(compressionInputStream); + assertThat(read1).isEqualTo(read2); + } + + @Test + public void markSupportedInputStream_marksAndResetsCorrectly() throws IOException { + InputStream is = new ByteArrayInputStream(generateRandomBody(100)); + assertTrue(is.markSupported()); + AwsCompressionInputStream compressionInputStream = AwsCompressionInputStream.builder() + .inputStream(is) + .compressor(compressor) + .build(); + compressionInputStream.mark(100); + compressionInputStream.reset(); + String read1 = readInputStream(compressionInputStream); + compressionInputStream.reset(); + String read2 = readInputStream(compressionInputStream); + assertThat(read1).isEqualTo(read2); + } + + private byte[] generateRandomBody(int size) { + byte[] randomData = new byte[size]; + new Random().nextBytes(randomData); + return randomData; + } + + private String readInputStream(InputStream is) throws IOException { + byte[] buffer = new byte[512]; + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + int bytesRead; + while ((bytesRead = is.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + } + return byteArrayOutputStream.toString(); + } +} diff --git a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java index 01722140044f..8245d82a7ef5 100644 --- a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java +++ b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java @@ -39,8 +39,11 @@ import org.junit.BeforeClass; import org.junit.Test; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.CompressionConfiguration; import software.amazon.awssdk.core.SdkGlobalTime; import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import 
software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.model.Datapoint; import software.amazon.awssdk.services.cloudwatch.model.DeleteAlarmsRequest; @@ -108,7 +111,6 @@ public static void cleanupAlarms() { /** * Tests putting metrics and then getting them back. */ - @Test public void put_get_metricdata_list_metric_returns_success() throws InterruptedException { @@ -164,6 +166,86 @@ public void put_get_metricdata_list_metric_returns_success() throws assertTrue(seenDimensions); } + /** + * Tests putting metrics with request compression and then getting them back. + * TODO: We can remove this test once CloudWatch adds "RequestCompression" trait to PutMetricData + */ + @Test + public void put_get_metricdata_list_metric_withRequestCompression_returns_success() { + + RequestCompression requestCompressionTrait = RequestCompression.builder() + .encodings("gzip") + .isStreaming(false) + .build(); + CompressionConfiguration compressionConfiguration = CompressionConfiguration.builder() + // uncompressed payload is 404 bytes + .minimumCompressionThresholdInBytes(100) + .build(); + + CloudWatchClient requestCompressionClient = + CloudWatchClient.builder() + .credentialsProvider(getCredentialsProvider()) + .region(Region.US_WEST_2) + .overrideConfiguration(c -> c.putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait)) + .build(); + + String measureName = this.getClass().getName() + System.currentTimeMillis(); + + MetricDatum datum = MetricDatum.builder().dimensions( + Dimension.builder().name("InstanceType").value("m1.small").build()) + .metricName(measureName).timestamp(Instant.now()) + .unit("Count").value(42.0).build(); + + requestCompressionClient.putMetricData(PutMetricDataRequest.builder() + .namespace("AWS.EC2") + .metricData(datum) + .overrideConfiguration(c -> c.compressionConfiguration(compressionConfiguration)) + .build()); + + GetMetricStatisticsResponse result = + Waiter.run(() -> requestCompressionClient + .getMetricStatistics(r -> r.startTime(Instant.now().minus(Duration.ofDays(7))) + .namespace("AWS.EC2") + .period(60 * 60) + .dimensions(Dimension.builder().name("InstanceType") + .value("m1.small").build()) + .metricName(measureName) + .statisticsWithStrings("Average", "Maximum", "Minimum", "Sum") + .endTime(Instant.now()))) + .until(r -> r.datapoints().size() == 1) + .orFailAfter(Duration.ofMinutes(2)); + + assertNotNull(result.label()); + assertEquals(measureName, result.label()); + + assertEquals(1, result.datapoints().size()); + for (Datapoint datapoint : result.datapoints()) { + assertEquals(datum.value(), datapoint.average()); + assertEquals(datum.value(), datapoint.maximum()); + assertEquals(datum.value(), datapoint.minimum()); + assertEquals(datum.value(), datapoint.sum()); + assertNotNull(datapoint.timestamp()); + assertEquals(datum.unit(), datapoint.unit()); + } + + ListMetricsResponse listResult = requestCompressionClient.listMetrics(ListMetricsRequest.builder().build()); + + boolean seenDimensions = false; + assertTrue(listResult.metrics().size() > 0); + for (Metric metric : listResult.metrics()) { + assertNotNull(metric.metricName()); + assertNotNull(metric.namespace()); + + for (Dimension dimension : metric.dimensions()) { + seenDimensions = true; + assertNotNull(dimension.name()); + assertNotNull(dimension.value()); + } + } + assertTrue(seenDimensions); + } + /** * Tests setting the state for an alarm and reading 
its history. */ diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java new file mode 100644 index 000000000000..91c9994ae307 --- /dev/null +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java @@ -0,0 +1,152 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.mediastoredata; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.mediastore.MediaStoreClient; +import software.amazon.awssdk.services.mediastore.model.Container; +import software.amazon.awssdk.services.mediastore.model.ContainerStatus; +import software.amazon.awssdk.services.mediastore.model.DescribeContainerResponse; +import software.amazon.awssdk.testutils.Waiter; +import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; + +/** + * Base class for MediaStoreData integration tests. Used for Transfer-Encoding and Request Compression testing. 
+ */ +public class MediaStoreDataIntegrationTestBase extends AwsIntegrationTestBase { + protected static AwsCredentialsProvider credentialsProvider; + protected static MediaStoreClient mediaStoreClient; + protected static URI uri; + + @BeforeAll + public static void init() { + credentialsProvider = getCredentialsProvider(); + mediaStoreClient = MediaStoreClient.builder() + .credentialsProvider(credentialsProvider) + .httpClient(ApacheHttpClient.builder().build()) + .build(); + } + + @AfterEach + public void reset() { + CaptureTransferEncodingHeaderInterceptor.reset(); + } + + protected static Container createContainer(String containerName) { + mediaStoreClient.createContainer(r -> r.containerName(containerName)); + DescribeContainerResponse response = waitContainerToBeActive(containerName); + return response.container(); + } + + private static DescribeContainerResponse waitContainerToBeActive(String containerName) { + return Waiter.run(() -> mediaStoreClient.describeContainer(r -> r.containerName(containerName))) + .until(r -> r.container().status() == ContainerStatus.ACTIVE) + .orFailAfter(Duration.ofMinutes(3)); + } + + protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength(byte[] body) { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(body)) + .subscribe(s); + } + }; + } + + protected static class CaptureTransferEncodingHeaderInterceptor implements ExecutionInterceptor { + public static boolean isChunked; + + public static void reset() { + isChunked = false; + } + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + isChunked = context.httpRequest().matchingHeaders("Transfer-Encoding").contains("chunked"); + } + } + + protected static class TestContentProvider implements ContentStreamProvider { + private final byte[] content; + private final List createdStreams = new ArrayList<>(); + private CloseTrackingInputStream currentStream; + + protected TestContentProvider(byte[] content) { + this.content = content.clone(); + } + + @Override + public InputStream newStream() { + if (currentStream != null) { + invokeSafely(currentStream::close); + } + currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); + createdStreams.add(currentStream); + return currentStream; + } + + List getCreatedStreams() { + return Collections.unmodifiableList(createdStreams); + } + } + + protected static class CloseTrackingInputStream extends FilterInputStream { + private boolean isClosed = false; + + CloseTrackingInputStream(InputStream in) { + super(in); + } + + @Override + public void close() throws IOException { + super.close(); + isClosed = true; + } + + boolean isClosed() { + return isClosed; + } + } +} diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java new file mode 100644 index 000000000000..228102b8f9f4 --- /dev/null +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java @@ -0,0 +1,173 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.mediastoredata; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.io.IOException; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.CompressionConfiguration; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.mediastoredata.model.DeleteObjectRequest; +import software.amazon.awssdk.services.mediastoredata.model.GetObjectRequest; +import software.amazon.awssdk.services.mediastoredata.model.GetObjectResponse; +import software.amazon.awssdk.services.mediastoredata.model.ObjectNotFoundException; +import software.amazon.awssdk.services.mediastoredata.model.PutObjectRequest; +import software.amazon.awssdk.testutils.Waiter; + +/** + * Integration test to verify Request Compression functionalities for streaming operations. Do not delete. 
+ */ +public class RequestCompressionStreamingIntegrationTest extends MediaStoreDataIntegrationTestBase { + protected static final String CONTAINER_NAME = "java-sdk-test-mediastoredata-compression" + Instant.now().toEpochMilli(); + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private static String compressedBody; + private static MediaStoreDataClient syncClient; + private static MediaStoreDataAsyncClient asyncClient; + private static PutObjectRequest putObjectRequest; + private static DeleteObjectRequest deleteObjectRequest; + private static GetObjectRequest getObjectRequest; + + @BeforeAll + public static void setup() { + uri = URI.create(createContainer(CONTAINER_NAME).endpoint()); + + CompressionConfiguration compressionConfiguration = + CompressionConfiguration.builder() + .minimumCompressionThresholdInBytes(1) + .requestCompressionEnabled(true) + .build(); + + RequestCompression requestCompressionTrait = RequestCompression.builder() + .encodings("gzip") + .isStreaming(true) + .build(); + + syncClient = MediaStoreDataClient.builder() + .endpointOverride(uri) + .credentialsProvider(credentialsProvider) + .httpClient(ApacheHttpClient.builder().build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureTransferEncodingHeaderInterceptor()) + .addExecutionInterceptor(new CaptureContentEncodingHeaderInterceptor()) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait) + .compressionConfiguration(compressionConfiguration)) + .build(); + + asyncClient = MediaStoreDataAsyncClient.builder() + .endpointOverride(uri) + .credentialsProvider(credentialsProvider) + .httpClient(NettyNioAsyncHttpClient.create()) + .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureTransferEncodingHeaderInterceptor()) + .addExecutionInterceptor(new CaptureContentEncodingHeaderInterceptor()) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait) + .compressionConfiguration(compressionConfiguration)) + .build(); + + putObjectRequest = PutObjectRequest.builder() + .contentType("application/octet-stream") + .path("/foo") + .overrideConfiguration( + o -> o.compressionConfiguration( + c -> c.requestCompressionEnabled(true))) + .build(); + deleteObjectRequest = DeleteObjectRequest.builder().path("/foo").build(); + getObjectRequest = GetObjectRequest.builder().path("/foo").build(); + + Compressor compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)).asByteArray(); + compressedBody = new String(compressedBodyBytes); + } + + @AfterAll + public static void tearDown() throws InterruptedException { + syncClient.deleteObject(deleteObjectRequest); + Waiter.run(() -> syncClient.describeObject(r -> r.path("/foo"))) + .untilException(ObjectNotFoundException.class) + .orFailAfter(Duration.ofMinutes(1)); + Thread.sleep(1000); + mediaStoreClient.deleteContainer(r -> r.containerName(CONTAINER_NAME)); + } + + @AfterEach + public void cleanUp() { + CaptureContentEncodingHeaderInterceptor.reset(); + } + + @Test + public void putObject_withSyncStreamingRequestCompression_compressesPayloadAndSendsCorrectly() throws IOException { + TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8)); + syncClient.putObject(putObjectRequest, RequestBody.fromContentProvider(provider, 
"binary/octet-stream")); + + assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); + assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); + + ResponseInputStream response = syncClient.getObject(getObjectRequest); + byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes().length]; + response.read(buffer); + String retrievedContent = new String(buffer); + assertThat(retrievedContent).isEqualTo(UNCOMPRESSED_BODY); + } + + @Test + public void putObject_withAsyncStreamingRequestCompression_compressesPayloadAndSendsCorrectly() throws IOException { + AsyncRequestBody asyncRequestBody = customAsyncRequestBodyWithoutContentLength(UNCOMPRESSED_BODY.getBytes()); + asyncClient.putObject(putObjectRequest, asyncRequestBody).join(); + + assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); + assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); + + ResponseInputStream response = syncClient.getObject(getObjectRequest); + byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes().length]; + response.read(buffer); + String retrievedContent = new String(buffer); + assertThat(retrievedContent).isEqualTo(UNCOMPRESSED_BODY); + } + + private static class CaptureContentEncodingHeaderInterceptor implements ExecutionInterceptor { + public static boolean isGzip; + + public static void reset() { + isGzip = false; + } + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + isGzip = context.httpRequest().matchingHeaders("Content-Encoding").contains("gzip"); + } + } +} diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java index acab0a8d6723..b4137a14eea9 100644 --- a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java @@ -16,70 +16,38 @@ package software.amazon.awssdk.services.mediastoredata; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; -import io.reactivex.Flowable; -import java.io.ByteArrayInputStream; -import java.io.FilterInputStream; -import java.io.IOException; -import java.io.InputStream; import java.net.URI; -import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.time.Duration; import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.reactivestreams.Subscriber; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; 
import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; -import software.amazon.awssdk.services.mediastore.MediaStoreClient; -import software.amazon.awssdk.services.mediastore.model.Container; -import software.amazon.awssdk.services.mediastore.model.ContainerStatus; -import software.amazon.awssdk.services.mediastore.model.DescribeContainerResponse; import software.amazon.awssdk.services.mediastoredata.model.DeleteObjectRequest; import software.amazon.awssdk.services.mediastoredata.model.ObjectNotFoundException; import software.amazon.awssdk.services.mediastoredata.model.PutObjectRequest; import software.amazon.awssdk.testutils.Waiter; -import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; /** * Integration test to verify Transfer-Encoding:chunked functionalities for all supported HTTP clients. Do not delete. */ -public class TransferEncodingChunkedIntegrationTest extends AwsIntegrationTestBase { - private static final String CONTAINER_NAME = "java-sdk-test-" + Instant.now().toEpochMilli(); - private static MediaStoreClient mediaStoreClient; +public class TransferEncodingChunkedIntegrationTest extends MediaStoreDataIntegrationTestBase { + protected static final String CONTAINER_NAME = "java-sdk-test-mediastoredata-transferencoding" + Instant.now().toEpochMilli(); private static MediaStoreDataClient syncClientWithApache; private static MediaStoreDataClient syncClientWithUrlConnection; private static MediaStoreDataAsyncClient asyncClientWithNetty; - private static AwsCredentialsProvider credentialsProvider; - private static Container container; private static PutObjectRequest putObjectRequest; private static DeleteObjectRequest deleteObjectRequest; @BeforeAll public static void setup() { - credentialsProvider = getCredentialsProvider(); - mediaStoreClient = MediaStoreClient.builder() - .credentialsProvider(credentialsProvider) - .httpClient(ApacheHttpClient.builder().build()) - .build(); - container = createContainer(); - URI uri = URI.create(container.endpoint()); - + uri = URI.create(createContainer(CONTAINER_NAME).endpoint()); syncClientWithApache = MediaStoreDataClient.builder() .endpointOverride(uri) .credentialsProvider(credentialsProvider) @@ -112,12 +80,13 @@ public static void setup() { } @AfterAll - public static void tearDown() { + public static void tearDown() throws InterruptedException { syncClientWithApache.deleteObject(deleteObjectRequest); Waiter.run(() -> syncClientWithApache.describeObject(r -> r.path("/foo"))) .untilException(ObjectNotFoundException.class) .orFailAfter(Duration.ofMinutes(1)); - CaptureTransferEncodingHeaderInterceptor.reset(); + Thread.sleep(1000); + mediaStoreClient.deleteContainer(r -> r.containerName(CONTAINER_NAME)); } @Test @@ -136,89 +105,7 @@ public void urlConnectionClientPutObject_withoutContentLength_sendsSuccessfully( @Test public void nettyClientPutObject_withoutContentLength_sendsSuccessfully() { - asyncClientWithNetty.putObject(putObjectRequest, customAsyncRequestBodyWithoutContentLength()).join(); + asyncClientWithNetty.putObject(putObjectRequest, customAsyncRequestBodyWithoutContentLength("TestBody".getBytes())).join(); assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); } - - private static Container createContainer() { - mediaStoreClient.createContainer(r -> r.containerName(CONTAINER_NAME)); - DescribeContainerResponse response = waitContainerToBeActive(); - return response.container(); - } - - private static DescribeContainerResponse waitContainerToBeActive() { - return 
Waiter.run(() -> mediaStoreClient.describeContainer(r -> r.containerName(CONTAINER_NAME))) - .until(r -> ContainerStatus.ACTIVE.equals(r.container().status())) - .orFailAfter(Duration.ofMinutes(3)); - } - - private static class CaptureTransferEncodingHeaderInterceptor implements ExecutionInterceptor { - private static boolean isChunked; - - public static void reset() { - isChunked = false; - } - - @Override - public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { - isChunked = context.httpRequest().matchingHeaders("Transfer-Encoding").contains("chunked"); - } - } - - private AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { - return new AsyncRequestBody() { - @Override - public Optional contentLength() { - return Optional.empty(); - } - - @Override - public void subscribe(Subscriber s) { - Flowable.fromPublisher(AsyncRequestBody.fromBytes("Random text".getBytes())) - .subscribe(s); - } - }; - } - - private static class TestContentProvider implements ContentStreamProvider { - private final byte[] content; - private final List createdStreams = new ArrayList<>(); - private CloseTrackingInputStream currentStream; - - private TestContentProvider(byte[] content) { - this.content = content; - } - - @Override - public InputStream newStream() { - if (currentStream != null) { - invokeSafely(currentStream::close); - } - currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); - createdStreams.add(currentStream); - return currentStream; - } - - List getCreatedStreams() { - return createdStreams; - } - } - - private static class CloseTrackingInputStream extends FilterInputStream { - private boolean isClosed = false; - - CloseTrackingInputStream(InputStream in) { - super(in); - } - - @Override - public void close() throws IOException { - super.close(); - isClosed = true; - } - - boolean isClosed() { - return isClosed; - } - } } diff --git a/services/mediastoredata/src/it/resources/log4j2.properties b/services/mediastoredata/src/it/resources/log4j2.properties new file mode 100644 index 000000000000..ea24f17148e6 --- /dev/null +++ b/services/mediastoredata/src/it/resources/log4j2.properties @@ -0,0 +1,38 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +status = warn + +appender.console.type = Console +appender.console.name = ConsoleAppender +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n%throwable + +rootLogger.level = info +rootLogger.appenderRef.stdout.ref = ConsoleAppender + +# Uncomment below to enable more specific logging +# +#logger.sdk.name = software.amazon.awssdk +#logger.sdk.level = debug +# +#logger.request.name = software.amazon.awssdk.request +#logger.request.level = debug +# +#logger.apache.name = org.apache.http.wire +#logger.apache.level = debug +# +#logger.netty.name = io.netty.handler.logging +#logger.netty.level = debug \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json index 6b1cb368d486..8cdb71614e38 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json @@ -277,6 +277,31 @@ "requestAlgorithmMember": "ChecksumAlgorithm" } }, + "PutOperationWithRequestCompression":{ + "name":"PutOperationWithRequestCompression", + "http":{ + "method":"PUT", + "requestUri":"/" + }, + "input":{"shape":"RequestCompressionStructure"}, + "output":{"shape":"RequestCompressionStructure"}, + "requestCompression": { + "encodings": ["gzip"] + } + }, + "PutOperationWithStreamingRequestCompression":{ + "name":"PutOperationWithStreamingRequestCompression", + "http":{ + "method":"PUT", + "requestUri":"/" + }, + "input":{"shape":"RequestCompressionStructureWithStreaming"}, + "output":{"shape":"RequestCompressionStructureWithStreaming"}, + "requestCompression": { + "encodings": ["gzip"] + }, + "authtype":"v4-unsigned-body" + }, "GetOperationWithChecksum":{ "name":"GetOperationWithChecksum", "http":{ @@ -1007,6 +1032,28 @@ } }, "payload":"NestedQueryParameterOperation" + }, + "RequestCompressionStructure":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "documentation":"

<p>Object data.</p>

", + "streaming":false + } + }, + "payload":"Body" + }, + "RequestCompressionStructureWithStreaming":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "documentation":"

<p>Object data.</p>

", + "streaming":true + } + }, + "payload":"Body" } } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java new file mode 100644 index 000000000000..bad3735d509d --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java @@ -0,0 +1,205 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.reactivex.Flowable; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Optional; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithStreamingRequestCompressionRequest; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; + +public class AsyncRequestCompressionTest { + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private String compressedBody; + private int compressedLen; + private MockAsyncHttpClient mockAsyncHttpClient; + private ProtocolRestJsonAsyncClient asyncClient; + private Compressor compressor; + + @BeforeEach + public void setUp() { + mockAsyncHttpClient = new MockAsyncHttpClient(); + asyncClient = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClient(mockAsyncHttpClient) + .build(); + compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(UNCOMPRESSED_BODY.getBytes()); + compressedBody = new String(compressedBodyBytes); + compressedLen = compressedBodyBytes.length; + } + + @AfterEach + public void reset() { + mockAsyncHttpClient.reset(); + } + + @Test + public void 
asyncNonStreamingOperation_compressionEnabledThresholdOverridden_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void asyncNonStreamingOperation_payloadSizeLessThanCompressionThreshold_doesNotCompress() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(UNCOMPRESSED_BODY); + assertThat(loggedSize).isEqualTo(UNCOMPRESSED_BODY.length()); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding")).isEmpty(); + } + + @Test + public void asyncStreamingOperation_compressionEnabled_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + mockAsyncHttpClient.setAsyncRequestBodyLength(compressedBody.length()); + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + asyncClient.putOperationWithStreamingRequestCompression(request, customAsyncRequestBodyWithoutContentLength(), + AsyncResponseTransformer.toBytes()).join(); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + String loggedBody = new String(mockAsyncHttpClient.getStreamingPayload().get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + @Test + public void asyncNonStreamingOperation_compressionEnabledThresholdOverriddenWithRetry_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + 
.body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void asyncStreamingOperation_compressionEnabledWithRetry_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + mockAsyncHttpClient.setAsyncRequestBodyLength(compressedBody.length()); + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + asyncClient.putOperationWithStreamingRequestCompression(request, customAsyncRequestBodyWithoutContentLength(), + AsyncResponseTransformer.toBytes()).join(); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + String loggedBody = new String(mockAsyncHttpClient.getStreamingPayload().get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + private HttpExecuteResponse mockResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + } + + private HttpExecuteResponse mockErrorResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(500).build()) + .build(); + } + + protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(UNCOMPRESSED_BODY.getBytes())) + .subscribe(s); + } + }; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java new file mode 100644 index 000000000000..a4f85125e9c6 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java @@ -0,0 +1,231 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.AfterEach; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithStreamingRequestCompressionRequest; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +public class RequestCompressionTest { + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private String compressedBody; + private int compressedLen; + private MockSyncHttpClient mockHttpClient; + private ProtocolRestJsonClient syncClient; + private Compressor compressor; + private RequestBody requestBody; + + @BeforeEach + public void setUp() { + mockHttpClient = new MockSyncHttpClient(); + syncClient = ProtocolRestJsonClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClient(mockHttpClient) + .build(); + compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(UNCOMPRESSED_BODY.getBytes()); + compressedLen = compressedBodyBytes.length; + compressedBody = new String(compressedBodyBytes); + TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes()); + requestBody = RequestBody.fromContentProvider(provider, "binary/octet-stream"); + } + + @AfterEach + public void reset() { + mockHttpClient.reset(); + } + + @Test + public void syncNonStreamingOperation_compressionEnabledThresholdOverridden_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new 
String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void syncNonStreamingOperation_payloadSizeLessThanCompressionThreshold_doesNotCompress() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(UNCOMPRESSED_BODY); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding")).isEmpty(); + } + + @Test + public void syncStreamingOperation_compressionEnabled_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + syncClient.putOperationWithStreamingRequestCompression(request, requestBody, ResponseTransformer.toBytes()); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + @Test + public void syncNonStreamingOperation_compressionEnabledThresholdOverriddenWithRetry_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void syncStreamingOperation_compressionEnabledWithRetry_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + 
mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + syncClient.putOperationWithStreamingRequestCompression(request, requestBody, ResponseTransformer.toBytes()); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + private HttpExecuteResponse mockResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + } + + private HttpExecuteResponse mockErrorResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(500).build()) + .build(); + } + + private static final class TestContentProvider implements ContentStreamProvider { + private final byte[] content; + private final List createdStreams = new ArrayList<>(); + private CloseTrackingInputStream currentStream; + + private TestContentProvider(byte[] content) { + this.content = content; + } + + @Override + public InputStream newStream() { + if (currentStream != null) { + invokeSafely(currentStream::close); + } + currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); + createdStreams.add(currentStream); + return currentStream; + } + + List getCreatedStreams() { + return createdStreams; + } + } + + private static class CloseTrackingInputStream extends FilterInputStream { + private boolean isClosed = false; + + CloseTrackingInputStream(InputStream in) { + super(in); + } + + @Override + public void close() throws IOException { + super.close(); + isClosed = true; + } + + boolean isClosed() { + return isClosed; + } + } +} diff --git a/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java b/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java index 8a3f62f7838e..4716212a027a 100644 --- a/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java +++ b/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicInteger; @@ -50,6 +51,8 @@ public final class MockAsyncHttpClient implements SdkAsyncHttpClient, MockHttpCl private final List> responses = new LinkedList<>(); private final AtomicInteger responseIndex = new AtomicInteger(0); private final ExecutorService executor; + private Integer asyncRequestBodyLength; + private byte[] streamingPayload; public MockAsyncHttpClient() { this.executor = Executors.newFixedThreadPool(3); @@ -66,6 +69,11 @@ public CompletableFuture execute(AsyncExecuteRequest request) { 
request.responseHandler().onHeaders(nextResponse.httpResponse()); CompletableFuture.runAsync(() -> request.responseHandler().onStream(new ResponsePublisher(content, index)), executor); + + if (asyncRequestBodyLength != null && asyncRequestBodyLength > 0) { + captureStreamingPayload(request.requestContentPublisher()); + } + return CompletableFuture.completedFuture(null); } @@ -122,7 +130,29 @@ public void stubResponses(HttpExecuteResponse... responses) { this.responseIndex.set(0); } - private class ResponsePublisher implements SdkHttpContentPublisher { + /** + * Enable capturing the streaming payload by setting the length of the AsyncRequestBody. + */ + public void setAsyncRequestBodyLength(int asyncRequestBodyLength) { + this.asyncRequestBodyLength = asyncRequestBodyLength; + } + + private void captureStreamingPayload(SdkHttpContentPublisher publisher) { + ByteBuffer byteBuffer = ByteBuffer.allocate(asyncRequestBodyLength); + Subscriber subscriber = new CapturingSubscriber(byteBuffer); + publisher.subscribe(subscriber); + streamingPayload = byteBuffer.array(); + } + + /** + * Returns the streaming payload byte array, if the asyncRequestBodyLength was set correctly. Otherwise, returns empty + * Optional. + */ + public Optional getStreamingPayload() { + return streamingPayload != null ? Optional.of(streamingPayload.clone()) : Optional.empty(); + } + + private final class ResponsePublisher implements SdkHttpContentPublisher { private final byte[] content; private final int index; @@ -165,4 +195,35 @@ public void cancel() { }); } } + + private static class CapturingSubscriber implements Subscriber { + private ByteBuffer byteBuffer; + private CountDownLatch done = new CountDownLatch(1); + + CapturingSubscriber(ByteBuffer byteBuffer) { + this.byteBuffer = byteBuffer; + } + + @Override + public void onSubscribe(Subscription subscription) { + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer buffer) { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + } + + @Override + public void onError(Throwable t) { + done.countDown(); + } + + @Override + public void onComplete() { + done.countDown(); + } + } }
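Usage sketch (illustrative, not part of the patch): the tests above enable compression through CompressionConfiguration on the client or request override configuration, and the internal GzipCompressor produces the gzip payload. A minimal caller-side example, assuming a generated client for a service whose operation is modeled with the requestCompression trait (the client name below is a placeholder), could look like the following.

import java.nio.charset.StandardCharsets;
import software.amazon.awssdk.core.CompressionConfiguration;
import software.amazon.awssdk.core.internal.compression.Compressor;
import software.amazon.awssdk.core.internal.compression.GzipCompressor;

public class RequestCompressionUsageSketch {
    public static void main(String[] args) {
        // Compress eligible request payloads with gzip once they exceed 1 byte,
        // mirroring the thresholds used by the integration tests in this change.
        CompressionConfiguration compressionConfiguration =
            CompressionConfiguration.builder()
                                    .requestCompressionEnabled(true)
                                    .minimumCompressionThresholdInBytes(1)
                                    .build();

        // On a generated client this would be supplied through the override configuration:
        //   SomeServiceClient.builder()
        //                    .overrideConfiguration(o -> o.compressionConfiguration(compressionConfiguration))
        //                    .build();
        // SomeServiceClient is a placeholder; only operations carrying the
        // requestCompression trait are compressed.

        // GzipCompressor is an SDK-internal type and is normally not called directly;
        // it is shown here only to illustrate what happens to the payload under the hood.
        Compressor compressor = new GzipCompressor();
        byte[] compressed = compressor.compress("hello-request-compression".getBytes(StandardCharsets.UTF_8));
        System.out.println("compressed length = " + compressed.length);
    }
}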