diff --git a/Sources/Services/AWSKinesis/models/Models.swift b/Sources/Services/AWSKinesis/models/Models.swift index 00167347f6d..e01203f757d 100644 --- a/Sources/Services/AWSKinesis/models/Models.swift +++ b/Sources/Services/AWSKinesis/models/Models.swift @@ -2442,7 +2442,7 @@ extension InternalFailureException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -2541,7 +2541,7 @@ extension KMSAccessDeniedException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -2616,7 +2616,7 @@ extension KMSDisabledException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -2691,7 +2691,7 @@ extension KMSInvalidStateException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -2766,7 +2766,7 @@ extension KMSNotFoundException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -2841,7 +2841,7 @@ extension KMSOptInRequired: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -2916,7 +2916,7 @@ extension KMSThrottlingException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -4760,7 +4760,7 @@ extension ResourceInUseException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -4835,7 +4835,7 @@ extension ResourceNotFoundException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } diff --git a/Sources/Services/AWSLexRuntimeV2/models/Models.swift 
b/Sources/Services/AWSLexRuntimeV2/models/Models.swift index aa6963188a2..ebcddf8ee0f 100644 --- a/Sources/Services/AWSLexRuntimeV2/models/Models.swift +++ b/Sources/Services/AWSLexRuntimeV2/models/Models.swift @@ -17,7 +17,7 @@ extension AccessDeniedException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -330,7 +330,7 @@ extension BadGatewayException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -660,7 +660,7 @@ extension ConflictException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -969,7 +969,7 @@ extension DependencyFailedException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -1809,7 +1809,7 @@ extension InternalServerException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -3048,7 +3048,7 @@ extension ResourceNotFoundException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -4102,7 +4102,7 @@ extension ThrottlingException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -4222,7 +4222,7 @@ extension ValidationException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } diff --git a/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClient.swift b/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClient.swift index c4aa9d999d6..7cdd9e2d8a0 100644 --- a/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClient.swift +++ b/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClient.swift @@ -161,4 +161,61 @@ extension SageMakerRuntimeClient: SageMakerRuntimeClientProtocol { return result } + /// Invokes a model at the specified endpoint to return the inference response as a 
stream. The inference stream provides the response payload incrementally as a series of parts. Before you can get an inference stream, you must have access to a model that's deployed using Amazon SageMaker hosting services, and the container for that model must support inference streaming. For more information that can help you use this API, see the following sections in the Amazon SageMaker Developer Guide: + /// + /// * For information about how to add streaming support to a model, see [How Containers Serve Requests](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-code-how-containe-serves-requests). + /// + /// * For information about how to process the streaming response, see [Invoke real-time endpoints](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-test-endpoints.html). + /// + /// + /// Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax. Calls to InvokeEndpointWithResponseStream are authenticated by using Amazon Web Services Signature Version 4. For information, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) in the Amazon S3 API Reference. + /// + /// - Parameter InvokeEndpointWithResponseStreamInput : [no documentation found] + /// + /// - Returns: `InvokeEndpointWithResponseStreamOutputResponse` : [no documentation found] + /// + /// - Throws: One of the exceptions listed below __Possible Exceptions__. + /// + /// __Possible Exceptions:__ + /// - `InternalFailure` : An internal failure occurred. + /// - `InternalStreamFailure` : The stream processing failed because of an unknown error, exception or failure. Try your request again. + /// - `ModelError` : Model (owned by the customer in the container) returned 4xx or 5xx error code. + /// - `ModelStreamError` : An error occurred while streaming the response body. This error can have the following error codes: ModelInvocationTimeExceeded The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker. StreamBroken The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed. + /// - `ServiceUnavailable` : The service is unavailable. Try your call again. + /// - `ValidationError` : Inspect your request and try again. 
+ public func invokeEndpointWithResponseStream(input: InvokeEndpointWithResponseStreamInput) async throws -> InvokeEndpointWithResponseStreamOutputResponse + { + let context = ClientRuntime.HttpContextBuilder() + .withEncoder(value: encoder) + .withDecoder(value: decoder) + .withMethod(value: .post) + .withServiceName(value: serviceName) + .withOperation(value: "invokeEndpointWithResponseStream") + .withIdempotencyTokenGenerator(value: config.idempotencyTokenGenerator) + .withLogger(value: config.logger) + .withPartitionID(value: config.partitionID) + .withCredentialsProvider(value: config.credentialsProvider) + .withRegion(value: config.region) + .withSigningName(value: "sagemaker") + .withSigningRegion(value: config.signingRegion) + .build() + var operation = ClientRuntime.OperationStack(id: "invokeEndpointWithResponseStream") + operation.initializeStep.intercept(position: .after, middleware: ClientRuntime.URLPathMiddleware()) + operation.initializeStep.intercept(position: .after, middleware: ClientRuntime.URLHostMiddleware()) + let endpointParams = EndpointParams(endpoint: config.endpoint, region: config.region, useDualStack: config.useDualStack ?? false, useFIPS: config.useFIPS ?? false) + operation.buildStep.intercept(position: .before, middleware: EndpointResolverMiddleware(endpointResolver: config.serviceSpecific.endpointResolver, endpointParams: endpointParams)) + operation.buildStep.intercept(position: .before, middleware: AWSClientRuntime.UserAgentMiddleware(metadata: AWSClientRuntime.AWSUserAgentMetadata.fromConfig(serviceID: serviceName, version: "1.0", config: config))) + operation.serializeStep.intercept(position: .after, middleware: ClientRuntime.HeaderMiddleware()) + operation.serializeStep.intercept(position: .after, middleware: ContentTypeMiddleware(contentType: "application/octet-stream")) + operation.serializeStep.intercept(position: .after, middleware: InvokeEndpointWithResponseStreamInputBodyMiddleware()) + operation.finalizeStep.intercept(position: .before, middleware: ClientRuntime.ContentLengthMiddleware()) + operation.finalizeStep.intercept(position: .after, middleware: ClientRuntime.RetryMiddleware(options: config.retryStrategyOptions)) + let sigv4Config = AWSClientRuntime.SigV4Config(unsignedBody: false, signingAlgorithm: .sigv4) + operation.finalizeStep.intercept(position: .before, middleware: AWSClientRuntime.SigV4Middleware(config: sigv4Config)) + operation.deserializeStep.intercept(position: .after, middleware: ClientRuntime.DeserializeMiddleware()) + operation.deserializeStep.intercept(position: .after, middleware: ClientRuntime.LoggerMiddleware(clientLogMode: config.clientLogMode)) + let result = try await operation.handleMiddleware(context: context, input: input, next: client.getHandler()) + return result + } + } diff --git a/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClientProtocol.swift b/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClientProtocol.swift index 9328267dcdb..d79cc351a7f 100644 --- a/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClientProtocol.swift +++ b/Sources/Services/AWSSageMakerRuntime/SageMakerRuntimeClientProtocol.swift @@ -33,6 +33,29 @@ public protocol SageMakerRuntimeClientProtocol { /// - `ServiceUnavailable` : The service is unavailable. Try your call again. /// - `ValidationError` : Inspect your request and try again. 
func invokeEndpointAsync(input: InvokeEndpointAsyncInput) async throws -> InvokeEndpointAsyncOutputResponse + /// Invokes a model at the specified endpoint to return the inference response as a stream. The inference stream provides the response payload incrementally as a series of parts. Before you can get an inference stream, you must have access to a model that's deployed using Amazon SageMaker hosting services, and the container for that model must support inference streaming. For more information that can help you use this API, see the following sections in the Amazon SageMaker Developer Guide: + /// + /// * For information about how to add streaming support to a model, see [How Containers Serve Requests](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-code-how-containe-serves-requests). + /// + /// * For information about how to process the streaming response, see [Invoke real-time endpoints](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-test-endpoints.html). + /// + /// + /// Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax. Calls to InvokeEndpointWithResponseStream are authenticated by using Amazon Web Services Signature Version 4. For information, see [Authenticating Requests (Amazon Web Services Signature Version 4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) in the Amazon S3 API Reference. + /// + /// - Parameter InvokeEndpointWithResponseStreamInput : [no documentation found] + /// + /// - Returns: `InvokeEndpointWithResponseStreamOutputResponse` : [no documentation found] + /// + /// - Throws: One of the exceptions listed below __Possible Exceptions__. + /// + /// __Possible Exceptions:__ + /// - `InternalFailure` : An internal failure occurred. + /// - `InternalStreamFailure` : The stream processing failed because of an unknown error, exception or failure. Try your request again. + /// - `ModelError` : Model (owned by the customer in the container) returned 4xx or 5xx error code. + /// - `ModelStreamError` : An error occurred while streaming the response body. This error can have the following error codes: ModelInvocationTimeExceeded The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker. StreamBroken The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed. + /// - `ServiceUnavailable` : The service is unavailable. Try your call again. + /// - `ValidationError` : Inspect your request and try again. 
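    // --- Illustrative usage sketch (editor's note; not part of the generated SDK sources) ---
    // A minimal sketch of how a consuming package might call the streaming operation declared
    // just below and drain its event stream. The function name, endpoint name, and JSON payload
    // are placeholders; it assumes only this protocol plus the types added in this change
    // (InvokeEndpointWithResponseStreamInput/OutputResponse, SageMakerRuntimeClientTypes.ResponseStream,
    // SageMakerRuntimeClientTypes.PayloadPart), and that ClientRuntime.Data is Foundation.Data,
    // as elsewhere in these files. Shown commented out so the protocol body stays valid.
    //
    // import AWSSageMakerRuntime // in the consuming target
    // import Foundation
    //
    // func streamInference(client: SageMakerRuntimeClientProtocol) async throws {
    //     let input = InvokeEndpointWithResponseStreamInput(
    //         body: Data("{\"inputs\": \"hello\"}".utf8), // placeholder payload
    //         contentType: "application/json",
    //         endpointName: "my-streaming-endpoint" // placeholder endpoint name
    //     )
    //     let output = try await client.invokeEndpointWithResponseStream(input: input)
    //     // output.body is an AsyncThrowingStream of ResponseStream events.
    //     guard let events = output.body else { return }
    //     for try await event in events {
    //         switch event {
    //         case .payloadpart(let part):
    //             // Each PayloadPart carries one chunk of the model's streamed response.
    //             if let bytes = part.bytes, let text = String(data: bytes, encoding: .utf8) {
    //                 print(text, terminator: "")
    //             }
    //         case .sdkUnknown(let raw):
    //             print("unrecognized event: \(raw)")
    //         }
    //     }
    // }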
+ func invokeEndpointWithResponseStream(input: InvokeEndpointWithResponseStreamInput) async throws -> InvokeEndpointWithResponseStreamOutputResponse } public enum SageMakerRuntimeClientTypes {} diff --git a/Sources/Services/AWSSageMakerRuntime/models/Models.swift b/Sources/Services/AWSSageMakerRuntime/models/Models.swift index d54c13f4ce0..1ad18cd40d8 100644 --- a/Sources/Services/AWSSageMakerRuntime/models/Models.swift +++ b/Sources/Services/AWSSageMakerRuntime/models/Models.swift @@ -112,6 +112,80 @@ extension InternalFailureBody: Swift.Decodable { } } +extension InternalStreamFailure: Swift.Codable { + enum CodingKeys: Swift.String, Swift.CodingKey { + case message = "Message" + } + + public func encode(to encoder: Swift.Encoder) throws { + var encodeContainer = encoder.container(keyedBy: CodingKeys.self) + if let message = self.message { + try encodeContainer.encode(message, forKey: .message) + } + } + + public init(from decoder: Swift.Decoder) throws { + let containerValues = try decoder.container(keyedBy: CodingKeys.self) + let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) + properties.message = messageDecoded + } +} + +extension InternalStreamFailure { + public init(httpResponse: ClientRuntime.HttpResponse, decoder: ClientRuntime.ResponseDecoder? = nil, message: Swift.String? = nil, requestID: Swift.String? = nil) async throws { + if let data = try await httpResponse.body.readData(), + let responseDecoder = decoder { + let output: InternalStreamFailureBody = try responseDecoder.decode(responseBody: data) + self.properties.message = output.message + } else { + self.properties.message = nil + } + self.httpResponse = httpResponse + self.requestID = requestID + self.message = message + } +} + +/// The stream processing failed because of an unknown error, exception or failure. Try your request again. +public struct InternalStreamFailure: ClientRuntime.ModeledError, AWSClientRuntime.AWSServiceError, ClientRuntime.HTTPError, Swift.Error { + + public struct Properties { + public internal(set) var message: Swift.String? = nil + } + + public internal(set) var properties = Properties() + public static var typeName: Swift.String { "InternalStreamFailure" } + public static var fault: ErrorFault { .server } + public static var isRetryable: Swift.Bool { false } + public static var isThrottling: Swift.Bool { false } + public internal(set) var httpResponse = HttpResponse() + public internal(set) var message: Swift.String? + public internal(set) var requestID: Swift.String? + + public init( + message: Swift.String? = nil + ) + { + self.properties.message = message + } +} + +struct InternalStreamFailureBody: Swift.Equatable { + let message: Swift.String? 
+} + +extension InternalStreamFailureBody: Swift.Decodable { + enum CodingKeys: Swift.String, Swift.CodingKey { + case message = "Message" + } + + public init(from decoder: Swift.Decoder) throws { + let containerValues = try decoder.container(keyedBy: CodingKeys.self) + let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) + message = messageDecoded + } +} + extension InvokeEndpointAsyncInput: Swift.CustomDebugStringConvertible { public var debugDescription: Swift.String { "InvokeEndpointAsyncInput(accept: \(Swift.String(describing: accept)), contentType: \(Swift.String(describing: contentType)), endpointName: \(Swift.String(describing: endpointName)), inferenceId: \(Swift.String(describing: inferenceId)), inputLocation: \(Swift.String(describing: inputLocation)), invocationTimeoutSeconds: \(Swift.String(describing: invocationTimeoutSeconds)), requestTTLSeconds: \(Swift.String(describing: requestTTLSeconds)), customAttributes: \"CONTENT_REDACTED\")"} @@ -155,13 +229,13 @@ extension InvokeEndpointAsyncInput: ClientRuntime.URLPathProvider { } public struct InvokeEndpointAsyncInput: Swift.Equatable { - /// The desired MIME type of the inference in the response. + /// The desired MIME type of the inference response from the model container. public var accept: Swift.String? /// The MIME type of the input data in the request body. public var contentType: Swift.String? /// Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in [Section 3.3.6. Field Value Components](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6) of the Hypertext Transfer Protocol (HTTP/1.1). The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function. This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK. public var customAttributes: Swift.String? - /// The name of the endpoint that you specified when you created the endpoint using the [CreateEndpoint](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateEndpoint.html) API. + /// The name of the endpoint that you specified when you created the endpoint using the [CreateEndpoint](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) API. /// This member is required. public var endpointName: Swift.String? /// The identifier for the inference request. Amazon SageMaker will generate an identifier for you if none is specified. @@ -361,14 +435,14 @@ extension InvokeEndpointInput: ClientRuntime.URLPathProvider { } public struct InvokeEndpointInput: Swift.Equatable { - /// The desired MIME type of the inference in the response. + /// The desired MIME type of the inference response from the model container. public var accept: Swift.String? /// Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model. 
For information about the format of the request body, see [Common Data Formats-Inference](https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html). /// This member is required. public var body: ClientRuntime.Data? /// The MIME type of the input data in the request body. public var contentType: Swift.String? - /// Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in [Section 3.3.6. Field Value Components](https://tools.ietf.org/html/rfc7230#section-3.2.6) of the Hypertext Transfer Protocol (HTTP/1.1). The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function. This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK. + /// Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in [Section 3.3.6. Field Value Components](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6) of the Hypertext Transfer Protocol (HTTP/1.1). The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function. This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK. public var customAttributes: Swift.String? /// An optional JMESPath expression used to override the EnableExplanations parameter of the ClarifyExplainerConfig API. See the [EnableExplanations](https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-online-explainability-create-endpoint.html#clarify-online-explainability-create-endpoint-enable) section in the developer guide for more information. public var enableExplanations: Swift.String? @@ -479,7 +553,7 @@ public struct InvokeEndpointOutputResponse: Swift.Equatable { /// Includes the inference provided by the model. For information about the format of the response body, see [Common Data Formats-Inference](https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html). If the explainer is activated, the body includes the explanations provided by the model. For more information, see the Response section under [Invoke the Endpoint](https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-online-explainability-invoke-endpoint.html#clarify-online-explainability-response) in the Developer Guide. /// This member is required. public var body: ClientRuntime.Data? 
- /// The MIME type of the inference returned in the response body. + /// The MIME type of the inference returned from the model container. public var contentType: Swift.String? /// Provides additional information in the response about the inference returned by a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to return an ID received in the CustomAttributes header of a request or other metadata that a service endpoint was programmed to produce. The value must consist of no more than 1024 visible US-ASCII characters as specified in [Section 3.3.6. Field Value Components](https://tools.ietf.org/html/rfc7230#section-3.2.6) of the Hypertext Transfer Protocol (HTTP/1.1). If the customer wants the custom attribute returned, the model must set the custom attribute to be included on the way back. The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function. This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK. public var customAttributes: Swift.String? @@ -516,6 +590,215 @@ extension InvokeEndpointOutputResponseBody: Swift.Decodable { } } +public struct InvokeEndpointWithResponseStreamInputBodyMiddleware: ClientRuntime.Middleware { + public let id: Swift.String = "InvokeEndpointWithResponseStreamInputBodyMiddleware" + + public init() {} + + public func handle(context: Context, + input: ClientRuntime.SerializeStepInput, + next: H) async throws -> ClientRuntime.OperationOutput + where H: Handler, + Self.MInput == H.Input, + Self.MOutput == H.Output, + Self.Context == H.Context + { + if let body = input.operationInput.body { + let bodyData = body + let bodyBody = ClientRuntime.HttpBody.data(bodyData) + input.builder.withBody(bodyBody) + } + return try await next.handle(context: context, input: input) + } + + public typealias MInput = ClientRuntime.SerializeStepInput + public typealias MOutput = ClientRuntime.OperationOutput + public typealias Context = ClientRuntime.HttpContext +} + +extension InvokeEndpointWithResponseStreamInput: Swift.CustomDebugStringConvertible { + public var debugDescription: Swift.String { + "InvokeEndpointWithResponseStreamInput(accept: \(Swift.String(describing: accept)), contentType: \(Swift.String(describing: contentType)), endpointName: \(Swift.String(describing: endpointName)), inferenceId: \(Swift.String(describing: inferenceId)), targetContainerHostname: \(Swift.String(describing: targetContainerHostname)), targetVariant: \(Swift.String(describing: targetVariant)), body: \"CONTENT_REDACTED\", customAttributes: \"CONTENT_REDACTED\")"} +} + +extension InvokeEndpointWithResponseStreamInput: Swift.Encodable { + enum CodingKeys: Swift.String, Swift.CodingKey { + case body = "Body" + } + + public func encode(to encoder: Swift.Encoder) throws { + var encodeContainer = encoder.container(keyedBy: CodingKeys.self) + if let body = self.body { + try encodeContainer.encode(body.base64EncodedString(), forKey: .body) + } + } +} + +extension InvokeEndpointWithResponseStreamInput: ClientRuntime.HeaderProvider { + public var headers: ClientRuntime.Headers { + var items = ClientRuntime.Headers() + if let accept = accept { + items.add(Header(name: 
"X-Amzn-SageMaker-Accept", value: Swift.String(accept))) + } + if let contentType = contentType { + items.add(Header(name: "Content-Type", value: Swift.String(contentType))) + } + if let customAttributes = customAttributes { + items.add(Header(name: "X-Amzn-SageMaker-Custom-Attributes", value: Swift.String(customAttributes))) + } + if let inferenceId = inferenceId { + items.add(Header(name: "X-Amzn-SageMaker-Inference-Id", value: Swift.String(inferenceId))) + } + if let targetContainerHostname = targetContainerHostname { + items.add(Header(name: "X-Amzn-SageMaker-Target-Container-Hostname", value: Swift.String(targetContainerHostname))) + } + if let targetVariant = targetVariant { + items.add(Header(name: "X-Amzn-SageMaker-Target-Variant", value: Swift.String(targetVariant))) + } + return items + } +} + +extension InvokeEndpointWithResponseStreamInput: ClientRuntime.URLPathProvider { + public var urlPath: Swift.String? { + guard let endpointName = endpointName else { + return nil + } + return "/endpoints/\(endpointName.urlPercentEncoding())/invocations-response-stream" + } +} + +public struct InvokeEndpointWithResponseStreamInput: Swift.Equatable { + /// The desired MIME type of the inference response from the model container. + public var accept: Swift.String? + /// Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model. For information about the format of the request body, see [Common Data Formats-Inference](https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html). + /// This member is required. + public var body: ClientRuntime.Data? + /// The MIME type of the input data in the request body. + public var contentType: Swift.String? + /// Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in [Section 3.3.6. Field Value Components](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6) of the Hypertext Transfer Protocol (HTTP/1.1). The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function. This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK. + public var customAttributes: Swift.String? + /// The name of the endpoint that you specified when you created the endpoint using the [CreateEndpoint](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) API. + /// This member is required. + public var endpointName: Swift.String? + /// An identifier that you assign to your request. + public var inferenceId: Swift.String? + /// If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke. + public var targetContainerHostname: Swift.String? + /// Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. 
Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights. For information about how to use variant targeting to perform a/b testing, see [Test models in production](https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html) + public var targetVariant: Swift.String? + + public init( + accept: Swift.String? = nil, + body: ClientRuntime.Data? = nil, + contentType: Swift.String? = nil, + customAttributes: Swift.String? = nil, + endpointName: Swift.String? = nil, + inferenceId: Swift.String? = nil, + targetContainerHostname: Swift.String? = nil, + targetVariant: Swift.String? = nil + ) + { + self.accept = accept + self.body = body + self.contentType = contentType + self.customAttributes = customAttributes + self.endpointName = endpointName + self.inferenceId = inferenceId + self.targetContainerHostname = targetContainerHostname + self.targetVariant = targetVariant + } +} + +struct InvokeEndpointWithResponseStreamInputBody: Swift.Equatable { + let body: ClientRuntime.Data? +} + +extension InvokeEndpointWithResponseStreamInputBody: Swift.Decodable { + enum CodingKeys: Swift.String, Swift.CodingKey { + case body = "Body" + } + + public init(from decoder: Swift.Decoder) throws { + let containerValues = try decoder.container(keyedBy: CodingKeys.self) + let bodyDecoded = try containerValues.decodeIfPresent(ClientRuntime.Data.self, forKey: .body) + body = bodyDecoded + } +} + +public enum InvokeEndpointWithResponseStreamOutputError: ClientRuntime.HttpResponseErrorBinding { + public static func makeError(httpResponse: ClientRuntime.HttpResponse, decoder: ClientRuntime.ResponseDecoder? = nil) async throws -> Swift.Error { + let restJSONError = try await AWSClientRuntime.RestJSONError(httpResponse: httpResponse) + let requestID = httpResponse.requestId + switch restJSONError.errorType { + case "InternalFailure": return try await InternalFailure(httpResponse: httpResponse, decoder: decoder, message: restJSONError.errorMessage, requestID: requestID) + case "InternalStreamFailure": return try await InternalStreamFailure(httpResponse: httpResponse, decoder: decoder, message: restJSONError.errorMessage, requestID: requestID) + case "ModelError": return try await ModelError(httpResponse: httpResponse, decoder: decoder, message: restJSONError.errorMessage, requestID: requestID) + case "ModelStreamError": return try await ModelStreamError(httpResponse: httpResponse, decoder: decoder, message: restJSONError.errorMessage, requestID: requestID) + case "ServiceUnavailable": return try await ServiceUnavailable(httpResponse: httpResponse, decoder: decoder, message: restJSONError.errorMessage, requestID: requestID) + case "ValidationError": return try await ValidationError(httpResponse: httpResponse, decoder: decoder, message: restJSONError.errorMessage, requestID: requestID) + default: return try await AWSClientRuntime.UnknownAWSHTTPServiceError.makeError(httpResponse: httpResponse, message: restJSONError.errorMessage, requestID: requestID, typeName: restJSONError.errorType) + } + } +} + +extension InvokeEndpointWithResponseStreamOutputResponse: Swift.CustomDebugStringConvertible { + public var debugDescription: Swift.String { + "InvokeEndpointWithResponseStreamOutputResponse(body: \(Swift.String(describing: body)), contentType: \(Swift.String(describing: contentType)), invokedProductionVariant: \(Swift.String(describing: invokedProductionVariant)), customAttributes: \"CONTENT_REDACTED\")"} +} + +extension 
InvokeEndpointWithResponseStreamOutputResponse: ClientRuntime.HttpResponseBinding { + public init(httpResponse: ClientRuntime.HttpResponse, decoder: ClientRuntime.ResponseDecoder? = nil) async throws { + if let contentTypeHeaderValue = httpResponse.headers.value(for: "X-Amzn-SageMaker-Content-Type") { + self.contentType = contentTypeHeaderValue + } else { + self.contentType = nil + } + if let customAttributesHeaderValue = httpResponse.headers.value(for: "X-Amzn-SageMaker-Custom-Attributes") { + self.customAttributes = customAttributesHeaderValue + } else { + self.customAttributes = nil + } + if let invokedProductionVariantHeaderValue = httpResponse.headers.value(for: "x-Amzn-Invoked-Production-Variant") { + self.invokedProductionVariant = invokedProductionVariantHeaderValue + } else { + self.invokedProductionVariant = nil + } + if case let .stream(stream) = httpResponse.body, let responseDecoder = decoder { + let messageDecoder = AWSClientRuntime.AWSEventStream.AWSMessageDecoder() + let decoderStream = ClientRuntime.EventStream.DefaultMessageDecoderStream(stream: stream, messageDecoder: messageDecoder, responseDecoder: responseDecoder) + self.body = decoderStream.toAsyncStream() + } else { + self.body = nil + } + } +} + +public struct InvokeEndpointWithResponseStreamOutputResponse: Swift.Equatable { + /// A stream of payload parts. Each part contains a portion of the response for a streaming inference request. + /// This member is required. + public var body: AsyncThrowingStream? + /// The MIME type of the inference returned from the model container. + public var contentType: Swift.String? + /// Provides additional information in the response about the inference returned by a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to return an ID received in the CustomAttributes header of a request or other metadata that a service endpoint was programmed to produce. The value must consist of no more than 1024 visible US-ASCII characters as specified in [Section 3.3.6. Field Value Components](https://tools.ietf.org/html/rfc7230#section-3.2.6) of the Hypertext Transfer Protocol (HTTP/1.1). If the customer wants the custom attribute returned, the model must set the custom attribute to be included on the way back. The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function. This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK. + public var customAttributes: Swift.String? + /// Identifies the production variant that was invoked. + public var invokedProductionVariant: Swift.String? + + public init( + body: AsyncThrowingStream? = nil, + contentType: Swift.String? = nil, + customAttributes: Swift.String? = nil, + invokedProductionVariant: Swift.String? = nil + ) + { + self.body = body + self.contentType = contentType + self.customAttributes = customAttributes + self.invokedProductionVariant = invokedProductionVariant + } +} + extension ModelError { public init(httpResponse: ClientRuntime.HttpResponse, decoder: ClientRuntime.ResponseDecoder? = nil, message: Swift.String? = nil, requestID: Swift.String? 
= nil) async throws { if let data = try await httpResponse.body.readData(), @@ -656,6 +939,181 @@ extension ModelNotReadyExceptionBody: Swift.Decodable { } } +extension ModelStreamError: Swift.Codable { + enum CodingKeys: Swift.String, Swift.CodingKey { + case errorCode = "ErrorCode" + case message = "Message" + } + + public func encode(to encoder: Swift.Encoder) throws { + var encodeContainer = encoder.container(keyedBy: CodingKeys.self) + if let errorCode = self.errorCode { + try encodeContainer.encode(errorCode, forKey: .errorCode) + } + if let message = self.message { + try encodeContainer.encode(message, forKey: .message) + } + } + + public init(from decoder: Swift.Decoder) throws { + let containerValues = try decoder.container(keyedBy: CodingKeys.self) + let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) + properties.message = messageDecoded + let errorCodeDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .errorCode) + properties.errorCode = errorCodeDecoded + } +} + +extension ModelStreamError { + public init(httpResponse: ClientRuntime.HttpResponse, decoder: ClientRuntime.ResponseDecoder? = nil, message: Swift.String? = nil, requestID: Swift.String? = nil) async throws { + if let data = try await httpResponse.body.readData(), + let responseDecoder = decoder { + let output: ModelStreamErrorBody = try responseDecoder.decode(responseBody: data) + self.properties.errorCode = output.errorCode + self.properties.message = output.message + } else { + self.properties.errorCode = nil + self.properties.message = nil + } + self.httpResponse = httpResponse + self.requestID = requestID + self.message = message + } +} + +/// An error occurred while streaming the response body. This error can have the following error codes: ModelInvocationTimeExceeded The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker. StreamBroken The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed. +public struct ModelStreamError: ClientRuntime.ModeledError, AWSClientRuntime.AWSServiceError, ClientRuntime.HTTPError, Swift.Error { + + public struct Properties { + /// This error can have the following error codes: ModelInvocationTimeExceeded The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker. StreamBroken The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed. + public internal(set) var errorCode: Swift.String? = nil + public internal(set) var message: Swift.String? = nil + } + + public internal(set) var properties = Properties() + public static var typeName: Swift.String { "ModelStreamError" } + public static var fault: ErrorFault { .client } + public static var isRetryable: Swift.Bool { false } + public static var isThrottling: Swift.Bool { false } + public internal(set) var httpResponse = HttpResponse() + public internal(set) var message: Swift.String? + public internal(set) var requestID: Swift.String? + + public init( + errorCode: Swift.String? = nil, + message: Swift.String? = nil + ) + { + self.properties.errorCode = errorCode + self.properties.message = message + } +} + +struct ModelStreamErrorBody: Swift.Equatable { + let message: Swift.String? + let errorCode: Swift.String? 
+} + +extension ModelStreamErrorBody: Swift.Decodable { + enum CodingKeys: Swift.String, Swift.CodingKey { + case errorCode = "ErrorCode" + case message = "Message" + } + + public init(from decoder: Swift.Decoder) throws { + let containerValues = try decoder.container(keyedBy: CodingKeys.self) + let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) + message = messageDecoded + let errorCodeDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .errorCode) + errorCode = errorCodeDecoded + } +} + +extension SageMakerRuntimeClientTypes.PayloadPart: Swift.Codable { + enum CodingKeys: Swift.String, Swift.CodingKey { + case bytes = "Bytes" + } + + public func encode(to encoder: Swift.Encoder) throws { + var encodeContainer = encoder.container(keyedBy: CodingKeys.self) + if let bytes = self.bytes { + try encodeContainer.encode(bytes.base64EncodedString(), forKey: .bytes) + } + } + + public init(from decoder: Swift.Decoder) throws { + let containerValues = try decoder.container(keyedBy: CodingKeys.self) + let bytesDecoded = try containerValues.decodeIfPresent(ClientRuntime.Data.self, forKey: .bytes) + bytes = bytesDecoded + } +} + +extension SageMakerRuntimeClientTypes.PayloadPart: Swift.CustomDebugStringConvertible { + public var debugDescription: Swift.String { + "PayloadPart(bytes: \"CONTENT_REDACTED\")"} +} + +extension SageMakerRuntimeClientTypes { + /// A wrapper for pieces of the payload that's returned in response to a streaming inference request. A streaming inference response consists of one or more payload parts. + public struct PayloadPart: Swift.Equatable { + /// A blob that contains part of the response for your streaming inference request. + public var bytes: ClientRuntime.Data? + + public init( + bytes: ClientRuntime.Data? = nil + ) + { + self.bytes = bytes + } + } + +} + +extension SageMakerRuntimeClientTypes.ResponseStream: ClientRuntime.MessageUnmarshallable { + public init(message: ClientRuntime.EventStream.Message, decoder: ClientRuntime.ResponseDecoder) throws { + switch try message.type() { + case .event(let params): + switch params.eventType { + case "PayloadPart": + var event = SageMakerRuntimeClientTypes.PayloadPart() + event.bytes = message.payload + self = .payloadpart(event) + default: + self = .sdkUnknown("error processing event stream, unrecognized event: \(params.eventType)") + } + case .exception(let params): + let makeError: (ClientRuntime.EventStream.Message, ClientRuntime.EventStream.MessageType.ExceptionParams) throws -> Swift.Error = { message, params in + switch params.exceptionType { + case "ModelStreamError": + return try decoder.decode(responseBody: message.payload) as ModelStreamError + case "InternalStreamFailure": + return try decoder.decode(responseBody: message.payload) as InternalStreamFailure + default: + let httpResponse = HttpResponse(body: .data(message.payload), statusCode: .ok) + return AWSClientRuntime.UnknownAWSHTTPServiceError(httpResponse: httpResponse, message: "error processing event stream, unrecognized ':exceptionType': \(params.exceptionType); contentType: \(params.contentType ?? 
"nil")", requestID: nil, typeName: nil) + } + } + let error = try makeError(message, params) + throw error + case .error(let params): + let httpResponse = HttpResponse(body: .data(message.payload), statusCode: .ok) + throw AWSClientRuntime.UnknownAWSHTTPServiceError(httpResponse: httpResponse, message: "error processing event stream, unrecognized ':errorType': \(params.errorCode); message: \(params.message ?? "nil")", requestID: nil, typeName: nil) + case .unknown(messageType: let messageType): + throw ClientRuntime.ClientError.unknownError("unrecognized event stream message ':message-type': \(messageType)") + } + } +} + +extension SageMakerRuntimeClientTypes { + /// A stream of payload parts. Each part contains a portion of the response for a streaming inference request. + public enum ResponseStream: Swift.Equatable { + /// A wrapper for pieces of the payload that's returned in response to a streaming inference request. A streaming inference response consists of one or more payload parts. + case payloadpart(SageMakerRuntimeClientTypes.PayloadPart) + case sdkUnknown(Swift.String) + } + +} + extension ServiceUnavailable { public init(httpResponse: ClientRuntime.HttpResponse, decoder: ClientRuntime.ResponseDecoder? = nil, message: Swift.String? = nil, requestID: Swift.String? = nil) async throws { if let data = try await httpResponse.body.readData(), diff --git a/Sources/Services/AWSTranscribeStreaming/models/Models.swift b/Sources/Services/AWSTranscribeStreaming/models/Models.swift index 6ef7059f71b..c52c6f83fda 100644 --- a/Sources/Services/AWSTranscribeStreaming/models/Models.swift +++ b/Sources/Services/AWSTranscribeStreaming/models/Models.swift @@ -163,7 +163,7 @@ extension BadRequestException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -741,7 +741,7 @@ extension ConflictException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -990,7 +990,7 @@ extension InternalFailureException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -1349,7 +1349,7 @@ extension LimitExceededException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } @@ -2326,7 +2326,7 @@ extension ServiceUnavailableException: Swift.Codable { public init(from decoder: Swift.Decoder) throws { let containerValues = try decoder.container(keyedBy: CodingKeys.self) let messageDecoded = try containerValues.decodeIfPresent(Swift.String.self, forKey: .message) - message = messageDecoded + properties.message = messageDecoded } } diff --git a/codegen/sdk-codegen/aws-models/sagemaker-runtime.2017-05-13.json 
b/codegen/sdk-codegen/aws-models/sagemaker-runtime.2017-05-13.json index fddfbe13bd1..87841548987 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker-runtime.2017-05-13.json +++ b/codegen/sdk-codegen/aws-models/sagemaker-runtime.2017-05-13.json @@ -38,6 +38,9 @@ }, { "target": "com.amazonaws.sagemakerruntime#InvokeEndpointAsync" + }, + { + "target": "com.amazonaws.sagemakerruntime#InvokeEndpointWithResponseStream" } ], "traits": { @@ -46,6 +49,7 @@ "arnNamespace": "sagemaker", "cloudFormationName": "SageMakerRuntime", "cloudTrailEventSource": "sagemakerruntime.amazonaws.com", + "docId": "runtime.sagemaker-2017-05-13", "endpointPrefix": "runtime.sagemaker" }, "aws.auth#sigv4": { @@ -1113,6 +1117,9 @@ "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, + "com.amazonaws.sagemakerruntime#ErrorCode": { + "type": "string" + }, "com.amazonaws.sagemakerruntime#Header": { "type": "string", "traits": { @@ -1169,6 +1176,18 @@ "smithy.api#httpError": 500 } }, + "com.amazonaws.sagemakerruntime#InternalStreamFailure": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.sagemakerruntime#Message" + } + }, + "traits": { + "smithy.api#documentation": "
The stream processing failed because of an unknown error, exception or failure. Try your request again.

", + "smithy.api#error": "server" + } + }, "com.amazonaws.sagemakerruntime#InvocationTimeoutSecondsHeader": { "type": "integer", "traits": { @@ -1207,7 +1226,7 @@ } ], "traits": { - "smithy.api#documentation": "

After you deploy a model into production using Amazon SageMaker hosting services, your\n client applications use this API to get inferences from the model hosted at the\n specified endpoint.

\n

For an overview of Amazon SageMaker, see How It Works.

\n

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add\n additional headers. You should not rely on the behavior of headers outside those\n enumerated in the request syntax.

\n

Calls to InvokeEndpoint are authenticated by using Amazon Web Services\n Signature Version 4. For information, see Authenticating\n Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API\n Reference.

\n

A customer's model containers must respond to requests within 60 seconds. The model\n itself can have a maximum processing time of 60 seconds before responding to\n invocations. If your model is going to take 50-60 seconds of processing time, the SDK\n socket timeout should be set to be 70 seconds.

\n \n

Endpoints are scoped to an individual account, and are not public. The URL does\n not contain the account ID, but Amazon SageMaker determines the account ID from the\n authentication token that is supplied by the caller.

\n
", + "smithy.api#documentation": "

After you deploy a model into production using Amazon SageMaker hosting services,\n your client applications use this API to get inferences from the model hosted at the\n specified endpoint.

\n

For an overview of Amazon SageMaker, see How It Works.

\n

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add\n additional headers. You should not rely on the behavior of headers outside those\n enumerated in the request syntax.

\n

Calls to InvokeEndpoint are authenticated by using Amazon Web Services\n Signature Version 4. For information, see Authenticating\n Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference.

\n

A customer's model containers must respond to requests within 60 seconds. The model\n itself can have a maximum processing time of 60 seconds before responding to\n invocations. If your model is going to take 50-60 seconds of processing time, the SDK\n socket timeout should be set to be 70 seconds.

\n \n

Endpoints are scoped to an individual account, and are not public. The URL does\n not contain the account ID, but Amazon SageMaker determines the account ID from\n the authentication token that is supplied by the caller.

\n
", "smithy.api#http": { "method": "POST", "uri": "/endpoints/{EndpointName}/invocations", @@ -1235,7 +1254,7 @@ } ], "traits": { - "smithy.api#documentation": "

After you deploy a model into production using Amazon SageMaker hosting services, your client\n applications use this API to get inferences from the model hosted at the specified\n endpoint in an asynchronous manner.

\n

Inference requests sent to this API are enqueued for asynchronous processing. The\n processing of the inference request may or may not complete before you receive a\n response from this API. The response from this API will not contain the result of the\n inference request but contain information about where you can locate it.

\n

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker\n might add additional headers. You should not rely on the behavior of headers outside\n those enumerated in the request syntax.

\n

Calls to InvokeEndpointAsync are authenticated by using Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the\n Amazon S3 API Reference.

", + "smithy.api#documentation": "

After you deploy a model into production using Amazon SageMaker hosting services,\n your client applications use this API to get inferences from the model hosted at the\n specified endpoint in an asynchronous manner.

\n

Inference requests sent to this API are enqueued for asynchronous processing. The\n processing of the inference request may or may not complete before you receive a\n response from this API. The response from this API will not contain the result of the\n inference request but contain information about where you can locate it.

\n

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add\n additional headers. You should not rely on the behavior of headers outside those\n enumerated in the request syntax.

\n

Calls to InvokeEndpointAsync are authenticated by using Amazon Web Services Signature Version 4. For information, see Authenticating\n Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference.

", "smithy.api#http": { "method": "POST", "uri": "/endpoints/{EndpointName}/async-invocations", @@ -1249,7 +1268,7 @@ "EndpointName": { "target": "com.amazonaws.sagemakerruntime#EndpointName", "traits": { - "smithy.api#documentation": "

The name of the endpoint that you specified when you created the endpoint using the\n \n CreateEndpoint\n API.

", + "smithy.api#documentation": "

The name of the endpoint that you specified when you created the endpoint using the\n CreateEndpoint API.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1264,21 +1283,21 @@ "Accept": { "target": "com.amazonaws.sagemakerruntime#Header", "traits": { - "smithy.api#documentation": "

The desired MIME type of the inference in the response.

", + "smithy.api#documentation": "

The desired MIME type of the inference response from the model container.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Accept" } }, "CustomAttributes": { "target": "com.amazonaws.sagemakerruntime#CustomAttributesHeader", "traits": { - "smithy.api#documentation": "

Provides additional information about a request for an inference submitted to a model\n hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded\n verbatim. You could use this value, for example, to provide an ID that you can use to\n track a request or to provide other metadata that a service endpoint was programmed to\n process. The value must consist of no more than 1024 visible US-ASCII characters as\n specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol\n (HTTP/1.1).

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.

", + "smithy.api#documentation": "

Provides additional information about a request for an inference submitted to a model\n hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded\n verbatim. You could use this value, for example, to provide an ID that you can use to\n track a request or to provide other metadata that a service endpoint was programmed to\n process. The value must consist of no more than 1024 visible US-ASCII characters as\n specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol\n (HTTP/1.1).

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.
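A small, hypothetical sketch of the round trip described here: the caller forwards an opaque attribute (a trace ID in this example) and reads back whatever the container echoed. The endpoint name and attribute value are invented, and the initializer shape is an assumption.

```swift
import AWSSageMakerRuntime
import Foundation

// Sketch: forward an opaque trace ID via CustomAttributes and read back
// whatever the model container chose to return in its response.
let client = try SageMakerRuntimeClient(region: "us-east-1")
let output = try await client.invokeEndpoint(input: InvokeEndpointInput(
    body: Data(#"{"features": [1, 2, 3]}"#.utf8),
    contentType: "application/json",
    customAttributes: "trace-id=7f3a9c",        // opaque, at most 1024 visible US-ASCII characters
    endpointName: "churn-model-endpoint"        // placeholder
))
// Empty unless the container set the attribute on the way back, for example
// by prepending "Trace ID: " in its post-processing function.
print(output.customAttributes ?? "")
```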

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Custom-Attributes" } }, "InferenceId": { "target": "com.amazonaws.sagemakerruntime#InferenceId", "traits": { - "smithy.api#documentation": "

The identifier for the inference request. Amazon SageMaker will generate an identifier for you if\n none is specified.

", + "smithy.api#documentation": "

The identifier for the inference request. Amazon SageMaker will generate an\n identifier for you if none is specified.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Inference-Id" } }, @@ -1293,14 +1312,14 @@ "RequestTTLSeconds": { "target": "com.amazonaws.sagemakerruntime#RequestTTLSecondsHeader", "traits": { - "smithy.api#documentation": "

Maximum age in seconds a request can be in the queue before it is marked as\n expired. The default is 6 hours, or 21,600 seconds.

", + "smithy.api#documentation": "

Maximum age in seconds a request can be in the queue before it is marked as expired.\n The default is 6 hours, or 21,600 seconds.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-RequestTTLSeconds" } }, "InvocationTimeoutSeconds": { "target": "com.amazonaws.sagemakerruntime#InvocationTimeoutSecondsHeader", "traits": { - "smithy.api#documentation": "

Maximum amount of time in seconds a request can be processed before it is marked as expired. The default is 15 minutes, or 900 seconds.

", + "smithy.api#documentation": "

Maximum amount of time in seconds a request can be processed before it is marked as\n expired. The default is 15 minutes, or 900 seconds.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-InvocationTimeoutSeconds" } } @@ -1315,7 +1334,7 @@ "InferenceId": { "target": "com.amazonaws.sagemakerruntime#Header", "traits": { - "smithy.api#documentation": "

Identifier for an inference request. This will be the same as the\n InferenceId specified in the input. Amazon SageMaker will generate an identifier\n for you if you do not specify one.

" + "smithy.api#documentation": "

Identifier for an inference request. This will be the same as the\n InferenceId specified in the input. Amazon SageMaker will generate\n an identifier for you if you do not specify one.

" } }, "OutputLocation": { @@ -1328,7 +1347,7 @@ "FailureLocation": { "target": "com.amazonaws.sagemakerruntime#Header", "traits": { - "smithy.api#documentation": "

The Amazon S3 URI where the inference failure response payload is stored.

", + "smithy.api#documentation": "

The Amazon S3 URI where the inference failure response payload is\n stored.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-FailureLocation" } } @@ -1343,7 +1362,7 @@ "EndpointName": { "target": "com.amazonaws.sagemakerruntime#EndpointName", "traits": { - "smithy.api#documentation": "

The name of the endpoint that you specified when you created the endpoint using the\n CreateEndpoint API.

", + "smithy.api#documentation": "

The name of the endpoint that you specified when you created the endpoint using the\n CreateEndpoint API.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1351,7 +1370,7 @@ "Body": { "target": "com.amazonaws.sagemakerruntime#BodyBlob", "traits": { - "smithy.api#documentation": "

Provides input data, in the format specified in the ContentType\n request header. Amazon SageMaker passes all of the data in the body to the model.

\n

For information about the format of the request body, see Common Data\n Formats-Inference.

", + "smithy.api#documentation": "

Provides input data, in the format specified in the ContentType\n request header. Amazon SageMaker passes all of the data in the body to the model.

\n

For information about the format of the request body, see Common Data\n Formats-Inference.
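Because the body is forwarded to the model verbatim in whatever format the ContentType header declares, building it usually amounts to encoding a caller-defined request type. A sketch under that assumption, with the request shape, endpoint, and variant names invented for illustration:

```swift
import AWSSageMakerRuntime
import Foundation

// Hypothetical request shape; the model container defines the real contract.
struct RatingRequest: Encodable {
    let userId: Int
    let itemIds: [Int]
}

func predictRatings() async throws -> Data? {
    let client = try SageMakerRuntimeClient(region: "us-east-1")
    let payload = try JSONEncoder().encode(RatingRequest(userId: 7, itemIds: [11, 42]))

    let output = try await client.invokeEndpoint(input: InvokeEndpointInput(
        accept: "application/json",
        body: payload,                      // passed to the model unchanged
        contentType: "application/json",    // must match how the body was encoded
        endpointName: "ratings-endpoint",   // placeholder
        targetVariant: "variant-B"          // optional: override weighted traffic for A/B testing
    ))
    return output.body                      // decode according to your model's response format
}
```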

", "smithy.api#httpPayload": {}, "smithy.api#required": {} } @@ -1366,14 +1385,14 @@ "Accept": { "target": "com.amazonaws.sagemakerruntime#Header", "traits": { - "smithy.api#documentation": "

The desired MIME type of the inference in the response.

", + "smithy.api#documentation": "

The desired MIME type of the inference response from the model container.

", "smithy.api#httpHeader": "Accept" } }, "CustomAttributes": { "target": "com.amazonaws.sagemakerruntime#CustomAttributesHeader", "traits": { - "smithy.api#documentation": "

Provides additional information about a request for an inference submitted to a model\n hosted at an Amazon SageMaker endpoint. The information is an opaque value that is\n forwarded verbatim. You could use this value, for example, to provide an ID that you can\n use to track a request or to provide other metadata that a service endpoint was\n programmed to process. The value must consist of no more than 1024 visible US-ASCII\n characters as specified in Section 3.3.6. Field Value\n Components of the Hypertext Transfer Protocol (HTTP/1.1).

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.

", + "smithy.api#documentation": "

Provides additional information about a request for an inference submitted to a model\n hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded\n verbatim. You could use this value, for example, to provide an ID that you can use to\n track a request or to provide other metadata that a service endpoint was programmed to\n process. The value must consist of no more than 1024 visible US-ASCII characters as\n specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol\n (HTTP/1.1).

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Custom-Attributes" } }, @@ -1387,14 +1406,14 @@ "TargetVariant": { "target": "com.amazonaws.sagemakerruntime#TargetVariantHeader", "traits": { - "smithy.api#documentation": "

Specify the production variant to send the inference request to when invoking an\n endpoint that is running two or more variants. Note that this parameter overrides the\n default behavior for the endpoint, which is to distribute the invocation traffic based\n on the variant weights.

\n

For information about how to use variant targeting to perform a/b testing, see Test models in\n production\n

", + "smithy.api#documentation": "

Specify the production variant to send the inference request to when invoking an\n endpoint that is running two or more variants. Note that this parameter overrides the\n default behavior for the endpoint, which is to distribute the invocation traffic based\n on the variant weights.

\n

For information about how to use variant targeting to perform a/b testing, see Test models in\n production\n

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Target-Variant" } }, "TargetContainerHostname": { "target": "com.amazonaws.sagemakerruntime#TargetContainerHostnameHeader", "traits": { - "smithy.api#documentation": "

If the endpoint hosts multiple containers and is configured to use direct invocation,\n this parameter specifies the host name of the container to invoke.

", + "smithy.api#documentation": "

If the endpoint hosts multiple containers and is configured to use direct invocation,\n this parameter specifies the host name of the container to invoke.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Target-Container-Hostname" } }, @@ -1423,7 +1442,7 @@ "Body": { "target": "com.amazonaws.sagemakerruntime#BodyBlob", "traits": { - "smithy.api#documentation": "

Includes the inference provided by the model.

\n

For information about the format of the response body, see Common Data\n Formats-Inference.

\n

If the explainer is activated, the\n body includes the explanations provided by the model. For more information, see the\n Response section under Invoke the Endpoint in the Developer Guide.

", + "smithy.api#documentation": "

Includes the inference provided by the model.

\n

For information about the format of the response body, see Common Data\n Formats-Inference.

\n

If the explainer is activated, the body includes the explanations provided by the\n model. For more information, see the Response section\n under Invoke the Endpoint in the Developer Guide.

", "smithy.api#httpPayload": {}, "smithy.api#required": {} } @@ -1431,7 +1450,7 @@ "ContentType": { "target": "com.amazonaws.sagemakerruntime#Header", "traits": { - "smithy.api#documentation": "

The MIME type of the inference returned in the response body.

", + "smithy.api#documentation": "

The MIME type of the inference returned from the model container.

", "smithy.api#httpHeader": "Content-Type" } }, @@ -1445,7 +1464,146 @@ "CustomAttributes": { "target": "com.amazonaws.sagemakerruntime#CustomAttributesHeader", "traits": { - "smithy.api#documentation": "

Provides additional information in the response about the inference returned by a\n model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is\n forwarded verbatim. You could use this value, for example, to return an ID received in\n the CustomAttributes header of a request or other metadata that a service\n endpoint was programmed to produce. The value must consist of no more than 1024 visible\n US-ASCII characters as specified in Section 3.3.6. Field Value\n Components of the Hypertext Transfer Protocol (HTTP/1.1). If the customer\n wants the custom attribute returned, the model must set the custom attribute to be\n included on the way back.

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.

", + "smithy.api#documentation": "

Provides additional information in the response about the inference returned by a\n model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is\n forwarded verbatim. You could use this value, for example, to return an ID received in\n the CustomAttributes header of a request or other metadata that a service\n endpoint was programmed to produce. The value must consist of no more than 1024 visible\n US-ASCII characters as specified in Section 3.3.6. Field Value\n Components of the Hypertext Transfer Protocol (HTTP/1.1). If the customer\n wants the custom attribute returned, the model must set the custom attribute to be\n included on the way back.

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Custom-Attributes" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.sagemakerruntime#InvokeEndpointWithResponseStream": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemakerruntime#InvokeEndpointWithResponseStreamInput" + }, + "output": { + "target": "com.amazonaws.sagemakerruntime#InvokeEndpointWithResponseStreamOutput" + }, + "errors": [ + { + "target": "com.amazonaws.sagemakerruntime#InternalFailure" + }, + { + "target": "com.amazonaws.sagemakerruntime#InternalStreamFailure" + }, + { + "target": "com.amazonaws.sagemakerruntime#ModelError" + }, + { + "target": "com.amazonaws.sagemakerruntime#ModelStreamError" + }, + { + "target": "com.amazonaws.sagemakerruntime#ServiceUnavailable" + }, + { + "target": "com.amazonaws.sagemakerruntime#ValidationError" + } + ], + "traits": { + "smithy.api#documentation": "

Invokes a model at the specified endpoint to return the inference response as a stream. The inference stream provides the response payload incrementally as a series of parts. Before you can get an inference stream, you must have access to a model that's deployed using Amazon SageMaker hosting services, and the container for that model must support inference streaming.

For more information that can help you use this API, see the following sections in the Amazon SageMaker Developer Guide:

Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

Calls to InvokeEndpointWithResponseStream are authenticated by using Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference.
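A rough sketch of what a streaming invocation could look like from Swift, assuming the generated operation exposes the response stream as an optional AsyncThrowingStream of ResponseStream events; the endpoint name, prompt, and the exact streaming type are assumptions, not taken from this hunk.

```swift
import AWSSageMakerRuntime
import Foundation

// Sketch: request a streaming inference and consume the parts as they arrive.
func streamInference() async throws {
    let client = try SageMakerRuntimeClient(region: "us-east-1")

    let output = try await client.invokeEndpointWithResponseStream(input:
        InvokeEndpointWithResponseStreamInput(
            accept: "application/json",
            body: Data(#"{"prompt": "Tell me a story."}"#.utf8),
            contentType: "application/json",
            endpointName: "llm-streaming-endpoint"     // placeholder
        )
    )

    // The response stream member is assumed to be an async sequence of
    // ResponseStream events; each part arrives as the model produces it.
    if let events = output.body {
        for try await event in events {
            print(event)   // see the union-handling sketch later in this section
        }
    }
}
```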

", + "smithy.api#http": { + "method": "POST", + "uri": "/endpoints/{EndpointName}/invocations-response-stream", + "code": 200 + } + } + }, + "com.amazonaws.sagemakerruntime#InvokeEndpointWithResponseStreamInput": { + "type": "structure", + "members": { + "EndpointName": { + "target": "com.amazonaws.sagemakerruntime#EndpointName", + "traits": { + "smithy.api#documentation": "

The name of the endpoint that you specified when you created the endpoint using the\n CreateEndpoint API.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Body": { + "target": "com.amazonaws.sagemakerruntime#BodyBlob", + "traits": { + "smithy.api#documentation": "

Provides input data, in the format specified in the ContentType\n request header. Amazon SageMaker passes all of the data in the body to the model.

\n

For information about the format of the request body, see Common Data\n Formats-Inference.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "ContentType": { + "target": "com.amazonaws.sagemakerruntime#Header", + "traits": { + "smithy.api#documentation": "

The MIME type of the input data in the request body.

", + "smithy.api#httpHeader": "Content-Type" + } + }, + "Accept": { + "target": "com.amazonaws.sagemakerruntime#Header", + "traits": { + "smithy.api#documentation": "

The desired MIME type of the inference response from the model container.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Accept" + } + }, + "CustomAttributes": { + "target": "com.amazonaws.sagemakerruntime#CustomAttributesHeader", + "traits": { + "smithy.api#documentation": "

Provides additional information about a request for an inference submitted to a model\n hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded\n verbatim. You could use this value, for example, to provide an ID that you can use to\n track a request or to provide other metadata that a service endpoint was programmed to\n process. The value must consist of no more than 1024 visible US-ASCII characters as\n specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol\n (HTTP/1.1).

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Custom-Attributes" + } + }, + "TargetVariant": { + "target": "com.amazonaws.sagemakerruntime#TargetVariantHeader", + "traits": { + "smithy.api#documentation": "

Specify the production variant to send the inference request to when invoking an\n endpoint that is running two or more variants. Note that this parameter overrides the\n default behavior for the endpoint, which is to distribute the invocation traffic based\n on the variant weights.

\n

For information about how to use variant targeting to perform a/b testing, see Test models in\n production\n

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Target-Variant" + } + }, + "TargetContainerHostname": { + "target": "com.amazonaws.sagemakerruntime#TargetContainerHostnameHeader", + "traits": { + "smithy.api#documentation": "

If the endpoint hosts multiple containers and is configured to use direct invocation,\n this parameter specifies the host name of the container to invoke.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Target-Container-Hostname" + } + }, + "InferenceId": { + "target": "com.amazonaws.sagemakerruntime#InferenceId", + "traits": { + "smithy.api#documentation": "

An identifier that you assign to your request.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Inference-Id" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemakerruntime#InvokeEndpointWithResponseStreamOutput": { + "type": "structure", + "members": { + "Body": { + "target": "com.amazonaws.sagemakerruntime#ResponseStream", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "ContentType": { + "target": "com.amazonaws.sagemakerruntime#Header", + "traits": { + "smithy.api#documentation": "

The MIME type of the inference returned from the model container.

", + "smithy.api#httpHeader": "X-Amzn-SageMaker-Content-Type" + } + }, + "InvokedProductionVariant": { + "target": "com.amazonaws.sagemakerruntime#Header", + "traits": { + "smithy.api#documentation": "

Identifies the production variant that was invoked.

", + "smithy.api#httpHeader": "x-Amzn-Invoked-Production-Variant" + } + }, + "CustomAttributes": { + "target": "com.amazonaws.sagemakerruntime#CustomAttributesHeader", + "traits": { + "smithy.api#documentation": "

Provides additional information in the response about the inference returned by a\n model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is\n forwarded verbatim. You could use this value, for example, to return an ID received in\n the CustomAttributes header of a request or other metadata that a service\n endpoint was programmed to produce. The value must consist of no more than 1024 visible\n US-ASCII characters as specified in Section 3.3.6. Field Value\n Components of the Hypertext Transfer Protocol (HTTP/1.1). If the customer\n wants the custom attribute returned, the model must set the custom attribute to be\n included on the way back.

\n

The code in your model is responsible for setting or updating any custom attributes in\n the response. If your code does not set this value in the response, an empty value is\n returned. For example, if a custom attribute represents the trace ID, your model can\n prepend the custom attribute with Trace ID: in your post-processing\n function.

\n

This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker\n Python SDK.

", "smithy.api#httpHeader": "X-Amzn-SageMaker-Custom-Attributes" } } @@ -1514,6 +1672,45 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.sagemakerruntime#ModelStreamError": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.sagemakerruntime#Message" + }, + "ErrorCode": { + "target": "com.amazonaws.sagemakerruntime#ErrorCode", + "traits": { + "smithy.api#documentation": "

This error can have the following error codes:

ModelInvocationTimeExceeded: The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker.

StreamBroken: The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed.

\n
\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

An error occurred while streaming the response body. This error can have the following error codes:

ModelInvocationTimeExceeded: The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker.

StreamBroken: The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed.
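How this error surfaces in Swift depends on the generated code; a plausible, hedged sketch is to catch it around stream consumption and branch on the error code. The properties accessor and the assumption that stream errors are thrown during iteration are illustrative, while the two error-code strings come from the list above.

```swift
// Sketch: react to a ModelStreamError raised while reading the response stream.
do {
    try await streamInference()    // the streaming sketch shown earlier
} catch let error as ModelStreamError {
    // The `properties` accessor is an assumption about the generated error type.
    switch error.properties.errorCode ?? "" {
    case "ModelInvocationTimeExceeded":
        print("The model did not finish responding in time; consider retrying or raising the timeout.")
    case "StreamBroken":
        print("The TCP connection to the model was reset or closed; retry the request.")
    default:
        print("Stream error: \(error.properties.message ?? "no message")")
    }
}
```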

\n
\n
", + "smithy.api#error": "client" + } + }, + "com.amazonaws.sagemakerruntime#PartBlob": { + "type": "blob", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.sagemakerruntime#PayloadPart": { + "type": "structure", + "members": { + "Bytes": { + "target": "com.amazonaws.sagemakerruntime#PartBlob", + "traits": { + "smithy.api#documentation": "

A blob that contains part of the response for your streaming inference request.

", + "smithy.api#eventPayload": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A wrapper for pieces of the payload that's returned in response to a streaming\n inference request. A streaming inference response consists of one or more payload parts.\n

" + } + }, "com.amazonaws.sagemakerruntime#RequestTTLSecondsHeader": { "type": "integer", "traits": { @@ -1523,6 +1720,33 @@ } } }, + "com.amazonaws.sagemakerruntime#ResponseStream": { + "type": "union", + "members": { + "PayloadPart": { + "target": "com.amazonaws.sagemakerruntime#PayloadPart", + "traits": { + "smithy.api#documentation": "

A wrapper for pieces of the payload that's returned in response to a streaming\n inference request. A streaming inference response consists of one or more payload parts.\n

" + } + }, + "ModelStreamError": { + "target": "com.amazonaws.sagemakerruntime#ModelStreamError", + "traits": { + "smithy.api#documentation": "

An error occurred while streaming the response body. This error can have the following error codes:

ModelInvocationTimeExceeded: The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker.

StreamBroken: The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed.

\n
\n
" + } + }, + "InternalStreamFailure": { + "target": "com.amazonaws.sagemakerruntime#InternalStreamFailure", + "traits": { + "smithy.api#documentation": "

The stream processing failed because of an unknown error, exception or failure. Try your request again.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A stream of payload parts. Each part contains a portion of the response for a\n streaming inference request.
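To tie the union's members together, a sketch that folds the event stream into a single buffer. The nested type name, the lower-cased .payloadpart case spelling, and the bytes member are assumptions about the generated Swift types, not something spelled out in this file.

```swift
import AWSSageMakerRuntime
import Foundation

// Sketch: fold the stream of ResponseStream events into one Data buffer.
// Type and case names are assumed to follow the usual generated spellings.
func collect(
    _ events: AsyncThrowingStream<SageMakerRuntimeClientTypes.ResponseStream, Error>
) async throws -> Data {
    var buffer = Data()
    for try await event in events {
        // ModelStreamError / InternalStreamFailure members are expected to
        // surface as thrown errors rather than as ordinary events.
        if case .payloadpart(let part) = event, let bytes = part.bytes {
            buffer.append(bytes)   // one portion of the streamed response
        }
    }
    return buffer
}
```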

", + "smithy.api#streaming": {} + } + }, "com.amazonaws.sagemakerruntime#ServiceUnavailable": { "type": "structure", "members": { diff --git a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/AWSHttpBindingProtocolGenerator.kt b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/AWSHttpBindingProtocolGenerator.kt index 12795bbaeb5..ffc54e8d904 100644 --- a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/AWSHttpBindingProtocolGenerator.kt +++ b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/AWSHttpBindingProtocolGenerator.kt @@ -96,6 +96,7 @@ abstract class AWSHttpBindingProtocolGenerator : HttpBindingProtocolGenerator() members: List, writer: SwiftWriter, defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { val encodeGenerator = StructEncodeGenerator(ctx, members, writer, defaultTimestampFormat) encodeGenerator.render() @@ -106,9 +107,10 @@ abstract class AWSHttpBindingProtocolGenerator : HttpBindingProtocolGenerator() shapeMetaData: Map, members: List, writer: SwiftWriter, - defaultTimestampFormat: TimestampFormatTrait.Format + defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { - val decoder = StructDecodeGenerator(ctx, members, writer, defaultTimestampFormat) + val decoder = StructDecodeGenerator(ctx, members, writer, defaultTimestampFormat, path) decoder.render() } diff --git a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/awsquery/AwsQueryProtocolGenerator.kt b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/awsquery/AwsQueryProtocolGenerator.kt index 3455fba1a5e..0fea55e1484 100644 --- a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/awsquery/AwsQueryProtocolGenerator.kt +++ b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/awsquery/AwsQueryProtocolGenerator.kt @@ -61,6 +61,7 @@ open class AwsQueryProtocolGenerator : AWSHttpBindingProtocolGenerator() { members: List, writer: SwiftWriter, defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { val customizations = AwsQueryFormURLEncodeCustomizations() val encoder = StructEncodeFormURLGenerator(ctx, customizations, shapeContainingMembers, shapeMetadata, members, writer, defaultTimestampFormat) @@ -72,7 +73,8 @@ open class AwsQueryProtocolGenerator : AWSHttpBindingProtocolGenerator() { shapeMetadata: Map, members: List, writer: SwiftWriter, - defaultTimestampFormat: TimestampFormatTrait.Format + defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { val decoder = AwsQueryStructDecodeXMLGenerator(ctx, members, shapeMetadata, writer, defaultTimestampFormat) decoder.render() diff --git a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/ec2query/Ec2QueryProtocolGenerator.kt b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/ec2query/Ec2QueryProtocolGenerator.kt index 57cba41dbbd..9a257c3af03 100644 --- a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/ec2query/Ec2QueryProtocolGenerator.kt +++ b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/ec2query/Ec2QueryProtocolGenerator.kt @@ -57,6 +57,7 @@ class Ec2QueryProtocolGenerator : AWSHttpBindingProtocolGenerator() { members: 
List, writer: SwiftWriter, defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { val customizations = Ec2QueryFormURLEncodeCustomizations() val encoder = StructEncodeFormURLGenerator(ctx, customizations, shapeContainingMembers, shapeMetadata, members, writer, defaultTimestampFormat) @@ -68,7 +69,8 @@ class Ec2QueryProtocolGenerator : AWSHttpBindingProtocolGenerator() { shapeMetadata: Map, members: List, writer: SwiftWriter, - defaultTimestampFormat: TimestampFormatTrait.Format + defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { val decoder = StructDecodeXMLGenerator(ctx, members, mapOf(), writer, defaultTimestampFormat) decoder.render() diff --git a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/restxml/RestXmlProtocolGenerator.kt b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/restxml/RestXmlProtocolGenerator.kt index d252b62889a..c50dc50a73f 100644 --- a/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/restxml/RestXmlProtocolGenerator.kt +++ b/codegen/smithy-aws-swift-codegen/src/main/kotlin/software/amazon/smithy/aws/swift/codegen/restxml/RestXmlProtocolGenerator.kt @@ -43,6 +43,7 @@ class RestXmlProtocolGenerator : AWSHttpBindingProtocolGenerator() { members: List, writer: SwiftWriter, defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { val encoder = StructEncodeXMLGenerator(ctx, shapeContainingMembers, members, writer, defaultTimestampFormat) encoder.render() @@ -57,7 +58,8 @@ class RestXmlProtocolGenerator : AWSHttpBindingProtocolGenerator() { shapeMetadata: Map, members: List, writer: SwiftWriter, - defaultTimestampFormat: TimestampFormatTrait.Format + defaultTimestampFormat: TimestampFormatTrait.Format, + path: String ) { val decoder = RestXmlStructDecodeXMLGenerator(ctx, members, shapeMetadata, writer, defaultTimestampFormat) decoder.render()