feat(client-bedrock-agent): Removing support for topK property in PromptModelInferenceConfiguration object, Making PromptTemplateConfiguration property as required, Limiting the maximum PromptVariant to 1
awstools committed Oct 17, 2024
1 parent f4c5267 commit d4d3af5
Showing 13 changed files with 6 additions and 32 deletions.
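Taken together, the three changes look like this from a caller's point of view. This is a minimal sketch rather than code from the diff: the fields outside the hunks shown below (`templateType`, the text template shape, `inputVariables`) and the model ID are assumptions based on the client's typings, not part of this commit.

```ts
import { BedrockAgentClient, CreatePromptCommand } from "@aws-sdk/client-bedrock-agent";

const client = new BedrockAgentClient({ region: "us-east-1" });

const command = new CreatePromptCommand({
  name: "example-prompt",
  variants: [
    // Only a single variant is accepted after this change (the limit drops from 3 to 1).
    {
      name: "variant-one",
      templateType: "TEXT",
      // templateConfiguration is now a required member of PromptVariant.
      templateConfiguration: {
        text: {
          text: "Summarize the following text: {{input}}",
          inputVariables: [{ name: "input" }],
        },
      },
      modelId: "anthropic.claude-3-haiku-20240307-v1:0", // placeholder model ID
      inferenceConfiguration: {
        text: {
          temperature: 0.2,
          topP: 0.9,
          maxTokens: 512,
          stopSequences: ["\n\nHuman:"],
          // topK is no longer part of PromptModelInferenceConfiguration.
        },
      },
    },
  ],
});

await client.send(command);
```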
@@ -91,7 +91,6 @@ export interface CreateFlowCommandOutput extends CreateFlowResponse, __MetadataB
* text: { // PromptModelInferenceConfiguration
* temperature: Number("float"),
* topP: Number("float"),
- * topK: Number("int"),
* maxTokens: Number("int"),
* stopSequences: [ // StopSequences
* "STRING_VALUE",
@@ -221,7 +220,6 @@ export interface CreateFlowCommandOutput extends CreateFlowResponse, __MetadataB
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -102,7 +102,6 @@ export interface CreateFlowVersionCommandOutput extends CreateFlowVersionRespons
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -64,7 +64,6 @@ export interface CreatePromptCommandOutput extends CreatePromptResponse, __Metad
* text: { // PromptModelInferenceConfiguration
* temperature: Number("float"),
* topP: Number("float"),
- * topK: Number("int"),
* maxTokens: Number("int"),
* stopSequences: [ // StopSequences
* "STRING_VALUE",
@@ -110,7 +109,6 @@ export interface CreatePromptCommandOutput extends CreatePromptResponse, __Metad
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -73,7 +73,6 @@ export interface CreatePromptVersionCommandOutput extends CreatePromptVersionRes
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -97,7 +97,6 @@ export interface GetFlowCommandOutput extends GetFlowResponse, __MetadataBearer
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -101,7 +101,6 @@ export interface GetFlowVersionCommandOutput extends GetFlowVersionResponse, __M
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -28,7 +28,7 @@ export interface GetIngestionJobCommandInput extends GetIngestionJobRequest {}
export interface GetIngestionJobCommandOutput extends GetIngestionJobResponse, __MetadataBearer {}

/**
- * <p>Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Lanaguage Models (LLMs) can use your data.</p>
+ * <p>Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
@@ -65,7 +65,6 @@ export interface GetPromptCommandOutput extends GetPromptResponse, __MetadataBea
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -91,7 +91,6 @@ export interface UpdateFlowCommandOutput extends UpdateFlowResponse, __MetadataB
* text: { // PromptModelInferenceConfiguration
* temperature: Number("float"),
* topP: Number("float"),
- * topK: Number("int"),
* maxTokens: Number("int"),
* stopSequences: [ // StopSequences
* "STRING_VALUE",
@@ -218,7 +217,6 @@ export interface UpdateFlowCommandOutput extends UpdateFlowResponse, __MetadataB
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
@@ -64,7 +64,6 @@ export interface UpdatePromptCommandOutput extends UpdatePromptResponse, __Metad
* text: { // PromptModelInferenceConfiguration
* temperature: Number("float"),
* topP: Number("float"),
- * topK: Number("int"),
* maxTokens: Number("int"),
* stopSequences: [ // StopSequences
* "STRING_VALUE",
@@ -107,7 +106,6 @@ export interface UpdatePromptCommandOutput extends UpdatePromptResponse, __Metad
* // text: { // PromptModelInferenceConfiguration
* // temperature: Number("float"),
* // topP: Number("float"),
- * // topK: Number("int"),
* // maxTokens: Number("int"),
* // stopSequences: [ // StopSequences
* // "STRING_VALUE",
8 changes: 1 addition & 7 deletions clients/client-bedrock-agent/src/models/models_0.ts
@@ -3972,12 +3972,6 @@ export interface PromptModelInferenceConfiguration {
*/
topP?: number;

- /**
- * <p>The number of most-likely candidates that the model considers for the next token during generation.</p>
- * @public
- */
- topK?: number;

/**
* <p>The maximum number of tokens to return in the response.</p>
* @public
@@ -7514,7 +7508,7 @@ export interface PromptVariant {
* <p>Contains configurations for the prompt template.</p>
* @public
*/
- templateConfiguration?: PromptTemplateConfiguration;
+ templateConfiguration: PromptTemplateConfiguration | undefined;

/**
* <p>The unique identifier of the model or <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html">inference profile</a> with which to run inference on the prompt.</p>
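The `?` → `| undefined` switch above is the SDK's convention for required members: the property can no longer be left off an object literal, so existing code that built a `PromptVariant` without a template stops compiling. A small illustrative sketch (the template contents are made up, not from this commit):

```ts
import type { PromptVariant } from "@aws-sdk/client-bedrock-agent";

// templateConfiguration must now be spelled out on every variant; omitting the
// property from the literal is a compile-time error after this change.
const variant: PromptVariant = {
  name: "variant-one",
  templateType: "TEXT",
  templateConfiguration: {
    text: {
      text: "Answer briefly: {{question}}",
      inputVariables: [{ name: "question" }],
    },
  },
};
```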
2 changes: 0 additions & 2 deletions clients/client-bedrock-agent/src/protocols/Aws_restJson1.ts
@@ -3533,7 +3533,6 @@ const se_PromptModelInferenceConfiguration = (
maxTokens: [],
stopSequences: _json,
temperature: __serializeFloat,
- topK: [],
topP: __serializeFloat,
});
};
@@ -4484,7 +4483,6 @@ const de_PromptModelInferenceConfiguration = (
maxTokens: __expectInt32,
stopSequences: _json,
temperature: __limitedParseFloat32,
- topK: __expectInt32,
topP: __limitedParseFloat32,
}) as any;
};
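For readers unfamiliar with the generated protocol code: these `take(...)` maps list every member that is written to or read from the wire, with the value naming the conversion to apply (`[]` and `_json` are pass-through style entries, `__serializeFloat` and `__expectInt32` handle the numeric types). Removing the `topK` entries therefore drops the field from both the serialized request and the parsed response. The sketch below is a simplified, hypothetical stand-in for that pattern, not the actual `@smithy/smithy-client` implementation:

```ts
// Hypothetical, simplified version of the take() idea: copy only the listed members,
// applying a conversion where one is given and skipping undefined values.
type Conversion = (value: unknown) => unknown;

function pick(
  input: Record<string, unknown>,
  spec: Record<string, Conversion | []>
): Record<string, unknown> {
  const out: Record<string, unknown> = {};
  for (const [key, conversion] of Object.entries(spec)) {
    const value = input[key];
    if (value === undefined) continue;
    out[key] = Array.isArray(conversion) ? value : conversion(value);
  }
  return out;
}

// With topK absent from the spec, it never reaches the request body:
const body = pick(
  { temperature: 0.2, topP: 0.9, topK: 50, maxTokens: 512 },
  { temperature: [], topP: [], maxTokens: [] }
);
// body === { temperature: 0.2, topP: 0.9, maxTokens: 512 }
```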
13 changes: 4 additions & 9 deletions codegen/sdk-codegen/aws-models/bedrock-agent.json
@@ -7088,7 +7088,7 @@
}
],
"traits": {
"smithy.api#documentation": "<p>Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Lanaguage Models (LLMs) can use your data.</p>",
"smithy.api#documentation": "<p>Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data.</p>",
"smithy.api#http": {
"code": 200,
"method": "GET",
@@ -10520,12 +10520,6 @@
"smithy.api#documentation": "<p>The percentage of most-likely candidates that the model considers for the next token.</p>"
}
},
"topK": {
"target": "com.amazonaws.bedrockagent#TopK",
"traits": {
"smithy.api#documentation": "<p>The number of most-likely candidates that the model considers for the next token during generation.</p>"
}
},
"maxTokens": {
"target": "com.amazonaws.bedrockagent#MaximumLength",
"traits": {
@@ -10762,7 +10756,8 @@
"templateConfiguration": {
"target": "com.amazonaws.bedrockagent#PromptTemplateConfiguration",
"traits": {
"smithy.api#documentation": "<p>Contains configurations for the prompt template.</p>"
"smithy.api#documentation": "<p>Contains configurations for the prompt template.</p>",
"smithy.api#required": {}
}
},
"modelId": {
@@ -10796,7 +10791,7 @@
},
"traits": {
"smithy.api#length": {
"max": 3
"max": 1
},
"smithy.api#sensitive": {}
}
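Finally, the length-trait change (presumably on the `PromptVariantList` shape, given the commit title) means a prompt can now carry at most one variant. Code that used to send several variants in one request has to split them up; the sketch below is one hypothetical way to adapt, not guidance from this commit:

```ts
import {
  BedrockAgentClient,
  CreatePromptCommand,
  type PromptVariant,
} from "@aws-sdk/client-bedrock-agent";

const client = new BedrockAgentClient({});

// Hypothetical adaptation: create one prompt per variant instead of one prompt
// with up to three variants, since the list maximum dropped from 3 to 1.
async function createPromptPerVariant(baseName: string, variants: PromptVariant[]) {
  for (const variant of variants) {
    await client.send(
      new CreatePromptCommand({
        name: `${baseName}-${variant.name ?? "variant"}`,
        variants: [variant], // exactly one variant per request
      })
    );
  }
}
```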