feat(api): add o1 models (#1061)
Stainless Bot committed Sep 12, 2024
1 parent 8958d97 commit 224cc04
Showing 9 changed files with 77 additions and 42 deletions.
2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-85a85e0c08de456441431c0ae4e9c078cc8f9748c29430b9a9058340db6389ee.yml
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-501122aa32adaa2abb3d4487880ab9cdf2141addce2e6c3d1bd9bb6b44c318a8.yml
36 changes: 19 additions & 17 deletions src/resources/beta/assistants.ts
@@ -151,11 +151,11 @@ export interface Assistant {
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
@@ -665,7 +665,8 @@ export namespace FileSearchTool {
max_num_results?: number;

/**
* The ranking options for the file search.
* The ranking options for the file search. If not specified, the file search tool
* will use the `auto` ranker and a score_threshold of 0.
*
* See the
* [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
@@ -676,24 +677,25 @@

export namespace FileSearch {
/**
* The ranking options for the file search.
* The ranking options for the file search. If not specified, the file search tool
* will use the `auto` ranker and a score_threshold of 0.
*
* See the
* [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
* for more information.
*/
export interface RankingOptions {
/**
* The ranker to use for the file search. If not specified will use the `auto`
* ranker.
* The score threshold for the file search. All values must be a floating point
* number between 0 and 1.
*/
ranker?: 'auto' | 'default_2024_08_21';
score_threshold: number;

/**
* The score threshold for the file search. All values must be a floating point
* number between 0 and 1.
* The ranker to use for the file search. If not specified will use the `auto`
* ranker.
*/
score_threshold?: number;
ranker?: 'auto' | 'default_2024_08_21';
}
}
}
@@ -1125,11 +1127,11 @@ export interface AssistantCreateParams {
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
@@ -1283,11 +1285,11 @@ export interface AssistantUpdateParams {
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
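Taken together, the assistants.ts changes make `score_threshold` the required field on `RankingOptions` (with `ranker` optional, defaulting to `auto`) and soften the `response_format` wording from "guarantees" to "ensures". A minimal usage sketch against the updated types; the model, assistant name, and JSON schema below are illustrative assumptions, not part of this commit:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // Sketch: an assistant using file_search with explicit ranking options.
  // Per the updated docs, score_threshold is required on RankingOptions,
  // while ranker falls back to 'auto' when omitted.
  const assistant = await client.beta.assistants.create({
    model: 'gpt-4o-2024-08-06',
    name: 'Docs assistant', // placeholder name
    tools: [
      {
        type: 'file_search',
        file_search: {
          max_num_results: 10,
          ranking_options: { score_threshold: 0.5, ranker: 'auto' },
        },
      },
    ],
    // Structured Outputs: replies must match this (hypothetical) JSON schema.
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'answer',
        strict: true,
        schema: {
          type: 'object',
          properties: { answer: { type: 'string' } },
          required: ['answer'],
          additionalProperties: false,
        },
      },
    },
  });

  console.log(assistant.id);
}

main();
```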
12 changes: 6 additions & 6 deletions src/resources/beta/threads/runs/runs.ts
@@ -429,11 +429,11 @@ export interface Run {
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
@@ -709,11 +709,11 @@ export interface RunCreateParamsBase {
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
12 changes: 6 additions & 6 deletions src/resources/beta/threads/threads.ts
@@ -126,11 +126,11 @@ export class Threads extends APIResource {
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
@@ -522,11 +522,11 @@ export interface ThreadCreateAndRunParamsBase {
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
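The JSON-mode caveat repeated above is easy to trip over: setting `{ "type": "json_object" }` is not enough on its own, the instructions or a message must also ask for JSON. A hedged sketch using `beta.threads.createAndRun`; the assistant ID and prompt are placeholders:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // JSON mode: response_format alone is not sufficient; the instructions (or a
  // message) must explicitly request JSON, otherwise the model may emit
  // whitespace until it hits the token limit.
  const run = await client.beta.threads.createAndRun({
    assistant_id: 'asst_123', // placeholder assistant ID
    instructions: 'Reply only with a JSON object describing the result.',
    response_format: { type: 'json_object' },
    thread: {
      messages: [{ role: 'user', content: 'Summarize the Q3 report.' }],
    },
  });

  console.log(run.id, run.status);
}

main();
```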
7 changes: 6 additions & 1 deletion src/resources/chat/chat.ts
@@ -9,9 +9,14 @@ export class Chat extends APIResource {
}

export type ChatModel =
| 'o1-preview'
| 'o1-preview-2024-09-12'
| 'o1-mini'
| 'o1-mini-2024-09-12'
| 'gpt-4o'
| 'gpt-4o-2024-05-13'
| 'gpt-4o-2024-08-06'
| 'gpt-4o-2024-05-13'
| 'chatgpt-4o-latest'
| 'gpt-4o-mini'
| 'gpt-4o-mini-2024-07-18'
| 'gpt-4-turbo'
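With the `ChatModel` union extended, the new o1 identifiers type-check anywhere a model name is expected. A minimal sketch assuming `OPENAI_API_KEY` is set in the environment; the prompt is illustrative:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // 'o1-preview', 'o1-preview-2024-09-12', 'o1-mini' and 'o1-mini-2024-09-12'
  // are now valid ChatModel values, so this compiles without casts.
  const completion = await client.chat.completions.create({
    model: 'o1-mini',
    messages: [{ role: 'user', content: 'Outline a migration plan in three steps.' }],
  });

  console.log(completion.choices[0]?.message.content);
}

main();
```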
30 changes: 20 additions & 10 deletions src/resources/chat/completions.ts
@@ -788,14 +788,21 @@ export interface ChatCompletionCreateParamsBase {
*/
logprobs?: boolean | null;

/**
* An upper bound for the number of tokens that can be generated for a completion,
* including visible output tokens and
* [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
*/
max_completion_tokens?: number | null;

/**
* The maximum number of [tokens](/tokenizer) that can be generated in the chat
* completion.
* completion. This value can be used to control
* [costs](https://openai.com/api/pricing/) for text generated via API.
*
* The total length of input tokens and generated tokens is limited by the model's
* context length.
* [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
* for counting tokens.
* This value is now deprecated in favor of `max_completion_tokens`, and is not
* compatible with
* [o1 series models](https://platform.openai.com/docs/guides/reasoning).
*/
max_tokens?: number | null;

@@ -830,11 +837,11 @@ export interface ChatCompletionCreateParamsBase {
* all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* Outputs which ensures the model will match your supplied JSON schema. Learn more
* in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
@@ -863,8 +870,11 @@ export interface ChatCompletionCreateParamsBase {
* Specifies the latency tier to use for processing the request. This parameter is
* relevant for customers subscribed to the scale tier service:
*
* - If set to 'auto', the system will utilize scale tier credits until they are
* exhausted.
* - If set to 'auto', and the Project is Scale tier enabled, the system will
* utilize scale tier credits until they are exhausted.
* - If set to 'auto', and the Project is not Scale tier enabled, the request will
* be processed using the default service tier with a lower uptime SLA and no
*   latency guarantee.
* - If set to 'default', the request will be processed using the default service
*   tier with a lower uptime SLA and no latency guarantee.
* - When not set, the default behavior is 'auto'.
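This file also introduces `max_completion_tokens`, which bounds visible output plus hidden reasoning tokens and supersedes the now-deprecated `max_tokens` for o1-series models, alongside the clarified `service_tier` behavior. A sketch combining both parameters; the values are arbitrary:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const completion = await client.chat.completions.create({
    model: 'o1-preview',
    messages: [{ role: 'user', content: 'Prove that the sum of two even numbers is even.' }],
    // Caps visible output plus hidden reasoning tokens; use this instead of the
    // deprecated max_tokens when targeting o1-series models.
    max_completion_tokens: 2048,
    // 'auto' uses scale tier credits if the project has them, otherwise it falls
    // back to the default service tier.
    service_tier: 'auto',
  });

  console.log(completion.choices[0]?.message.content);
}

main();
```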
17 changes: 17 additions & 0 deletions src/resources/completions.ts
@@ -120,6 +120,23 @@ export interface CompletionUsage {
* Total number of tokens used in the request (prompt + completion).
*/
total_tokens: number;

/**
* Breakdown of tokens used in a completion.
*/
completion_tokens_details?: CompletionUsage.CompletionTokensDetails;
}

export namespace CompletionUsage {
/**
* Breakdown of tokens used in a completion.
*/
export interface CompletionTokensDetails {
/**
* Tokens generated by the model for reasoning.
*/
reasoning_tokens?: number;
}
}

export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
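`CompletionUsage` is the same usage type attached to chat completion responses, so the new breakdown can be read directly off a response. A sketch under that assumption; the model and prompt are placeholders:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const completion = await client.chat.completions.create({
    model: 'o1-mini',
    messages: [{ role: 'user', content: 'How many primes are below 50?' }],
  });

  // completion_tokens_details is optional, so guard before reading it.
  const reasoning = completion.usage?.completion_tokens_details?.reasoning_tokens ?? 0;
  console.log(`completion tokens: ${completion.usage?.completion_tokens}, reasoning tokens: ${reasoning}`);
}

main();
```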
2 changes: 1 addition & 1 deletion src/resources/fine-tuning/jobs/jobs.ts
@@ -340,7 +340,7 @@ export interface JobCreateParams {
seed?: number | null;

/**
* A string of up to 18 characters that will be added to your fine-tuned model
* A string of up to 64 characters that will be added to your fine-tuned model
* name.
*
* For example, a `suffix` of "custom-model-name" would produce a model name like
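Only the documented length limit for `suffix` changes here (18 to 64 characters); usage is unchanged. A small sketch with a placeholder training file ID and an assumed fine-tunable model:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const job = await client.fineTuning.jobs.create({
    model: 'gpt-4o-mini-2024-07-18',
    training_file: 'file-abc123', // placeholder file ID
    // Up to 64 characters; appears in the resulting fine-tuned model name.
    suffix: 'customer-support-v2',
  });

  console.log(job.id, job.status);
}

main();
```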
1 change: 1 addition & 0 deletions tests/api-resources/chat/completions.test.ts
@@ -32,6 +32,7 @@ describe('resource completions', () => {
functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }],
logit_bias: { foo: 0 },
logprobs: true,
max_completion_tokens: 0,
max_tokens: 0,
n: 1,
parallel_tool_calls: true,
