From c86239862876af4f1545a8480346eb1c072310a2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 24 Jul 2024 20:14:11 +0000 Subject: [PATCH] refactor: extract model out to a named type and rename partialjson (#612) --- .stats.yml | 2 +- api.md | 3 +- src/anthropic/resources/completions.py | 49 +++---- src/anthropic/resources/messages.py | 135 +++--------------- src/anthropic/types/__init__.py | 4 +- src/anthropic/types/completion.py | 9 +- .../types/completion_create_params.py | 10 +- src/anthropic/types/input_json_delta.py | 4 +- src/anthropic/types/message.py | 9 +- src/anthropic/types/message_create_params.py | 22 +-- src/anthropic/types/model.py | 19 +++ src/anthropic/types/model_param.py | 21 +++ .../types/raw_content_block_delta_event.py | 4 +- tests/api_resources/test_completions.py | 32 ++--- 14 files changed, 134 insertions(+), 189 deletions(-) create mode 100644 src/anthropic/types/model.py create mode 100644 src/anthropic/types/model_param.py diff --git a/.stats.yml b/.stats.yml index 4f03232f..ff2805b6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 2 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-e38cd52aed438cef6e0a25eeeab8ff6000583c3cf152a10f0c3610ceb3da7b4e.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-5903ec2fd4efd7f261908bc4ec8ecd6b19cb9efa79637ad273583f1b763f80fd.yml diff --git a/api.md b/api.md index 5ac54252..63896541 100644 --- a/api.md +++ b/api.md @@ -9,7 +9,7 @@ from anthropic.types import ( ContentBlockStartEvent, ContentBlockStopEvent, ImageBlockParam, - InputJsonDelta, + InputJSONDelta, Message, MessageDeltaEvent, MessageDeltaUsage, @@ -17,6 +17,7 @@ from anthropic.types import ( MessageStartEvent, MessageStopEvent, MessageStreamEvent, + Model, RawContentBlockDeltaEvent, RawContentBlockStartEvent, RawContentBlockStopEvent, diff --git a/src/anthropic/resources/completions.py b/src/anthropic/resources/completions.py index f0c9afaf..d34a42f2 100644 --- a/src/anthropic/resources/completions.py +++ b/src/anthropic/resources/completions.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, overload +from typing import List, overload from typing_extensions import Literal import httpx @@ -21,6 +21,7 @@ from .._streaming import Stream, AsyncStream from .._base_client import make_request_options from ..types.completion import Completion +from ..types.model_param import ModelParam __all__ = ["Completions", "AsyncCompletions"] @@ -39,7 +40,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -71,9 +72,8 @@ def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. 
@@ -144,7 +144,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: Literal[True], metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -176,9 +176,8 @@ def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. @@ -249,7 +248,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: bool, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -281,9 +280,8 @@ def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. @@ -354,7 +352,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -408,7 +406,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -440,9 +438,8 @@ async def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. @@ -513,7 +510,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: Literal[True], metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -545,9 +542,8 @@ async def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. 
@@ -618,7 +614,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: bool, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -650,9 +646,8 @@ async def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. @@ -723,7 +718,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, diff --git a/src/anthropic/resources/messages.py b/src/anthropic/resources/messages.py index c7d3639f..dd7f7ef5 100644 --- a/src/anthropic/resources/messages.py +++ b/src/anthropic/resources/messages.py @@ -24,6 +24,7 @@ from ..lib.streaming import MessageStreamManager, AsyncMessageStreamManager from ..types.message import Message from ..types.tool_param import ToolParam +from ..types.model_param import ModelParam from ..types.message_param import MessageParam from ..types.text_block_param import TextBlockParam from ..types.raw_message_stream_event import RawMessageStreamEvent @@ -46,18 +47,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, @@ -178,9 +168,8 @@ def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. metadata: An object describing metadata about the request. @@ -321,18 +310,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: Literal[True], metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -453,9 +431,8 @@ def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. 
stream: Whether to incrementally stream the response using server-sent events. @@ -596,18 +573,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: bool, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -728,9 +694,8 @@ def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. stream: Whether to incrementally stream the response using server-sent events. @@ -871,18 +836,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, @@ -1008,18 +962,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, @@ -1140,9 +1083,8 @@ async def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. metadata: An object describing metadata about the request. @@ -1283,18 +1225,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: Literal[True], metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -1415,9 +1346,8 @@ async def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. stream: Whether to incrementally stream the response using server-sent events. 
@@ -1558,18 +1488,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: bool, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -1690,9 +1609,8 @@ async def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. stream: Whether to incrementally stream the response using server-sent events. @@ -1833,18 +1751,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, diff --git a/src/anthropic/types/__init__.py b/src/anthropic/types/__init__.py index f6165eca..9839b5a2 100644 --- a/src/anthropic/types/__init__.py +++ b/src/anthropic/types/__init__.py @@ -2,16 +2,18 @@ from __future__ import annotations +from .model import Model as Model from .usage import Usage as Usage from .message import Message as Message from .completion import Completion as Completion from .text_block import TextBlock as TextBlock from .text_delta import TextDelta as TextDelta from .tool_param import ToolParam as ToolParam +from .model_param import ModelParam as ModelParam from .content_block import ContentBlock as ContentBlock from .message_param import MessageParam as MessageParam from .tool_use_block import ToolUseBlock as ToolUseBlock -from .input_json_delta import InputJsonDelta as InputJsonDelta +from .input_json_delta import InputJSONDelta as InputJSONDelta from .text_block_param import TextBlockParam as TextBlockParam from .image_block_param import ImageBlockParam as ImageBlockParam from .message_stop_event import MessageStopEvent as MessageStopEvent diff --git a/src/anthropic/types/completion.py b/src/anthropic/types/completion.py index d55a0fe6..e6293210 100644 --- a/src/anthropic/types/completion.py +++ b/src/anthropic/types/completion.py @@ -3,6 +3,7 @@ from typing import Optional from typing_extensions import Literal +from .model import Model from .._models import BaseModel __all__ = ["Completion"] @@ -18,8 +19,12 @@ class Completion(BaseModel): completion: str """The resulting completion up to and excluding the stop sequences.""" - model: str - """The model that handled the request.""" + model: Model + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional + details and options. + """ stop_reason: Optional[str] = None """The reason that we stopped. 
diff --git a/src/anthropic/types/completion_create_params.py b/src/anthropic/types/completion_create_params.py index be824ebf..5c9ed7a6 100644 --- a/src/anthropic/types/completion_create_params.py +++ b/src/anthropic/types/completion_create_params.py @@ -5,6 +5,8 @@ from typing import List, Union, Optional from typing_extensions import Literal, Required, TypedDict +from .model_param import ModelParam + __all__ = [ "CompletionRequestStreamingMetadata", "CompletionRequestNonStreamingMetadata", @@ -25,10 +27,10 @@ class CompletionCreateParamsBase(TypedDict, total=False): only specifies the absolute maximum number of tokens to generate. """ - model: Required[Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]]] - """The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: Required[ModelParam] + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. """ diff --git a/src/anthropic/types/input_json_delta.py b/src/anthropic/types/input_json_delta.py index 6391d4bf..14a33feb 100644 --- a/src/anthropic/types/input_json_delta.py +++ b/src/anthropic/types/input_json_delta.py @@ -4,10 +4,10 @@ from .._models import BaseModel -__all__ = ["InputJsonDelta"] +__all__ = ["InputJSONDelta"] -class InputJsonDelta(BaseModel): +class InputJSONDelta(BaseModel): partial_json: str type: Literal["input_json_delta"] diff --git a/src/anthropic/types/message.py b/src/anthropic/types/message.py index 9ef967ea..3ddbf20b 100644 --- a/src/anthropic/types/message.py +++ b/src/anthropic/types/message.py @@ -3,6 +3,7 @@ from typing import List, Optional from typing_extensions import Literal +from .model import Model from .usage import Usage from .._models import BaseModel from .content_block import ContentBlock, ContentBlock as ContentBlock @@ -52,8 +53,12 @@ class Message(BaseModel): ``` """ - model: str - """The model that handled the request.""" + model: Model + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional + details and options. + """ role: Literal["assistant"] """Conversational role of the generated message. diff --git a/src/anthropic/types/message_create_params.py b/src/anthropic/types/message_create_params.py index a76bc0f7..a50e5946 100644 --- a/src/anthropic/types/message_create_params.py +++ b/src/anthropic/types/message_create_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypedDict from .tool_param import ToolParam +from .model_param import ModelParam from .message_param import MessageParam from .text_block_param import TextBlockParam @@ -120,23 +121,10 @@ class MessageCreateParamsBase(TypedDict, total=False): messages in the Messages API. """ - model: Required[ - Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ] - ] - """The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: Required[ModelParam] + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. 
""" diff --git a/src/anthropic/types/model.py b/src/anthropic/types/model.py new file mode 100644 index 00000000..0ada4aeb --- /dev/null +++ b/src/anthropic/types/model.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal + +__all__ = ["Model"] + +Model = Union[ + str, + Literal[ + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-2.1", + "claude-2.0", + "claude-instant-1.2", + ], +] diff --git a/src/anthropic/types/model_param.py b/src/anthropic/types/model_param.py new file mode 100644 index 00000000..d932500f --- /dev/null +++ b/src/anthropic/types/model_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal + +__all__ = ["ModelParam"] + +ModelParam = Union[ + str, + Literal[ + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-2.1", + "claude-2.0", + "claude-instant-1.2", + ], +] diff --git a/src/anthropic/types/raw_content_block_delta_event.py b/src/anthropic/types/raw_content_block_delta_event.py index e1370fdb..845d502a 100644 --- a/src/anthropic/types/raw_content_block_delta_event.py +++ b/src/anthropic/types/raw_content_block_delta_event.py @@ -6,11 +6,11 @@ from .._utils import PropertyInfo from .._models import BaseModel from .text_delta import TextDelta -from .input_json_delta import InputJsonDelta +from .input_json_delta import InputJSONDelta __all__ = ["RawContentBlockDeltaEvent", "Delta"] -Delta = Annotated[Union[TextDelta, InputJsonDelta], PropertyInfo(discriminator="type")] +Delta = Annotated[Union[TextDelta, InputJSONDelta], PropertyInfo(discriminator="type")] class RawContentBlockDeltaEvent(BaseModel): diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index f46b7bb7..cf87d9fe 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -21,7 +21,7 @@ class TestCompletions: def test_method_create_overload_1(self, client: Anthropic) -> None: completion = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) assert_matches_type(Completion, completion, path=["response"]) @@ -30,7 +30,7 @@ def test_method_create_overload_1(self, client: Anthropic) -> None: def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> None: completion = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, stop_sequences=["string", "string", "string"], @@ -45,7 +45,7 @@ def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> No def test_raw_response_create_overload_1(self, client: Anthropic) -> None: response = client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) @@ -58,7 +58,7 @@ def test_raw_response_create_overload_1(self, client: Anthropic) -> None: def test_streaming_response_create_overload_1(self, client: Anthropic) -> None: with client.completions.with_streaming_response.create( 
max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) as response: assert not response.is_closed @@ -73,7 +73,7 @@ def test_streaming_response_create_overload_1(self, client: Anthropic) -> None: def test_method_create_overload_2(self, client: Anthropic) -> None: completion_stream = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -83,7 +83,7 @@ def test_method_create_overload_2(self, client: Anthropic) -> None: def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> None: completion_stream = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, @@ -98,7 +98,7 @@ def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> No def test_raw_response_create_overload_2(self, client: Anthropic) -> None: response = client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -111,7 +111,7 @@ def test_raw_response_create_overload_2(self, client: Anthropic) -> None: def test_streaming_response_create_overload_2(self, client: Anthropic) -> None: with client.completions.with_streaming_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) as response: @@ -131,7 +131,7 @@ class TestAsyncCompletions: async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> None: completion = await async_client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) assert_matches_type(Completion, completion, path=["response"]) @@ -140,7 +140,7 @@ async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> N async def test_method_create_with_all_params_overload_1(self, async_client: AsyncAnthropic) -> None: completion = await async_client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, stop_sequences=["string", "string", "string"], @@ -155,7 +155,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic) -> None: response = await async_client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) @@ -168,7 +168,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic async def test_streaming_response_create_overload_1(self, async_client: AsyncAnthropic) -> None: async with async_client.completions.with_streaming_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) as response: assert not response.is_closed @@ -183,7 +183,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncAnt async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> None: completion_stream = await async_client.completions.create( 
max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -193,7 +193,7 @@ async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> N async def test_method_create_with_all_params_overload_2(self, async_client: AsyncAnthropic) -> None: completion_stream = await async_client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, @@ -208,7 +208,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic) -> None: response = await async_client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -221,7 +221,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic async def test_streaming_response_create_overload_2(self, async_client: AsyncAnthropic) -> None: async with async_client.completions.with_streaming_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) as response:
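
A minimal usage sketch (not part of the patch) of the types this change touches: the extracted `ModelParam` alias and the renamed `InputJSONDelta`. The type names, fields, and the `create(..., stream=True)` signature are taken from the diff above; the prompt text and the event loop are illustrative assumptions.

    # Sketch only -- assumes ANTHROPIC_API_KEY is set in the environment.
    from anthropic import Anthropic
    from anthropic.types import InputJSONDelta, ModelParam

    client = Anthropic()

    # ModelParam is Union[str, Literal[...]]: type checkers can suggest the known
    # model IDs, but any plain string is still accepted, so newly released models
    # work without waiting for an SDK update.
    model: ModelParam = "claude-3-5-sonnet-20240620"

    stream = client.messages.create(
        max_tokens=256,
        messages=[{"role": "user", "content": "Hello"}],  # illustrative prompt
        model=model,
        stream=True,
    )
    for event in stream:
        # InputJSONDelta (formerly InputJsonDelta) is one arm of the Delta union on
        # content_block_delta events; it only appears when the model is emitting
        # tool input, so this branch is shown purely for the isinstance pattern.
        if event.type == "content_block_delta" and isinstance(event.delta, InputJSONDelta):
            print(event.delta.partial_json)

    # On the response side, Message.model and Completion.model are now typed as
    # Model rather than a bare str, so the same alias appears in both directions.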