diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 61d831bd..f04d0896 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.31.2" + ".": "0.32.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 3ad8fd53..ff2805b6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 2 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-e2a51f04a202c13736b6fa2061a89a0c443f99ab166d965d702baf371eb1ca8f.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-5903ec2fd4efd7f261908bc4ec8ecd6b19cb9efa79637ad273583f1b763f80fd.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index eb6ddbf4..516cd35c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## 0.32.0 (2024-07-29) + +Full Changelog: [v0.31.2...v0.32.0](https://github.com/anthropics/anthropic-sdk-python/compare/v0.31.2...v0.32.0) + +### Features + +* add back compat alias for InputJsonDelta ([25a5b6c](https://github.com/anthropics/anthropic-sdk-python/commit/25a5b6c81ffb5996ef697aab22a22d8be5751bc1)) + + +### Bug Fixes + +* change signatures for the stream function ([c9eb11b](https://github.com/anthropics/anthropic-sdk-python/commit/c9eb11b1f9656202ee88e9869e59160bc37f5434)) +* **client:** correctly apply client level timeout for messages ([#615](https://github.com/anthropics/anthropic-sdk-python/issues/615)) ([5f8d88f](https://github.com/anthropics/anthropic-sdk-python/commit/5f8d88f6fcc2ba05cd9fc6f8ae7aa8c61dc6b0d0)) + + +### Chores + +* **docs:** document how to do per-request http client customization ([#603](https://github.com/anthropics/anthropic-sdk-python/issues/603)) ([5161f62](https://github.com/anthropics/anthropic-sdk-python/commit/5161f626a0bec757b96217dc0f81e8908546f29a)) +* **internal:** add type construction helper ([#613](https://github.com/anthropics/anthropic-sdk-python/issues/613)) ([5e36940](https://github.com/anthropics/anthropic-sdk-python/commit/5e36940a42e401c3f0c1e42aa248d431fdf7192c)) +* sync spec ([#605](https://github.com/anthropics/anthropic-sdk-python/issues/605)) ([6b7707f](https://github.com/anthropics/anthropic-sdk-python/commit/6b7707f62788fca2e166209e82935a2a2fa8204a)) +* **tests:** update prism version ([#607](https://github.com/anthropics/anthropic-sdk-python/issues/607)) ([1797dc6](https://github.com/anthropics/anthropic-sdk-python/commit/1797dc6139ffaca6436ed897972471e67ba1b828)) + + +### Refactors + +* extract model out to a named type and rename partialjson ([#612](https://github.com/anthropics/anthropic-sdk-python/issues/612)) ([c53efc7](https://github.com/anthropics/anthropic-sdk-python/commit/c53efc786fa95831a398f37740a81b42f7b64c94)) + ## 0.31.2 (2024-07-17) Full Changelog: [v0.31.1...v0.31.2](https://github.com/anthropics/anthropic-sdk-python/compare/v0.31.1...v0.31.2) diff --git a/README.md b/README.md index fcacca0f..0fd0f870 100644 --- a/README.md +++ b/README.md @@ -508,6 +508,12 @@ client = Anthropic( ) ``` +You can also customize the client on a per-request basis by using `with_options()`: + +```python +client.with_options(http_client=DefaultHttpxClient(...)) +``` + ### Managing HTTP resources By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. 
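The `with_options()` snippet added to the README above is deliberately abbreviated. As a fuller illustration, here is a minimal sketch (the custom transport is purely a placeholder) showing that the override applies only to the copied client returned by `with_options()`:

```python
import httpx

from anthropic import Anthropic, DefaultHttpxClient

client = Anthropic()

# with_options() returns a copy of the client; only requests made through the
# copy use the custom http_client, and the original client is left untouched.
message = client.with_options(
    http_client=DefaultHttpxClient(
        transport=httpx.HTTPTransport(local_address="0.0.0.0"),
    ),
).messages.create(
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, Claude"}],
    model="claude-3-5-sonnet-20240620",
)
```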
diff --git a/api.md b/api.md index 5ac54252..63896541 100644 --- a/api.md +++ b/api.md @@ -9,7 +9,7 @@ from anthropic.types import ( ContentBlockStartEvent, ContentBlockStopEvent, ImageBlockParam, - InputJsonDelta, + InputJSONDelta, Message, MessageDeltaEvent, MessageDeltaUsage, @@ -17,6 +17,7 @@ from anthropic.types import ( MessageStartEvent, MessageStopEvent, MessageStreamEvent, + Model, RawContentBlockDeltaEvent, RawContentBlockStartEvent, RawContentBlockStopEvent, diff --git a/pyproject.toml b/pyproject.toml index 10921bf7..ac92a41f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "anthropic" -version = "0.31.2" +version = "0.32.0" description = "The official Python library for the anthropic API" dynamic = ["readme"] license = "MIT" diff --git a/scripts/mock b/scripts/mock index fe89a1d0..f5861576 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" fi diff --git a/src/anthropic/_models.py b/src/anthropic/_models.py index eb7ce3bd..5148d5a7 100644 --- a/src/anthropic/_models.py +++ b/src/anthropic/_models.py @@ -406,6 +406,15 @@ def build( return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs)) +def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: + """Loose coercion to the expected type with construction of nested values. + + Note: the returned value from this function is not guaranteed to match the + given type. + """ + return cast(_T, construct_type(value=value, type_=type_)) + + def construct_type(*, value: object, type_: object) -> object: """Loose coercion to the expected type with construction of nested values. diff --git a/src/anthropic/_utils/_reflection.py b/src/anthropic/_utils/_reflection.py index 9a53c7bd..89aa712a 100644 --- a/src/anthropic/_utils/_reflection.py +++ b/src/anthropic/_utils/_reflection.py @@ -34,7 +34,7 @@ def assert_signatures_in_sync( if custom_param.annotation != source_param.annotation: errors.append( - f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(source_param.annotation)}" + f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}" ) continue diff --git a/src/anthropic/_version.py b/src/anthropic/_version.py index 040dba2e..722453a0 100644 --- a/src/anthropic/_version.py +++ b/src/anthropic/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "anthropic" -__version__ = "0.31.2" # x-release-please-version +__version__ = "0.32.0" # x-release-please-version diff --git a/src/anthropic/resources/completions.py b/src/anthropic/resources/completions.py index f0c9afaf..7369f485 100644 --- a/src/anthropic/resources/completions.py +++ b/src/anthropic/resources/completions.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, overload +from typing import List, overload from typing_extensions import Literal import httpx @@ -11,6 +11,7 @@ from ..types import completion_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import ( + is_given, required_args, maybe_transform, async_maybe_transform, @@ -18,9 +19,11 @@ from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from .._constants import DEFAULT_TIMEOUT from .._streaming import Stream, AsyncStream from .._base_client import make_request_options from ..types.completion import Completion +from ..types.model_param import ModelParam __all__ = ["Completions", "AsyncCompletions"] @@ -39,7 +42,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -52,7 +55,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion: """[Legacy] Create a Text Completion. @@ -71,9 +74,8 @@ def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. @@ -144,7 +146,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: Literal[True], metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -157,7 +159,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[Completion]: """[Legacy] Create a Text Completion. @@ -176,9 +178,8 @@ def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. 
@@ -249,7 +250,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: bool, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -262,7 +263,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | Stream[Completion]: """[Legacy] Create a Text Completion. @@ -281,9 +282,8 @@ def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. @@ -354,7 +354,7 @@ def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -367,8 +367,10 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | Stream[Completion]: + if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT: + timeout = 600 return self._post( "/v1/complete", body=maybe_transform( @@ -408,7 +410,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -421,7 +423,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion: """[Legacy] Create a Text Completion. @@ -440,9 +442,8 @@ async def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. 
@@ -513,7 +514,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: Literal[True], metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -526,7 +527,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[Completion]: """[Legacy] Create a Text Completion. @@ -545,9 +546,8 @@ async def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. @@ -618,7 +618,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, stream: bool, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, @@ -631,7 +631,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | AsyncStream[Completion]: """[Legacy] Create a Text Completion. @@ -650,9 +650,8 @@ async def create( Note that our models may stop _before_ reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. prompt: The prompt that you want Claude to complete. 
@@ -723,7 +722,7 @@ async def create( self, *, max_tokens_to_sample: int, - model: Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]], + model: ModelParam, prompt: str, metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -736,8 +735,10 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | AsyncStream[Completion]: + if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT: + timeout = 600 return await self._post( "/v1/complete", body=await async_maybe_transform( diff --git a/src/anthropic/resources/messages.py b/src/anthropic/resources/messages.py index c7d3639f..8b6ab106 100644 --- a/src/anthropic/resources/messages.py +++ b/src/anthropic/resources/messages.py @@ -12,6 +12,7 @@ from ..types import message_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import ( + is_given, required_args, maybe_transform, async_maybe_transform, @@ -19,11 +20,13 @@ from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from .._constants import DEFAULT_TIMEOUT from .._streaming import Stream, AsyncStream from .._base_client import make_request_options from ..lib.streaming import MessageStreamManager, AsyncMessageStreamManager from ..types.message import Message from ..types.tool_param import ToolParam +from ..types.model_param import ModelParam from ..types.message_param import MessageParam from ..types.text_block_param import TextBlockParam from ..types.raw_message_stream_event import RawMessageStreamEvent @@ -46,18 +49,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, @@ -72,7 +64,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Message: """ Create a Message. @@ -178,9 +170,8 @@ def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. metadata: An object describing metadata about the request. 
@@ -321,18 +312,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: Literal[True], metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -347,7 +327,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[RawMessageStreamEvent]: """ Create a Message. @@ -453,9 +433,8 @@ def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. stream: Whether to incrementally stream the response using server-sent events. @@ -596,18 +575,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: bool, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -622,7 +590,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Message | Stream[RawMessageStreamEvent]: """ Create a Message. @@ -728,9 +696,8 @@ def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. stream: Whether to incrementally stream the response using server-sent events. 
@@ -871,18 +838,7 @@ def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, @@ -897,8 +853,10 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Message | Stream[RawMessageStreamEvent]: + if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT: + timeout = 600 return self._post( "/v1/messages", body=maybe_transform( @@ -931,18 +889,7 @@ def stream( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, system: Union[str, Iterable[TextBlockParam]] | NotGiven = NOT_GIVEN, @@ -1008,18 +955,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, @@ -1034,7 +970,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Message: """ Create a Message. @@ -1140,9 +1076,8 @@ async def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. metadata: An object describing metadata about the request. 
@@ -1283,18 +1218,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: Literal[True], metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -1309,7 +1233,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[RawMessageStreamEvent]: """ Create a Message. @@ -1415,9 +1339,8 @@ async def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. stream: Whether to incrementally stream the response using server-sent events. @@ -1558,18 +1481,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, stream: bool, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -1584,7 +1496,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Message | AsyncStream[RawMessageStreamEvent]: """ Create a Message. @@ -1690,9 +1602,8 @@ async def create( the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. - model: The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. stream: Whether to incrementally stream the response using server-sent events. 
@@ -1833,18 +1744,7 @@ async def create( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, @@ -1859,8 +1759,10 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = 600, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Message | AsyncStream[RawMessageStreamEvent]: + if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT: + timeout = 600 return await self._post( "/v1/messages", body=await async_maybe_transform( @@ -1893,18 +1795,7 @@ def stream( *, max_tokens: int, messages: Iterable[MessageParam], - model: Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ], + model: ModelParam, metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN, stop_sequences: List[str] | NotGiven = NOT_GIVEN, system: Union[str, Iterable[TextBlockParam]] | NotGiven = NOT_GIVEN, diff --git a/src/anthropic/types/__init__.py b/src/anthropic/types/__init__.py index f6165eca..9839b5a2 100644 --- a/src/anthropic/types/__init__.py +++ b/src/anthropic/types/__init__.py @@ -2,16 +2,18 @@ from __future__ import annotations +from .model import Model as Model from .usage import Usage as Usage from .message import Message as Message from .completion import Completion as Completion from .text_block import TextBlock as TextBlock from .text_delta import TextDelta as TextDelta from .tool_param import ToolParam as ToolParam +from .model_param import ModelParam as ModelParam from .content_block import ContentBlock as ContentBlock from .message_param import MessageParam as MessageParam from .tool_use_block import ToolUseBlock as ToolUseBlock -from .input_json_delta import InputJsonDelta as InputJsonDelta +from .input_json_delta import InputJSONDelta as InputJSONDelta from .text_block_param import TextBlockParam as TextBlockParam from .image_block_param import ImageBlockParam as ImageBlockParam from .message_stop_event import MessageStopEvent as MessageStopEvent diff --git a/src/anthropic/types/completion.py b/src/anthropic/types/completion.py index d55a0fe6..e6293210 100644 --- a/src/anthropic/types/completion.py +++ b/src/anthropic/types/completion.py @@ -3,6 +3,7 @@ from typing import Optional from typing_extensions import Literal +from .model import Model from .._models import BaseModel __all__ = ["Completion"] @@ -18,8 +19,12 @@ class Completion(BaseModel): completion: str """The resulting completion up to and excluding the stop sequences.""" - model: str - """The model that handled the request.""" + model: Model + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional + details and options. + """ stop_reason: Optional[str] = None """The reason that we stopped. 
diff --git a/src/anthropic/types/completion_create_params.py b/src/anthropic/types/completion_create_params.py index be824ebf..5c9ed7a6 100644 --- a/src/anthropic/types/completion_create_params.py +++ b/src/anthropic/types/completion_create_params.py @@ -5,6 +5,8 @@ from typing import List, Union, Optional from typing_extensions import Literal, Required, TypedDict +from .model_param import ModelParam + __all__ = [ "CompletionRequestStreamingMetadata", "CompletionRequestNonStreamingMetadata", @@ -25,10 +27,10 @@ class CompletionCreateParamsBase(TypedDict, total=False): only specifies the absolute maximum number of tokens to generate. """ - model: Required[Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]]] - """The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: Required[ModelParam] + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. """ diff --git a/src/anthropic/types/input_json_delta.py b/src/anthropic/types/input_json_delta.py index 6391d4bf..3028d7ac 100644 --- a/src/anthropic/types/input_json_delta.py +++ b/src/anthropic/types/input_json_delta.py @@ -4,10 +4,12 @@ from .._models import BaseModel -__all__ = ["InputJsonDelta"] +__all__ = ["InputJSONDelta", "InputJsonDelta"] - -class InputJsonDelta(BaseModel): +class InputJSONDelta(BaseModel): partial_json: str type: Literal["input_json_delta"] + +InputJsonDelta = InputJSONDelta + diff --git a/src/anthropic/types/message.py b/src/anthropic/types/message.py index 9ef967ea..3ddbf20b 100644 --- a/src/anthropic/types/message.py +++ b/src/anthropic/types/message.py @@ -3,6 +3,7 @@ from typing import List, Optional from typing_extensions import Literal +from .model import Model from .usage import Usage from .._models import BaseModel from .content_block import ContentBlock, ContentBlock as ContentBlock @@ -52,8 +53,12 @@ class Message(BaseModel): ``` """ - model: str - """The model that handled the request.""" + model: Model + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional + details and options. + """ role: Literal["assistant"] """Conversational role of the generated message. diff --git a/src/anthropic/types/message_create_params.py b/src/anthropic/types/message_create_params.py index a76bc0f7..a50e5946 100644 --- a/src/anthropic/types/message_create_params.py +++ b/src/anthropic/types/message_create_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypedDict from .tool_param import ToolParam +from .model_param import ModelParam from .message_param import MessageParam from .text_block_param import TextBlockParam @@ -120,23 +121,10 @@ class MessageCreateParamsBase(TypedDict, total=False): messages in the Messages API. """ - model: Required[ - Union[ - str, - Literal[ - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2", - ], - ] - ] - """The model that will complete your prompt. - - See [models](https://docs.anthropic.com/en/docs/models-overview) for additional + model: Required[ModelParam] + """ + The model that will complete your prompt.\n\nSee + [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. 
""" diff --git a/src/anthropic/types/model.py b/src/anthropic/types/model.py new file mode 100644 index 00000000..0ada4aeb --- /dev/null +++ b/src/anthropic/types/model.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal + +__all__ = ["Model"] + +Model = Union[ + str, + Literal[ + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-2.1", + "claude-2.0", + "claude-instant-1.2", + ], +] diff --git a/src/anthropic/types/model_param.py b/src/anthropic/types/model_param.py new file mode 100644 index 00000000..d932500f --- /dev/null +++ b/src/anthropic/types/model_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal + +__all__ = ["ModelParam"] + +ModelParam = Union[ + str, + Literal[ + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-2.1", + "claude-2.0", + "claude-instant-1.2", + ], +] diff --git a/src/anthropic/types/raw_content_block_delta_event.py b/src/anthropic/types/raw_content_block_delta_event.py index e1370fdb..845d502a 100644 --- a/src/anthropic/types/raw_content_block_delta_event.py +++ b/src/anthropic/types/raw_content_block_delta_event.py @@ -6,11 +6,11 @@ from .._utils import PropertyInfo from .._models import BaseModel from .text_delta import TextDelta -from .input_json_delta import InputJsonDelta +from .input_json_delta import InputJSONDelta __all__ = ["RawContentBlockDeltaEvent", "Delta"] -Delta = Annotated[Union[TextDelta, InputJsonDelta], PropertyInfo(discriminator="type")] +Delta = Annotated[Union[TextDelta, InputJSONDelta], PropertyInfo(discriminator="type")] class RawContentBlockDeltaEvent(BaseModel): diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index f46b7bb7..cf87d9fe 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -21,7 +21,7 @@ class TestCompletions: def test_method_create_overload_1(self, client: Anthropic) -> None: completion = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) assert_matches_type(Completion, completion, path=["response"]) @@ -30,7 +30,7 @@ def test_method_create_overload_1(self, client: Anthropic) -> None: def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> None: completion = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, stop_sequences=["string", "string", "string"], @@ -45,7 +45,7 @@ def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> No def test_raw_response_create_overload_1(self, client: Anthropic) -> None: response = client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) @@ -58,7 +58,7 @@ def test_raw_response_create_overload_1(self, client: Anthropic) -> None: def test_streaming_response_create_overload_1(self, client: Anthropic) -> None: with client.completions.with_streaming_response.create( 
max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) as response: assert not response.is_closed @@ -73,7 +73,7 @@ def test_streaming_response_create_overload_1(self, client: Anthropic) -> None: def test_method_create_overload_2(self, client: Anthropic) -> None: completion_stream = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -83,7 +83,7 @@ def test_method_create_overload_2(self, client: Anthropic) -> None: def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> None: completion_stream = client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, @@ -98,7 +98,7 @@ def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> No def test_raw_response_create_overload_2(self, client: Anthropic) -> None: response = client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -111,7 +111,7 @@ def test_raw_response_create_overload_2(self, client: Anthropic) -> None: def test_streaming_response_create_overload_2(self, client: Anthropic) -> None: with client.completions.with_streaming_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) as response: @@ -131,7 +131,7 @@ class TestAsyncCompletions: async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> None: completion = await async_client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) assert_matches_type(Completion, completion, path=["response"]) @@ -140,7 +140,7 @@ async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> N async def test_method_create_with_all_params_overload_1(self, async_client: AsyncAnthropic) -> None: completion = await async_client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, stop_sequences=["string", "string", "string"], @@ -155,7 +155,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic) -> None: response = await async_client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) @@ -168,7 +168,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic async def test_streaming_response_create_overload_1(self, async_client: AsyncAnthropic) -> None: async with async_client.completions.with_streaming_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) as response: assert not response.is_closed @@ -183,7 +183,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncAnt async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> None: completion_stream = await async_client.completions.create( 
max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -193,7 +193,7 @@ async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> N async def test_method_create_with_all_params_overload_2(self, async_client: AsyncAnthropic) -> None: completion_stream = await async_client.completions.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"}, @@ -208,7 +208,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic) -> None: response = await async_client.completions.with_raw_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) @@ -221,7 +221,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic async def test_streaming_response_create_overload_2(self, async_client: AsyncAnthropic) -> None: async with async_client.completions.with_streaming_response.create( max_tokens_to_sample=256, - model="claude-2.1", + model="string", prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, ) as response: diff --git a/tests/lib/streaming/test_messages.py b/tests/lib/streaming/test_messages.py index 408546de..e719516e 100644 --- a/tests/lib/streaming/test_messages.py +++ b/tests/lib/streaming/test_messages.py @@ -192,7 +192,7 @@ def test_stream_method_definition_in_sync(sync: bool) -> None: if custom_param.annotation != generated_param.annotation: errors.append( - f"types for the `{name}` param are do not match; generated={repr(generated_param.annotation)} custom={repr(generated_param.annotation)}" + f"types for the `{name}` param are do not match; generated={repr(generated_param.annotation)} custom={repr(custom_param.annotation)}" ) continue
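The repeated `timeout` changes in `completions.py` and `messages.py` above are the substance of the client-level timeout fix (#615): the per-method default of 600 seconds is now applied only when no per-request timeout is given *and* the client is still using `DEFAULT_TIMEOUT`, instead of unconditionally overriding whatever the client was configured with. A minimal sketch of the resulting behavior:

```python
import httpx

from anthropic import Anthropic

# A client-level timeout now takes effect for create() calls; previously the
# hard-coded per-method default of 600s silently won over it.
client = Anthropic(timeout=httpx.Timeout(30.0, connect=5.0))

message = client.messages.create(  # runs with the 30s client timeout
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, Claude"}],
    model="claude-3-5-sonnet-20240620",
)

# An explicit per-request timeout still takes precedence over both defaults.
message = client.messages.create(
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, Claude"}],
    model="claude-3-5-sonnet-20240620",
    timeout=60.0,
)
```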
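The new `model.py` and `model_param.py` files extract the inline `Union[str, Literal[...]]` into the named `Model` and `ModelParam` aliases used throughout the resource hunks above, and `Message.model` / `Completion.model` are now typed as `Model` rather than plain `str`. A short sketch of using the aliases in annotations (the helper functions are hypothetical):

```python
from anthropic.types import Model, ModelParam


# Hypothetical helper: both aliases still accept arbitrary strings in addition
# to the known literals, so new model IDs keep type-checking.
def resolve_model(use_latest: bool) -> ModelParam:
    return "claude-3-5-sonnet-20240620" if use_latest else "claude-2.1"


def describe(model: Model) -> str:
    return f"request handled by {model}"
```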
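`input_json_delta.py` renames `InputJsonDelta` to `InputJSONDelta` while keeping the old name as a module-level alias (the back-compat feature noted in the changelog), so imports and `isinstance` checks written against 0.31.x keep working. A minimal sketch:

```python
from anthropic.types import InputJSONDelta, InputJsonDelta

# The alias is the same class object, not a copy, so both names are
# interchangeable in isinstance checks.
assert InputJsonDelta is InputJSONDelta

delta = InputJSONDelta(type="input_json_delta", partial_json='{"location": "San')
assert isinstance(delta, InputJsonDelta)
print(delta.partial_json)
```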
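Finally, `construct_type_unchecked` in `_models.py` is an internal type-construction helper. The sketch below illustrates the loose-coercion behavior its docstring describes; note the import is from a private module, and the raw payload is intentionally incomplete, since nothing is validated:

```python
from anthropic.types import Message
from anthropic._models import construct_type_unchecked

# Nested values are constructed (the dict becomes a TextBlock), but missing
# required fields are not flagged -- the result may not actually be a valid Message.
message = construct_type_unchecked(
    value={"id": "msg_123", "role": "assistant", "content": [{"type": "text", "text": "Hi"}]},
    type_=Message,
)
print(message.content[0].text)  # -> Hi
```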