From 30215c15df613cf9c36cafd717af79158c9db3e5 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 19 Aug 2024 17:22:47 -0400 Subject: [PATCH 1/3] fix(json schema): remove `None` defaults (#1663) --- src/openai/lib/_pydantic.py | 7 ++++ tests/lib/chat/test_completions.py | 60 +++++++++++++++++++++++++++++- tests/lib/schema_types/query.py | 3 +- tests/lib/test_pydantic.py | 6 ++- 4 files changed, 72 insertions(+), 4 deletions(-) diff --git a/src/openai/lib/_pydantic.py b/src/openai/lib/_pydantic.py index ad3b6eb29f..f989ce3ed0 100644 --- a/src/openai/lib/_pydantic.py +++ b/src/openai/lib/_pydantic.py @@ -5,6 +5,7 @@ import pydantic +from .._types import NOT_GIVEN from .._utils import is_dict as _is_dict, is_list from .._compat import model_json_schema @@ -76,6 +77,12 @@ def _ensure_strict_json_schema( for i, entry in enumerate(all_of) ] + # strip `None` defaults as there's no meaningful distinction here + # the schema will still be `nullable` and the model will default + # to using `None` anyway + if json_schema.get("default", NOT_GIVEN) is None: + json_schema.pop("default") + # we can't use `$ref`s if there are also other properties defined, e.g. # `{"$ref": "...", "description": "my description"}` # diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index f003866653..aea449b097 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -3,7 +3,7 @@ import os import json from enum import Enum -from typing import Any, Callable +from typing import Any, Callable, Optional from typing_extensions import Literal, TypeVar import httpx @@ -135,6 +135,63 @@ class Location(BaseModel): ) +@pytest.mark.respx(base_url=base_url) +def test_parse_pydantic_model_optional_default( + client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch +) -> None: + class Location(BaseModel): + city: str + temperature: float + units: Optional[Literal["c", "f"]] = None + + completion = _make_snapshot_request( + lambda c: c.beta.chat.completions.parse( + model="gpt-4o-2024-08-06", + messages=[ + { + "role": "user", + "content": "What's the weather like in SF?", + }, + ], + response_format=Location, + ), + content_snapshot=snapshot( + '{"id": "chatcmpl-9y39Q2jGzWmeEZlm5CoNVOuQzcxP4", "object": "chat.completion", "created": 1724098820, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":62,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 14, "total_tokens": 31}, "system_fingerprint": "fp_2a322c9ffc"}' + ), + mock_client=client, + respx_mock=respx_mock, + ) + + assert print_obj(completion, monkeypatch) == snapshot( + """\ +ParsedChatCompletion[Location]( + choices=[ + ParsedChoice[Location]( + finish_reason='stop', + index=0, + logprobs=None, + message=ParsedChatCompletionMessage[Location]( + content='{"city":"San Francisco","temperature":62,"units":"f"}', + function_call=None, + parsed=Location(city='San Francisco', temperature=62.0, units='f'), + refusal=None, + role='assistant', + tool_calls=[] + ) + ) + ], + created=1724098820, + id='chatcmpl-9y39Q2jGzWmeEZlm5CoNVOuQzcxP4', + model='gpt-4o-2024-08-06', + object='chat.completion', + service_tier=None, + system_fingerprint='fp_2a322c9ffc', + usage=CompletionUsage(completion_tokens=14, prompt_tokens=17, total_tokens=31) +) +""" + ) + + @pytest.mark.respx(base_url=base_url) def 
test_parse_pydantic_model_enum(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None: class Color(Enum): @@ -320,6 +377,7 @@ def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, m value=DynamicValue(column_name='expected_delivery_date') ) ], + name=None, order_by=, table_name= ) diff --git a/tests/lib/schema_types/query.py b/tests/lib/schema_types/query.py index d2284424f0..03439fb17f 100644 --- a/tests/lib/schema_types/query.py +++ b/tests/lib/schema_types/query.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import List, Union +from typing import List, Union, Optional from pydantic import BaseModel @@ -45,6 +45,7 @@ class Condition(BaseModel): class Query(BaseModel): + name: Optional[str] = None table_name: Table columns: List[Column] conditions: List[Condition] diff --git a/tests/lib/test_pydantic.py b/tests/lib/test_pydantic.py index 531a89df58..99b9e96d21 100644 --- a/tests/lib/test_pydantic.py +++ b/tests/lib/test_pydantic.py @@ -62,6 +62,7 @@ def test_most_types() -> None: "Table": {"enum": ["orders", "customers", "products"], "title": "Table", "type": "string"}, }, "properties": { + "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "table_name": {"$ref": "#/$defs/Table"}, "columns": { "items": {"$ref": "#/$defs/Column"}, @@ -75,7 +76,7 @@ def test_most_types() -> None: }, "order_by": {"$ref": "#/$defs/OrderBy"}, }, - "required": ["table_name", "columns", "conditions", "order_by"], + "required": ["name", "table_name", "columns", "conditions", "order_by"], "title": "Query", "type": "object", "additionalProperties": False, @@ -91,6 +92,7 @@ def test_most_types() -> None: "title": "Query", "type": "object", "properties": { + "name": {"title": "Name", "type": "string"}, "table_name": {"$ref": "#/definitions/Table"}, "columns": {"type": "array", "items": {"$ref": "#/definitions/Column"}}, "conditions": { @@ -100,7 +102,7 @@ def test_most_types() -> None: }, "order_by": {"$ref": "#/definitions/OrderBy"}, }, - "required": ["table_name", "columns", "conditions", "order_by"], + "required": ["name", "table_name", "columns", "conditions", "order_by"], "definitions": { "Table": { "title": "Table", From 822c37de49eb2ffe8c05122f7520ba87bd76e30b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 21:25:16 +0000 Subject: [PATCH 2/3] chore(client): fix parsing union responses when non-json is returned (#1665) --- src/openai/_models.py | 2 ++ tests/test_legacy_response.py | 22 +++++++++++++++++++- tests/test_response.py | 39 ++++++++++++++++++++++++++++++++++- 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index 5148d5a7b3..d386eaa3a4 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -380,6 +380,8 @@ def is_basemodel(type_: type) -> bool: def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]: origin = get_origin(type_) or type_ + if not inspect.isclass(origin): + return False return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) diff --git a/tests/test_legacy_response.py b/tests/test_legacy_response.py index 3659ee12c1..3c2df53e58 100644 --- a/tests/test_legacy_response.py +++ b/tests/test_legacy_response.py @@ -1,5 +1,5 @@ import json -from typing import cast +from typing import Any, Union, cast from typing_extensions import Annotated import httpx @@ -81,3 +81,23 @@ def test_response_parse_annotated_type(client: 
OpenAI) -> None:
     )
     assert obj.foo == "hello!"
     assert obj.bar == 2
+
+
+class OtherModel(pydantic.BaseModel):
+    a: str
+
+
+@pytest.mark.parametrize("client", [False], indirect=True)  # loose validation
+def test_response_parse_expect_model_union_non_json_content(client: OpenAI) -> None:
+    response = LegacyAPIResponse(
+        raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
+        client=client,
+        stream=False,
+        stream_cls=None,
+        cast_to=str,
+        options=FinalRequestOptions.construct(method="get", url="/foo"),
+    )
+
+    obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
+    assert isinstance(obj, str)
+    assert obj == "foo"
diff --git a/tests/test_response.py b/tests/test_response.py
index 6ea1be1a1a..b7d88bdbde 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -1,5 +1,5 @@
 import json
-from typing import List, cast
+from typing import Any, List, Union, cast
 from typing_extensions import Annotated
 
 import httpx
@@ -188,3 +188,40 @@ async def test_async_response_parse_annotated_type(async_client: AsyncOpenAI) ->
     )
     assert obj.foo == "hello!"
     assert obj.bar == 2
+
+
+class OtherModel(BaseModel):
+    a: str
+
+
+@pytest.mark.parametrize("client", [False], indirect=True)  # loose validation
+def test_response_parse_expect_model_union_non_json_content(client: OpenAI) -> None:
+    response = APIResponse(
+        raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
+        client=client,
+        stream=False,
+        stream_cls=None,
+        cast_to=str,
+        options=FinalRequestOptions.construct(method="get", url="/foo"),
+    )
+
+    obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
+    assert isinstance(obj, str)
+    assert obj == "foo"
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("async_client", [False], indirect=True)  # loose validation
+async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncOpenAI) -> None:
+    response = AsyncAPIResponse(
+        raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
+        client=async_client,
+        stream=False,
+        stream_cls=None,
+        cast_to=str,
+        options=FinalRequestOptions.construct(method="get", url="/foo"),
+    )
+
+    obj = await response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
+    assert isinstance(obj, str)
+    assert obj == "foo"

From ca8d36c2a26baf8e787de53cbe501dd2c461b333 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 19 Aug 2024 21:25:45 +0000
Subject: [PATCH 3/3] release: 1.41.1

---
 .release-please-manifest.json |  2 +-
 CHANGELOG.md                  | 13 +++++++++++++
 pyproject.toml                |  2 +-
 src/openai/_version.py        |  2 +-
 4 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 4bce58a11b..5a1b26c33e 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.41.0"
+  ".": "1.41.1"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cfcfdfe3eb..e80a4fcc8a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
 # Changelog
 
+## 1.41.1 (2024-08-19)
+
+Full Changelog: [v1.41.0...v1.41.1](https://github.com/openai/openai-python/compare/v1.41.0...v1.41.1)
+
+### Bug Fixes
+
+* **json schema:** remove `None` defaults ([#1663](https://github.com/openai/openai-python/issues/1663)) ([30215c1](https://github.com/openai/openai-python/commit/30215c15df613cf9c36cafd717af79158c9db3e5))
+
+
+### Chores
+
+* **client:** fix parsing union responses when non-json is returned ([#1665](https://github.com/openai/openai-python/issues/1665)) ([822c37d](https://github.com/openai/openai-python/commit/822c37de49eb2ffe8c05122f7520ba87bd76e30b))
+
 ## 1.41.0 (2024-08-16)
 
 Full Changelog: [v1.40.8...v1.41.0](https://github.com/openai/openai-python/compare/v1.40.8...v1.41.0)
diff --git a/pyproject.toml b/pyproject.toml
index 052afada08..b6530b8523 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.41.0"
+version = "1.41.1"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index c44c93d3e9..22ed2bff90 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.41.0"  # x-release-please-version
+__version__ = "1.41.1"  # x-release-please-version
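
Note on [PATCH 1/3] (json schema, #1663): once `"default": null` is stripped from the generated JSON schema, a Pydantic model whose optional fields default to `None` can be passed straight to `response_format` for strict structured outputs; the field is still nullable and the parsed value simply falls back to `None`, which is why the distinction is meaningless. A minimal usage sketch mirroring the new `test_parse_pydantic_model_optional_default` test (this assumes `OPENAI_API_KEY` is configured and makes a real API call; the model name and prompt are the ones used in the test):

```python
from typing import Optional
from typing_extensions import Literal

from pydantic import BaseModel
from openai import OpenAI


class Location(BaseModel):
    city: str
    temperature: float
    # Optional field with a `None` default; the generated schema stays nullable
    # but no longer carries a `"default": null` entry.
    units: Optional[Literal["c", "f"]] = None


client = OpenAI()
completion = client.beta.chat.completions.parse(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "What's the weather like in SF?"}],
    response_format=Location,
)
# e.g. Location(city='San Francisco', temperature=62.0, units='f')
print(completion.choices[0].message.parsed)
```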
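Note on [PATCH 2/3] (client, #1665): `is_basemodel_type` runs `issubclass` on `get_origin(type_) or type_`, and for a `Union` target the origin is `typing.Union`, a special form rather than a class, so the unguarded call could raise `TypeError` instead of simply answering `False`. With the added `inspect.isclass` guard the check returns `False`, and, as the new tests assert, parsing a non-JSON body against a union of models falls back to returning the raw string. A small illustrative sketch of that failure mode (the two models below are stand-ins for the `CustomModel`/`OtherModel` used in the tests):

```python
import inspect
from typing import Union, get_origin

from pydantic import BaseModel


class CustomModel(BaseModel):  # illustrative stand-in
    foo: str


class OtherModel(BaseModel):  # matches the field used in the new tests
    a: str


target = Union[CustomModel, OtherModel]
origin = get_origin(target) or target

print(origin)                   # typing.Union, a special form, not a class
print(inspect.isclass(origin))  # False, so the guarded check returns early

try:
    issubclass(origin, BaseModel)  # what the unguarded check effectively did
except TypeError as exc:
    print(f"TypeError: {exc}")  # issubclass() arg 1 must be a class
```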