release: 1.41.1 #1664

Merged · 3 commits · Aug 20, 2024
2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
{
".": "1.41.0"
".": "1.41.1"
}
13 changes: 13 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog

## 1.41.1 (2024-08-19)

Full Changelog: [v1.41.0...v1.41.1](https://github.com/openai/openai-python/compare/v1.41.0...v1.41.1)

### Bug Fixes

* **json schema:** remove `None` defaults ([#1663](https://github.com/openai/openai-python/issues/1663)) ([30215c1](https://github.com/openai/openai-python/commit/30215c15df613cf9c36cafd717af79158c9db3e5))


### Chores

* **client:** fix parsing union responses when non-json is returned ([#1665](https://github.com/openai/openai-python/issues/1665)) ([822c37d](https://github.com/openai/openai-python/commit/822c37de49eb2ffe8c05122f7520ba87bd76e30b))

## 1.41.0 (2024-08-16)

Full Changelog: [v1.40.8...v1.41.0](https://github.com/openai/openai-python/compare/v1.40.8...v1.41.0)
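For context on the `json schema` fix in the 1.41.1 entry above: a Pydantic `response_format` model with an `Optional[...] = None` field now works with the structured-outputs parse helper, because the `"default": null` entry Pydantic adds for such a field is stripped before the schema is sent. A minimal usage sketch (assumes `OPENAI_API_KEY` is set in the environment; the model class and model name mirror the test added below):

```python
from typing import Optional

from openai import OpenAI
from pydantic import BaseModel
from typing_extensions import Literal


class Location(BaseModel):
    city: str
    temperature: float
    # Optional field with a `None` default; Pydantic emits `"default": null`
    # for it in the JSON schema, which the SDK now strips in strict mode.
    units: Optional[Literal["c", "f"]] = None


client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
completion = client.beta.chat.completions.parse(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "What's the weather like in SF?"}],
    response_format=Location,
)
print(completion.choices[0].message.parsed)  # e.g. Location(city='San Francisco', ...)
```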
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "1.41.0"
version = "1.41.1"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
2 changes: 2 additions & 0 deletions src/openai/_models.py
@@ -380,6 +380,8 @@ def is_basemodel(type_: type) -> bool:

def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]:
origin = get_origin(type_) or type_
if not inspect.isclass(origin):
return False
return issubclass(origin, BaseModel) or issubclass(origin, GenericModel)


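The new `inspect.isclass` guard matters when the cast target resolves to a union of models, as in the response tests added below: `get_origin(Union[A, B])` returns `typing.Union`, which is not a class, so the unguarded `issubclass(...)` call would raise `TypeError`. A standalone sketch of that behaviour (the model names are placeholders, not library code):

```python
# Standalone illustration of why the `isclass` check is needed.
import inspect
from typing import Union, get_origin

import pydantic


class CustomModel(pydantic.BaseModel):
    foo: str


class OtherModel(pydantic.BaseModel):
    a: str


origin = get_origin(Union[CustomModel, OtherModel])
print(inspect.isclass(origin))  # False -- `typing.Union` is a special form, not a class
# Without the guard, the check below is effectively what `is_basemodel_type` would do,
# and it raises `TypeError: issubclass() arg 1 must be a class`:
# issubclass(origin, pydantic.BaseModel)
```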
2 changes: 1 addition & 1 deletion src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "1.41.0" # x-release-please-version
__version__ = "1.41.1" # x-release-please-version
7 changes: 7 additions & 0 deletions src/openai/lib/_pydantic.py
@@ -5,6 +5,7 @@

import pydantic

from .._types import NOT_GIVEN
from .._utils import is_dict as _is_dict, is_list
from .._compat import model_json_schema

@@ -76,6 +77,12 @@ def _ensure_strict_json_schema(
for i, entry in enumerate(all_of)
]

# strip `None` defaults as there's no meaningful distinction here
# the schema will still be `nullable` and the model will default
# to using `None` anyway
if json_schema.get("default", NOT_GIVEN) is None:
json_schema.pop("default")

# we can't use `$ref`s if there are also other properties defined, e.g.
# `{"$ref": "...", "description": "my description"}`
#
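A minimal sketch of what the new check does to a single property schema. The Pydantic output shown is approximate, and `NOT_GIVEN` is replaced with a local sentinel so the snippet is self-contained:

```python
NOT_GIVEN = object()  # local stand-in for the SDK's sentinel

# Roughly what Pydantic emits for `units: Optional[Literal["c", "f"]] = None`.
prop_schema = {
    "anyOf": [{"enum": ["c", "f"], "type": "string"}, {"type": "null"}],
    "default": None,
    "title": "Units",
}

# Using a sentinel as the `.get()` fallback distinguishes "default is explicitly None"
# from "no default key at all", so only the former is removed.
if prop_schema.get("default", NOT_GIVEN) is None:
    prop_schema.pop("default")

print(prop_schema)
# {'anyOf': [{'enum': ['c', 'f'], 'type': 'string'}, {'type': 'null'}], 'title': 'Units'}
```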
60 changes: 59 additions & 1 deletion tests/lib/chat/test_completions.py
@@ -3,7 +3,7 @@
import os
import json
from enum import Enum
from typing import Any, Callable
from typing import Any, Callable, Optional
from typing_extensions import Literal, TypeVar

import httpx
@@ -135,6 +135,63 @@ class Location(BaseModel):
)


@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_optional_default(
client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
) -> None:
class Location(BaseModel):
city: str
temperature: float
units: Optional[Literal["c", "f"]] = None

completion = _make_snapshot_request(
lambda c: c.beta.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{
"role": "user",
"content": "What's the weather like in SF?",
},
],
response_format=Location,
),
content_snapshot=snapshot(
'{"id": "chatcmpl-9y39Q2jGzWmeEZlm5CoNVOuQzcxP4", "object": "chat.completion", "created": 1724098820, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":62,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 14, "total_tokens": 31}, "system_fingerprint": "fp_2a322c9ffc"}'
),
mock_client=client,
respx_mock=respx_mock,
)

assert print_obj(completion, monkeypatch) == snapshot(
"""\
ParsedChatCompletion[Location](
choices=[
ParsedChoice[Location](
finish_reason='stop',
index=0,
logprobs=None,
message=ParsedChatCompletionMessage[Location](
content='{"city":"San Francisco","temperature":62,"units":"f"}',
function_call=None,
parsed=Location(city='San Francisco', temperature=62.0, units='f'),
refusal=None,
role='assistant',
tool_calls=[]
)
)
],
created=1724098820,
id='chatcmpl-9y39Q2jGzWmeEZlm5CoNVOuQzcxP4',
model='gpt-4o-2024-08-06',
object='chat.completion',
service_tier=None,
system_fingerprint='fp_2a322c9ffc',
usage=CompletionUsage(completion_tokens=14, prompt_tokens=17, total_tokens=31)
)
"""
)


@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_enum(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
class Color(Enum):
@@ -320,6 +377,7 @@ def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, m
value=DynamicValue(column_name='expected_delivery_date')
)
],
name=None,
order_by=<OrderBy.asc: 'asc'>,
table_name=<Table.orders: 'orders'>
)
3 changes: 2 additions & 1 deletion tests/lib/schema_types/query.py
@@ -1,5 +1,5 @@
from enum import Enum
from typing import List, Union
from typing import List, Union, Optional

from pydantic import BaseModel

@@ -45,6 +45,7 @@ class Condition(BaseModel):


class Query(BaseModel):
name: Optional[str] = None
table_name: Table
columns: List[Column]
conditions: List[Condition]
6 changes: 4 additions & 2 deletions tests/lib/test_pydantic.py
@@ -62,6 +62,7 @@ def test_most_types() -> None:
"Table": {"enum": ["orders", "customers", "products"], "title": "Table", "type": "string"},
},
"properties": {
"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"},
"table_name": {"$ref": "#/$defs/Table"},
"columns": {
"items": {"$ref": "#/$defs/Column"},
@@ -75,7 +76,7 @@ def test_most_types() -> None:
},
"order_by": {"$ref": "#/$defs/OrderBy"},
},
"required": ["table_name", "columns", "conditions", "order_by"],
"required": ["name", "table_name", "columns", "conditions", "order_by"],
"title": "Query",
"type": "object",
"additionalProperties": False,
@@ -91,6 +92,7 @@ def test_most_types() -> None:
"title": "Query",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"table_name": {"$ref": "#/definitions/Table"},
"columns": {"type": "array", "items": {"$ref": "#/definitions/Column"}},
"conditions": {
@@ -100,7 +102,7 @@ def test_most_types() -> None:
},
"order_by": {"$ref": "#/definitions/OrderBy"},
},
"required": ["table_name", "columns", "conditions", "order_by"],
"required": ["name", "table_name", "columns", "conditions", "order_by"],
"definitions": {
"Table": {
"title": "Table",
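For reference, a hedged sketch of what Pydantic v2 itself emits for the new `name` field before the SDK's strict-schema transformation runs; the `"default": None` entry it includes is exactly what gets stripped, which is why it does not appear in the expected properties above:

```python
from typing import Optional

from pydantic import BaseModel


class Query(BaseModel):  # trimmed stand-in for tests/lib/schema_types/query.py
    name: Optional[str] = None


print(Query.model_json_schema()["properties"]["name"])
# Expected (approximately):
# {'anyOf': [{'type': 'string'}, {'type': 'null'}], 'default': None, 'title': 'Name'}
```

Note that `name` still ends up in `required` in the strict schema expected by the test; nullability, rather than a default value, is what conveys optionality to the model.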
22 changes: 21 additions & 1 deletion tests/test_legacy_response.py
@@ -1,5 +1,5 @@
import json
from typing import cast
from typing import Any, Union, cast
from typing_extensions import Annotated

import httpx
@@ -81,3 +81,23 @@ def test_response_parse_annotated_type(client: OpenAI) -> None:
)
assert obj.foo == "hello!"
assert obj.bar == 2


class OtherModel(pydantic.BaseModel):
a: str


@pytest.mark.parametrize("client", [False], indirect=True) # loose validation
def test_response_parse_expect_model_union_non_json_content(client: OpenAI) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)

obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
assert isinstance(obj, str)
assert obj == "foo"
39 changes: 38 additions & 1 deletion tests/test_response.py
@@ -1,5 +1,5 @@
import json
from typing import List, cast
from typing import Any, List, Union, cast
from typing_extensions import Annotated

import httpx
@@ -188,3 +188,40 @@ async def test_async_response_parse_annotated_type(async_client: AsyncOpenAI) ->
)
assert obj.foo == "hello!"
assert obj.bar == 2


class OtherModel(BaseModel):
a: str


@pytest.mark.parametrize("client", [False], indirect=True) # loose validation
def test_response_parse_expect_model_union_non_json_content(client: OpenAI) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)

obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
assert isinstance(obj, str)
assert obj == "foo"


@pytest.mark.asyncio
@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation
async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncOpenAI) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=async_client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)

obj = await response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
assert isinstance(obj, str)
assert obj == "foo"