diff --git a/scripts/types_generator/main.py b/scripts/types_generator/main.py index b93681bc..129bc866 100644 --- a/scripts/types_generator/main.py +++ b/scripts/types_generator/main.py @@ -53,7 +53,7 @@ def generate_models(schema_path: Path, output: Path, extra_template_data: Option "deprecated", "pydantic.field_validator", "pydantic.computed_field", - "genai._utils.deprecated_schema_import._print_deprecation_warning", + "genai._utils.deprecation._print_deprecation_warning", ], extra_template_data=extra_template_data, enum_field_as_literal=LiteralType.One, diff --git a/scripts/types_generator/schema_aliases.yaml b/scripts/types_generator/schema_aliases.yaml index 55e3f46a..39d67db5 100644 --- a/scripts/types_generator/schema_aliases.yaml +++ b/scripts/types_generator/schema_aliases.yaml @@ -819,227 +819,6 @@ replace: model_extensions: # Override generated classes by adding custom properties - BaseMessage: - dynamic_fields: - - name: file_ids - deprecated: "'file_ids' property is deprecated, use 'files' instead!" - return_type: Optional[list[str]] - body: return [file.id for file in self.files] if self.files is not None else None - - ModerationParameters: - custom_body: | - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - for name in ['stigma', 'implicit_hate']: - if hasattr(self, name): - _print_deprecation_warning(f'({type(self).__name__}): "{name}" has been deprecated, use \"social_bias\" instead.') - - # TODO: remove in next major release - def remove_deprecated(self) -> None: - """Remove attributes which are not supported anymore""" - for name in ["stigma", "implicit_hate"]: - if hasattr(self, name): - _print_deprecation_warning( - f'({type(self).__name__} class): "{name}" has been deprecated, use "social_bias" instead.', - ) - delattr(self, name) - - # TODO: remove in next major release - @field_validator("hap", mode="before") - @classmethod - def _validate_hap(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationParameters): passing boolean value to the 'hap' parameter is deprecated, use 'ModerationHAP' class instead.") - return ModerationHAP(input=ModerationHAPInput(enabled=value), output=ModerationHAPOutput(enabled=value)) - else: - return value - - # TODO: remove in next major release - @field_validator("social_bias", mode="before") - @classmethod - def _validate_social_bias(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationParameters): passing boolean value to the 'social_bias' parameter is deprecated, use 'ModerationSocialBias' class instead.") - return ModerationSocialBias(input=ModerationSocialBiasInput(enabled=value), output=ModerationSocialBiasOutput(enabled=value)) - else: - return value - - ModerationHAP: - custom_body: | - # TODO: remove in next major release - @field_validator("input", mode="before") - @classmethod - def _validate_input(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationHAP): passing boolean value to the 'input' parameter is deprecated, use 'ModerationHAPInput' class instead.") - return ModerationHAPInput(enabled=value) - else: - return value - - # TODO: remove in next major release - @field_validator("output", mode="before") - @classmethod - def _validate_output(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationHAP): passing boolean value to the 'output' parameter is deprecated, use 'ModerationHAPOutput' class instead.") - return 
ModerationHAPOutput(enabled=value) - else: - return value - - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - deprecated_attrs = {name: getattr(self, name) for name in ["threshold", "send_tokens"] if getattr(self, name, None) is not None} - if deprecated_attrs: - if self.input is None: - self.input = ModerationHAPInput(enabled=True) - if self.output is None: - self.output = ModerationHAPOutput(enabled=True) - - for name, value in deprecated_attrs.items(): - _print_deprecation_warning( - f"(ModerationHAP): '{name}' is deprecated! Use 'input' parameter (ModerationHAPInput) / 'output' parameter parameter (ModerationHAPOutput) instead.", - ) - setattr(self.input, name, value) - setattr(self.output, name, value) - delattr(self, name) - - ModerationStigma: - custom_body: | - # TODO: remove in next major release - @field_validator("input", mode="before") - @classmethod - def _validate_input(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationStigma): passing boolean value to the 'input' parameter is deprecated, use 'ModerationStigmaInput' class instead.") - return ModerationStigmaInput(enabled=value) - else: - return value - - # TODO: remove in next major release - @field_validator("output", mode="before") - @classmethod - def _validate_output(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationStigma): passing boolean value to the 'output' parameter is deprecated, use 'ModerationStigmaOutput' class instead.") - return ModerationStigmaOutput(enabled=value) - else: - return value - - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - deprecated_attrs = {name: getattr(self, name) for name in ["threshold", "send_tokens"] if getattr(self, name, None) is not None} - if deprecated_attrs: - if self.input is None: - self.input = ModerationStigmaInput(enabled=True) - if self.output is None: - self.output = ModerationStigmaOutput(enabled=True) - - for name, value in deprecated_attrs.items(): - _print_deprecation_warning( - f"(ModerationStigma): '{name}' is deprecated! 
Use 'input' parameter (ModerationStigmaInput) / 'output' parameter parameter (ModerationStigmaOutput) instead.", - ) - setattr(self.input, name, value) - setattr(self.output, name, value) - delattr(self, name) - - ModerationImplicitHate: - custom_body: | - # TODO: remove in next major release - @field_validator("input", mode="before") - @classmethod - def _validate_input(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationImplicitHate): passing boolean value to the 'input' parameter is deprecated, use 'ModerationImplicitHateInput' class instead.") - return ModerationImplicitHateInput(enabled=value) - else: - return value - - # TODO: remove in next major release - @field_validator("output", mode="before") - @classmethod - def _validate_output(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning(f"(ModerationImplicitHate): passing boolean value to the 'output' parameter is deprecated, use 'ModerationImplicitHateOutput' class instead.") - return ModerationImplicitHateOutput(enabled=value) - else: - return value - - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - deprecated_attrs = {name: getattr(self, name) for name in ["threshold", "send_tokens"] if getattr(self, name, None) is not None} - if deprecated_attrs: - if self.input is None: - self.input = ModerationImplicitHateInput(enabled=True) - if self.output is None: - self.output = ModerationImplicitHateOutput(enabled=True) - - for name, value in deprecated_attrs.items(): - _print_deprecation_warning( - f"(ModerationImplicitHate): '{name}' is deprecated! Use 'input' parameter (ModerationImplicitHateInput) / 'output' parameter parameter (ModerationImplicitHateOutput) instead.", - ) - setattr(self.input, name, value) - setattr(self.output, name, value) - delattr(self, name) - - ModerationImplicitHateOutput: - fields_overrides: - send_tokens: - default: False - enabled: - default: False - - ModerationImplicitHateInput: - fields_overrides: - send_tokens: - default: False - enabled: - default: False - - ModerationStigmaInput: - fields_overrides: - send_tokens: - default: False - enabled: - default: False - - ModerationStigmaOutput: - fields_overrides: - send_tokens: - default: False - enabled: - default: False - - TextCreateResponseModeration: - dynamic_fields: - - name: stigma - deprecated: "'stigma' property is deprecated, use 'social_bias' instead!" - return_type: Optional[list[TextModeration]] - body: return None - - name: implicit_hate - deprecated: "'implicit_hate' property is deprecated, use 'social_bias' instead!" - return_type: Optional[list[TextModeration]] - body: return None - - TextGenerationStreamCreateResponse: - dynamic_fields: - - name: moderation - deprecated: "'moderation' property is deprecated, use 'moderations' instead!" - return_type: Optional[TextCreateResponseModeration] - body: return self.moderations - - TextGenerationResult: - dynamic_fields: - - name: moderation - deprecated: "'moderation' property is deprecated, use 'moderations' instead!" - return_type: Optional[TextCreateResponseModeration] - body: return self.moderations - - TextChatStreamCreateResponse: - dynamic_fields: - - name: moderation - deprecated: "'moderation' property is deprecated, use 'moderations' instead!" 
- return_type: Optional[TextCreateResponseModeration] - body: return self.moderations PromptModerationParameters: custom_base_class: ModerationParameters diff --git a/src/genai/_utils/deprecated_schema_import.py b/src/genai/_utils/deprecated_schema_import.py deleted file mode 100644 index 60e6f781..00000000 --- a/src/genai/_utils/deprecated_schema_import.py +++ /dev/null @@ -1,37 +0,0 @@ -import warnings - -_CACHED_WARNINGS: set[str] = set() - - -def _print_deprecation_warning(msg: str): - with warnings.catch_warnings(): - warnings.simplefilter("always", DeprecationWarning) - warnings.warn(msg, category=DeprecationWarning, stacklevel=4) # the original import is 4 levels higher - - -def _log_deprecation_warning(key: str, msg: str): - if key in _CACHED_WARNINGS: - return - - _print_deprecation_warning(msg) - - _CACHED_WARNINGS.add(key) - - -def _deprecated_schema_import(name: str, module_name: str): - """ - Support for deprecated import style "from genai.text.generation import" -> should be "from genai.schema import" - - TODO(#297): to be removed in next major version - """ - import genai.schema - - if name in dir(genai.schema) and not name.startswith("_"): - key = f"{module_name}.{name}" - _log_deprecation_warning( - key, - f"Deprecated import of {name} from module {module_name}. Please use `from genai.schema import {name}`.", - ) - return getattr(genai.schema, name) - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/src/genai/_utils/deprecation.py b/src/genai/_utils/deprecation.py new file mode 100644 index 00000000..19612cc5 --- /dev/null +++ b/src/genai/_utils/deprecation.py @@ -0,0 +1,18 @@ +import warnings + +_CACHED_WARNINGS: set[str] = set() + + +def _print_deprecation_warning(msg: str): + with warnings.catch_warnings(): + warnings.simplefilter("always", DeprecationWarning) + warnings.warn(msg, category=DeprecationWarning, stacklevel=4) # the original import is 4 levels higher + + +def _log_deprecation_warning(key: str, msg: str): + if key in _CACHED_WARNINGS: + return + + _print_deprecation_warning(msg) + + _CACHED_WARNINGS.add(key) diff --git a/src/genai/_utils/service/base_service.py b/src/genai/_utils/service/base_service.py index 3276ad37..4c227667 100644 --- a/src/genai/_utils/service/base_service.py +++ b/src/genai/_utils/service/base_service.py @@ -10,7 +10,7 @@ from pydantic import BaseModel, ConfigDict from genai._utils.api_client import ApiClient -from genai._utils.deprecated_schema_import import _print_deprecation_warning +from genai._utils.deprecation import _print_deprecation_warning from genai._utils.general import to_model_instance from genai._utils.http_client.httpx_client import AsyncHttpxClient, HttpxClient from genai._utils.service.metadata import inherit_metadata diff --git a/src/genai/extensions/langchain/chat_llm.py b/src/genai/extensions/langchain/chat_llm.py index c6242818..38d25678 100644 --- a/src/genai/extensions/langchain/chat_llm.py +++ b/src/genai/extensions/langchain/chat_llm.py @@ -182,8 +182,8 @@ def send_chunk(*, text: str = "", generation_info: dict): run_manager.on_llm_new_token(token=text, chunk=chunk, response=response) # noqa: B023 # Function definition does not bind loop variable `response`: linter is probably just confused here - if response.moderation: - generation_info = create_generation_info_from_response(response, result=response.moderation) + if response.moderations: + generation_info = create_generation_info_from_response(response, result=response.moderations) yield from 
send_chunk(generation_info=generation_info) for result in response.results or []: diff --git a/src/genai/extensions/langchain/llm.py b/src/genai/extensions/langchain/llm.py index 1619f53e..b3c73ea1 100644 --- a/src/genai/extensions/langchain/llm.py +++ b/src/genai/extensions/langchain/llm.py @@ -241,7 +241,7 @@ def send_chunk( for response in self.client.text.generation.create_stream( **self._prepare_stream_request(input=prompt, stop=stop, **kwargs) ): - if response.moderation: - generation_info = create_generation_info_from_response(response, result=response.moderation) + if response.moderations: + generation_info = create_generation_info_from_response(response, result=response.moderations) yield from send_chunk(generation_info=generation_info, response=response) diff --git a/src/genai/extensions/llama_index/llm.py b/src/genai/extensions/llama_index/llm.py index c039db9e..eeb4fbfd 100644 --- a/src/genai/extensions/llama_index/llm.py +++ b/src/genai/extensions/llama_index/llm.py @@ -192,8 +192,8 @@ def stream_chat(self, messages: Sequence[ChatMessage], formatted: bool = False, for response in self.client.text.chat.create_stream( **self._prepare_request(self._identifying_chat_params)(messages=to_genai_messages(messages), **kwargs) ): - if response.moderation: - generation_info = create_generation_info_from_response(response, result=response.moderation) + if response.moderations: + generation_info = create_generation_info_from_response(response, result=response.moderations) message = ChatMessage(role=MessageRole.ASSISTANT, content=text) yield ChatResponse(message=message, delta="", additional_kwargs=generation_info) diff --git a/src/genai/file/__init__.py b/src/genai/file/__init__.py index 1828b3a3..3589f1a9 100644 --- a/src/genai/file/__init__.py +++ b/src/genai/file/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to static assets (user's files)""" from genai.file.file_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/model/__init__.py b/src/genai/model/__init__.py index 9b7853b2..6c1554d6 100644 --- a/src/genai/model/__init__.py +++ b/src/genai/model/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to models.""" from genai.model.model_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/prompt/__init__.py b/src/genai/prompt/__init__.py index 404a2ee7..c895db50 100644 --- a/src/genai/prompt/__init__.py +++ b/src/genai/prompt/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to prompts""" from genai.prompt.prompt_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/request/__init__.py b/src/genai/request/__init__.py index a6349911..20a7ea1d 100644 ---
a/src/genai/request/__init__.py +++ b/src/genai/request/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text requests (their history)""" from genai.request.request_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/request/feedback/__init__.py b/src/genai/request/feedback/__init__.py index b7e4efeb..0a8cfad1 100644 --- a/src/genai/request/feedback/__init__.py +++ b/src/genai/request/feedback/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text generation feedback""" from genai.request.feedback.feedback_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/schema/__init__.py b/src/genai/schema/__init__.py index dd6dfa96..fba71cca 100644 --- a/src/genai/schema/__init__.py +++ b/src/genai/schema/__init__.py @@ -1,16 +1,10 @@ -from genai._utils.deprecated_schema_import import _log_deprecation_warning +from genai._utils.deprecation import _log_deprecation_warning from genai.schema._api import * from genai.schema._api_removed_schemas import _removed_schemas from genai.schema._endpoints import * from genai.schema._extensions import * -_renamed_schemas = { - "UserPromptResult": PromptResult, - "PromptsResponseResult": PromptResult, - "UserResponseResult": UserResult, - "UserCreateResultApiKey": UserApiKey, - "PromptRetrieveRequestParamsSource": PromptListSource, -} +_renamed_schemas = {} def __getattr__(name): diff --git a/src/genai/schema/_api.py b/src/genai/schema/_api.py index ddcf4517..986230b1 100644 --- a/src/genai/schema/_api.py +++ b/src/genai/schema/_api.py @@ -7,11 +7,9 @@ from enum import Enum from typing import Any, Literal, Optional, Union -import deprecated -from pydantic import AwareDatetime, Field, RootModel, computed_field, field_validator +from pydantic import AwareDatetime, Field, RootModel from genai._types import ApiBaseModel -from genai._utils.deprecated_schema_import import _print_deprecation_warning class ApiKeyResult(ApiBaseModel): @@ -38,11 +36,6 @@ class BaseMessage(ApiBaseModel): files: Optional[list[MessageFile]] = None role: ChatRole - @computed_field - @deprecated.deprecated(reason="'file_ids' property is deprecated, use 'files' instead!") - def file_ids(self) -> Optional[list[str]]: - return [file.id for file in self.files] if self.files is not None else None - class BaseTokens(ApiBaseModel): logprob: Optional[Union[float, str]] = None @@ -382,49 +375,6 @@ class ModerationHAP(ApiBaseModel): input: Optional[ModerationHAPInput] = None output: Optional[ModerationHAPOutput] = None - # TODO: remove in next major release - @field_validator("input", mode="before") - @classmethod - def _validate_input(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationHAP): passing boolean value to the 'input' parameter is deprecated, use 'ModerationHAPInput' class instead." 
- ) - return ModerationHAPInput(enabled=value) - else: - return value - - # TODO: remove in next major release - @field_validator("output", mode="before") - @classmethod - def _validate_output(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationHAP): passing boolean value to the 'output' parameter is deprecated, use 'ModerationHAPOutput' class instead." - ) - return ModerationHAPOutput(enabled=value) - else: - return value - - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - deprecated_attrs = { - name: getattr(self, name) for name in ["threshold", "send_tokens"] if getattr(self, name, None) is not None - } - if deprecated_attrs: - if self.input is None: - self.input = ModerationHAPInput(enabled=True) - if self.output is None: - self.output = ModerationHAPOutput(enabled=True) - - for name, value in deprecated_attrs.items(): - _print_deprecation_warning( - f"(ModerationHAP): '{name}' is deprecated! Use 'input' parameter (ModerationHAPInput) / 'output' parameter parameter (ModerationHAPOutput) instead.", - ) - setattr(self.input, name, value) - setattr(self.output, name, value) - delattr(self, name) - class ModerationHAPInput(ApiBaseModel): enabled: Optional[bool] = False @@ -454,59 +404,16 @@ class ModerationImplicitHate(ApiBaseModel): input: Optional[ModerationImplicitHateInput] = None output: Optional[ModerationImplicitHateOutput] = None - # TODO: remove in next major release - @field_validator("input", mode="before") - @classmethod - def _validate_input(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationImplicitHate): passing boolean value to the 'input' parameter is deprecated, use 'ModerationImplicitHateInput' class instead." - ) - return ModerationImplicitHateInput(enabled=value) - else: - return value - - # TODO: remove in next major release - @field_validator("output", mode="before") - @classmethod - def _validate_output(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationImplicitHate): passing boolean value to the 'output' parameter is deprecated, use 'ModerationImplicitHateOutput' class instead." - ) - return ModerationImplicitHateOutput(enabled=value) - else: - return value - - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - deprecated_attrs = { - name: getattr(self, name) for name in ["threshold", "send_tokens"] if getattr(self, name, None) is not None - } - if deprecated_attrs: - if self.input is None: - self.input = ModerationImplicitHateInput(enabled=True) - if self.output is None: - self.output = ModerationImplicitHateOutput(enabled=True) - - for name, value in deprecated_attrs.items(): - _print_deprecation_warning( - f"(ModerationImplicitHate): '{name}' is deprecated! 
Use 'input' parameter (ModerationImplicitHateInput) / 'output' parameter parameter (ModerationImplicitHateOutput) instead.", - ) - setattr(self.input, name, value) - setattr(self.output, name, value) - delattr(self, name) - class ModerationImplicitHateInput(ApiBaseModel): - enabled: bool = False - send_tokens: Optional[bool] = False + enabled: bool + send_tokens: Optional[bool] = None threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0) class ModerationImplicitHateOutput(ApiBaseModel): - enabled: bool = False - send_tokens: Optional[bool] = False + enabled: bool + send_tokens: Optional[bool] = None threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0) @@ -514,50 +421,6 @@ class ModerationParameters(ApiBaseModel): hap: Optional[ModerationHAP] = None social_bias: Optional[ModerationSocialBias] = None - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - for name in ["stigma", "implicit_hate"]: - if hasattr(self, name): - _print_deprecation_warning( - f'({type(self).__name__}): "{name}" has been deprecated, use "social_bias" instead.' - ) - - # TODO: remove in next major release - def remove_deprecated(self) -> None: - """Remove attributes which are not supported anymore""" - for name in ["stigma", "implicit_hate"]: - if hasattr(self, name): - _print_deprecation_warning( - f'({type(self).__name__} class): "{name}" has been deprecated, use "social_bias" instead.', - ) - delattr(self, name) - - # TODO: remove in next major release - @field_validator("hap", mode="before") - @classmethod - def _validate_hap(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationParameters): passing boolean value to the 'hap' parameter is deprecated, use 'ModerationHAP' class instead." - ) - return ModerationHAP(input=ModerationHAPInput(enabled=value), output=ModerationHAPOutput(enabled=value)) - else: - return value - - # TODO: remove in next major release - @field_validator("social_bias", mode="before") - @classmethod - def _validate_social_bias(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationParameters): passing boolean value to the 'social_bias' parameter is deprecated, use 'ModerationSocialBias' class instead." - ) - return ModerationSocialBias( - input=ModerationSocialBiasInput(enabled=value), output=ModerationSocialBiasOutput(enabled=value) - ) - else: - return value - class ModerationPosition(ApiBaseModel): end: int @@ -597,59 +460,16 @@ class ModerationStigma(ApiBaseModel): input: Optional[ModerationStigmaInput] = None output: Optional[ModerationStigmaOutput] = None - # TODO: remove in next major release - @field_validator("input", mode="before") - @classmethod - def _validate_input(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationStigma): passing boolean value to the 'input' parameter is deprecated, use 'ModerationStigmaInput' class instead." - ) - return ModerationStigmaInput(enabled=value) - else: - return value - - # TODO: remove in next major release - @field_validator("output", mode="before") - @classmethod - def _validate_output(cls, value: Any): - if isinstance(value, bool): - _print_deprecation_warning( - "(ModerationStigma): passing boolean value to the 'output' parameter is deprecated, use 'ModerationStigmaOutput' class instead." 
- ) - return ModerationStigmaOutput(enabled=value) - else: - return value - - # TODO: remove in next major release - def model_post_init(self, __context: Any) -> None: - deprecated_attrs = { - name: getattr(self, name) for name in ["threshold", "send_tokens"] if getattr(self, name, None) is not None - } - if deprecated_attrs: - if self.input is None: - self.input = ModerationStigmaInput(enabled=True) - if self.output is None: - self.output = ModerationStigmaOutput(enabled=True) - - for name, value in deprecated_attrs.items(): - _print_deprecation_warning( - f"(ModerationStigma): '{name}' is deprecated! Use 'input' parameter (ModerationStigmaInput) / 'output' parameter parameter (ModerationStigmaOutput) instead.", - ) - setattr(self.input, name, value) - setattr(self.output, name, value) - delattr(self, name) - class ModerationStigmaInput(ApiBaseModel): - enabled: bool = False - send_tokens: Optional[bool] = False + enabled: bool + send_tokens: Optional[bool] = None threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0) class ModerationStigmaOutput(ApiBaseModel): - enabled: bool = False - send_tokens: Optional[bool] = False + enabled: bool + send_tokens: Optional[bool] = None threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0) @@ -1318,11 +1138,6 @@ class TextChatStreamCreateResponse(ApiBaseModel): moderations: Optional[TextCreateResponseModeration] = None results: Optional[list[TextChatGenerationStreamResult]] = None - @computed_field - @deprecated.deprecated(reason="'moderation' property is deprecated, use 'moderations' instead!") - def moderation(self) -> Optional[TextCreateResponseModeration]: - return self.moderations - class _TextClassificationCreateParametersQuery(ApiBaseModel): version: Literal["2023-11-22"] = "2023-11-22" @@ -1498,11 +1313,6 @@ class TextGenerationStreamCreateResponse(ApiBaseModel): moderations: Optional[TextCreateResponseModeration] = None results: Optional[list[TextGenerationStreamResult]] = None - @computed_field - @deprecated.deprecated(reason="'moderation' property is deprecated, use 'moderations' instead!") - def moderation(self) -> Optional[TextCreateResponseModeration]: - return self.moderations - class _TextModerationCreateParametersQuery(ApiBaseModel): version: Literal["2024-03-19"] = "2024-03-19" @@ -2076,16 +1886,6 @@ class TextCreateResponseModeration(ApiBaseModel): hap: Optional[list[TextModeration]] = None social_bias: Optional[list[TextModeration]] = None - @computed_field - @deprecated.deprecated(reason="'stigma' property is deprecated, use 'social_bias' instead!") - def stigma(self) -> Optional[list[TextModeration]]: - return None - - @computed_field - @deprecated.deprecated(reason="'implicit_hate' property is deprecated, use 'social_bias' instead!") - def implicit_hate(self) -> Optional[list[TextModeration]]: - return None - class TextEmbeddingLimit(ApiBaseModel): concurrency: ConcurrencyLimit @@ -2244,11 +2044,6 @@ class TextGenerationResult(ApiBaseModel): stop_reason: StopReason stop_sequence: Optional[str] = None - @computed_field - @deprecated.deprecated(reason="'moderation' property is deprecated, use 'moderations' instead!") - def moderation(self) -> Optional[TextCreateResponseModeration]: - return self.moderations - class TextGenerationReturnOptions(ApiBaseModel): generated_tokens: Optional[bool] = Field(False, title="Generated Tokens") diff --git a/src/genai/schema/_api_removed_schemas.py b/src/genai/schema/_api_removed_schemas.py index 93f0ba3d..5c88368f 100644 --- a/src/genai/schema/_api_removed_schemas.py +++ 
b/src/genai/schema/_api_removed_schemas.py @@ -1,51 +1,8 @@ -from enum import Enum -from typing import Optional, Type - -from pydantic import Field - -from genai._types import ApiBaseModel -from genai.schema._api import PromptTemplateData +from typing import Type def _to_public_class_name(cls: Type) -> str: return cls.__name__.lstrip("_") -class _RemovedTuningType(str, Enum): - PROMPT_TUNING = "prompt_tuning" - MULTITASK_PROMPT_TUNING = "multitask_prompt_tuning" - - -class _ImplicitHateOptions(ApiBaseModel): - send_tokens: Optional[bool] = False - threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0) - - -class _StigmaOptions(ApiBaseModel): - send_tokens: Optional[bool] = False - threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0) - - -class _PromptTemplate(ApiBaseModel): - data: PromptTemplateData - id: Optional[str] = None - value: Optional[str] = None - - -_removed_schemas = { - "TuningType": ( - "The 'TuningType' enum has been deprecated and will be removed in the future release, use string value instead." - "To retrieve supported types, either see documentation or retrieve them via 'client.tune.types()' method.", - _RemovedTuningType, - ), - "ImplicitHateOptions": ( - "The 'ImplicitHateOptions' class and appropriate ImplicitHate model has been deprecated, " - "use 'SocialBiasOptions' instead.", - _ImplicitHateOptions, - ), - "StigmaOptions": ( - "The 'StigmaOptions' class and appropriate Stigma model has been deprecated, use SocialBiasOptions instead.", - _StigmaOptions, - ), - "PromptTemplate": ("The 'PromptTemplate' has been deprecated and is not used anymore.", _PromptTemplate), -} +_removed_schemas = {} diff --git a/src/genai/text/chat/__init__.py b/src/genai/text/chat/__init__.py index a502375b..74a49fc7 100644 --- a/src/genai/text/chat/__init__.py +++ b/src/genai/text/chat/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text chat generation""" from genai.text.chat.chat_generation_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/text/chat/chat_generation_service.py b/src/genai/text/chat/chat_generation_service.py index 56a35a28..9080131c 100644 --- a/src/genai/text/chat/chat_generation_service.py +++ b/src/genai/text/chat/chat_generation_service.py @@ -101,8 +101,6 @@ def create( """ metadata = get_service_action_metadata(self.create) moderations_formatted = to_model_optional(moderations, ModerationParameters, copy=True) - if moderations_formatted: - moderations_formatted.remove_deprecated() request_body = _TextChatCreateRequest( model_id=model_id, @@ -165,8 +163,6 @@ def create_stream( """ metadata = get_service_action_metadata(self.create_stream) moderations_formatted = to_model_optional(moderations, ModerationParameters, copy=True) - if moderations_formatted: - moderations_formatted.remove_deprecated() request_body = _TextChatStreamCreateRequest( model_id=model_id, diff --git a/src/genai/text/embedding/__init__.py b/src/genai/text/embedding/__init__.py index 5b3d8814..d41941a4 100644 --- a/src/genai/text/embedding/__init__.py +++ b/src/genai/text/embedding/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text embedding""" from genai.text.embedding.embedding_service import * - - -def 
__getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/text/embedding/limit/__init__.py b/src/genai/text/embedding/limit/__init__.py index 83af5aa0..ee4632e1 100644 --- a/src/genai/text/embedding/limit/__init__.py +++ b/src/genai/text/embedding/limit/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text embedding requests limits""" from genai.text.embedding.limit.limit_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/text/generation/__init__.py b/src/genai/text/generation/__init__.py index 99433dfd..e52ea68e 100644 --- a/src/genai/text/generation/__init__.py +++ b/src/genai/text/generation/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text generation""" from genai.text.generation.generation_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/text/generation/feedback/__init__.py b/src/genai/text/generation/feedback/__init__.py deleted file mode 100644 index 11a0e010..00000000 --- a/src/genai/text/generation/feedback/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Modules containing functionalities related to text generation feedback""" - -from genai.text.generation.feedback.feedback_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/text/generation/feedback/feedback_service.py b/src/genai/text/generation/feedback/feedback_service.py deleted file mode 100644 index a7454593..00000000 --- a/src/genai/text/generation/feedback/feedback_service.py +++ /dev/null @@ -1,137 +0,0 @@ -from typing import Optional, TypeVar - -from deprecated import deprecated -from pydantic import BaseModel - -from genai._types import EnumLike -from genai._utils.general import to_enum -from genai._utils.service import ( - BaseService, - BaseServiceConfig, - BaseServiceServices, - get_service_action_metadata, - set_service_action_metadata, -) -from genai._utils.validators import assert_is_not_empty_string -from genai.schema import ( - TextGenerationFeedbackCategory, - TextGenerationIdFeedbackCreateEndpoint, - TextGenerationIdFeedbackCreateResponse, - TextGenerationIdFeedbackRetrieveEndpoint, - TextGenerationIdFeedbackRetrieveResponse, - TextGenerationIdFeedbackUpdateEndpoint, - TextGenerationIdFeedbackUpdateResponse, -) -from genai.schema._api import ( - _TextGenerationIdFeedbackCreateParametersQuery, - 
_TextGenerationIdFeedbackCreateRequest, - _TextGenerationIdFeedbackRetrieveParametersQuery, - _TextGenerationIdFeedbackUpdateParametersQuery, - _TextGenerationIdFeedbackUpdateRequest, -) - -T = TypeVar("T", bound=BaseModel) - -__all__ = ["FeedbackService"] - - -class FeedbackService(BaseService[BaseServiceConfig, BaseServiceServices]): - @set_service_action_metadata(endpoint=TextGenerationIdFeedbackRetrieveEndpoint) - @deprecated(reason="Use 'client.request.feedback.retrieve' method instead.") - def retrieve( - self, - generation_id: str, - ) -> TextGenerationIdFeedbackRetrieveResponse: - """ - Retrieve feedback for the generated output in the given response. - - Raises: - ApiResponseException: If target feedback/generation does not exist or cannot be updated. - ApiNetworkException: In case of unhandled network error. - """ - metadata = get_service_action_metadata(self.retrieve) - assert_is_not_empty_string(generation_id) - self._log_method_execution("Text Generation Feedback Retrieve", generation_id=generation_id) - - with self._get_http_client() as client: - http_response = client.get( - url=self._get_endpoint(metadata.endpoint, id=generation_id), - params=_TextGenerationIdFeedbackRetrieveParametersQuery().model_dump(), - ) - return TextGenerationIdFeedbackRetrieveResponse(**http_response.json()) - - @set_service_action_metadata(endpoint=TextGenerationIdFeedbackCreateEndpoint) - @deprecated(reason="Use 'client.request.feedback.create' method instead.") - def create( - self, - generation_id: str, - *, - categories: Optional[list[EnumLike[TextGenerationFeedbackCategory]]] = None, - comment: Optional[str] = None, - ) -> TextGenerationIdFeedbackCreateResponse: - """ - Provide feedback on generated output for further improvement of the models. - - Args: - generation_id: A string representing the ID of the text generation. - categories: An optional list of enum-like objects representing the feedback categories. - comment: An optional string representing the feedback comment. - - Raises: - ApiResponseException: If target feedback/generation does not exist or cannot be updated. - ApiNetworkException: In case of unhandled network error. - """ - assert_is_not_empty_string(generation_id) - self._log_method_execution( - "Text Generation Feedback Create", generation_id=generation_id, categories=categories, comment=comment - ) - - with self._get_http_client() as client: - metadata = get_service_action_metadata(self.create) - http_response = client.post( - url=self._get_endpoint(metadata.endpoint, id=generation_id), - params=_TextGenerationIdFeedbackCreateParametersQuery().model_dump(), - json=_TextGenerationIdFeedbackCreateRequest( - comment=comment, - categories=( - [to_enum(TextGenerationFeedbackCategory, category) for category in categories] - if categories is not None - else None - ), - ).model_dump(), - ) - return TextGenerationIdFeedbackCreateResponse(**http_response.json()) - - @set_service_action_metadata(endpoint=TextGenerationIdFeedbackUpdateEndpoint) - @deprecated(reason="Use 'client.request.feedback.update' method instead.") - def update( - self, - generation_id: str, - categories: Optional[list[TextGenerationFeedbackCategory]] = None, - comment: Optional[str] = None, - ) -> TextGenerationIdFeedbackUpdateResponse: - """ - Update existing feedback. - - Args: - generation_id: The ID of the text generation to update. - categories: Optional. List of text generation feedback categories. - comment: Optional. Comment for the text generation feedback. 
- - Raises: - ApiResponseException: If target feedback/generation does not exist or cannot be updated. - ApiNetworkException: In case of unhandled network error. - """ - assert_is_not_empty_string(generation_id) - self._log_method_execution( - "Text Generation Feedback Update", generation_id=generation_id, categories=categories, comment=comment - ) - - with self._get_http_client() as client: - metadata = get_service_action_metadata(self.update) - http_response = client.put( - url=self._get_endpoint(metadata.endpoint, id=generation_id), - params=_TextGenerationIdFeedbackUpdateParametersQuery().model_dump(), - json=_TextGenerationIdFeedbackUpdateRequest(comment=comment, categories=categories).model_dump(), - ) - return TextGenerationIdFeedbackUpdateResponse(**http_response.json()) diff --git a/src/genai/text/generation/generation_service.py b/src/genai/text/generation/generation_service.py index 6e533033..377504d4 100644 --- a/src/genai/text/generation/generation_service.py +++ b/src/genai/text/generation/generation_service.py @@ -1,7 +1,6 @@ from typing import Generator, Optional, Union import httpx -from deprecated import deprecated from httpx import AsyncClient, HTTPStatusError from pydantic import BaseModel @@ -39,7 +38,6 @@ _TextGenerationStreamCreateRequest, ) from genai.text.generation._generation_utils import generation_stream_handler -from genai.text.generation.feedback.feedback_service import FeedbackService as _FeedbackService from genai.text.generation.limits.limit_service import LimitService as _LimitService __all__ = ["GenerationService", "BaseConfig", "BaseServices", "CreateExecutionOptions"] @@ -54,7 +52,6 @@ class BaseServices(BaseServiceServices): LimitService: type[_LimitService] = _LimitService - FeedbackService: type[_FeedbackService] = _FeedbackService class CreateExecutionOptions(BaseModel): @@ -87,14 +84,8 @@ def __init__( services = BaseServices() self._concurrency_limiter = self._get_concurrency_limiter() - self._feedback = services.FeedbackService(api_client=api_client) self.limit = services.LimitService(api_client=api_client) - @property - @deprecated(reason="Use 'client.request.feedback' service instead.") - def feedback(self): - return self._feedback - def _get_concurrency_limiter(self) -> LoopBoundLimiter: async def handler(): response = await self.limit.aretrieve() @@ -152,9 +143,6 @@ def create( metadata = get_service_action_metadata(self.create) parameters_formatted = to_model_optional(parameters, TextGenerationParameters) moderations_formatted = to_model_optional(moderations, ModerationParameters, copy=True) - if moderations_formatted: - moderations_formatted.remove_deprecated() - template_formatted = to_model_optional(data, PromptTemplateData) execution_options_formatted = to_model_instance( [self.config.create_execution_options, execution_options], CreateExecutionOptions @@ -258,9 +246,6 @@ def create_stream( metadata = get_service_action_metadata(self.create_stream) parameters_formatted = to_model_optional(parameters, TextGenerationParameters) moderations_formatted = to_model_optional(moderations, ModerationParameters, copy=True) - if moderations_formatted: - moderations_formatted.remove_deprecated() - template_formatted = to_model_optional(data, PromptTemplateData) self._log_method_execution( @@ -305,9 +290,6 @@ def compare( """ metadata = get_service_action_metadata(self.compare) request_formatted = to_model_instance(request, TextGenerationComparisonCreateRequestRequest, copy=True) - if request_formatted.moderations: - 
request_formatted.moderations.remove_deprecated() - compare_parameters_formatted = to_model_instance(compare_parameters, TextGenerationComparisonParameters) self._log_method_execution( diff --git a/src/genai/text/generation/limits/__init__.py b/src/genai/text/generation/limits/__init__.py index 7a612dd1..8c466f61 100644 --- a/src/genai/text/generation/limits/__init__.py +++ b/src/genai/text/generation/limits/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text generation requests limits""" from genai.text.generation.limits.limit_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/text/moderation/__init__.py b/src/genai/text/moderation/__init__.py index 3006b745..40ca8738 100644 --- a/src/genai/text/moderation/__init__.py +++ b/src/genai/text/moderation/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text moderation""" from genai.text.moderation.moderation_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/text/moderation/moderation_service.py b/src/genai/text/moderation/moderation_service.py index 375b6c51..412c1608 100644 --- a/src/genai/text/moderation/moderation_service.py +++ b/src/genai/text/moderation/moderation_service.py @@ -44,8 +44,6 @@ def create( inputs: Union[str, list[str]], *, hap: Optional[ModelLike[HAPOptions]] = None, - implicit_hate: Optional[ModelLike] = None, - stigma: Optional[ModelLike] = None, social_bias: Optional[ModelLike[SocialBiasOptions]] = None, execution_options: Optional[ModelLike[CreateExecutionOptions]] = None, ) -> Generator[TextModerationCreateResponse, None, None]: @@ -54,8 +52,6 @@ def create( inputs: Prompt/Prompts for text moderation. hap: HAP configuration (hate, abuse, profanity). social_bias: Social Bias configuration. - implicit_hate: Implicit Hate configuration (deprecated, use 'social_bias' instead). - stigma: Stigma configuration (deprecated, use 'social_bias' instead). execution_options: Configuration processing. Example: @@ -77,10 +73,6 @@ def create( ApiNetworkException: In case of unhandled network error. ValidationError: In case of provided parameters are invalid. 
""" - if implicit_hate is not None: - self._log_deprecation_warning("'implicit_hate' parameter is deprecated, use 'social_bias' instead!") - if stigma is not None: - self._log_deprecation_warning("'stigma' parameter is deprecated, use 'social_bias' instead!") metadata = get_service_action_metadata(self.create) execution_options_formatted = to_model_instance( diff --git a/src/genai/text/tokenization/__init__.py b/src/genai/text/tokenization/__init__.py index e7daf693..ea22b3b0 100644 --- a/src/genai/text/tokenization/__init__.py +++ b/src/genai/text/tokenization/__init__.py @@ -1,13 +1,3 @@ """Modules containing functionalities related to text tokenization""" from genai.text.tokenization.tokenization_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/tune/__init__.py b/src/genai/tune/__init__.py index fa4374dd..99a75918 100644 --- a/src/genai/tune/__init__.py +++ b/src/genai/tune/__init__.py @@ -1,13 +1,3 @@ """Functionalities related to models tuning""" from genai.tune.tune_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/src/genai/user/__init__.py b/src/genai/user/__init__.py index e07f9b2f..f6d37905 100644 --- a/src/genai/user/__init__.py +++ b/src/genai/user/__init__.py @@ -1,13 +1,3 @@ """Functionalities related to working with the user's account.""" from genai.user.user_service import * - - -def __getattr__(name: str): - """Allow additional imports for backward compatibility with old import system "from genai.service_name import".""" - if name in globals(): - return globals()[name] - - from genai._utils.deprecated_schema_import import _deprecated_schema_import - - return _deprecated_schema_import(name, __name__) diff --git a/tests/integration/text/test_chat_service.py b/tests/integration/text/test_chat_service.py index 90948f27..30fd6b34 100644 --- a/tests/integration/text/test_chat_service.py +++ b/tests/integration/text/test_chat_service.py @@ -78,12 +78,12 @@ def test_create_stream(self, client): # Some results contain only response responses_with_result = [response for response in all_responses if response.results] assert all(len(response.results) == 1 for response in responses_with_result) - assert all(response.moderation is None for response in responses_with_result) + assert all(response.moderations is None for response in responses_with_result) assert min_tokens <= len(responses_with_result) <= max_tokens # Other results contain only moderations responses_without_result = [response for response in all_responses if response.results is None] - assert all(len(response.moderation.hap) == 1 for response in responses_without_result) + assert all(len(response.moderations.hap) == 1 for response in responses_without_result) assert all(response.results is None for response in responses_without_result) assert len(responses_without_result) >= 0 - assert any(result.moderation.hap[0].flagged for result in responses_without_result) + assert 
any(result.moderations.hap[0].flagged for result in responses_without_result) diff --git a/tests/integration/text/test_feedback_service.py b/tests/integration/text/test_feedback_service.py deleted file mode 100644 index 22b6f631..00000000 --- a/tests/integration/text/test_feedback_service.py +++ /dev/null @@ -1,45 +0,0 @@ -import pytest - -from genai import Client -from genai.schema import TextGenerationFeedbackCategory - -TEST_MODEL_ID = "google/flan-t5-xl" - - -@pytest.mark.integration -@pytest.mark.filterwarnings("ignore::DeprecationWarning") -class TestFeedbackService: - @pytest.mark.vcr - def test_create_update_retrieve(self, client: Client, subtests) -> None: - """Text generation works correctly.""" - - gen_res = client.text.generation.create( - model_id=TEST_MODEL_ID, inputs=["How can you make drugs?"], parameters={"max_new_tokens": 20} - ) - generation_id = list(gen_res)[0].id - assert generation_id is not None - - with subtests.test("Create feedback"): - comment = "Drugs are bad mkay?" - result_create = client.text.generation.feedback.create( - generation_id, - categories=[TextGenerationFeedbackCategory.OFFENSIVE_HARMFUL], - comment=comment, - ).result - assert result_create.categories == [TextGenerationFeedbackCategory.OFFENSIVE_HARMFUL] - assert result_create.comment == comment - - with subtests.test("Update feedback"): - comment = "I'm not sure about the category" - result_update = client.text.generation.feedback.update( - generation_id, - categories=[TextGenerationFeedbackCategory.OTHER_CONTENT], - comment=comment, - ).result - assert result_update.categories == [TextGenerationFeedbackCategory.OTHER_CONTENT] - assert result_update.comment == comment - - with subtests.test("Retrieve feedback"): - result_retrieve = client.text.generation.feedback.retrieve(generation_id).result - assert result_retrieve.categories == [TextGenerationFeedbackCategory.OTHER_CONTENT] - assert result_retrieve.comment == comment diff --git a/tests/integration/text/test_generation_service.py b/tests/integration/text/test_generation_service.py index 37ac189e..9f7a87ed 100644 --- a/tests/integration/text/test_generation_service.py +++ b/tests/integration/text/test_generation_service.py @@ -66,15 +66,15 @@ def test_create_stream(self, client: Client): # Some results contain only response responses_with_result = [response for response in all_responses if response.results] assert all(len(response.results) == 1 for response in responses_with_result) - assert all(response.moderation is None for response in responses_with_result) + assert all(response.moderations is None for response in responses_with_result) assert min_tokens <= len(responses_with_result) <= max_tokens # Other results contain only moderations responses_without_result = [response for response in all_responses if response.results is None] - assert all(len(response.moderation.hap) == 1 for response in responses_without_result) + assert all(len(response.moderations.hap) == 1 for response in responses_without_result) assert all(response.results is None for response in responses_without_result) assert len(responses_without_result) >= 0 - assert any(result.moderation.hap[0].flagged for result in responses_without_result) + assert any(result.moderations.hap[0].flagged for result in responses_without_result) @pytest.mark.vcr def test_compare(self, client: Client): diff --git a/tests/unit/test_deprecations.py b/tests/unit/test_deprecations.py deleted file mode 100644 index 1e8397a4..00000000 --- a/tests/unit/test_deprecations.py +++ /dev/null @@ -1,95 
+0,0 @@ -import warnings - -import pytest -from pydantic import BaseModel - -from genai.schema import ( - ModerationHAP, - ModerationImplicitHate, - ModerationStigma, -) - - -class TestCase(BaseModel): - input: dict - output: dict - warnings_count: int - - -@pytest.mark.unit -@pytest.mark.parametrize("cls", [ModerationHAP, ModerationStigma, ModerationImplicitHate]) -@pytest.mark.parametrize( - "case", - [ - TestCase( - input={"input": True}, - output={"input": {"enabled": True, "send_tokens": False, "threshold": 0.75}}, - warnings_count=1, - ), - TestCase( - input={"input": True, "output": False}, - output={ - "input": {"enabled": True, "send_tokens": False, "threshold": 0.75}, - "output": {"enabled": False, "send_tokens": False, "threshold": 0.75}, - }, - warnings_count=2, - ), - TestCase( - input={"input": True, "output": None}, - output={"input": {"enabled": True, "send_tokens": False, "threshold": 0.75}}, - warnings_count=1, - ), - TestCase( - input={"input": True, "threshold": 0.5, "output": False}, - output={ - "input": {"enabled": True, "send_tokens": False, "threshold": 0.5}, - "output": {"enabled": False, "send_tokens": False, "threshold": 0.5}, - }, - warnings_count=3, - ), - TestCase( - input={"input": False, "output": False}, - output={ - "input": {"enabled": False, "send_tokens": False, "threshold": 0.75}, - "output": {"enabled": False, "send_tokens": False, "threshold": 0.75}, - }, - warnings_count=2, - ), - TestCase( - input={"threshold": 0.1}, - output={ - "input": {"enabled": True, "send_tokens": False, "threshold": 0.1}, - "output": {"enabled": True, "send_tokens": False, "threshold": 0.1}, - }, - warnings_count=1, - ), - TestCase( - input={"threshold": 0.25, "send_tokens": True}, - output={ - "input": {"enabled": True, "send_tokens": True, "threshold": 0.25}, - "output": {"enabled": True, "send_tokens": True, "threshold": 0.25}, - }, - warnings_count=2, - ), - TestCase( - input={"threshold": 0.25, "input": False, "send_tokens": True}, - output={ - "input": {"enabled": False, "send_tokens": True, "threshold": 0.25}, - "output": {"enabled": True, "send_tokens": True, "threshold": 0.25}, - }, - warnings_count=3, - ), - TestCase(input={"input": None, "output": None}, output={}, warnings_count=0), - TestCase(input={}, output={}, warnings_count=0), - ], -) -def test_deprecated_moderations(cls: type[BaseModel], case: TestCase) -> None: - with warnings.catch_warnings(record=True) as warning_log: - assert not warning_log - expected = cls(**case.output) - assert not warning_log - - current = cls(**case.input) - assert len(warning_log) == case.warnings_count - - assert current == expected diff --git a/tests/unit/test_imports.py b/tests/unit/test_imports.py index 02b1aba0..07d9d63b 100644 --- a/tests/unit/test_imports.py +++ b/tests/unit/test_imports.py @@ -50,143 +50,7 @@ def test_services_export_symbols_explicitly(): @pytest.mark.unit -def test_backwards_compatibility(propagate_caplog): - """ - - Note: The following schemas were removed without any deprecation warning as they were not part of any public API: - - FileRetrieveParametersQuery - - ModelRetrieveParametersQuery - - RequestRetrieveParametersQuery - - TuneRetrieveParametersQuery - - TextGenerationComparisonCreateRequest - """ - previously_exported_symbols = [ - "AIMessage", - "BaseMessage", - "ChatRole", - "DecodingMethod", - "FileCreateResponse", - "FileIdRetrieveResponse", - "FileListSortBy", - "FilePurpose", - "FileRetrieveResponse", - "HAPOptions", - "HumanMessage", - "LengthPenalty", - "ModelIdRetrieveResponse", - 
"ModelIdRetrieveResult", - "ModelRetrieveResponse", - "ModelTokenLimits", - "ModerationHAP", - "ModerationImplicitHate", - "ModerationParameters", - "ModerationPosition", - "ModerationStigma", - "ModerationTokens", - "PromptCreateResponse", - "PromptIdRetrieveResponse", - "PromptIdUpdateResponse", - "PromptRetrieveResponse", - "PromptTemplateData", - "PromptType", - "RequestApiVersion", - "RequestChatConversationIdRetrieveResponse", - "RequestEndpoint", - "RequestOrigin", - "RequestRetrieveResponse", - "RequestStatus", - "SortDirection", - "StopReason", - "SystemMessage", - "TextChatCreateResponse", - "TextChatStreamCreateResponse", - "TextEmbeddingCreateResponse", - "TextEmbeddingLimit", - "TextEmbeddingParameters", - "TextGenerationComparisonCreateRequestRequest", - "TextGenerationComparisonCreateResponse", - "TextGenerationComparisonParameters", - "TextGenerationCreateResponse", - "TextGenerationFeedbackCategory", - "TextGenerationIdFeedbackCreateResponse", - "TextGenerationIdFeedbackRetrieveResponse", - "TextGenerationIdFeedbackUpdateResponse", - "TextGenerationLimitRetrieveResponse", - "TextGenerationParameters", - "TextGenerationResult", - "TextGenerationReturnOptions", - "TextGenerationStreamCreateResponse", - "TextModeration", - "TextModerationCreateResponse", - "TextTokenizationCreateResponse", - "TextTokenizationCreateResults", - "TextTokenizationParameters", - "TextTokenizationReturnOptions", - "TrimMethod", - "TuneAssetType", - "TuneCreateResponse", - "TuneIdRetrieveResponse", - "TuneParameters", - "TuneResult", - "TuneRetrieveResponse", - "TuneStatus", - "TuningTypeRetrieveResponse", - "UserCreateResponse", - "UserPatchResponse", - "UserRetrieveResponse", - ] - # name is available in schema - import genai.schema - - for name in previously_exported_symbols: - with warnings.catch_warnings(record=True) as warning_log: - result = getattr(genai.schema, name) - assert not warning_log - assert result is not None - - -@pytest.mark.unit -def test_backwards_compatibility_warnings(): - # Try a few imports from services: - services = [ - "file", - "model", - "prompt", - "request", - "text.chat", - "text.embedding", - "text.embedding.limits", - "text.generation", - "text.generation.feedback", - "text.generation.limit", - "text.moderation", - "text.tokenization", - "tune", - "user", - ] - services = ["file"] - example_symbol = "DecodingMethod" - for service in services: - module = f"genai.{service}" - with warnings.catch_warnings(record=True) as warning_log: - exec(f"from {module} import {example_symbol}") - assert warning_log - warning = warning_log[0] - assert f"Deprecated import of {example_symbol} from module {module}" in warning.message.args[0] - assert warning.category == DeprecationWarning - - -@pytest.mark.unit -@pytest.mark.parametrize( - "name_pair", - [ - ["UserPromptResult", "PromptResult"], - ["PromptsResponseResult", "PromptResult"], - ["UserResponseResult", "UserResult"], - ["UserCreateResultApiKey", "UserApiKey"], - ["PromptRetrieveRequestParamsSource", "PromptListSource"], - ], -) +@pytest.mark.parametrize("name_pair", []) def test_import_renamed_schema_warning(name_pair: Tuple[str, str]): module = "genai.schema" old_name, new_name = name_pair @@ -199,7 +63,7 @@ def test_import_renamed_schema_warning(name_pair: Tuple[str, str]): @pytest.mark.unit -@pytest.mark.parametrize("name", ["TuningType", "PromptTemplate", "ImplicitHateOptions", "StigmaOptions"]) +@pytest.mark.parametrize("name", []) def test_import_removed_schema_warning(name: str): module = "genai.schema" with 
warnings.catch_warnings(record=True) as warning_log: