Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

v0.2.0 beta build | Add-json-and-raw-output-response-options-to-openai-and-gemini #4

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -161,4 +161,6 @@ cython_debug/
.vscode/settings.json

## test files
test.py
tests/gemini.txt
tests/openai.txt
tests/deepl.txt
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ setuptools_scm>=6.0

tomli

google-generativeai==0.5.0
google-generativeai==0.5.1

deepl==1.16.1

Expand Down
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,15 @@ build-backend = "setuptools.build_meta"

[project]
dependencies = [
"google-generativeai==0.5.0",
"google-generativeai==0.5.1",
"deepl==1.16.1",
"openai==1.13.3",
"backoff==2.2.1",
"tiktoken==0.6.0"
]

name = "easytl"
version = "v0.1.2"
version = "v0.2.0-beta"
authors = [
{ name="Bikatr7", email="Tetralon07@gmail.com" },
]
Expand Down
9 changes: 7 additions & 2 deletions src/easytl/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,13 @@
__author__ = "Bikatr7 <Tetralon07@gmail.com>"

from .easytl import EasyTL
from .classes import Language, SplitSentences, Formality, GlossaryInfo, ModelTranslationMessage, SystemTranslationMessage, Message
from .util import MODEL_COSTS, ALLOWED_GEMINI_MODELS, ALLOWED_OPENAI_MODELS

from .classes import Language, SplitSentences, Formality, GlossaryInfo, TextResult
from .classes import Message, SystemTranslationMessage, ModelTranslationMessage
from .classes import ChatCompletion
from .classes import GenerateContentResponse, AsyncGenerateContentResponse, GenerationConfig

from .util import MODEL_COSTS, ALLOWED_GEMINI_MODELS, ALLOWED_OPENAI_MODELS, VALID_JSON_OPENAI_MODELS

from .exceptions import DeepLException, GoogleAPIError, OpenAIError, EasyTLException, InvalidAPIKeyException, InvalidEasyTLSettings
from .exceptions import AuthenticationError, InternalServerError, RateLimitError, APITimeoutError, APIConnectionError, APIStatusError
Expand Down
7 changes: 7 additions & 0 deletions src/easytl/classes.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,13 @@
## deepl api data used by deepl_service to type check
from deepl.api_data import Language, SplitSentences, Formality, GlossaryInfo, TextResult

## openai api data used by openai_service to type check
from openai.types.chat.chat_completion import ChatCompletion

## gemini api data used by gemini_service to type check
from google.generativeai import GenerationConfig
from google.generativeai.types import GenerateContentResponse, AsyncGenerateContentResponse

##-------------------start-of-Message--------------------------------------------------------------------------------------------------------------------------------------------------------------------------

class Message:
Expand Down
8 changes: 4 additions & 4 deletions src/easytl/deepl_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,14 +42,14 @@ class DeepLService:
##-------------------start-of-_set_decorator()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
def _set_decorator(decorator:typing.Callable) -> None:
def _set_decorator(decorator:typing.Callable | None) -> None:

"""

Sets the decorator to use for the Gemini service. Should be a callable that returns a decorator.
Sets the decorator to use for the DeepL service. Should be a callable that returns a decorator or None.

Parameters:
decorator (callable) : The decorator to use.
decorator (callable or None) : The decorator to use for the DeepL service.

"""

Expand Down Expand Up @@ -259,7 +259,7 @@ def _get_decorator() -> typing.Union[typing.Callable, None]:

"""

Returns the decorator to use for the Gemini service.
Returns the decorator to use for the DeepL service.

Returns:
decorator (callable) : The decorator to use.
Expand Down
155 changes: 97 additions & 58 deletions src/easytl/easytl.py

Large diffs are not rendered by default.

81 changes: 56 additions & 25 deletions src/easytl/gemini_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,16 +6,14 @@
import typing
import asyncio

## third party libraries
from google.generativeai import GenerationConfig
from google.generativeai.types import GenerateContentResponse, AsyncGenerateContentResponse

import google.generativeai as genai

## custom modules
from .util import _estimate_cost, _convert_iterable_to_str, _is_iterable_of_strings
from .decorators import _async_logging_decorator, _sync_logging_decorator

from .classes import GenerationConfig, GenerateContentResponse, AsyncGenerateContentResponse

class GeminiService:

_default_translation_instructions:str = "Please translate the following text into English."
Expand All @@ -42,7 +40,7 @@ class GeminiService:

_log_directory:str | None = None

## I don't plan to allow users to change these settings, as I believe that translations should be as accurate as possible, avoiding any censorship or filtering of content.
## I don't plan to make it easy for users to change these settings, as I believe that translations should be as accurate as possible, avoiding any censorship or filtering of content.
_safety_settings = [
{
"category": "HARM_CATEGORY_DANGEROUS",
Expand All @@ -66,6 +64,8 @@ class GeminiService:
},
]

_json_mode:bool = False

##-------------------start-of-_set_api_key()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
Expand All @@ -85,19 +85,35 @@ def _set_api_key(api_key:str) -> None:
##-------------------start-of-_set_decorator()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
def _set_decorator(decorator:typing.Callable) -> None:
def _set_decorator(decorator:typing.Callable | None) -> None:

"""

Sets the decorator to use for the Gemini service. Should be a callable that returns a decorator.
Sets the decorator to use for the Gemini service. Should be a callable that returns a decorator or None.

Parameters:
decorator (callable) : The decorator to use.
decorator (callable | None) : The decorator to use.

"""

GeminiService._decorator_to_use = decorator

##-------------------start-of-_set_json_mode()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
def _set_json_mode(json_mode:bool) -> None:

"""

Sets the JSON mode for the Gemini service.

Parameters:
json_mode (bool) : True if the service should return JSON responses, False if it should return text responses.

"""

GeminiService._json_mode = json_mode

##-------------------start-of-_set_attributes()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
Expand Down Expand Up @@ -135,9 +151,15 @@ def _set_attributes(model:str="gemini-pro",
GeminiService._semaphore_value = semaphore

else:
GeminiService._semaphore_value = 15 if GeminiService._model != "gemini--1.5-pro-latest" else 2
GeminiService._semaphore_value = 15 if GeminiService._model != "gemini-1.5-pro-latest" else 2

GeminiService._log_directory = logging_directory

if(GeminiService._json_mode and GeminiService._model != "gemini-1.5-pro-latest"):
GeminiService._default_translation_instructions = "Please translate the following text into English. Make sure to return the translated text in JSON format."

else:
GeminiService._default_translation_instructions = "Please translate the following text into English."

##-------------------start-of-_redefine_client()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

Expand All @@ -150,25 +172,34 @@ def _redefine_client() -> None:

"""

## as of now, the only model that allows for system instructions is gemini--1.5-pro-latest
if(GeminiService._model == "gemini--1.5-pro-latest"):
response_mime_type = "text/plain"

GeminiService._client = genai.GenerativeModel(model_name=GeminiService._model,
safety_settings=GeminiService._safety_settings,
system_instruction=GeminiService._system_message,
)
else:
GeminiService._client = genai.GenerativeModel(model_name=GeminiService._model,
safety_settings=GeminiService._safety_settings)
if(GeminiService._json_mode):
response_mime_type = "application/json"

gen_model_params = {
"model_name": GeminiService._model,
"safety_settings": GeminiService._safety_settings
}

## gemini 1.5 is the only model that supports json responses and system instructions
if(GeminiService._model == "gemini-1.5-pro-latest"):
gen_model_params["system_instruction"] = GeminiService._system_message
else:
response_mime_type = "text/plain"

GeminiService._client = genai.GenerativeModel(**gen_model_params)

GeminiService._generation_config = GenerationConfig(
candidate_count=GeminiService._candidate_count,
stop_sequences=GeminiService._stop_sequences,
max_output_tokens=GeminiService._max_output_tokens,
temperature=GeminiService._temperature,
top_p=GeminiService._top_p,
top_k=GeminiService._top_k,
response_mime_type=response_mime_type
)

GeminiService._generation_config = GenerationConfig(candidate_count=GeminiService._candidate_count,
stop_sequences=GeminiService._stop_sequences,
max_output_tokens=GeminiService._max_output_tokens,
temperature=GeminiService._temperature,
top_p=GeminiService._top_p,
top_k=GeminiService._top_k)

GeminiService._semaphore = asyncio.Semaphore(GeminiService._semaphore_value)

##-------------------start-of-_redefine_client_decorator()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Expand Down
43 changes: 36 additions & 7 deletions src/easytl/openai_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,10 @@

## third-party libraries
from openai import AsyncOpenAI, OpenAI
from openai.types.chat.chat_completion import ChatCompletion

## custom modules
from .classes import SystemTranslationMessage, ModelTranslationMessage
from .util import _convert_iterable_to_str, _estimate_cost, _is_iterable_of_strings
from .classes import SystemTranslationMessage, ModelTranslationMessage, ChatCompletion
from .util import _convert_iterable_to_str, _estimate_cost, _is_iterable_of_strings, VALID_JSON_OPENAI_MODELS
from .decorators import _async_logging_decorator, _sync_logging_decorator

class OpenAIService:
Expand Down Expand Up @@ -41,6 +40,8 @@ class OpenAIService:

_decorator_to_use:typing.Union[typing.Callable, None] = None

_json_mode:bool = False

_log_directory:str | None = None

##-------------------start-of-set_api_key()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Expand All @@ -63,19 +64,35 @@ def _set_api_key(api_key:str) -> None:
##-------------------start-of-set_decorator()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
def _set_decorator(decorator:typing.Callable) -> None:
def _set_decorator(decorator:typing.Callable | None) -> None:

"""

Sets the decorator to use for the OpenAI service. Should be a callable that returns a decorator.
Sets the decorator to use for the OpenAI service. Should be a callable that returns a decorator or None.

Parameters:
decorator (callable) : The decorator to use.
decorator (callable | None) : The decorator to use.

"""

OpenAIService._decorator_to_use = decorator

##-------------------start-of-set_json_mode()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
def _set_json_mode(json_mode:bool) -> None:

"""

Sets the JSON mode for the OpenAI service.

Parameters:
json_mode (bool) : True if the JSON mode is to be used, False otherwise.

"""

OpenAIService._json_mode = json_mode

##-------------------start-of-set_attributes()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
Expand Down Expand Up @@ -116,6 +133,12 @@ def _set_attributes(model:str = _default_model,

OpenAIService._log_directory = logging_directory

if(OpenAIService._json_mode and OpenAIService._model in VALID_JSON_OPENAI_MODELS):
OpenAIService._default_translation_instructions = SystemTranslationMessage("Please translate the following text into English. Make sure to return the translated text in JSON format.")

else:
OpenAIService._default_translation_instructions = SystemTranslationMessage("Please translate the following text into English.")

##-------------------start-of-_build_translation_batches()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------

@staticmethod
Expand Down Expand Up @@ -236,8 +259,11 @@ def __translate_text(instructions:SystemTranslationMessage, prompt:ModelTranslat

"""

response_format = "json_object" if OpenAIService._json_mode and OpenAIService._model in VALID_JSON_OPENAI_MODELS else "text"

response = OpenAIService._sync_client.chat.completions.create(
messages=[
response_format={ "type": response_format },
messages=[
instructions.to_dict(),
prompt.to_dict()
], # type: ignore
Expand Down Expand Up @@ -275,9 +301,12 @@ async def __translate_text_async(instruction:SystemTranslationMessage, prompt:Mo

"""

response_format = "json_object" if OpenAIService._json_mode and OpenAIService._model in VALID_JSON_OPENAI_MODELS else "text"

async with OpenAIService._semaphore:

response = await OpenAIService._async_client.chat.completions.create(
response_format={ "type": response_format },
messages=[
instruction.to_dict(),
prompt.to_dict()
Expand Down
13 changes: 10 additions & 3 deletions src/easytl/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -398,7 +398,7 @@ def _estimate_cost(text:str | typing.Iterable, model:str, price_case:int | None
raise Exception("An unknown error occurred while calculating the minimum cost of translation.")

##-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## Costs & Models are determined and updated manually, listed in USD. Updated by Bikatr7 as of 2024-04-09
## Costs & Models are determined and updated manually, listed in USD. Updated by Bikatr7 as of 2024-04-18
## https://platform.openai.com/docs/models/overview
ALLOWED_OPENAI_MODELS = [
"gpt-3.5-turbo",
Expand All @@ -423,7 +423,14 @@ def _estimate_cost(text:str | typing.Iterable, model:str, price_case:int | None
"gpt-4-1106-vision-preview",
]

## Costs & Models are determined and updated manually, listed in USD. Updated by Bikatr7 as of 2024-04-09
VALID_JSON_OPENAI_MODELS = [
"gpt-3.5-turbo-0125",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4-turbo-2024-04-09",
]

## Costs & Models are determined and updated manually, listed in USD. Updated by Bikatr7 as of 2024-04-18
## https://ai.google.dev/models/gemini
ALLOWED_GEMINI_MODELS = [
"gemini-1.0-pro-001",
Expand All @@ -439,7 +446,7 @@ def _estimate_cost(text:str | typing.Iterable, model:str, price_case:int | None
## "gemini-ultra"
]

## Costs & Models are determined and updated manually, listed in USD. Updated by Bikatr7 as of 2024-04-09
## Costs & Models are determined and updated manually, listed in USD. Updated by Bikatr7 as of 2024-04-18
MODEL_COSTS = {
# Grouping GPT-3.5 models together
"gpt-3.5-turbo-0125": {"price_case": 7, "_input_cost": 0.0005, "_output_cost": 0.0015},
Expand Down
2 changes: 1 addition & 1 deletion src/easytl/version.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
## Use of this source code is governed by an GNU Lesser General Public License v2.1
## license that can be found in the LICENSE file.

VERSION = "0.1.2"
VERSION = "0.2.0-beta"
Loading