From 2123b82a8424ae61ae1283607f923622777e9a80 Mon Sep 17 00:00:00 2001 From: rboone Date: Wed, 27 Mar 2024 10:36:37 +0100 Subject: [PATCH 1/7] ollama settings: ability to keep LLM in memory for a longer time + ability to run ollama embedding on another instance --- private_gpt/components/embedding/embedding_component.py | 2 +- private_gpt/components/llm/llm_component.py | 1 + private_gpt/settings/settings.py | 9 ++++++++- settings-ollama.yaml | 2 ++ settings.yaml | 2 ++ 5 files changed, 14 insertions(+), 2 deletions(-) diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py index 2967c38b9..77e8c3d46 100644 --- a/private_gpt/components/embedding/embedding_component.py +++ b/private_gpt/components/embedding/embedding_component.py @@ -70,7 +70,7 @@ def __init__(self, settings: Settings) -> None: ollama_settings = settings.ollama self.embedding_model = OllamaEmbedding( model_name=ollama_settings.embedding_model, - base_url=ollama_settings.api_base, + base_url=ollama_settings.embedding_api_base, ) case "azopenai": try: diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 4e46c250b..40c33c2d4 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -132,6 +132,7 @@ def __init__(self, settings: Settings) -> None: context_window=settings.llm.context_window, additional_kwargs=settings_kwargs, request_timeout=ollama_settings.request_timeout, + keep_alive = ollama_settings.keep_alive, ) case "azopenai": try: diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 5896f00d6..8f036910e 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -209,6 +209,10 @@ class OllamaSettings(BaseModel): "http://localhost:11434", description="Base URL of Ollama API. Example: 'https://localhost:11434'.", ) + embedding_api_base: str = Field( + api_base, # default is same as api_base, unless specified differently + description="Base URL of Ollama embedding API. Defaults to the same value as api_base", + ) llm_model: str = Field( None, description="Model to use. Example: 'llama2-uncensored'.", @@ -217,6 +221,10 @@ class OllamaSettings(BaseModel): None, description="Model to use. Example: 'nomic-embed-text'.", ) + keep_alive: str = Field( + "5m", + description="Time the model will stay loaded in memory after a request. examples: 5m, 5h, '-1' ", + ) tfs_z: float = Field( 1.0, description="Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.", @@ -246,7 +254,6 @@ class OllamaSettings(BaseModel): description="Time elapsed until ollama times out the request. Default is 120s. Format is float. ", ) - class AzureOpenAISettings(BaseModel): api_key: str azure_endpoint: str diff --git a/settings-ollama.yaml b/settings-ollama.yaml index d7e1a12ca..4f0be4ffc 100644 --- a/settings-ollama.yaml +++ b/settings-ollama.yaml @@ -14,6 +14,8 @@ ollama: llm_model: mistral embedding_model: nomic-embed-text api_base: http://localhost:11434 + keep_alive: 5m + # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) diff --git a/settings.yaml b/settings.yaml index 87a63ef4f..c2207d469 100644 --- a/settings.yaml +++ b/settings.yaml @@ -95,6 +95,8 @@ ollama: llm_model: llama2 embedding_model: nomic-embed-text api_base: http://localhost:11434 + keep_alive: 5m + # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another ollama request_timeout: 120.0 azopenai: From e0533a40ed8e28dc392ae21f64e110a92dfd0cf1 Mon Sep 17 00:00:00 2001 From: rboone Date: Wed, 27 Mar 2024 17:17:59 +0100 Subject: [PATCH 2/7] actually do something with keep_alive parameter --- private_gpt/components/llm/custom/ollama.py | 32 +++++++++++++++++++++ private_gpt/components/llm/llm_component.py | 6 ++-- 2 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 private_gpt/components/llm/custom/ollama.py diff --git a/private_gpt/components/llm/custom/ollama.py b/private_gpt/components/llm/custom/ollama.py new file mode 100644 index 000000000..af4d3702e --- /dev/null +++ b/private_gpt/components/llm/custom/ollama.py @@ -0,0 +1,32 @@ +from llama_index.llms.ollama import Ollama +from pydantic import Field + + +class CustomOllama(Ollama): + """Custom llama_index Ollama class with the only intention of passing on the keep_alive parameter.""" + + keep_alive: str = Field( + default="5m", + description="String that describes the time the model should stay in (V)RAM after last request.", + ) + + def __init__(self, *args, **kwargs) -> None: + keep_alive = kwargs.pop('keep_alive', '5m') # fetch keep_alive from kwargs or use 5m if not found. 
+ super().__init__(*args, **kwargs) + self.keep_alive = keep_alive + + def chat(self, *args, **kwargs): + kwargs["keep_alive"] = self.keep_alive + return super().chat(*args, **kwargs) + + def stream_chat(self, *args, **kwargs): + kwargs["keep_alive"] = self.keep_alive + return super().stream_chat(*args, **kwargs) + + def complete(self, *args, **kwargs): + kwargs["keep_alive"] = self.keep_alive + return super().complete(*args, **kwargs) + + def stream_complete(self, *args, **kwargs): + kwargs["keep_alive"] = self.keep_alive + return super().stream_complete(*args, **kwargs) diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 40c33c2d4..9b9c46ce9 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -108,7 +108,7 @@ def __init__(self, settings: Settings) -> None: ) case "ollama": try: - from llama_index.llms.ollama import Ollama # type: ignore + from private_gpt.components.llm.custom.ollama import CustomOllama # type: ignore except ImportError as e: raise ImportError( "Ollama dependencies not found, install with `poetry install --extras llms-ollama`" @@ -125,14 +125,14 @@ def __init__(self, settings: Settings) -> None: "repeat_penalty": ollama_settings.repeat_penalty, # ollama llama-cpp } - self.llm = Ollama( + self.llm = CustomOllama( model=ollama_settings.llm_model, base_url=ollama_settings.api_base, temperature=settings.llm.temperature, context_window=settings.llm.context_window, additional_kwargs=settings_kwargs, request_timeout=ollama_settings.request_timeout, - keep_alive = ollama_settings.keep_alive, + keep_alive=ollama_settings.keep_alive, ) case "azopenai": try: From 05ff1563407f2b48347aaeeef627b464815b19f6 Mon Sep 17 00:00:00 2001 From: rboone Date: Wed, 27 Mar 2024 17:25:13 +0100 Subject: [PATCH 3/7] black formatting --- private_gpt/components/llm/custom/ollama.py | 3 ++- private_gpt/settings/settings.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/private_gpt/components/llm/custom/ollama.py b/private_gpt/components/llm/custom/ollama.py index af4d3702e..80447e6e6 100644 --- a/private_gpt/components/llm/custom/ollama.py +++ b/private_gpt/components/llm/custom/ollama.py @@ -11,7 +11,8 @@ class CustomOllama(Ollama): ) def __init__(self, *args, **kwargs) -> None: - keep_alive = kwargs.pop('keep_alive', '5m') # fetch keep_alive from kwargs or use 5m if not found. + # fetch keep_alive from kwargs or use 5m if not found. + keep_alive = kwargs.pop("keep_alive", "5m") super().__init__(*args, **kwargs) self.keep_alive = keep_alive diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 8f036910e..b6db64e29 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -254,6 +254,7 @@ class OllamaSettings(BaseModel): description="Time elapsed until ollama times out the request. Default is 120s. Format is float. 
", ) + class AzureOpenAISettings(BaseModel): api_key: str azure_endpoint: str From 48c0823af30807c0e4a2c809bd37a384e79b48fa Mon Sep 17 00:00:00 2001 From: rboone Date: Wed, 27 Mar 2024 17:28:14 +0100 Subject: [PATCH 4/7] ruff formatting --- private_gpt/components/llm/custom/ollama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_gpt/components/llm/custom/ollama.py b/private_gpt/components/llm/custom/ollama.py index 80447e6e6..58722b5a7 100644 --- a/private_gpt/components/llm/custom/ollama.py +++ b/private_gpt/components/llm/custom/ollama.py @@ -3,7 +3,7 @@ class CustomOllama(Ollama): - """Custom llama_index Ollama class with the only intention of passing on the keep_alive parameter.""" + """Custom Ollama class to fill in "keep_alive" when sending requests.""" keep_alive: str = Field( default="5m", From a8fd51d4b0ade18a722d06462690bd341120263e Mon Sep 17 00:00:00 2001 From: rboone Date: Wed, 27 Mar 2024 17:36:32 +0100 Subject: [PATCH 5/7] also ruff formatting --- private_gpt/components/llm/llm_component.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 9b9c46ce9..bea2637c3 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -108,7 +108,9 @@ def __init__(self, settings: Settings) -> None: ) case "ollama": try: - from private_gpt.components.llm.custom.ollama import CustomOllama # type: ignore + from private_gpt.components.llm.custom.ollama import ( + CustomOllama, # type: ignore + ) except ImportError as e: raise ImportError( "Ollama dependencies not found, install with `poetry install --extras llms-ollama`" From 437f921f87941c6fe0d23c32fd6738be7d4378b4 Mon Sep 17 00:00:00 2001 From: rboone Date: Thu, 28 Mar 2024 08:09:44 +0100 Subject: [PATCH 6/7] keep_alive classmethod wrappers instead of custom class + only wrap if keep_alive differs from default --- private_gpt/components/llm/custom/ollama.py | 33 --------------------- private_gpt/components/llm/llm_component.py | 26 +++++++++++++--- 2 files changed, 22 insertions(+), 37 deletions(-) delete mode 100644 private_gpt/components/llm/custom/ollama.py diff --git a/private_gpt/components/llm/custom/ollama.py b/private_gpt/components/llm/custom/ollama.py deleted file mode 100644 index 58722b5a7..000000000 --- a/private_gpt/components/llm/custom/ollama.py +++ /dev/null @@ -1,33 +0,0 @@ -from llama_index.llms.ollama import Ollama -from pydantic import Field - - -class CustomOllama(Ollama): - """Custom Ollama class to fill in "keep_alive" when sending requests.""" - - keep_alive: str = Field( - default="5m", - description="String that describes the time the model should stay in (V)RAM after last request.", - ) - - def __init__(self, *args, **kwargs) -> None: - # fetch keep_alive from kwargs or use 5m if not found. 
- keep_alive = kwargs.pop("keep_alive", "5m") - super().__init__(*args, **kwargs) - self.keep_alive = keep_alive - - def chat(self, *args, **kwargs): - kwargs["keep_alive"] = self.keep_alive - return super().chat(*args, **kwargs) - - def stream_chat(self, *args, **kwargs): - kwargs["keep_alive"] = self.keep_alive - return super().stream_chat(*args, **kwargs) - - def complete(self, *args, **kwargs): - kwargs["keep_alive"] = self.keep_alive - return super().complete(*args, **kwargs) - - def stream_complete(self, *args, **kwargs): - kwargs["keep_alive"] = self.keep_alive - return super().stream_complete(*args, **kwargs) diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index bea2637c3..a8fde62b6 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -1,4 +1,5 @@ import logging +from collections.abc import Callable from injector import inject, singleton from llama_index.core.llms import LLM, MockLLM @@ -108,8 +109,8 @@ def __init__(self, settings: Settings) -> None: ) case "ollama": try: - from private_gpt.components.llm.custom.ollama import ( - CustomOllama, # type: ignore + from llama_index.llms.ollama import ( + Ollama, # type: ignore ) except ImportError as e: raise ImportError( @@ -127,15 +128,32 @@ def __init__(self, settings: Settings) -> None: "repeat_penalty": ollama_settings.repeat_penalty, # ollama llama-cpp } - self.llm = CustomOllama( + self.llm = Ollama( model=ollama_settings.llm_model, base_url=ollama_settings.api_base, temperature=settings.llm.temperature, context_window=settings.llm.context_window, additional_kwargs=settings_kwargs, request_timeout=ollama_settings.request_timeout, - keep_alive=ollama_settings.keep_alive, ) + + if ( + ollama_settings.keep_alive + != ollama_settings.model_fields["keep_alive"].default + ): + # Modify Ollama methods to use the "keep_alive" field. 
+ def add_keep_alive(func: Callable) -> Callable: + def wrapper(*args, **kwargs) -> Callable: + kwargs["keep_alive"] = ollama_settings.keep_alive + return func(*args, **kwargs) + + return wrapper + + Ollama.chat = add_keep_alive(Ollama.chat) + Ollama.stream_chat = add_keep_alive(Ollama.stream_chat) + Ollama.complete = add_keep_alive(Ollama.complete) + Ollama.stream_complete = add_keep_alive(Ollama.stream_complete) + case "azopenai": try: from llama_index.llms.azure_openai import ( # type: ignore From ac144fd7b79ab8207c0e9a7636ea7232b4674af7 Mon Sep 17 00:00:00 2001 From: rboone Date: Thu, 28 Mar 2024 08:25:54 +0100 Subject: [PATCH 7/7] mypy fix attempt --- private_gpt/components/llm/llm_component.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index a8fde62b6..dae997cc2 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -1,5 +1,6 @@ import logging from collections.abc import Callable +from typing import Any from injector import inject, singleton from llama_index.core.llms import LLM, MockLLM @@ -109,9 +110,7 @@ def __init__(self, settings: Settings) -> None: ) case "ollama": try: - from llama_index.llms.ollama import ( - Ollama, # type: ignore - ) + from llama_index.llms.ollama import Ollama # type: ignore except ImportError as e: raise ImportError( "Ollama dependencies not found, install with `poetry install --extras llms-ollama`" @@ -142,8 +141,8 @@ def __init__(self, settings: Settings) -> None: != ollama_settings.model_fields["keep_alive"].default ): # Modify Ollama methods to use the "keep_alive" field. - def add_keep_alive(func: Callable) -> Callable: - def wrapper(*args, **kwargs) -> Callable: + def add_keep_alive(func: Callable[..., Any]) -> Callable[..., Any]: + def wrapper(*args: Any, **kwargs: Any) -> Any: kwargs["keep_alive"] = ollama_settings.keep_alive return func(*args, **kwargs)
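
For reference, the wrapping approach that patches 6 and 7 settle on can be sketched in isolation. The snippet below is a minimal, dependency-free illustration, not the code shipped by the PR: DummyOllama stands in for llama_index's Ollama client, and the configured/default values are made-up placeholders. It only demonstrates how an add_keep_alive wrapper injects the keep_alive kwarg into every call when the configured value differs from the default, mirroring the check against model_fields["keep_alive"].default in patch 6.

    from collections.abc import Callable
    from typing import Any


    class DummyOllama:
        """Stand-in for llama_index's Ollama client, used only for illustration."""

        def chat(self, messages: list[str], **kwargs: Any) -> dict[str, Any]:
            # A real client would send the request to the Ollama server here;
            # we just echo the kwargs so the injected field is visible.
            return {"messages": messages, "request_kwargs": kwargs}


    def add_keep_alive(func: Callable[..., Any], keep_alive: str) -> Callable[..., Any]:
        """Wrap a client method so every call carries the configured keep_alive."""

        def wrapper(*args: Any, **kwargs: Any) -> Any:
            kwargs["keep_alive"] = keep_alive
            return func(*args, **kwargs)

        return wrapper


    # Only patch the method when the configured value differs from the default,
    # as the PR does with ollama_settings.keep_alive.
    configured_keep_alive = "1h"  # placeholder for the value from settings
    default_keep_alive = "5m"     # placeholder for the Field default

    if configured_keep_alive != default_keep_alive:
        DummyOllama.chat = add_keep_alive(DummyOllama.chat, configured_keep_alive)

    client = DummyOllama()
    print(client.chat(["hello"]))
    # -> {'messages': ['hello'], 'request_kwargs': {'keep_alive': '1h'}}

Compared with the CustomOllama subclass introduced in patch 2 and removed in patch 6, this keeps the upstream import untouched and only pays the wrapping cost when a non-default keep_alive is configured; if a later llama-index-llms-ollama release accepts keep_alive natively, the wrappers could simply be dropped.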