fix: unify token_limit property (#1071)

Wendong-Fan authored Oct 18, 2024
1 parent 76fe7d0 commit c66fde5
Showing 10 changed files with 20 additions and 103 deletions.
7 changes: 5 additions & 2 deletions camel/models/base_model.py
@@ -118,6 +118,9 @@ def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
    def token_limit(self) -> int:
        r"""Returns the maximum token limit for a given model.
+
+        This method retrieves the maximum token limit either from the
+        `model_config_dict` or from the model's default token limit.

        Returns:
            int: The maximum token limit for the given model.
        """
@@ -128,8 +131,8 @@ def token_limit(self) -> int:

    @property
    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode,
-        which sends partial results each time.
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.

        Returns:
            bool: Whether the model is in stream mode.
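The body of the unified property is hidden behind the fold above. A minimal sketch of the behavior the docstring describes, assuming the lookup prefers the user's `model_config_dict` and then falls back to the model type's default limit, could look like this:

```python
@property
def token_limit(self) -> int:
    # Prefer an explicit `max_tokens` supplied by the user in
    # `model_config_dict`; otherwise fall back to the default limit
    # associated with the model type.
    max_tokens = self.model_config_dict.get("max_tokens")
    if isinstance(max_tokens, int):
        return max_tokens
    return self.model_type.token_limit
```

This one definition in `BaseModelBackend` is what makes the near-identical per-backend copies deleted below redundant.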
16 changes: 0 additions & 16 deletions camel/models/litellm_model.py
@@ -143,19 +143,3 @@ def check_model_config(self):
f"Unexpected argument `{param}` is "
"input into LiteLLM model backend."
)

@property
def token_limit(self) -> int:
r"""Returns the maximum token limit for the given model.
Returns:
int: The maximum token limit for the given model.
"""
max_tokens = self.model_config_dict.get("max_tokens")
if isinstance(max_tokens, int):
return max_tokens
print(
"Must set `max_tokens` as an integer in `model_config_dict` when"
" setting up the model. Using 4096 as default value."
)
return 4096
16 changes: 0 additions & 16 deletions camel/models/ollama_model.py
@@ -142,22 +142,6 @@ def run(
        )
        return response
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
16 changes: 0 additions & 16 deletions camel/models/openai_compatible_model.py
@@ -112,21 +112,5 @@ def stream(self) -> bool:
"""
return self.model_config_dict.get('stream', False)

@property
def token_limit(self) -> int:
r"""Returns the maximum token limit for the given model.
Returns:
int: The maximum token limit for the given model.
"""
max_tokens = self.model_config_dict.get("max_tokens")
if isinstance(max_tokens, int):
return max_tokens
print(
"Must set `max_tokens` as an integer in `model_config_dict` when"
" setting up the model. Using 4096 as default value."
)
return 4096

def check_model_config(self):
pass
10 changes: 0 additions & 10 deletions camel/models/samba_model.py
@@ -346,16 +346,6 @@ def _sambaverse_to_openai_response(

        return obj
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict["max_tokens"]
-        return max_tokens

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
16 changes: 0 additions & 16 deletions camel/models/togetherai_model.py
@@ -140,19 +140,3 @@ def stream(self) -> bool:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get('stream', False)
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096
16 changes: 0 additions & 16 deletions camel/models/vllm_model.py
@@ -144,22 +144,6 @@ def run(
        )
        return response
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
11 changes: 8 additions & 3 deletions camel/types/unified_model_type.py
@@ -11,7 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

+import logging
from threading import Lock
from typing import TYPE_CHECKING, ClassVar, Dict, Union, cast

@@ -50,8 +50,13 @@ def value_for_tiktoken(self) -> str:

    @property
    def token_limit(self) -> int:
-        r"""Returns the token limit for the model."""
-        return -1
+        r"""Returns the token limit for the model. Here we set the default
+        value to `999_999_999` if it is not provided in `model_config_dict`."""
+        logging.warning(
+            "Invalid or missing `max_tokens` in `model_config_dict`. "
+            "Defaulting to 999_999_999 tokens."
+        )
+        return 999_999_999

@property
def is_openai(self) -> bool:
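To illustrate the effect (a sketch, not part of the commit; the endpoint and model name below are hypothetical), a backend whose config omits `max_tokens` now falls through to this unified default instead of each backend printing its own message and returning 4096:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
    model_type="my-local-model",     # hypothetical model name
    api_key="not-needed",
    url="http://localhost:8000/v1",  # hypothetical local endpoint
    model_config_dict={},            # no `max_tokens` on purpose
)
# With no `max_tokens` in the config, the unified property logs the
# warning added above and returns the 999_999_999 default.
print(model.token_limit)
```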
9 changes: 4 additions & 5 deletions docs/key_modules/models.md
@@ -84,13 +84,12 @@ ChatAgent(system_msg, model=model)
And if you want to use an OpenAI-compatible API, you can replace the `model` with the following code:

```python
-from camel.models.openai_compatibility_model import OpenAICompatibilityModel
-
-model = OpenAICompatibilityModel(
+model = ModelFactory.create(
+    model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
    model_type="a-string-representing-the-model-type",
-    model_config_dict={"max_tokens": 4096}, # and other parameters you want
-    url=os.environ.get("OPENAI_COMPATIBILIY_API_BASE_URL"),
    api_key=os.environ.get("OPENAI_COMPATIBILIY_API_KEY"),
+    url=os.environ.get("OPENAI_COMPATIBILIY_API_BASE_URL"),
+    model_config_dict={"temperature": 0.4, "max_tokens": 4096},
)
```
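(Aside, not part of the diff: the factory-created `model` plugs into a `ChatAgent` exactly as in the earlier snippet, assuming `system_msg` is defined as above.)

```python
agent = ChatAgent(system_msg, model=model)
```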

@@ -19,9 +19,9 @@

# Take calling nemotron-70b-instruct model as an example
model = ModelFactory.create(
-    model_platform=ModelPlatformType.OPENAI_COMPATIBILITY_MODEL,
+    model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
    model_type="nvidia/llama-3.1-nemotron-70b-instruct",
-    api_key="nvapi-xxx",
+    api_key="nvapi-xx",
    url="https://integrate.api.nvidia.com/v1",
    model_config_dict={"temperature": 0.4},
)
@@ -31,7 +31,7 @@
    content="You are a helpful assistant.",
)

-agent = ChatAgent(assistant_sys_msg, model=model, token_limit=4096)
+agent = ChatAgent(assistant_sys_msg, model=model)

user_msg = BaseMessage.make_user_message(
    role_name="User",
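The example is truncated by the fold here; assuming the standard `ChatAgent` API, a typical continuation (a sketch, not part of the commit) would be:

```python
# `user_msg` continues from the truncated block above.
response = agent.step(user_msg)
print(response.msgs[0].content)
```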
