This repository has been archived by the owner on Sep 12, 2024. It is now read-only.

Commit efa9358: Merge branch 'main' into feature/implement-autollm-ui

SeeknnDestroy authored Jan 18, 2024
2 parents 5b7525e + 4af1ce8
Showing 1 changed file with 8 additions and 1 deletion.
autollm/auto/llm.py: 9 changes (8 additions, 1 deletion)
@@ -14,6 +14,7 @@ def from_defaults(
             model: str = "gpt-3.5-turbo",
             max_tokens: Optional[int] = 256,
             temperature: float = 0.1,
+            system_prompt: Optional[str] = None,
             api_base: Optional[str] = None) -> BaseLLM:
         """
         Create any LLM by model name. Check https://docs.litellm.ai/docs/providers for a list of
@@ -27,10 +28,16 @@ def from_defaults(
                 https://docs.litellm.ai/docs/providers for a list of supported models.
             max_tokens: The maximum number of tokens to generate by the LLM.
             temperature: The temperature to use when sampling from the distribution.
+            system_prompt: The system prompt to use for the LLM.
             api_base: The API base URL to use for the LLM.
         Returns:
             LLM: The initialized LiteLLM instance for given model name and parameter set.
         """

-        return LiteLLM(model=model, max_tokens=max_tokens, temperature=temperature, api_base=api_base)
+        return LiteLLM(
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            system_prompt=system_prompt,
+            api_base=api_base)
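For context, a minimal usage sketch of the API after this commit. It assumes from_defaults is exposed as a method on an AutoLiteLLM class in autollm/auto/llm.py (the class name is not visible in this diff) and that provider credentials, e.g. an OpenAI API key, are already configured in the environment; the returned object is a llama-index LiteLLM instance, so the standard complete() call should apply.

# Hypothetical usage sketch; AutoLiteLLM is the assumed name of the class
# that owns the from_defaults method shown in the diff above.
from autollm.auto.llm import AutoLiteLLM

llm = AutoLiteLLM.from_defaults(
    model="gpt-3.5-turbo",
    max_tokens=256,
    temperature=0.1,
    # New in this commit: the system prompt is now forwarded to LiteLLM.
    system_prompt="You are a concise assistant that answers in one sentence.",
)

# The return value is a llama-index BaseLLM (LiteLLM), so it can be queried directly.
response = llm.complete("What does the system_prompt parameter control?")
print(response.text)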
