refactor: streamline CLI and logging initialization
- Simplify CLI command initialization by integrating dependency injection
- Optimize logging setup with cached initialization functions (see the sketch below)
- Remove redundant model list and fallback configurations
- Update API keys for model configurations
liblaf committed Nov 29, 2024
1 parent 418effd commit 9e04a97
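
The cached logging setup itself is not among the hunks shown below, so the following is only a guess at the shape of the change: a minimal sketch of "cached initialization", assuming a `functools.cache`-style guard (the function body here is hypothetical, not the repository's actual code):

```python
import functools
import logging


@functools.cache
def init() -> None:
    # Hypothetical sketch: functools.cache turns every call after the
    # first into a no-op, so each command can request logging setup
    # without re-configuring handlers on every invocation.
    logging.basicConfig(level=logging.INFO)
```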
Showing 15 changed files with 83 additions and 212 deletions.
4 changes: 3 additions & 1 deletion docs/help.md
@@ -8,7 +8,6 @@ llm-cli [OPTIONS] COMMAND [ARGS]...

**Options**:

- `--model TEXT`
- `--help`: Show this message and exit.

**Commands**:
@@ -32,6 +31,7 @@ llm-cli commit [OPTIONS] [PATH]...

- `--default-exclude / --no-default-exclude`: [default: default-exclude]
- `--verify / --no-verify`: [default: verify]
- `--model TEXT`
- `--help`: Show this message and exit.

## `llm-cli repo`
@@ -61,6 +61,7 @@ llm-cli repo description [OPTIONS]

**Options**:

- `--model TEXT`
- `--help`: Show this message and exit.

### `llm-cli repo topics`
@@ -73,4 +74,5 @@ llm-cli repo topics [OPTIONS]

**Options**:

- `--model TEXT`
- `--help`: Show this message and exit.
185 changes: 4 additions & 181 deletions docs/schema/config.json
@@ -242,173 +242,6 @@
"RouterConfig": {
"properties": {
"model_list": {
"default": [
{
"model_name": "deepseek-chat",
"litellm_params": {
"api_key": null,
"api_version": null,
"base_url": null,
"deployment_id": null,
"frequency_penalty": null,
"function_call": null,
"functions": null,
"logit_bias": null,
"logprobs": null,
"max_tokens": null,
"messages": [],
"model": "deepseek/deepseek-chat",
"model_list": null,
"n": null,
"presence_penalty": null,
"response_format": null,
"seed": null,
"stop": null,
"stream": null,
"temperature": null,
"timeout": null,
"tool_choice": null,
"tools": null,
"top_logprobs": null,
"top_p": null,
"user": null
},
"tpm": null,
"rpm": null
},
{
"model_name": "qwen-max",
"litellm_params": {
"api_key": null,
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
"frequency_penalty": null,
"function_call": null,
"functions": null,
"logit_bias": null,
"logprobs": null,
"max_tokens": null,
"messages": [],
"model": "openai/qwen-max",
"model_list": null,
"n": null,
"presence_penalty": null,
"response_format": null,
"seed": null,
"stop": null,
"stream": null,
"temperature": null,
"timeout": null,
"tool_choice": null,
"tools": null,
"top_logprobs": null,
"top_p": null,
"user": null
},
"tpm": null,
"rpm": null
},
{
"model_name": "qwen-plus",
"litellm_params": {
"api_key": null,
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
"frequency_penalty": null,
"function_call": null,
"functions": null,
"logit_bias": null,
"logprobs": null,
"max_tokens": null,
"messages": [],
"model": "openai/qwen-plus",
"model_list": null,
"n": null,
"presence_penalty": null,
"response_format": null,
"seed": null,
"stop": null,
"stream": null,
"temperature": null,
"timeout": null,
"tool_choice": null,
"tools": null,
"top_logprobs": null,
"top_p": null,
"user": null
},
"tpm": null,
"rpm": null
},
{
"model_name": "qwen-turbo",
"litellm_params": {
"api_key": null,
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
"frequency_penalty": null,
"function_call": null,
"functions": null,
"logit_bias": null,
"logprobs": null,
"max_tokens": null,
"messages": [],
"model": "openai/qwen-turbo",
"model_list": null,
"n": null,
"presence_penalty": null,
"response_format": null,
"seed": null,
"stop": null,
"stream": null,
"temperature": null,
"timeout": null,
"tool_choice": null,
"tools": null,
"top_logprobs": null,
"top_p": null,
"user": null
},
"tpm": null,
"rpm": null
},
{
"model_name": "qwen-long",
"litellm_params": {
"api_key": null,
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
"frequency_penalty": null,
"function_call": null,
"functions": null,
"logit_bias": null,
"logprobs": null,
"max_tokens": null,
"messages": [],
"model": "openai/qwen-long",
"model_list": null,
"n": null,
"presence_penalty": null,
"response_format": null,
"seed": null,
"stop": null,
"stream": null,
"temperature": null,
"timeout": null,
"tool_choice": null,
"tools": null,
"top_logprobs": null,
"top_p": null,
"user": null
},
"tpm": null,
"rpm": null
}
],
"items": { "$ref": "#/$defs/ModelConfig" },
"title": "Model List",
"type": "array"
@@ -491,16 +324,6 @@
"title": "Set Verbose"
},
"fallbacks": {
"default": [
{
"deepseek-chat": [
"qwen-max",
"qwen-plus",
"qwen-turbo",
"qwen-long"
]
}
],
"items": {
"additionalProperties": {
"items": { "type": "string" },
@@ -628,7 +451,7 @@
},
{
"litellm_params": {
"api_key": null,
"api_key": "sk-45fb748c5bd14b19b4af8f47db71be59",
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
Expand Down Expand Up @@ -661,7 +484,7 @@
},
{
"litellm_params": {
"api_key": null,
"api_key": "sk-45fb748c5bd14b19b4af8f47db71be59",
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
Expand Down Expand Up @@ -694,7 +517,7 @@
},
{
"litellm_params": {
"api_key": null,
"api_key": "sk-45fb748c5bd14b19b4af8f47db71be59",
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
Expand Down Expand Up @@ -727,7 +550,7 @@
},
{
"litellm_params": {
"api_key": null,
"api_key": "sk-45fb748c5bd14b19b4af8f47db71be59",
"api_version": null,
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"deployment_id": null,
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -21,6 +21,7 @@ dependencies = [
"python-dotenv>=1.0.1",
"rich>=13.9.4",
"transformers>=4.46.3",
"typer-di>=0.1.2",
"typer>=0.13.1",
]
description = "🚀 LLM CLI - a powerful, open-source command-line interface for AI-driven repository management. Simplifies commit message generation, repository description, and topic suggestion, enhancing productivity and collaboration for developers."
@@ -30,6 +31,7 @@ requires-python = ">=3.12"
version = "0.0.0"

[project.scripts]
aic = "llm_cli.cmd.commit:app"
llm-cli = "llm_cli:app"

[project.urls]
16 changes: 2 additions & 14 deletions src/llm_cli/cmd/_app.py
@@ -1,20 +1,8 @@
from typing import Annotated
import typer_di

import typer

import llm_cli as lc
import llm_cli.cmd as lcm
import llm_cli.config as lcc
import llm_cli.utils as lcu

app: typer.Typer = typer.Typer(name="llm-cli", no_args_is_help=True)
app = typer_di.TyperDI(name="llm-cli", no_args_is_help=True)
lcu.add_command(app, lcm.repo.app)
lcu.add_command(app, lcm.commit.app)


@app.callback()
def init(model: Annotated[str | None, typer.Option()] = None) -> None:
lc.logging.init()
cfg: lcc.Config = lcc.get_config()
if model:
cfg.completion.model = model
6 changes: 5 additions & 1 deletion src/llm_cli/cmd/commit/_app.py
@@ -2,8 +2,11 @@
from typing import Annotated

import typer
import typer_di

app = typer.Typer(name="commit")
import llm_cli.utils as lcu

app = typer_di.TyperDI(name="commit")


@app.command()
@@ -12,6 +15,7 @@ def main(
*,
default_exclude: Annotated[bool, typer.Option()] = True,
verify: Annotated[bool, typer.Option()] = True,
_: None = typer_di.Depends(lcu.get_config),
) -> None:
from ._main import main

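The `typer_di.Depends(lcu.get_config)` parameter replaces the old global `@app.callback()` deleted from `src/llm_cli/cmd/_app.py`: the dependency runs before the command body, and its own parameters surface as CLI options on each command that declares it. That would account for `--model TEXT` moving from the top-level options into `commit`, `repo description`, and `repo topics` in the `docs/help.md` diff above. A minimal self-contained sketch of the pattern, with a hypothetical `get_config` standing in for `llm_cli.utils.get_config`:

```python
from typing import Annotated

import typer
import typer_di

app = typer_di.TyperDI(name="demo")


def get_config(model: Annotated[str | None, typer.Option()] = None) -> None:
    # Hypothetical stand-in for llm_cli.utils.get_config: runs before
    # the command body and applies the --model override, mirroring the
    # old @app.callback() logic.
    if model:
        print(f"model override: {model}")


@app.command()
def main(_: None = typer_di.Depends(get_config)) -> None:
    # get_config's `model` parameter surfaces here as a --model option
    # on this command, not as a global option.
    print("running")


if __name__ == "__main__":
    app()  # assumes TyperDI mirrors typer.Typer's call interface
```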
4 changes: 2 additions & 2 deletions src/llm_cli/cmd/repo/_app.py
@@ -1,8 +1,8 @@
import typer
import typer_di

import llm_cli.utils as lcu
from llm_cli import cmd as lcm

app: typer.Typer = typer.Typer(name="repo", no_args_is_help=True)
app = typer_di.TyperDI(name="repo", no_args_is_help=True)
lcu.add_command(app, lcm.repo.description.app)
lcu.add_command(app, lcm.repo.topics.app)
8 changes: 5 additions & 3 deletions src/llm_cli/cmd/repo/description/_app.py
@@ -1,12 +1,14 @@
import asyncio

import typer
import typer_di

app: typer.Typer = typer.Typer(name="description", no_args_is_help=True)
import llm_cli.utils as lcu

app = typer_di.TyperDI(name="description")


@app.command()
def main() -> None:
def main(_: None = typer_di.Depends(lcu.get_config)) -> None:
from ._main import main

asyncio.run(main())
8 changes: 5 additions & 3 deletions src/llm_cli/cmd/repo/topics/_app.py
@@ -1,12 +1,14 @@
import asyncio

import typer
import typer_di

app: typer.Typer = typer.Typer(name="topics", no_args_is_help=True)
import llm_cli.utils as lcu

app = typer_di.TyperDI(name="topics")


@app.command()
def main() -> None:
def main(_: None = typer_di.Depends(lcu.get_config)) -> None:
from ._main import main

asyncio.run(main())
11 changes: 7 additions & 4 deletions src/llm_cli/config/_router_config.py
@@ -1,6 +1,7 @@
import functools

import litellm
import pydantic

import llm_cli.config as lcc

@@ -55,11 +56,13 @@ def default_model_list() -> list[ModelConfig]:


class RouterConfig(litellm.RouterConfig):
model_list: list[ModelConfig] = default_model_list() # pyright: ignore [reportIncompatibleVariableOverride]
model_list: list[ModelConfig] = pydantic.Field(default_factory=default_model_list) # pyright: ignore [reportIncompatibleVariableOverride]
num_retries: int = 3 # pyright: ignore [reportIncompatibleVariableOverride]
fallbacks: list[dict[str, list[str]]] = [ # pyright: ignore [reportIncompatibleVariableOverride] # noqa: RUF012
{"deepseek-chat": ["qwen-max", "qwen-plus", "qwen-turbo", "qwen-long"]}
]
fallbacks: list[dict[str, list[str]]] = pydantic.Field( # pyright: ignore [reportIncompatibleVariableOverride]
default_factory=lambda: [
{"deepseek-chat": ["qwen-max", "qwen-plus", "qwen-turbo", "qwen-long"]}
]
)

@functools.cached_property
def router(self) -> litellm.Router:
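The switch from class-level mutable defaults to `pydantic.Field(default_factory=...)` would also explain the large `docs/schema/config.json` deletions above: pydantic leaves `default_factory` values out of the generated JSON schema, so the embedded model list and fallback map no longer appear as schema defaults. A toy illustration of the pattern (field contents here are illustrative, not the real config):

```python
import pydantic


class RouterConfigSketch(pydantic.BaseModel):
    # default_factory builds a fresh list per instance, avoids the
    # `noqa: RUF012` suppression a class-level mutable default needs,
    # and keeps the value out of model_json_schema().
    fallbacks: list[dict[str, list[str]]] = pydantic.Field(
        default_factory=lambda: [{"deepseek-chat": ["qwen-max"]}]
    )


schema = RouterConfigSketch.model_json_schema()
print("default" in schema["properties"]["fallbacks"])
# -> False: the factory default is not serialized into the schema.
```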