From 8da27194e79cf7bf7f1b29545cb1266fbe8dafe4 Mon Sep 17 00:00:00 2001
From: FireHead90544
Date: Sat, 27 Jul 2024 22:46:42 +0530
Subject: [PATCH 1/6] feat: setup llm from provider stored in config

---
 core/llm.py | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 core/llm.py

diff --git a/core/llm.py b/core/llm.py
new file mode 100644
index 0000000..0dc1c39
--- /dev/null
+++ b/core/llm.py
@@ -0,0 +1,9 @@
+from core.config import Config
+from core.providers import LLM_PROVIDERS
+
+llm_config = Config().values
+
+LLM = LLM_PROVIDERS.get(llm_config.get("provider"))['provider'](
+    model = LLM_PROVIDERS.get(llm_config.get("provider"))['model'],
+    api_key = llm_config.get("api_key")
+)
\ No newline at end of file

From 80cf1c34fbd7409422718350c780af879f3cad11 Mon Sep 17 00:00:00 2001
From: FireHead90544
Date: Sat, 27 Jul 2024 22:47:05 +0530
Subject: [PATCH 2/6] feat: create inference chain

---
 core/chains.py | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 core/chains.py

diff --git a/core/chains.py b/core/chains.py
new file mode 100644
index 0000000..1f43046
--- /dev/null
+++ b/core/chains.py
@@ -0,0 +1,4 @@
+from core.template import PROMPT_TEMPLATE
+from core.llm import LLM
+
+HOW_CLI_CHAIN = PROMPT_TEMPLATE | LLM
\ No newline at end of file

From 23a0e61f85a824724ad989468a7442222ebb6ab8 Mon Sep 17 00:00:00 2001
From: FireHead90544
Date: Sat, 27 Jul 2024 22:47:43 +0530
Subject: [PATCH 3/6] feat: set Gemini's model to gemini-1.5-flash (higher
 context window than pro)

---
 core/providers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/providers.py b/core/providers.py
index 03cec04..38bff74 100644
--- a/core/providers.py
+++ b/core/providers.py
@@ -4,5 +4,5 @@
 os.environ["GRPC_VERBOSITY"] = "NONE"
 
 LLM_PROVIDERS = {
-    "Gemini": { "provider": ChatGoogleGenerativeAI, "model": "gemini-1.5-pro" },
+    "Gemini": { "provider": ChatGoogleGenerativeAI, "model": "gemini-1.5-flash" },
 }

From 084df7757754392d451fe839ab2181b21b5908ef Mon Sep 17 00:00:00 2001
From: FireHead90544
Date: Sat, 27 Jul 2024 22:48:03 +0530
Subject: [PATCH 4/6] feat: create inference utility

---
 infer.py | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 infer.py

diff --git a/infer.py b/infer.py
new file mode 100644
index 0000000..e3eb662
--- /dev/null
+++ b/infer.py
@@ -0,0 +1,28 @@
+import warnings
+from core.chains import HOW_CLI_CHAIN
+from core.parser import PARSER
+
+def get_result(task: str) -> dict[str, str | list | float]:
+    """Invokes the chain with the given task and returns the result.
+
+    Args:
+        task (str): The task to perform.
+
+    Returns:
+        dict: The result of the chain.
+    """
+    tries = 3
+    parsed = {"status": "error", "commands": [], "confidence": 0.0}
+
+    while tries > 0:
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            res = HOW_CLI_CHAIN.invoke({ "task": task })
+
+        try:
+            parsed = PARSER.invoke(res)
+            break
+        except Exception as e:
+            tries -= 1
+
+    return parsed
\ No newline at end of file

From fc79b2fa7773ce232f93ac18ce39106ef9cb88a9 Mon Sep 17 00:00:00 2001
From: FireHead90544
Date: Sat, 27 Jul 2024 22:48:20 +0530
Subject: [PATCH 5/6] feat: create formatter for llm's parsed output

---
 formatting.py | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 formatting.py

diff --git a/formatting.py b/formatting.py
new file mode 100644
index 0000000..8631935
--- /dev/null
+++ b/formatting.py
@@ -0,0 +1,36 @@
+from rich.console import Console
+from rich.panel import Panel
+from rich.text import Text
+from rich.table import Table
+from rich import box
+
+
+def display_result(task: str, result: dict):
+    console = Console()
+
+    task_panel = Panel(
+        Text(task, style="bold magenta"),
+        title="Task",
+        border_style="cyan",
+        expand=False,
+    )
+    console.print(task_panel)
+
+    if result["status"] != "success":
+        console.print(f"Status: [bold red]{result['status']}[/bold red]")
+        return
+
+    command_table = Table(box=box.ROUNDED, expand=True, show_header=False)
+    command_table.add_column("Commands", style="green")
+    for command in result["commands"]:
+        command_table.add_row(command)
+
+    confidence_panel = Panel(
+        f"{result['confidence']:.2%}",
+        title="Confidence Score",
+        border_style="yellow",
+        expand=False,
+    )
+
+    console.print(Panel(command_table, title="Commands", border_style="green"))
+    console.print(confidence_panel)

From b2e4b155bef223091d3d5d1bc48ceacb2223c6c6 Mon Sep 17 00:00:00 2001
From: FireHead90544
Date: Sat, 27 Jul 2024 22:48:36 +0530
Subject: [PATCH 6/6] feat: finalize to command to run inference

---
 how.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/how.py b/how.py
index e775bfc..06a2526 100644
--- a/how.py
+++ b/how.py
@@ -2,6 +2,7 @@
 from typing_extensions import Annotated
 from rich.prompt import Prompt
 from core.config import Config
+from formatting import display_result
 
 app = typer.Typer(
     name="how",
@@ -21,7 +22,9 @@ def to(
         typer.secho("Please setup the configuration first using `how setup`", fg="red", bold=True)
         raise typer.Abort()
 
-    print(f"Send {task} to LLM. Test Finished.")
+    from infer import get_result
+    result = get_result(task)
+    display_result(task, result)
 
 
 @app.command()