diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py
index 50a9bfb3c626..83901644635b 100644
--- a/autogpt_platform/backend/backend/blocks/llm.py
+++ b/autogpt_platform/backend/backend/blocks/llm.py
@@ -107,7 +107,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     OLLAMA_LLAMA3_405B = "llama3.1:405b"
     # OpenRouter models
     GEMINI_FLASH_1_5_8B = "google/gemini-flash-1.5"
-    GEMINI_FLASH_1_5_EXP = "google/gemini-flash-1.5-exp"
     GROK_BETA = "x-ai/grok-beta"
     MISTRAL_NEMO = "mistralai/mistral-nemo"
     COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
@@ -117,6 +116,14 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE = (
         "perplexity/llama-3.1-sonar-large-128k-online"
     )
+    QWEN_QWQ_32B_PREVIEW = "qwen/qwq-32b-preview"
+    NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b"
+    NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b"
+    AMAZON_NOVA_LITE_V1 = "amazon/nova-lite-v1"
+    AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1"
+    AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1"
+    MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b"
+    GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
 
     @property
     def metadata(self) -> ModelMetadata:
@@ -152,7 +159,6 @@ def context_window(self) -> int:
     LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192),
     LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192),
     LlmModel.GEMINI_FLASH_1_5_8B: ModelMetadata("open_router", 8192),
-    LlmModel.GEMINI_FLASH_1_5_EXP: ModelMetadata("open_router", 8192),
     LlmModel.GROK_BETA: ModelMetadata("open_router", 8192),
     LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 4000),
     LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 4000),
@@ -162,6 +168,14 @@ def context_window(self) -> int:
     LlmModel.PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE: ModelMetadata(
         "open_router", 8192
     ),
+    LlmModel.QWEN_QWQ_32B_PREVIEW: ModelMetadata("open_router", 4000),
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata("open_router", 4000),
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata("open_router", 4000),
+    LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 4000),
+    LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 4000),
+    LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 4000),
+    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 4000),
+    LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4000),
 }
 
 for model in LlmModel:
diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py
index 6b5a12409770..13cb0b82bb89 100644
--- a/autogpt_platform/backend/backend/data/block_cost_config.py
+++ b/autogpt_platform/backend/backend/data/block_cost_config.py
@@ -54,7 +54,6 @@
     LlmModel.OLLAMA_LLAMA3_8B: 1,
     LlmModel.OLLAMA_LLAMA3_405B: 1,
     LlmModel.GEMINI_FLASH_1_5_8B: 1,
-    LlmModel.GEMINI_FLASH_1_5_EXP: 1,
     LlmModel.GROK_BETA: 5,
     LlmModel.MISTRAL_NEMO: 1,
     LlmModel.COHERE_COMMAND_R_08_2024: 1,
@@ -62,6 +61,14 @@
     LlmModel.EVA_QWEN_2_5_32B: 1,
     LlmModel.DEEPSEEK_CHAT: 2,
     LlmModel.PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE: 1,
+    LlmModel.QWEN_QWQ_32B_PREVIEW: 2,
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: 1,
+    LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: 1,
+    LlmModel.AMAZON_NOVA_LITE_V1: 1,
+    LlmModel.AMAZON_NOVA_MICRO_V1: 1,
+    LlmModel.AMAZON_NOVA_PRO_V1: 1,
+    LlmModel.MICROSOFT_WIZARDLM_2_8X22B: 1,
+    LlmModel.GRYPHE_MYTHOMAX_L2_13B: 1,
 }
 
 for model in LlmModel:
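A minimal sketch of how one of the newly registered OpenRouter models resolves through the properties and metadata table touched by this patch. The import path is assumed from the file location above (backend/blocks/llm.py), and the printed values reflect the MODEL_METADATA entries added in the diff; this is illustrative only, not part of the patch.

from backend.blocks.llm import LlmModel  # path assumed from the diff's file header

model = LlmModel.AMAZON_NOVA_PRO_V1
print(model.value)           # "amazon/nova-pro-v1" -- the OpenRouter model slug
print(model.metadata)        # ModelMetadata registered above: provider "open_router", 4000
print(model.context_window)  # 4000, per the context-window table in this diff

Note that each new model requires three parallel registrations in this codebase: the LlmModel enum member, its ModelMetadata entry, and its per-call cost in block_cost_config.py; the trailing "for model in LlmModel:" loops in both files suggest these tables are checked for completeness at import time.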