Private ensemble doesn't worry about secrets. #786

Merged 3 commits on Jan 13, 2025
41 changes: 0 additions & 41 deletions src/modelgauge/auth/vllm_keys.py

This file was deleted.
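For reference, the deleted module declared the VLLM secret classes that the ensemble previously injected itself. A rough reconstruction, based only on the import names removed in the diff below and modelgauge's usual `RequiredSecret` pattern; the `scope`/`key` strings and instructions are assumptions, not the original values:

```python
# Hypothetical sketch of the deleted src/modelgauge/auth/vllm_keys.py.
# Class names come from the imports removed in the diff below;
# scope/key strings and instructions are assumed, not the originals.
from modelgauge.secret_values import RequiredSecret, SecretDescription


class Lg3LoraVllmApiKey(RequiredSecret):
    @classmethod
    def description(cls) -> SecretDescription:
        return SecretDescription(
            scope="vllm",  # assumed scope
            key="lg3_lora_api_key",  # assumed key
            instructions="API key for the Llama Guard 3 LoRA VLLM endpoint.",
        )


class Lg3LoraVllmEndpointUrl(RequiredSecret):
    @classmethod
    def description(cls) -> SecretDescription:
        return SecretDescription(
            scope="vllm",  # assumed scope
            key="lg3_lora_endpoint_url",  # assumed key
            instructions="Base URL of the Llama Guard 3 LoRA VLLM endpoint.",
        )


# Mistral7bVllmApiKey and Mistral7bVllmEndpointUrl presumably followed the same pattern.
```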

35 changes: 4 additions & 31 deletions src/modelgauge/private_ensemble_annotator_set.py
@@ -3,12 +3,6 @@

from modelgauge.annotator_set import AnnotatorSet
from modelgauge.auth.together_key import TogetherApiKey
from modelgauge.auth.vllm_keys import (
Lg3LoraVllmApiKey,
Lg3LoraVllmEndpointUrl,
Mistral7bVllmApiKey,
Mistral7bVllmEndpointUrl,
)
from modelgauge.dependency_injection import _replace_with_injected
from modelgauge.secret_values import InjectSecret
from modelgauge.single_turn_prompt_response import TestItemAnnotations
@@ -17,21 +11,19 @@
from modelgauge.annotators.llama_3_70b_chat_annotator import (
LLAMA_3_70B_PE_TAMALPAIS_2024_09_09_CONFIG as LLAMA_3_70B_CONFIG,
) # type: ignore
from modelgauge.annotators.llama_guard_3_lora_annotator import (
LlamaGuard3LoRAAnnotator,
) # type: ignore
from modelgauge.annotators.template_lg3_lora_annotator import (
config as LG3_LORA_CONFIG,
Lg3LoraVllmApiKey,
Lg3LoraVllmEndpointUrl,
) # type: ignore
from modelgauge.annotators.mistral_7b_ruby_annotator import (
config as MISTRAL_7B_RUBY_CONFIG,
Mistral7bVllmApiKey,
Mistral7bVllmEndpointUrl,
) # type: ignore
from modelgauge.annotators.mistral_8x22b_instruct_annotator import (
MISTRAL_8x22B_PE_TAMALPAIS_2024_09_09_CONFIG as MISTRAL_8x22B_CONFIG,
) # type: ignore
from modelgauge.annotators.prompt_engineered_annotator import (
PromptEngineeredAnnotator,
) # type: ignore
from modelgauge.ensemble.majority_vote_ensemble_strategy import MajorityVoteEnsembleStrategy # type: ignore
from modelgauge.safety_model_response import SafetyModelResponse # type: ignore
except:
@@ -53,32 +45,13 @@ class EnsembleAnnotatorSet(AnnotatorSet):
def __init__(self, secrets):
self.secrets = secrets
self.strategy = MajorityVoteEnsembleStrategy()
self.__configure_vllm_annotators()
self.__configure_huggingface_annotators()
self.__configure_together_annotators()
self._register_annotators()

def _register_annotators(self):
self.annotators = [
MISTRAL_7B_RUBY_CONFIG.name,
MISTRAL_8x22B_CONFIG.name,
LLAMA_3_70B_CONFIG.name,
LG3_LORA_CONFIG.name,
]

def __configure_vllm_annotators(self):
LG3_LORA_CONFIG.api_key = self.secrets["lg3_vllm_api_key"].value
LG3_LORA_CONFIG.base_url = self.secrets["lg3_vllm_endpoint_url"].value
MISTRAL_7B_RUBY_CONFIG.api_key = self.secrets["mistral_7b_vllm_api_key"].value
MISTRAL_7B_RUBY_CONFIG.base_url = self.secrets["mistral_7b_vllm_endpoint_url"].value

def __configure_huggingface_annotators(self):
return

def __configure_together_annotators(self):
MISTRAL_8x22B_CONFIG.llm_config.api_key = self.secrets["together_api_key"]
LLAMA_3_70B_CONFIG.llm_config.api_key = self.secrets["together_api_key"]

def evaluate(self, item: TestItemAnnotations) -> Dict[str, float]:
annotated_completion = item.interactions[0].response.completions[0]
individual_annotations = {}
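With `vllm_keys.py` gone, the ensemble set no longer wires VLLM secrets itself: the annotator modules export their own key classes, and `EnsembleAnnotatorSet` only works with whatever secrets its `__init__` receives. A minimal usage sketch, assuming the set is still constructed with `InjectSecret` placeholders as the module's imports (`InjectSecret`, `TogetherApiKey`, `_replace_with_injected`) suggest; the `"together_api_key"` key mirrors the dictionary lookup visible in the diff, and whether it survives the refactor unchanged is an assumption:

```python
# Hypothetical wiring; the exact secret keys the refactored set expects are an
# assumption based on the imports and dictionary lookups visible above.
from modelgauge.auth.together_key import TogetherApiKey
from modelgauge.private_ensemble_annotator_set import EnsembleAnnotatorSet
from modelgauge.secret_values import InjectSecret

# The set receives InjectSecret placeholders; real values are substituted later
# (the module imports _replace_with_injected for that step).
annotator_set = EnsembleAnnotatorSet(
    secrets={"together_api_key": InjectSecret(TogetherApiKey)},
)

# __init__ calls _register_annotators(), so the config names are available immediately.
print(annotator_set.annotators)
```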