Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Fix pycryptodome vulnerability #2345

Merged
merged 8 commits into from
Dec 6, 2024
10 changes: 8 additions & 2 deletions agenta-backend/agenta_backend/models/api/evaluation_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from datetime import datetime
from typing import Optional, List, Dict, Any, Union

from pydantic import BaseModel, Field, model_validator
from pydantic import BaseModel, Field, model_validator, field_validator

from agenta_backend.utils import traces
from agenta_backend.models.api.api_models import Result
Expand Down Expand Up @@ -293,9 +293,15 @@ class NewEvaluation(BaseModel):
evaluators_configs: List[str]
testset_id: str
rate_limit: LLMRunRateLimit
lm_providers_keys: Optional[Dict[LMProvidersEnum, str]] = None
lm_providers_keys: Optional[Dict[str, str]] = None
correct_answer_column: Optional[str] = None

@field_validator("lm_providers_keys", mode="after")
def validate_lm_providers_keys(cls, value):
    """Coerce raw string provider keys into ``LMProvidersEnum`` members.

    The field is declared as ``Optional[Dict[str, str]]``; after validation
    this normalizes each key through ``LMProvidersEnum``. A ``None`` value
    is passed through unchanged.
    """
    if value is None:
        return value
    return {LMProvidersEnum(provider): secret for provider, secret in value.items()}


class NewEvaluatorConfig(BaseModel):
app_id: str
Expand Down
22 changes: 17 additions & 5 deletions agenta-backend/agenta_backend/services/llm_apps_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,12 +58,24 @@ def extract_result_from_response(response: dict):
if "tree" in response:
trace_tree = response.get("tree", {}).get("nodes", [])[0]

latency = (
get_nested_value(
trace_tree, ["metrics", "acc", "duration", "total"]
)
/ 1000
duration_ms = get_nested_value(
trace_tree, ["metrics", "acc", "duration", "total"]
)
if duration_ms:
duration_seconds = duration_ms / 1000
else:
start_time = get_nested_value(trace_tree, ["time", "start"])
end_time = get_nested_value(trace_tree, ["time", "end"])

if start_time and end_time:
duration_seconds = (
datetime.fromisoformat(end_time)
- datetime.fromisoformat(start_time)
).total_seconds()
else:
duration_seconds = None

latency = duration_seconds
cost = get_nested_value(
trace_tree, ["metrics", "acc", "costs", "total"]
)
Expand Down
2 changes: 2 additions & 0 deletions agenta-backend/agenta_backend/tasks/evaluations.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,8 @@ def evaluate(
]

# 4. We save the result of the eval scenario in the db
print("============ App Output ============: ", app_output.result.value)

loop.run_until_complete(
create_new_evaluation_scenario(
project_id=project_id,
Expand Down
Loading
Loading