Fix evaluate bug. Add test for the case when config not set #299

Merged · 2 commits · Sep 23, 2024
sdks/python/src/opik/api_objects/opik_client.py (2 additions & 0 deletions)
@@ -369,6 +369,8 @@ def create_experiment(
                 experiment_config,
             )
             metadata = None
+        else:
+            metadata = None

         self._rest_client.experiments.create_experiment(
             name=name,
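For context, the added `else` branch closes a gap in the conditional that computes `metadata`. Before this change, `metadata` was assigned only when `experiment_config` was passed (either as a dict, or as an invalid value that gets rejected), so calling `create_experiment` without a config reached the later `metadata=metadata` argument with the name unbound, presumably raising `UnboundLocalError`. Below is a minimal sketch of the pattern, reconstructed from the diff context rather than copied from the SDK; the helper name is hypothetical, and the logging call and JSON encoding are elided:

from collections.abc import Mapping
from typing import Any, Dict, Optional


# Hypothetical standalone helper mirroring the conditional around the fix.
def resolve_experiment_metadata(
    experiment_config: Optional[Dict[str, Any]],
) -> Optional[Dict[str, Any]]:
    if isinstance(experiment_config, Mapping):
        metadata = dict(experiment_config)  # simplified; stands in for the SDK's JSON encoding
    elif experiment_config is not None:
        metadata = None  # invalid (non-dict) config: rejected, nothing is logged
    else:
        # The branch added by this PR. Without it, `metadata` was never bound
        # when no config was given, and the first use of the name raised
        # UnboundLocalError before any request reached the backend.
        metadata = None
    return metadata

The test added below exercises exactly this path: it runs `opik.evaluate` without an `experiment_config` and asserts that the backend receives `metadata=None`.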
sdks/python/tests/e2e/test_experiment.py (42 additions & 0 deletions)
@@ -76,3 +76,45 @@ def task(item: dataset_item.DatasetItem):
 #             expected_output={"output": "Warsaw"},
 #         ),
 #     ]
+
+
+def test_experiment_creation__experiment_config_not_set__None_metadata_sent_to_backend(
+    opik_client: opik.Opik, dataset_name: str, experiment_name: str
+):
+    dataset = opik_client.create_dataset(dataset_name)
+
+    dataset.insert(
+        [
+            {
+                "input": {"question": "What is the capital of France?"},
+                "expected_output": {"output": "Paris"},
+            },
+        ]
+    )
+
+    def task(item: dataset_item.DatasetItem):
+        if item.input == {"question": "What is the capital of France?"}:
+            return {"output": "Paris", "reference": item.expected_output["output"]}
+
+        raise AssertionError(
+            f"Task received dataset item with an unexpected input: {item.input}"
+        )
+
+    equals_metric = metrics.Equals()
+    evaluation_result = opik.evaluate(
+        dataset=dataset,
+        task=task,
+        scoring_metrics=[equals_metric],
+        experiment_name=experiment_name,
+    )
+
+    opik.flush_tracker()
+
+    verifiers.verify_experiment(
+        opik_client=opik_client,
+        id=evaluation_result.experiment_id,
+        experiment_name=evaluation_result.experiment_name,
+        experiment_metadata=None,
+        traces_amount=1,  # one trace per dataset item
+        feedback_scores_amount=1,  # an average value of all Equals metric scores
+    )