[BugFix] Remote evaluation script starter example (#68)
* Fix Remote Eval Script

* Add changes from review

* Add changes from review
gchhablani authored Dec 26, 2022
1 parent 7131a12 commit 8338085
Showing 3 changed files with 43 additions and 36 deletions.
7 changes: 7 additions & 0 deletions remote_challenge_evaluation/eval_ai_interface.py
@@ -7,6 +7,7 @@
 URLS = {
     "get_message_from_sqs_queue": "/api/jobs/challenge/queues/{}/",
     "get_submission_by_pk": "/api/jobs/submission/{}",
+    "get_challenge_phase_by_pk": "/api/challenges/challenge/phase/{}",
     "delete_message_from_sqs_queue": "/api/jobs/queues/{}/",
     "update_submission": "/api/jobs/challenge/{}/update_submission/",
 }
@@ -139,3 +140,9 @@ def get_submission_by_pk(self, submission_pk):
         url = self.return_url_per_environment(url)
         response = self.make_request(url, "GET")
         return response
+
+    def get_challenge_phase_by_pk(self, phase_pk):
+        url = URLS.get("get_challenge_phase_by_pk").format(phase_pk)
+        url = self.return_url_per_environment(url)
+        response = self.make_request(url, "GET")
+        return response
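The new `get_challenge_phase_by_pk` helper mirrors the existing `get_submission_by_pk`: it formats the phase primary key into the URL template from `URLS`, rewrites the path for the configured EvalAI server via `return_url_per_environment`, and issues an authenticated GET through `make_request`. A minimal usage sketch follows; the constructor arguments are an assumption based on how `main.py` builds the interface from environment variables, not part of this commit:

```
import os

from eval_ai_interface import EvalAI_Interface

# Assumed wiring: adjust the variable names to whatever your worker sets.
evalai = EvalAI_Interface(
    os.environ["AUTH_TOKEN"],         # challenge host auth token
    os.environ["EVALAI_API_SERVER"],  # e.g. "https://eval.ai"
    os.environ["QUEUE_NAME"],         # submission queue for the challenge
)

# Returns the phase record as a dict; the worker only needs its "codename".
challenge_phase = evalai.get_challenge_phase_by_pk(phase_pk=123)
print(challenge_phase["codename"])
```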
45 changes: 20 additions & 25 deletions remote_challenge_evaluation/evaluate.py
@@ -1,4 +1,3 @@
-import random
 
 
 def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **kwargs):
@@ -11,7 +10,11 @@ def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **
         `test_annotations_file`: Path to test_annotation_file on the server
             We recommend setting a default `test_annotation_file` or using `phase_codename`
-            to select the appropriate file.
+            to select the appropriate file. For example, you could load test annotation file
+            for current phase as:
+            ```
+            test_annotation_file = json.loads(open("{phase_codename}_path", "r"))
+            ```
         `**kwargs`: keyword arguments that contains additional submission
             metadata that challenge hosts can use to send slack notification.
             You can access the submission metadata
@@ -39,43 +42,35 @@ def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **
             'submitted_at': u'2017-03-20T19:22:03.880652Z'
         }
     """
 
+    '''
+    # Load test annotation file for current phase
+    test_annotation_file = json.loads(open("{phase_codename}_path", "r"))
+    '''
     output = {}
     if phase_codename == "dev":
         print("Evaluating for Dev Phase")
         output["result"] = [
             {
-                "train_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
-            }
+                "split": "train_split",
+                "show_to_participant": True,
+                "accuracies": {"Metric1": 90},
+            },
         ]
         # To display the results in the result file
-        output["submission_result"] = output["result"][0]["train_split"]
+        output["submission_result"] = output["result"][0]
         print("Completed evaluation for Dev Phase")
     elif phase_codename == "test":
         print("Evaluating for Test Phase")
         output["result"] = [
             {
-                "train_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
-            },
+                "split": "train_split",
+                "show_to_participant": True,
+                "accuracies": {"Metric1": 90},
+            },
             {
-                "test_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
-            }
+                "split": "test_split",
+                "show_to_participant": False,
+                "accuracies": {"Metric1": 50, "Metric2": 40},
+            },
         ]
         # To display the results in the result file
         output["submission_result"] = output["result"][0]
         print("Completed evaluation for Test Phase")
     return output
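Two notes on the rewritten starter. Each entry in `output["result"]` now names its `split`, controls leaderboard visibility with `show_to_participant`, and nests metrics under `accuracies`; this is the shape that `main.py` below serializes with `json.dumps(results["result"])`. Also, the docstring snippet passes a file object to `json.loads`, which expects a string; a working loader would call `json.load` on the handle instead. A sketch combining both points (the annotation paths and metric value are placeholders, not part of the commit):

```
import json

# Hypothetical phase-to-annotation mapping; ship whatever files your
# evaluation container actually needs.
ANNOTATION_PATHS = {"dev": "annotations/dev.json", "test": "annotations/test.json"}


def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **kwargs):
    if test_annotation_file is None:
        # json.load (not json.loads) parses straight from the file handle.
        with open(ANNOTATION_PATHS[phase_codename], "r") as f:
            test_annotation_file = json.load(f)
    # ... compare user_submission_file against test_annotation_file here ...
    output = {
        "result": [
            {
                "split": "train_split",
                "show_to_participant": True,
                "accuracies": {"Metric1": 90},  # placeholder score
            },
        ]
    }
    output["submission_result"] = output["result"][0]
    return output
```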
27 changes: 16 additions & 11 deletions remote_challenge_evaluation/main.py
@@ -1,7 +1,9 @@
+import json
 import os
 import time
 
 import requests
+
 from eval_ai_interface import EvalAI_Interface
 from evaluate import evaluate
@@ -15,17 +17,18 @@
 
 
 def download(submission, save_dir):
-    response = requests.get(submission.input_file.url)
-    submission_file_path = os.path.join(save_dir, submission.input_file.name)
+    response = requests.get(submission["input_file"])
+    submission_file_path = os.path.join(
+        save_dir, submission["input_file"].split("/")[-1]
+    )
     with open(submission_file_path, "wb") as f:
         f.write(response.content)
     return submission_file_path
 
 
-def update_running(evalai, submission, job_name):
+def update_running(evalai, submission_pk):
     status_data = {
-        "submission": submission,
-        "job_name": job_name,
+        "submission": submission_pk,
         "submission_status": "RUNNING",
     }
     update_status = evalai.update_submission_status(status_data)
@@ -79,7 +82,7 @@ def update_finished(
         phase_pk = message_body.get("phase_pk")
         # Get submission details -- This will contain the input file URL
         submission = evalai.get_submission_by_pk(submission_pk)
-
+        challenge_phase = evalai.get_challenge_phase_by_pk(phase_pk)
         if (
             submission.get("status") == "finished"
             or submission.get("status") == "failed"
@@ -89,15 +92,17 @@
             evalai.delete_message_from_sqs_queue(message_receipt_handle)
 
         else:
-            update_running(submission, job_name="")
+            if submission.get("status") == "submitted":
+                update_running(evalai, submission_pk)
             submission_file_path = download(submission, save_dir)
             try:
                 results = evaluate(
-                    submission_file_path,
-                    submission.challenge_phase.codename
+                    submission_file_path, challenge_phase["codename"]
                 )
-                update_finished(phase_pk, submission_pk, results)
+                update_finished(
+                    evalai, phase_pk, submission_pk, json.dumps(results["result"])
+                )
             except Exception as e:
-                update_failed(phase_pk, submission_pk, str(e))
+                update_failed(evalai, phase_pk, submission_pk, str(e))
         # Poll challenge queue for new submissions
         time.sleep(60)

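With these fixes, `main.py` acts as a simple polling worker: read a message from the challenge queue, fetch the submission and its challenge phase, mark the submission RUNNING only while it is still in the `submitted` state (so a redelivered message cannot clobber a later status), download the input file, evaluate it under the phase codename, and report the outcome back to EvalAI. A condensed sketch of that loop, assuming the interface exposes a `get_message_from_sqs_queue` helper matching the entry in `URLS` (configuration and error handling elided):

```
import time

while True:
    message = evalai.get_message_from_sqs_queue()
    message_body = message.get("body")
    if message_body:
        submission_pk = message_body.get("submission_pk")
        phase_pk = message_body.get("phase_pk")
        submission = evalai.get_submission_by_pk(submission_pk)
        challenge_phase = evalai.get_challenge_phase_by_pk(phase_pk)
        if submission.get("status") in ("finished", "failed", "cancelled"):
            evalai.delete_message_from_sqs_queue(message_body.get("receipt_handle"))
        else:
            # RUNNING -> evaluate() -> FINISHED/FAILED, as in the diff above
            ...
    # Poll the queue again after a minute
    time.sleep(60)
```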