Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Enhancement: Add a Script to create challenges locally #71

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,24 @@ In order to test the evaluation script locally before uploading it to [EvalAI](h

3. Run the command `python -m worker.run` from the directory where `annotations/` `challenge_data/` and `worker/` directories are present. If the command runs successfully, then the evaluation script works locally and will work on the server as well.

## Create challenge locally (for testing)

1. Use this repository as [template](https://docs.github.com/en/free-pro-team@latest/github/creating-cloning-and-archiving-repositories/creating-a-repository-from-a-template).

2. Now, go to your local EvalAI instance at [http://127.0.0.1:8888](http://127.0.0.1:8888/) to fetch the following details, and put them in `local/host_config.json` -
1. `evalai_user_auth_token` - Go to the [profile page](http://127.0.0.1:8888/web/profile) after logging in and click on `Get your Auth Token` to copy your auth token.
2. `host_team_pk` - Go to the [host team page](http://127.0.0.1:8888/web/challenge-host-teams) and copy the `ID` for the team you want to use for challenge creation.
3. `evalai_host_url` - Use `http://localhost:8000`

3. Run the following command from the root directory -
`python local/challenge_processing_script.py`

4. If challenge config contains errors then you will get the issues on terminal otherwise the challenge will be created on EvalAI.

5. Go to [Hosted Challenges](http://127.0.0.1:8888/web/hosted-challenges) to view your challenge. The challenge can be approved locally by following [here](https://evalai.readthedocs.io/en/latest/approve_challenge.html).

6. To update the challenge locally, make changes in the repository and repeat step 3.

## Facing problems in creating a challenge?

Please feel free to open issues on our [GitHub Repository](https://github.com/Cloud-CV/EvalAI-Starter/issues) or contact us at team@cloudcv.org if you have issues.
128 changes: 128 additions & 0 deletions local/challenge_processing_script.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
import http
import json
import os
import requests
import sys

from config import *
from utils import (
check_for_errors,
create_challenge_zip_file,
get_request_header,
load_host_configs,
validate_token,
)

# Host configuration, populated from local/host_config.json at startup.
HOST_AUTH_TOKEN = None
CHALLENGE_HOST_TEAM_PK = None
EVALAI_HOST_URL = None
GITHUB_URL = None  # fixed typo: was GIHUB_URL


if __name__ == "__main__":

    configs = load_host_configs(HOST_CONFIG_FILE_PATH)
    if configs:
        HOST_AUTH_TOKEN = configs[0]
        CHALLENGE_HOST_TEAM_PK = configs[1]
        EVALAI_HOST_URL = configs[2]
        GITHUB_URL = configs[3]
    else:
        sys.exit(1)

    # Start from a clean error state so that check_for_errors() at the end
    # only reflects errors recorded during this run (without this, a clean
    # run reported "Error: None").
    os.environ["CHALLENGE_ERRORS"] = "False"

    # Creating the challenge zip file to send to EvalAI
    create_challenge_zip_file(CHALLENGE_ZIP_FILE_PATH,
                              IGNORE_DIRS, IGNORE_FILES)

    data = {"GITHUB_REPOSITORY": GITHUB_URL}
    headers = get_request_header(HOST_AUTH_TOKEN)

    # Validation step
    url = "{}{}".format(
        EVALAI_HOST_URL,
        CHALLENGE_CONFIG_VALIDATION_URL.format(CHALLENGE_HOST_TEAM_PK),
    )
    # `with` guarantees the zip handle is closed; the original leaked the
    # first handle by reopening the file for the second request without
    # closing it. `files` also no longer shadows the `file` builtin.
    with open(CHALLENGE_ZIP_FILE_PATH, "rb") as zip_file:
        files = {"zip_configuration": zip_file}
        try:
            response = requests.post(url, data=data, headers=headers, files=files)

            if (
                response.status_code != http.HTTPStatus.OK
                and response.status_code != http.HTTPStatus.CREATED
            ):
                response.raise_for_status()
            else:
                print("\n" + response.json()["Success"])
        except requests.exceptions.HTTPError as err:
            if response.status_code in EVALAI_ERROR_CODES:
                is_token_valid = validate_token(response.json())
                if is_token_valid:
                    error = response.json()["error"]
                    error_message = "\nFollowing errors occurred while validating the challenge config:\n{}".format(
                        error
                    )
                    print(error_message)
                    # Record the failure so check_for_errors() reports it
                    # (the original only recorded errors in the create step).
                    os.environ["CHALLENGE_ERRORS"] = error_message
            else:
                print(
                    "\nFollowing errors occurred while validating the challenge config: {}".format(
                        err
                    )
                )
                os.environ["CHALLENGE_ERRORS"] = str(err)
        except Exception as e:
            error_message = "\nFollowing errors occurred while validating the challenge config: {}".format(
                e
            )
            print(error_message)
            os.environ["CHALLENGE_ERRORS"] = error_message

    # Creating or updating the challenge
    url = "{}{}".format(
        EVALAI_HOST_URL,
        CHALLENGE_CREATE_OR_UPDATE_URL.format(CHALLENGE_HOST_TEAM_PK),
    )

    with open(CHALLENGE_ZIP_FILE_PATH, "rb") as zip_file:
        files = {"zip_configuration": zip_file}
        try:
            response = requests.post(url, data=data, headers=headers, files=files)

            if (
                response.status_code != http.HTTPStatus.OK
                and response.status_code != http.HTTPStatus.CREATED
            ):
                response.raise_for_status()
            else:
                print("\n" + response.json()["Success"])
        except requests.exceptions.HTTPError as err:
            if response.status_code in EVALAI_ERROR_CODES:
                is_token_valid = validate_token(response.json())
                if is_token_valid:
                    error = response.json()["error"]
                    error_message = "\nFollowing errors occurred while validating the challenge config:\n{}".format(
                        error
                    )
                    print(error_message)
                    os.environ["CHALLENGE_ERRORS"] = error_message
            else:
                print(
                    "\nFollowing errors occurred while validating the challenge config: {}".format(
                        err
                    )
                )
                os.environ["CHALLENGE_ERRORS"] = str(err)
        except Exception as e:
            error_message = "\nFollowing errors occurred while processing the challenge config: {}".format(
                e
            )
            print(error_message)
            os.environ["CHALLENGE_ERRORS"] = error_message

    # Remove the generated challenge zip once both requests are done.
    os.remove(CHALLENGE_ZIP_FILE_PATH)

    is_valid, errors = check_for_errors()
    if not is_valid:
        print("Error: {}".format(errors))
    print("\nExiting the {} script\n".format(
        os.path.basename(__file__)))
23 changes: 23 additions & 0 deletions local/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import os


# Relative path (from the repo root) of the JSON file holding the host's
# auth token, team pk, EvalAI host url and github repo name.
HOST_CONFIG_FILE_PATH = "local/host_config.json"
# EvalAI API endpoints; `{}` is filled with the challenge host team pk.
CHALLENGE_CONFIG_VALIDATION_URL = "/api/challenges/challenge/challenge_host_team/{}/validate_challenge_config/"
CHALLENGE_CREATE_OR_UPDATE_URL = "/api/challenges/challenge/challenge_host_team/{}/create_or_update_github_challenge/"
# HTTP status codes for which the EvalAI response body carries a useful
# error payload (bad request / unauthorized / not acceptable).
EVALAI_ERROR_CODES = [400, 401, 406]
# Production EvalAI host.
API_HOST_URL = "https://eval.ai"
# Directories excluded from the generated challenge zip.
IGNORE_DIRS = [
    ".git",
    ".github",
    "github",
    "code_upload_challenge_evaluation",
    "remote_challenge_evaluation",
]
# Files excluded from the generated challenge zip.
IGNORE_FILES = [
    ".gitignore",
    "challenge_config.zip",
    "README.md",
    "run.sh",
    "submission.json",
]
# Path where the challenge config zip is written before upload.
CHALLENGE_ZIP_FILE_PATH = "challenge_config.zip"
6 changes: 6 additions & 0 deletions local/host_config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"token": "<evalai_user_auth_token>",
"team_pk": "<host_team_pk>",
"evalai_host_url": "http://localhost:8000",
"github_repo": "EvalAI-Starters"
}
123 changes: 123 additions & 0 deletions local/utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
import json
import os
import sys
import zipfile

from config import *


def check_for_errors():
    """
    Reports whether any errors were recorded during this workflow step.

    Returns:
        tuple: (True, None) when the CHALLENGE_ERRORS env var is the literal
        string "False", otherwise (False, <recorded error text>).
    """
    recorded = os.getenv("CHALLENGE_ERRORS")
    if recorded == "False":
        return True, None
    return False, recorded


def create_challenge_zip_file(challenge_zip_file_path, ignore_dirs, ignore_files):
    """
    Creates the challenge zip file at a given path.

    Also builds ``evaluation_script.zip`` (from the ``evaluation_script/``
    directory under the current working directory) first, so that it gets
    bundled into the challenge zip.

    Arguments:
        challenge_zip_file_path {str}: The relative path of the created zip file
        ignore_dirs {list}: The list of directories to exclude from the zip file
        ignore_files {list}: The list of files to exclude from the zip file
    """
    working_dir = os.getcwd()

    # Creating evaluation_script.zip file
    eval_script_dir = os.path.join(working_dir, "evaluation_script")
    with zipfile.ZipFile(
        "evaluation_script.zip", "w", zipfile.ZIP_DEFLATED
    ) as eval_script_zip:
        for root, dirs, files in os.walk(eval_script_dir):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                # Store entries relative to the evaluation script directory.
                eval_script_zip.write(
                    file_path, os.path.relpath(file_path, eval_script_dir)
                )

    # Creating the challenge_config.zip file
    with zipfile.ZipFile(
        challenge_zip_file_path, "w", zipfile.ZIP_DEFLATED
    ) as zipf:
        for root, dirs, files in os.walk(working_dir):
            # Split on os.sep (not a hard-coded "/") so the ignore-dir
            # check also works on Windows paths.
            parents = root.split(os.sep)
            if set(parents) & set(ignore_dirs):
                continue
            for file_name in files:
                if file_name in ignore_files:
                    continue
                file_path = os.path.join(root, file_name)
                zipf.write(file_path, os.path.relpath(file_path, working_dir))


def get_request_header(token):
    """
    Builds the authorization header used when sending requests to EvalAI.

    Arguments:
        token {str}: The user token to gain access to EvalAI
    """
    return {"Authorization": "Bearer {}".format(token)}


def load_host_configs(config_path):
    """
    Loads the host configs used for sending requests to EvalAI.

    Arguments:
        config_path {str}: The path of host configs having the user token, team id and the EvalAI host url

    Returns:
        list: [token, team_pk, host_url, github_repo] on success.
        bool: False when the config file is missing, malformed, or lacks a
        required key (the error is printed and recorded in the
        CHALLENGE_ERRORS env var).
    """
    config_path = "{}/{}".format(os.getcwd(), config_path)
    if not os.path.exists(config_path):
        error_message = "\nThe host config json file is not present. Please include an auth token, team_pk & evalai_host_url in it: {}".format(
            config_path
        )
        print(error_message)
        os.environ["CHALLENGE_ERRORS"] = error_message
        return False
    with open(config_path, "r") as f:
        try:
            data = f.read()
        except (OSError, IOError) as e:
            print("\nAn error occured while loading the host configs: {}".format(e))
            sys.exit(1)
    # The original parsed and indexed outside any try-block, so malformed
    # JSON or a missing key crashed with a raw traceback; report it as a
    # config error instead (callers already treat a falsy return as failure).
    try:
        data = json.loads(data)
        return [
            data["token"],
            data["team_pk"],
            data["evalai_host_url"],
            data["github_repo"],
        ]
    except (ValueError, KeyError) as e:
        error_message = "\nThe host config json file is invalid. Please include token, team_pk, evalai_host_url & github_repo in it: {}".format(
            e
        )
        print(error_message)
        os.environ["CHALLENGE_ERRORS"] = error_message
        return False


def validate_token(response):
    """
    Checks whether the authentication token used for the request is valid.

    Arguments:
        response {dict}: The response json dict sent back from EvalAI

    Returns:
        bool: False when EvalAI reported an invalid or expired token (the
        error is printed and recorded in the CHALLENGE_ERRORS env var),
        True otherwise.
    """
    token_error_messages = {
        "Invalid token": "\nThe authentication token you are using isn't valid. Please generate it again.\n",
        "Token has expired": "\nSorry, the token has expired. Please generate it again.\n",
    }
    detail = response.get("detail")
    if detail in token_error_messages:
        error = token_error_messages[detail]
        print(error)
        os.environ["CHALLENGE_ERRORS"] = error
        return False
    return True