# ============================================================
# local/challenge_processing_script.py
#
# Adds a challenge to EvalAI directly from the local environment,
# without going through the GitHub-based workflow. Reads the host
# auth token, team pk, EvalAI host url and repository name from
# local/host_config.json, zips the challenge configuration, and
# POSTs it to EvalAI: first to the validation endpoint, then to
# the create-or-update endpoint.
#
# Usage: python local/challenge_processing_script.py
# ============================================================
import http
import json
import os
import sys

import requests

from config import *
from utils import (
    check_for_errors,
    create_challenge_zip_file,
    get_request_header,
    load_host_configs,
    validate_token,
)

HOST_AUTH_TOKEN = None
CHALLENGE_HOST_TEAM_PK = None
EVALAI_HOST_URL = None
GITHUB_URL = None  # bugfix: was misspelled GIHUB_URL


def _post_challenge_config(url, data, headers, step):
    """POST challenge_config.zip to ``url``; print and record any errors.

    Arguments:
        url {str}: full EvalAI endpoint to POST to
        data {dict}: form data sent along with the zip file
        headers {dict}: request headers (auth token)
        step {str}: human-readable step description used in error messages
    """
    try:
        # Re-open the archive for every request so the file pointer is at
        # the start and the handle is always closed (the original code
        # leaked the first file handle).
        with open(CHALLENGE_ZIP_FILE_PATH, "rb") as zip_file:
            response = requests.post(
                url,
                data=data,
                headers=headers,
                files={"zip_configuration": zip_file},
            )
        if response.status_code not in (
            http.HTTPStatus.OK,
            http.HTTPStatus.CREATED,
        ):
            response.raise_for_status()
        else:
            print("\n" + response.json()["Success"])
    except requests.exceptions.HTTPError as err:
        # EvalAI returns a JSON error payload for these status codes.
        if response.status_code in EVALAI_ERROR_CODES and validate_token(
            response.json()
        ):
            error_message = "\nFollowing errors occurred while {}:\n{}".format(
                step, response.json()["error"]
            )
            print(error_message)
            os.environ["CHALLENGE_ERRORS"] = error_message
        else:
            print("\nFollowing errors occurred while {}: {}".format(step, err))
            os.environ["CHALLENGE_ERRORS"] = str(err)
    except Exception as e:
        # Network failures, bad JSON, etc. — report and continue so the
        # final check_for_errors() summary still runs.
        error_message = "\nFollowing errors occurred while {}: {}".format(step, e)
        print(error_message)
        os.environ["CHALLENGE_ERRORS"] = error_message


if __name__ == "__main__":

    # Sentinel consumed by utils.check_for_errors(); "False" == no errors.
    # Bugfix: this was never initialized, so a clean run was reported as
    # an error with message None.
    os.environ["CHALLENGE_ERRORS"] = "False"

    configs = load_host_configs(HOST_CONFIG_FILE_PATH)
    if not configs:
        sys.exit(1)
    HOST_AUTH_TOKEN, CHALLENGE_HOST_TEAM_PK, EVALAI_HOST_URL, GITHUB_URL = configs

    # Create the challenge zip file that is sent to EvalAI.
    create_challenge_zip_file(CHALLENGE_ZIP_FILE_PATH, IGNORE_DIRS, IGNORE_FILES)

    data = {"GITHUB_REPOSITORY": GITHUB_URL}
    headers = get_request_header(HOST_AUTH_TOKEN)

    # Step 1: validate the challenge configuration.
    _post_challenge_config(
        "{}{}".format(
            EVALAI_HOST_URL,
            CHALLENGE_CONFIG_VALIDATION_URL.format(CHALLENGE_HOST_TEAM_PK),
        ),
        data,
        headers,
        "validating the challenge config",
    )

    # Step 2: create or update the challenge.
    _post_challenge_config(
        "{}{}".format(
            EVALAI_HOST_URL,
            CHALLENGE_CREATE_OR_UPDATE_URL.format(CHALLENGE_HOST_TEAM_PK),
        ),
        data,
        headers,
        "creating or updating the challenge",
    )

    # Clean up the generated archive.
    os.remove(CHALLENGE_ZIP_FILE_PATH)

    is_valid, errors = check_for_errors()
    if not is_valid:
        print("Error: {}".format(errors))
    print("\nExiting the {} script\n".format(os.path.basename(__file__)))


# ============================================================
# local/config.py
#
# Constants shared by the local challenge creation scripts.
# ============================================================
import os


# Path (relative to the repository root) of the host configuration file.
HOST_CONFIG_FILE_PATH = "local/host_config.json"
# EvalAI API endpoints; "{}" is filled with the challenge host team pk.
CHALLENGE_CONFIG_VALIDATION_URL = (
    "/api/challenges/challenge/challenge_host_team/{}/validate_challenge_config/"
)
CHALLENGE_CREATE_OR_UPDATE_URL = (
    "/api/challenges/challenge/challenge_host_team/{}/create_or_update_github_challenge/"
)
# HTTP status codes for which EvalAI returns a JSON error payload.
EVALAI_ERROR_CODES = [400, 401, 406]
API_HOST_URL = "https://eval.ai"
# Directories that must not be shipped inside challenge_config.zip.
IGNORE_DIRS = [
    ".git",
    ".github",
    "github",
    "code_upload_challenge_evaluation",
    "remote_challenge_evaluation",
]
# Files that must not be shipped inside challenge_config.zip.
IGNORE_FILES = [
    ".gitignore",
    "challenge_config.zip",
    "README.md",
    "run.sh",
    "submission.json",
]
# Where the generated archive is written before upload.
CHALLENGE_ZIP_FILE_PATH = "challenge_config.zip"


# ============================================================
# local/host_config.json (expected contents)
#
# {
#     "token": "",
#     "team_pk": "",
#     "evalai_host_url": "http://localhost:8000",
#     "github_repo": "EvalAI-Starters"
# }
# ============================================================


# ============================================================
# local/utils.py (beginning)
# ============================================================
import json
import os
import sys
import zipfile

from config import *


def check_for_errors():
    """Return (True, None) when no errors were recorded, else (False, msg).

    The scripts record errors in the CHALLENGE_ERRORS environment variable.
    Bugfix: an unset variable now counts as "no error"; previously it was
    reported as a failure with message None.
    """
    errors = os.getenv("CHALLENGE_ERRORS", "False")
    if errors == "False":
        return True, None
    return False, errors
def create_challenge_zip_file(challenge_zip_file_path, ignore_dirs, ignore_files):
    """Create the challenge configuration zip at the given path.

    Bundles evaluation_script/ into evaluation_script.zip first, then zips
    the whole working directory (minus the ignored dirs/files), so that
    evaluation_script.zip itself ends up inside the challenge archive.

    Arguments:
        challenge_zip_file_path {str}: The relative path of the created zip file
        ignore_dirs {list}: The list of directories to exclude from the zip file
        ignore_files {list}: The list of files to exclude from the zip file
    """
    working_dir = os.getcwd()

    # Creating evaluation_script.zip; entries are stored relative to the
    # evaluation_script directory. `with` guarantees the archives are closed
    # even if writing fails (the original left them open on error).
    eval_script_dir = os.path.join(working_dir, "evaluation_script")
    with zipfile.ZipFile(
        "evaluation_script.zip", "w", zipfile.ZIP_DEFLATED
    ) as eval_script_zip:
        for root, _, files in os.walk(eval_script_dir):
            for name in files:
                file_path = os.path.join(root, name)
                eval_script_zip.write(
                    file_path, os.path.relpath(file_path, eval_script_dir)
                )

    # Creating the challenge_config.zip file.
    with zipfile.ZipFile(
        challenge_zip_file_path, "w", zipfile.ZIP_DEFLATED
    ) as challenge_zip:
        for root, _, files in os.walk(working_dir):
            # Skip any subtree containing an ignored directory name.
            # Bugfix: split on os.sep instead of "/" so the check also
            # works on Windows paths.
            if set(root.split(os.sep)) & set(ignore_dirs):
                continue
            for name in files:
                if name in ignore_files:
                    continue
                file_path = os.path.join(root, name)
                challenge_zip.write(
                    file_path, os.path.relpath(file_path, working_dir)
                )


def get_request_header(token):
    """Return the Authorization header used for EvalAI API requests.

    Arguments:
        token {str}: The user token to gain access to EvalAI
    """
    return {"Authorization": "Bearer {}".format(token)}


def load_host_configs(config_path):
    """Load the host configs used for sending requests to EvalAI.

    Arguments:
        config_path {str}: The path (relative to cwd) of the host config json
            containing the user token, team pk, EvalAI host url and the
            github repository name.

    Returns:
        [token, team_pk, evalai_host_url, github_repo] on success,
        False if the file is missing or invalid (the error is also recorded
        in the CHALLENGE_ERRORS environment variable).
    """
    config_path = os.path.join(os.getcwd(), config_path)
    if not os.path.exists(config_path):
        error_message = (
            "\nThe host config json file is not present. Please include an "
            "auth token, team_pk, evalai_host_url & github_repo in it: {}".format(
                config_path
            )
        )
        print(error_message)
        os.environ["CHALLENGE_ERRORS"] = error_message
        return False
    try:
        with open(config_path, "r") as f:
            data = json.loads(f.read())
        return [
            data["token"],
            data["team_pk"],
            data["evalai_host_url"],
            data["github_repo"],
        ]
    except (OSError, IOError) as e:
        print("\nAn error occurred while loading the host configs: {}".format(e))
        sys.exit(1)
    except (ValueError, KeyError) as e:
        # Bugfix: malformed json or a missing key previously raised an
        # uncaught exception because parsing happened outside the try.
        error_message = "\nInvalid host config json file: {}".format(e)
        print(error_message)
        os.environ["CHALLENGE_ERRORS"] = error_message
        return False


def validate_token(response):
    """Check whether the auth token used for the request was accepted.

    Arguments:
        response {dict}: The response json dict sent back from EvalAI

    Returns:
        False when EvalAI reported an invalid or expired token (the error is
        printed and recorded in CHALLENGE_ERRORS), True otherwise.
    """
    detail = response.get("detail")
    if detail == "Invalid token":
        error = (
            "\nThe authentication token you are using isn't valid."
            " Please generate it again.\n"
        )
    elif detail == "Token has expired":
        error = "\nSorry, the token has expired. Please generate it again.\n"
    else:
        return True
    print(error)
    os.environ["CHALLENGE_ERRORS"] = error
    return False
 If the command runs successfully, then the evaluation script works locally and will work on the server as well.
 
+## Create a challenge locally (for testing)
+
+1. Use this repository as a [template](https://docs.github.com/en/free-pro-team@latest/github/creating-cloning-and-archiving-repositories/creating-a-repository-from-a-template).
+
+2. Now, go to [EvalAI](http://127.0.0.1:8888/) to fetch the following details -
+    1. `evalai_user_auth_token` - Go to the [profile page](http://127.0.0.1:8888/web/profile) after logging in and click on `Get your Auth Token` to copy your auth token.
+    2. `host_team_pk` - Go to the [host team page](http://127.0.0.1:8888/web/challenge-host-teams) and copy the `ID` for the team you want to use for challenge creation.
+    3. `evalai_host_url` - Use `http://localhost:8000`
+
+3. Run the following command from the root directory:
+   `python local/challenge_processing_script.py`
+
+4. If the challenge config contains errors, they will be printed to the terminal; otherwise the challenge will be created on EvalAI.
+
+5. Go to [Hosted Challenges](http://127.0.0.1:8888/web/hosted-challenges) to view your challenge. The challenge can be approved locally by following the steps [here](https://evalai.readthedocs.io/en/latest/approve_challenge.html).
+
+6. To update the challenge locally, make changes in the repository and repeat step 3.
+
 ## Facing problems in creating a challenge?
 
 Please feel free to open issues on our [GitHub Repository](https://github.com/Cloud-CV/EvalAI-Starter/issues) or contact us at team@cloudcv.org if you have issues.