
Commit e8a1b59
squad-download-attachments: add path to use local tuxrun's tar.xz files
Signed-off-by: Anders Roxell <[email protected]>
roxell committed Dec 21, 2023
1 parent 8d57e53 commit e8a1b59
Showing 2 changed files with 101 additions and 83 deletions.
README.md: 3 changes (2 additions, 1 deletion)
@@ -16,7 +16,7 @@ pip install -r requirements.txt
 
 ```
 ❯ pipenv run ./squad-list-changes -h
-usage: squad-list-changes [-h] --group GROUP --project PROJECT --build BUILD --base-build BASE_BUILD
+usage: squad-list-changes [-h] [--group GROUP --project PROJECT --build BUILD --base-build BASE_BUILD] [--path PATH_TO_TUXRUN]
 List all changes for a squad build, compared to a base build
@@ -27,6 +27,7 @@ optional arguments:
   --build BUILD         squad build
   --base-build BASE_BUILD
                         squad build to compare to
+  --path PATH_TO_TUXRUN path to tuxrun artefacts
 ```
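With the new option the post-processing no longer has to go through SQUAD: pointing the script at a directory that already holds a tuxrun-produced mmtests-*.tar.xz skips the download step, and generate_files() unpacks the tarball and writes the sorted JSON plus the per-iteration CSV into that same directory, prefixed with "local-". A minimal invocation might look like the following; the directory path is purely illustrative:

```
# hypothetical directory containing a tuxrun-produced mmtests-*.tar.xz
❯ pipenv run ./squad-download-attachments --path /path/to/tuxrun-artefacts
```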
squad-download-attachments: 181 changes (99 additions, 82 deletions)
@@ -11,6 +11,7 @@ import json
 import logging
 from os import chdir
 from pathlib import Path
+import re
 import statistics
 import sys
 from squad_client.core.api import SquadApi
@@ -33,107 +34,123 @@ def arg_parser():
 
     parser.add_argument(
         "--group",
-        required=True,
+        required=False,
         help="squad group",
     )
 
     parser.add_argument(
         "--project",
-        required=True,
+        required=False,
         help="squad project",
     )
 
     parser.add_argument(
         "--build",
-        required=True,
+        required=False,
         help="squad build",
     )
 
+    parser.add_argument(
+        "--path",
+        required=False,
+        help="squad build",
+    )
+
     return parser
 
 
+def generate_files(dirname, fileprefix):
+    file = glob.glob(f"{dirname}/mmtests-*.tar.xz")
+    # Extract the json file that contains the benchmark data.
+    with contextlib.closing(lzma.LZMAFile(file[0])) as xz:
+        with tarfile.open(fileobj=xz) as f:
+            f.extractall(dirname)
+
+    file = glob.glob(f"{dirname}/output/*.json")
+    filename = Path(file[0]).name
+    filename = re.sub(r'^.*CONFIG', fileprefix, filename)
+    file_write = f"{dirname}/{filename}"
+    # sort the json keys in the benchmark data file.
+    with open(file[0], mode="r") as read_file:
+        pjson = json.dumps(json.load(read_file), sort_keys=True, indent=4)
+    with open(file_write, mode="w") as write_file:
+        write_file.write(pjson)
+
+    with open(file_write.replace(".json", ".csv"), mode="w") as csv_file:
+        csv_writer = csv.writer(csv_file)
+        dict_json = json.loads(pjson)
+        if not dict_json["results"]:
+            return False
+        headers = ["median", "mean", "standard diviation", "name", "iteration", "name_iteration", "raw data..."]
+        csv_writer.writerow(headers)
+        for key in dict_json["results"]["_ResultData"]:
+            iterations = 0
+            for k in dict_json["results"]["_ResultData"][key]:
+                csv_data = []
+                float_arr = []
+                for number in k["Values"]:
+                    float_arr.append(float(number))
+                csv_data.append(statistics.median(float_arr))
+                csv_data.append(statistics.mean(float_arr))
+                csv_data.append(statistics.stdev(float_arr))
+                csv_data.append(key)
+                iterations = iterations + 1
+                csv_data.append(f"iteration_{iterations}")
+                csv_data.append(f"{key}_iteration_{iterations}")
+                csv_data.extend(k['Values'])
+                csv_writer.writerow(csv_data)
+    return True
+
+
 def run():
     args = arg_parser().parse_args()
 
-    group = Squad().group(args.group)
-    if group is None:
-        logger.error(f"Get group failed. Group not found: '{args.group}'.")
-        return -1
-
-    project = group.project(args.project)
-    if project is None:
-        logger.error(f"Get project failed. Project not found: '{args.project}'.")
-        return -1
-
-    build = project.build(args.build)
-    if build is None:
-        logger.error(f"Get build failed. Build not found: '{args.build}'.")
-        return -1
-
-    environments = project.environments(count=ALL, ordering="slug").values()
-    if not environments:
-        logger.error("Get environments failed. No environments found.")
-        return -1
-
-    suites = project.suites(count=ALL, ordering="slug").values()
-    if not suites:
-        logger.error("Get suites failed. No suites found.")
-        return -1
-
-    attachment_dir = Path('stored_attachments/' + args.build)
-    testruns = build.testruns()
-    for testrun in testruns:
-        if not TestRun(testrun).attachments:
-            continue
-        env_name = Environment(getid((TestRun(testrun).environment))).slug
-        dirname = Path(f"{attachment_dir}/{env_name}_{str(TestRun(testrun).id)}")
-        print(dirname)
-        # Only picking up 'qemu-' environments
-        # The check will be 'not "build" in dirname.name' when DUT in tuxbridge supports attachments.
-        if "qemu-" in dirname.name:
-            Path.mkdir(dirname, parents=True, exist_ok=True)
-            chdir(dirname)
-            download_attachments(TestRun(testrun))
-            chdir(sys.path[0])
-
-            # only working for mmtests-* for now.
-            file = glob.glob(f"{dirname}/mmtests-*.tar.xz")
-            # Extract the json file that contains the benchmark data.
-            with contextlib.closing(lzma.LZMAFile(file[0])) as xz:
-                with tarfile.open(fileobj=xz) as f:
-                    f.extractall(dirname)
-
-            file = glob.glob(f"{dirname}/output/*.json")
-            file_write = file[0].replace("/output", "")
-            # sort the json keys in the benchmark data file.
-            with open(file[0], mode="r") as read_file:
-                pjson = json.dumps(json.load(read_file), sort_keys=True, indent=4)
-            with open(file_write, mode="w") as write_file:
-                write_file.write(pjson)
-
-            with open(file_write.replace(".json", ".csv"), mode="w") as csv_file:
-                csv_writer = csv.writer(csv_file)
-                dict_json = json.loads(pjson)
-                if not dict_json["results"]:
-                    continue
-                headers = ["median", "mean", "standard diviation", "name", "iteration", "name_iteration", "raw data..."]
-                csv_writer.writerow(headers)
-                for key in dict_json["results"]["_ResultData"]:
-                    iterations = 0
-                    for k in dict_json["results"]["_ResultData"][key]:
-                        csv_data = []
-                        float_arr = []
-                        for number in k["Values"]:
-                            float_arr.append(float(number))
-                        csv_data.append(statistics.median(float_arr))
-                        csv_data.append(statistics.mean(float_arr))
-                        csv_data.append(statistics.stdev(float_arr))
-                        csv_data.append(key)
-                        iterations = iterations + 1
-                        csv_data.append(f"iteration_{iterations}")
-                        csv_data.append(f"{key}_iterattion_{iterations}")
-                        csv_data.extend(k['Values'])
-                        csv_writer.writerow(csv_data)
+    if args.path:
+        generate_files(args.path, "local-")
+    else:
+        group = Squad().group(args.group)
+        if group is None:
+            logger.error(f"Get group failed. Group not found: '{args.group}'.")
+            return -1
+
+        project = group.project(args.project)
+        if project is None:
+            logger.error(f"Get project failed. Project not found: '{args.project}'.")
+            return -1
+
+        build = project.build(args.build)
+        if build is None:
+            logger.error(f"Get build failed. Build not found: '{args.build}'.")
+            return -1
+
+        environments = project.environments(count=ALL, ordering="slug").values()
+        if not environments:
+            logger.error("Get environments failed. No environments found.")
+            return -1
+
+        suites = project.suites(count=ALL, ordering="slug").values()
+        if not suites:
+            logger.error("Get suites failed. No suites found.")
+            return -1
+
+        attachment_dir = Path('stored_attachments/' + args.build)
+        testruns = build.testruns()
+        for testrun in testruns:
+            if not TestRun(testrun).attachments:
+                continue
+            env_name = Environment(getid((TestRun(testrun).environment))).slug
+            dirname = Path(f"{attachment_dir}/{env_name}_{str(TestRun(testrun).id)}")
+            print(dirname)
+            # Only picking up 'qemu-' environments
+            # The check will be 'not "build" in dirname.name' when DUT in tuxbridge supports attachments.
+            if "qemu-" in dirname.name:
+                Path.mkdir(dirname, parents=True, exist_ok=True)
+                chdir(dirname)
+                download_attachments(TestRun(testrun))
+                chdir(sys.path[0])
+                fileprefix = f"tux-{re.sub(r'_[0-9]+$', '-', dirname.name.replace('qemu-', ''))}"
+
+                generate_files(dirname, fileprefix)
 
 
 if __name__ == "__main__":
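For readers following the CSV layout written by generate_files(), here is a minimal, self-contained sketch (not part of the commit) of how one data row is derived from a single iteration's "Values" array; the benchmark name and sample values are invented for illustration:

```python
import statistics

# Hypothetical benchmark key and raw samples, standing in for one entry of
# dict_json["results"]["_ResultData"][key][i]["Values"] in generate_files().
key = "example-benchmark"
values = ["10.0", "12.0", "11.0", "13.0"]
iteration = 1

floats = [float(v) for v in values]
row = [
    statistics.median(floats),           # median column
    statistics.mean(floats),             # mean column
    statistics.stdev(floats),            # standard deviation column
    key,                                 # name column
    f"iteration_{iteration}",            # iteration column
    f"{key}_iteration_{iteration}",      # name_iteration column
    *values,                             # raw data columns
]
print(row)
```

Each row therefore carries the three summary statistics first, then the identifying columns, then the raw samples, matching the header row the script writes.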
