Commit d1bc449
chore(test): use record_property instead of returning from tests
Returning from a test is deprecated in pytest:

```
build/test_binary_size.py::test_firecracker_binary_size
  PytestReturnNotNoneWarning: Expected None, but returned
  ('2505192 B', '2520632 +/- 5.0% B'), which will be an error in a
  future version of pytest. Did you mean to use `assert` instead of `return`?
```

This was used by the test reporting framework to augment test results. We will
switch to a more mainstream way of doing test reports in a subsequent commit.

Signed-off-by: Pablo Barbáchano <[email protected]>
Parent: 377ced0
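For context: `record_property` is a built-in pytest fixture that attaches key/value pairs to a test's report, so results can be surfaced without returning them from the test function. A minimal sketch of the pattern this commit adopts (the name and numbers below are illustrative, not taken from the diff):

```python
def test_example(record_property):
    """Record report metadata instead of returning it from the test."""
    size = 2_505_192  # hypothetical measured value, in bytes
    record_property("example_binary_size", f"{size}B")
    assert size < 3_000_000  # pass/fail is still decided by assert
```

Recorded properties are exposed as `user_properties` on the pytest `TestReport`, and with `--junitxml` they are emitted in the report's `<properties>` element, which makes this friendlier to standard report tooling than scraping return values.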

5 files changed: +46 −51 lines

tests/integration_tests/build/test_binary_size.py (9 additions, 8 deletions)

```diff
@@ -8,6 +8,7 @@
 
 import host_tools.cargo_build as host
 
+
 MACHINE = platform.machine()
 """ Platform definition used to select the correct size target"""
 
@@ -33,7 +34,7 @@
 
 
 @pytest.mark.timeout(500)
-def test_firecracker_binary_size():
+def test_firecracker_binary_size(record_property):
     """
     Test if the size of the firecracker binary is within expected ranges.
 
@@ -48,14 +49,14 @@ def test_firecracker_binary_size():
        BINARY_SIZE_TOLERANCE,
    )
 
-    return (
-        f"{result} B",
-        f"{FC_BINARY_SIZE_TARGET} +/- {BINARY_SIZE_TOLERANCE * 100}% B",
+    record_property(
+        "firecracker_binary_size",
+        f"{result}B ({FC_BINARY_SIZE_TARGET}B ±{BINARY_SIZE_TOLERANCE:.0%})",
    )
 
 
 @pytest.mark.timeout(500)
-def test_jailer_binary_size():
+def test_jailer_binary_size(record_property):
     """
     Test if the size of the jailer binary is within expected ranges.
 
@@ -70,9 +71,9 @@ def test_jailer_binary_size():
        BINARY_SIZE_TOLERANCE,
    )
 
-    return (
-        f"{result} B",
-        f"{JAILER_BINARY_SIZE_TARGET} +/- {BINARY_SIZE_TOLERANCE * 100}% B",
+    record_property(
+        "jailer_binary_size",
+        f"{result}B ({JAILER_BINARY_SIZE_TARGET}B ±{BINARY_SIZE_TOLERANCE:.0%})",
    )
```
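Aside from the `record_property` switch, the messages above move from manual scaling (`{BINARY_SIZE_TOLERANCE * 100}%`) to the `%` format spec, which multiplies by 100 and appends the percent sign itself. A quick illustration, assuming a 5% tolerance (consistent with the `+/- 5.0%` in the deprecation warning quoted in the commit message):

```python
BINARY_SIZE_TOLERANCE = 0.05  # assumed value, matching the warning above

old_style = f"+/- {BINARY_SIZE_TOLERANCE * 100}% B"  # '+/- 5.0% B'
new_style = f"±{BINARY_SIZE_TOLERANCE:.0%}"          # '±5%'
```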

tests/integration_tests/build/test_coverage.py (4 additions, 3 deletions)

```diff
@@ -46,7 +46,7 @@
 
 
 @pytest.mark.timeout(400)
-def test_coverage(test_fc_session_root_path, test_session_tmp_path):
+def test_coverage(test_fc_session_root_path, test_session_tmp_path, record_property):
     """Test line coverage for rust tests is within bounds.
 
     The result is extracted from the $KCOV_COVERAGE_FILE file created by kcov
@@ -131,6 +131,7 @@ def test_coverage(test_fc_session_root_path, test_session_tmp_path):
        )
    )
 
+    record_property(
+        "coverage", f"{coverage}% {coverage_target_pct}% ±{COVERAGE_MAX_DELTA:.2}%"
+    )
    assert coverage <= coverage_target_pct + COVERAGE_MAX_DELTA, coverage_high_msg
-
-    return (f"{coverage}%", f"{coverage_target_pct}% +/- {COVERAGE_MAX_DELTA * 100}%")
```
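With the properties in place, any reporting layer can pull values straight out of pytest rather than out of return tuples. A hypothetical sketch of how a later reporting hook could consume them (this `conftest.py` is illustrative and not part of this commit):

```python
# conftest.py — hypothetical sketch, not part of this commit


def pytest_runtest_logreport(report):
    """Print every property recorded during a test's call phase."""
    if report.when == "call":
        for name, value in report.user_properties:
            print(f"[report] {report.nodeid}: {name} = {value}")
```

Note that `record_property` is called before the `assert` in `test_coverage`, so the measured coverage is captured in the report even when the bounds check fails.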

tests/integration_tests/performance/test_boottime.py (11 additions, 12 deletions)

```diff
@@ -30,7 +30,7 @@ def test_no_boottime(test_microvm_with_api):
     assert not timestamps
 
 
-def test_boottime_no_network(test_microvm_with_api):
+def test_boottime_no_network(test_microvm_with_api, record_property):
     """
     Check boot time of microVM without a network device.
 
@@ -40,12 +40,11 @@ def test_boottime_no_network(test_microvm_with_api):
     vm.jailer.extra_args.update({"boot-timer": None})
     _ = _configure_and_run_vm(vm)
     boottime_us = _test_microvm_boottime(vm)
-    print("Boot time with no network is: " + str(boottime_us) + " us")
+    print(f"Boot time with no network is: {boottime_us} us")
+    record_property("boottime_no_network", f"{boottime_us} us < {MAX_BOOT_TIME_US} us")
 
-    return f"{boottime_us} us", f"< {MAX_BOOT_TIME_US} us"
 
-
-def test_boottime_with_network(test_microvm_with_api, network_config):
+def test_boottime_with_network(test_microvm_with_api, network_config, record_property):
     """
     Check boot time of microVM with a network device.
 
@@ -55,12 +54,13 @@ def test_boottime_with_network(test_microvm_with_api, network_config):
     vm.jailer.extra_args.update({"boot-timer": None})
     _tap = _configure_and_run_vm(vm, {"config": network_config, "iface_id": "1"})
     boottime_us = _test_microvm_boottime(vm)
-    print("Boot time with network configured is: " + str(boottime_us) + " us")
-
-    return f"{boottime_us} us", f"< {MAX_BOOT_TIME_US} us"
+    print(f"Boot time with network configured is: {boottime_us} us")
+    record_property(
+        "boottime_with_network", f"{boottime_us} us < {MAX_BOOT_TIME_US} us"
+    )
 
 
-def test_initrd_boottime(test_microvm_with_initrd):
+def test_initrd_boottime(test_microvm_with_initrd, record_property):
     """
     Check boot time of microVM when using an initrd.
 
@@ -70,9 +70,8 @@ def test_initrd_boottime(test_microvm_with_initrd):
     vm.jailer.extra_args.update({"boot-timer": None})
     _tap = _configure_and_run_vm(vm, initrd=True)
     boottime_us = _test_microvm_boottime(vm, max_time_us=INITRD_BOOT_TIME_US)
-    print("Boot time with initrd is: " + str(boottime_us) + " us")
-
-    return f"{boottime_us} us", f"< {INITRD_BOOT_TIME_US} us"
+    print(f"Boot time with initrd is: {boottime_us} us")
+    record_property("boottime_initrd", f"{boottime_us} us < {MAX_BOOT_TIME_US} us")
 
 
 def _test_microvm_boottime(vm, max_time_us=MAX_BOOT_TIME_US):
```

tests/integration_tests/performance/test_process_startup_time.py (6 additions, 7 deletions)

```diff
@@ -15,7 +15,7 @@
 # TODO: Keep a `current` startup time in S3 and validate we don't regress
 
 
-def test_startup_time_new_pid_ns(test_microvm_with_api):
+def test_startup_time_new_pid_ns(test_microvm_with_api, record_property):
     """
     Check startup time when jailer is spawned in a new PID namespace.
 
@@ -24,20 +24,20 @@ def test_startup_time_new_pid_ns(test_microvm_with_api):
     microvm = test_microvm_with_api
     microvm.bin_cloner_path = None
     microvm.jailer.new_pid_ns = True
-    return _test_startup_time(microvm)
+    record_property("startup_time_new_pid_μs", _test_startup_time(microvm))
 
 
-def test_startup_time_daemonize(test_microvm_with_api):
+def test_startup_time_daemonize(test_microvm_with_api, record_property):
     """
     Check startup time when jailer spawns Firecracker in a new PID ns.
 
     @type: performance
     """
     microvm = test_microvm_with_api
-    return _test_startup_time(microvm)
+    record_property("startup_time_daemonize_μs", _test_startup_time(microvm))
 
 
-def test_startup_time_custom_seccomp(test_microvm_with_api):
+def test_startup_time_custom_seccomp(test_microvm_with_api, record_property):
     """
     Check the startup time when using custom seccomp filters.
 
@@ -46,8 +46,7 @@ def test_startup_time_custom_seccomp(test_microvm_with_api):
     microvm = test_microvm_with_api
 
     _custom_filter_setup(microvm)
-
-    return _test_startup_time(microvm)
+    record_property("startup_time_custom_seccomp_μs", _test_startup_time(microvm))
 
 
 def _test_startup_time(microvm):
```

tests/integration_tests/performance/test_versioned_serialization_benchmark.py (16 additions, 21 deletions)

```diff
@@ -7,6 +7,9 @@
 import json
 import shutil
 import platform
+from pathlib import Path
+
+import pytest
 
 from framework import utils
 from framework.defs import FC_WORKSPACE_DIR
@@ -68,14 +71,15 @@ def _check_statistics(directory, mean):
         attribute = "no-crc"
 
     measure = BASELINES[proc_model[0]][bench][attribute]
-    low = round(measure["target"] - measure["delta"], 3)
-    high = round(measure["target"] + measure["delta"], 3)
-    assert low <= mean <= high, "Benchmark result {} has changed!".format(directory)
+    target, delta = measure["target"], measure["delta"]
+    assert target == pytest.approx(
+        mean, abs=delta, rel=1e-6
+    ), f"Benchmark result {directory} has changed!"
 
-    return directory, f"{mean} ms", f"{low} <= result <= {high}"
+    return f"{target - delta} <= result <= {target + delta}"
 
 
-def test_serialization_benchmark():
+def test_serialization_benchmark(monkeypatch, record_property):
     """
     Benchmark test for MicrovmState serialization/deserialization.
 
@@ -84,18 +88,16 @@ def test_serialization_benchmark():
     logger = logging.getLogger("serialization_benchmark")
 
     # Move into the benchmark directory
-    os.chdir(BENCHMARK_DIRECTORY)
+    monkeypatch.chdir(BENCHMARK_DIRECTORY)
 
     # Run benchmark test
     cmd = "cargo bench --target {}".format(DEFAULT_BUILD_TARGET)
     result = utils.run_cmd_sync(cmd)
     assert result.returncode == 0
 
-    results_and_criteria = ["", ""]
-
     # Parse each Criterion benchmark from the result folder and
     # check the results against a baseline
-    results_dir = os.path.join(FC_WORKSPACE_DIR, "build/vmm_benchmark")
+    results_dir = Path(FC_WORKSPACE_DIR) / "build/vmm_benchmark"
     for directory in os.listdir(results_dir):
         # Ignore the 'report' directory as it is of no use to us
         if directory == "report":
@@ -104,23 +106,16 @@ def test_serialization_benchmark():
         logger.info("Benchmark: %s", directory)
 
         # Retrieve the 'estimates.json' file content
-        json_file = os.path.join(
-            results_dir, "{}/{}".format(directory, "base/estimates.json")
-        )
-        with open(json_file, "r", encoding="utf-8") as read_file:
-            estimates = json.load(read_file)
+        json_file = results_dir / directory / "base/estimates.json"
+        estimates = json.loads(json_file.read_text())
 
         # Save the Mean measurement(nanoseconds) and transform it(milliseconds)
         mean = estimates["mean"]["point_estimate"] / NSEC_IN_MSEC
         logger.info("Mean: %f", mean)
 
-        res = _check_statistics(directory, round(mean, 3))
-
-        results_and_criteria[0] += f"{res[0]}: {res[1]}, "
-        results_and_criteria[1] += f"{res[0]}: {res[2]}, "
+        criteria = _check_statistics(directory, mean)
+        record_property(f"{directory}_ms", mean)
+        record_property(f"{directory}_criteria", criteria)
 
     # Cleanup the Target directory
     shutil.rmtree(results_dir)
-
-    # Return pretty formatted data for the test report.
-    return results_and_criteria[0], results_and_criteria[1]
```
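Two smaller cleanups in this file are worth unpacking. `monkeypatch.chdir` replaces the bare `os.chdir` so the working-directory change is undone automatically when the test finishes. And the hand-rolled `low <= mean <= high` check becomes `pytest.approx`: when both `abs` and `rel` are given, a value passes if it is within either tolerance (the larger of the two wins), so pairing `abs=delta` with a negligible `rel=1e-6` reproduces the old `target ± delta` interval. A standalone illustration with made-up numbers:

```python
import pytest

target, delta = 10.0, 0.5  # hypothetical baseline and tolerance

# 10.4 is within target ± delta, so the comparison holds
assert target == pytest.approx(10.4, abs=delta, rel=1e-6)

# 10.6 falls outside the interval, so it does not
assert target != pytest.approx(10.6, abs=delta, rel=1e-6)
```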
