Add option to automatically generate failing.llvm #2680

Merged
merged 5 commits on Jan 16, 2025
Changes from 4 commits
11 changes: 11 additions & 0 deletions kevm-pyk/src/tests/conftest.py
@@ -10,6 +10,12 @@


def pytest_addoption(parser: Parser) -> None:
    parser.addoption(
        '--save-failing',
        action='store_true',
        default=False,
        help='Save failing tests to the failing.llvm file',
    )
    parser.addoption(
        '--update-expected-output',
        action='store_true',
@@ -47,6 +53,11 @@ def pytest_addoption(parser: Parser) -> None:
    )


@pytest.fixture
def save_failing(request: FixtureRequest) -> bool:
    return request.config.getoption('--save-failing')


@pytest.fixture
def update_expected_output(request: FixtureRequest) -> bool:
    return request.config.getoption('--update-expected-output')
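For orientation, a minimal sketch (not part of this PR; the test name and body are hypothetical) of how a test module picks up the new fixture:

# Hypothetical consumer of the new fixture, illustration only.
# pytest injects the value of --save-failing through the save_failing fixture
# defined above in conftest.py.
def test_uses_save_failing_flag(save_failing: bool) -> None:
    # True when the suite is invoked as `pytest --save-failing`, False otherwise.
    assert save_failing in (True, False)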
46 changes: 33 additions & 13 deletions kevm-pyk/src/tests/integration/test_conformance.py
@@ -30,14 +30,18 @@
TEST_DIR: Final = REPO_ROOT / 'tests/ethereum-tests'
GOLDEN: Final = (REPO_ROOT / 'tests/templates/output-success-llvm.json').read_text().rstrip()
TEST_FILES_WITH_CID_0: Final = (REPO_ROOT / 'tests/bchain.0.chainId').read_text().splitlines()
FAILING_TESTS_FILE: Final = REPO_ROOT / 'tests/failing.llvm'
SLOW_TESTS_FILE: Final = REPO_ROOT / 'tests/slow.llvm'


def _test(gst_file: Path, schedule: str, mode: str, usegas: bool) -> None:
def _test(gst_file: Path, schedule: str, mode: str, usegas: bool, save_failing: bool = False) -> None:
    skipped_gst_tests = SKIPPED_TESTS.get(gst_file, [])
    if '*' in skipped_gst_tests:
        pytest.skip()

    chainid = 0 if str(gst_file.relative_to(TEST_DIR)) in TEST_FILES_WITH_CID_0 else 1
    failing_tests: list[str] = []
    gst_file_relative_path: Final[str] = str(gst_file.relative_to(TEST_DIR))
    chainid = 0 if gst_file_relative_path in TEST_FILES_WITH_CID_0 else 1

    with gst_file.open() as f:
        gst_data = json.load(f)
@@ -47,7 +51,23 @@ def _test(gst_file: Path, schedule: str, mode: str, usegas: bool) -> None:
        if test_name in skipped_gst_tests:
            continue
        res = interpret({test_name: test}, schedule, mode, chainid, usegas, check=False)
        _assert_exit_code_zero(res)

        try:
            _assert_exit_code_zero(res)
        except AssertionError as _exception:
            if not save_failing:
                raise _exception
            failing_tests.append(test_name)

    if failing_tests:
        if save_failing:
            with FAILING_TESTS_FILE.open('a') as ff:
                if len(failing_tests) == len(gst_data):
                    ff.write(f'{gst_file_relative_path},*\n')
                else:
                    for test_name in failing_tests:
                        ff.write(f'{gst_file_relative_path},{test_name}\n')
        raise AssertionError()


def _assert_exit_code_zero(pattern: Pattern) -> None:
@@ -66,8 +86,8 @@ def _assert_exit_code_zero(pattern: Pattern) -> None:


def _skipped_tests() -> dict[Path, list[str]]:
    slow_tests = read_csv_file(REPO_ROOT / 'tests/slow.llvm')
    failing_tests = read_csv_file(REPO_ROOT / 'tests/failing.llvm')
    slow_tests = read_csv_file(SLOW_TESTS_FILE)
    failing_tests = read_csv_file(FAILING_TESTS_FILE)
    skipped: dict[Path, list[str]] = {}
    for test_file, test in slow_tests + failing_tests:
        test_file = TEST_DIR / test_file
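The read_csv_file helper itself is not shown in this diff; a plausible sketch, assuming it parses '<path>,<name>' lines into (Path, str) pairs as the signature in the next hunk header suggests:

# Sketch under assumptions: the real read_csv_file is defined elsewhere in this module.
import csv
from pathlib import Path


def read_csv_file_sketch(csv_file: Path) -> tuple[tuple[Path, str], ...]:
    # Each non-empty row is '<GST file relative path>,<test name or *>'.
    with csv_file.open(newline='') as f:
        return tuple((Path(row[0]), row[1]) for row in csv.reader(f) if row)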
@@ -93,8 +113,8 @@ def read_csv_file(csv_file: Path) -> tuple[tuple[Path, str], ...]:
    VM_TESTS,
    ids=[str(test_file.relative_to(VM_TEST_DIR)) for test_file in VM_TESTS],
)
def test_vm(test_file: Path) -> None:
    _test(test_file, 'DEFAULT', 'VMTESTS', True)
def test_vm(test_file: Path, save_failing: bool) -> None:
    _test(test_file, 'DEFAULT', 'VMTESTS', True, save_failing)


@pytest.mark.skip(reason='failing / slow VM tests')
@@ -103,8 +123,8 @@ def test_vm(test_file: Path) -> None:
    SKIPPED_VM_TESTS,
    ids=[str(test_file.relative_to(VM_TEST_DIR)) for test_file in SKIPPED_VM_TESTS],
)
def test_rest_vm(test_file: Path) -> None:
    _test(test_file, 'DEFAULT', 'VMTESTS', True)
def test_rest_vm(test_file: Path, save_failing: bool) -> None:
    _test(test_file, 'DEFAULT', 'VMTESTS', True, save_failing)


ALL_TEST_DIR: Final = TEST_DIR / 'BlockchainTests/GeneralStateTests'
@@ -118,8 +138,8 @@ def test_rest_vm(test_file: Path) -> None:
    BCHAIN_TESTS,
    ids=[str(test_file.relative_to(ALL_TEST_DIR)) for test_file in BCHAIN_TESTS],
)
def test_bchain(test_file: Path) -> None:
    _test(test_file, 'CANCUN', 'NORMAL', True)
def test_bchain(test_file: Path, save_failing: bool) -> None:
    _test(test_file, 'CANCUN', 'NORMAL', True, save_failing)


@pytest.mark.skip(reason='failing / slow blockchain tests')
@@ -128,5 +148,5 @@ def test_bchain(test_file: Path) -> None:
    SKIPPED_BCHAIN_TESTS,
    ids=[str(test_file.relative_to(ALL_TEST_DIR)) for test_file in SKIPPED_BCHAIN_TESTS],
)
def test_rest_bchain(test_file: Path) -> None:
    _test(test_file, 'CANCUN', 'NORMAL', True)
def test_rest_bchain(test_file: Path, save_failing: bool) -> None:
    _test(test_file, 'CANCUN', 'NORMAL', True, save_failing)
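Finally, a hypothetical invocation sketch; the repository's actual entry point (for example a Make target) is not shown in this diff, and the working directory is assumed to be kevm-pyk/:

# Hypothetical invocation, illustration only: run the conformance suite with
# --save-failing so failures are appended to tests/failing.llvm instead of
# stopping the run at the first AssertionError.
import pytest

# Equivalent to: pytest src/tests/integration/test_conformance.py --save-failing
pytest.main(['src/tests/integration/test_conformance.py', '--save-failing'])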