diff --git a/.gitlab-ci.d/opentitan/build.yml b/.gitlab-ci.d/opentitan/build.yml new file mode 100644 index 0000000000000..9ecb7f588c163 --- /dev/null +++ b/.gitlab-ci.d/opentitan/build.yml @@ -0,0 +1,76 @@ +#------------------------------------------------------------------------------ +# QEMU OpenTitan CI +#------------------------------------------------------------------------------ + +build-clang: + tags: + - qemu_ot + stage: build + # build QEMU for OT platforms, ensure subprojects are cleaned up first + # store generated header files and ninja command file as artifacts, which are + # required to run the clang-tidy stage + # libvhost-user.c generates sign-compare warnings with clang, so disable + # warning-as-error + # the alternative would be to build here with GCC, then rebuild everything + # with clang in clang-tidy stage, as meson generates different, incompatible + # warning list between GCC and clang... + script: + - rm -rf build + - git clean -dffx subprojects + - mkdir build + - cd build + - ../configure --cc=clang-19 --disable-werror $QEMU_BUILD_OPTS + --target-list=riscv32-softmmu,riscv64-softmmu,x86_64-linux-user + - ninja + - ninja qemu-img + - strip qemu-system-riscv32 qemu-img + - QEMU_DIR="$(cd .. 
&& pwd -P)"; cat compile_commands.json | + sed -E 's,'"$QEMU_DIR"',@QEMU_DIR@,g' > compile_commands.json.tpl + - ../scripts/opentitan/swexit.py -t ibexdemo -b 0x0 -o exit_id.bin + - ../scripts/opentitan/swexit.py -t earlgrey -b 0x80 -o exit_eg.bin + artifacts: + public: false + expire_in: 1 hour + name: "qemu-ot" + paths: + - subprojects/libtomcrypt/src/headers/*.h + - build/*.h + - build/qapi/*.h + - build/trace/*.h + - build/compile_commands.json.tpl + - build/qemu-system-riscv32 + - build/qemu-img + - build/exit_*.bin + +build-gcc: + tags: + - qemu_ot + stage: build + # build QEMU for OT platforms, ensure subprojects are cleaned up first + # build with GCC may need different warnings + script: + - rm -rf build-gcc + - mkdir build-gcc + - cd build-gcc + - ../configure --cc=gcc $QEMU_BUILD_OPTS --target-list=riscv32-softmmu,riscv64-softmmu + - ninja + +format: + tags: + - qemu_ot + stage: build + script: + - scripts/opentitan/ot-format.sh --ci -i + - git status -s + - test -z "$(git status -s)" || git diff + - test -z "$(git status -s)" + +tidy: + tags: + - qemu_ot + stage: build + needs: ["build-clang"] + script: + - QEMU_DIR="$(pwd -P)"; cat build/compile_commands.json.tpl | + sed -E 's,@QEMU_DIR@,'"$QEMU_DIR"',g' > build/compile_commands.json + - scripts/opentitan/ot-tidy.sh --ci -p build diff --git a/.gitlab-ci.d/opentitan/ot-bmtests.yml b/.gitlab-ci.d/opentitan/ot-bmtests.yml new file mode 100644 index 0000000000000..ebccb5fb59e4f --- /dev/null +++ b/.gitlab-ci.d/opentitan/ot-bmtests.yml @@ -0,0 +1,31 @@ +# Baremetal Tests for Earlgrey + +baremetal-eg-tests: + tags: + - qemu_ot + stage: test + needs: + - "build-clang" + - project: rv/sandbox/rot/baremetal_test + job: build-eg + ref: $BAREMETAL_REF + artifacts: true + variables: + BASEDIR: bmtests/eg + CI_EXEC: 1 + script: + - python3 -m virtualenv .venv + - . 
.venv/bin/activate + - pip3 install -r scripts/opentitan/requirements.txt + - rm -rf ${BASEDIR} + - mkdir -p ${BASEDIR} + - zstd -d --stdout ot-eg-bmtest.tar.zst | tar xf - -C ${BASEDIR} + - find ${BASEDIR} + - scripts/opentitan/pyot.py -vv -c ${BASEDIR}/data/qemu/ot-earlgrey/pyot-eflash.hjson + -w ot-earlgrey.csv -R -T 3 + artifacts: + public: false + expire_in: 1 year + name: "bm-ot-earlgrey" + paths: + - ot-earlgrey.csv diff --git a/.gitlab-ci.d/opentitan/ot-smoke.yml b/.gitlab-ci.d/opentitan/ot-smoke.yml new file mode 100644 index 0000000000000..ffdfbb7cfad4a --- /dev/null +++ b/.gitlab-ci.d/opentitan/ot-smoke.yml @@ -0,0 +1,12 @@ +smoke-tests-ot: + tags: + - qemu_ot + stage: test + needs: ["build-clang"] + script: + - build/qemu-system-riscv32 -M help | grep ibexdemo + - build/qemu-system-riscv32 -M help | grep ot-earlgrey + - timeout -s KILL 4 build/qemu-system-riscv32 -M ibexdemo -nographic + -device loader,addr=0x100080,file=build/exit_id.bin -d in_asm,int + - timeout -s KILL 4 build/qemu-system-riscv32 -M ot-earlgrey,no_epmp_cfg=true -nographic + -object ot-rom_img,id=rom,file=build/exit_eg.bin -d in_asm,int diff --git a/.gitlab-ci.d/opentitan/pylint-ot.yml b/.gitlab-ci.d/opentitan/pylint-ot.yml new file mode 100644 index 0000000000000..0f485d539226c --- /dev/null +++ b/.gitlab-ci.d/opentitan/pylint-ot.yml @@ -0,0 +1,12 @@ +pylint-ot: + tags: + - qemu_ot + stage: build + script: + # disable duplicate code as all front end are tested at once (false positive) + # disable fixme ("# TODO" comments) + - python3 -m virtualenv .venv + - . 
.venv/bin/activate + - pip3 install -r scripts/opentitan/requirements.txt + - pylint --rcfile scripts/opentitan/.pylintrc -d 'duplicate-code' -d 'fixme' + scripts/opentitan/*.py python/qemu/jtagtools python/qemu/ot diff --git a/.gitlab-ci.d/opentitan/qemu-ot.yml b/.gitlab-ci.d/opentitan/qemu-ot.yml new file mode 100644 index 0000000000000..e0800b57d884d --- /dev/null +++ b/.gitlab-ci.d/opentitan/qemu-ot.yml @@ -0,0 +1,9 @@ +variables: + BAREMETAL_REF: "b0-250310-1" + QEMU_BUILD_OPTS: "--disable-install-blobs" + +include: + - local: '/.gitlab-ci.d/opentitan/build.yml' + - local: '/.gitlab-ci.d/opentitan/pylint-ot.yml' + - local: '/.gitlab-ci.d/opentitan/ot-smoke.yml' + - local: '/.gitlab-ci.d/opentitan/ot-bmtests.yml' diff --git a/hw/opentitan/ot_csrng.c b/hw/opentitan/ot_csrng.c index 38ec402f1806f..948ea20ee407f 100644 --- a/hw/opentitan/ot_csrng.c +++ b/hw/opentitan/ot_csrng.c @@ -1341,7 +1341,8 @@ static int ot_csrng_handle_command(OtCSRNGState *s, unsigned slot) default: qemu_log_mask(LOG_GUEST_ERROR, "Unknown command: %u\n", acmd); // JW: check this shouldn't be CMD_STAGE_INVALID_CMD_SEQ_ALERT. - s->regs[R_RECOV_ALERT_STS] |= R_RECOV_ALERT_STS_CMD_STAGE_INVALID_ACMD_ALERT_MASK; + s->regs[R_RECOV_ALERT_STS] |= + R_RECOV_ALERT_STS_CMD_STAGE_INVALID_ACMD_ALERT_MASK; CHANGE_STATE(s, CSRNG_ERROR); ot_csrng_update_alerts(s); return -1; diff --git a/hw/opentitan/ot_flash.c b/hw/opentitan/ot_flash.c index a23c2f70b9d97..2824961d81da9 100644 --- a/hw/opentitan/ot_flash.c +++ b/hw/opentitan/ot_flash.c @@ -1402,10 +1402,10 @@ static void ot_flash_op_erase_bank(OtFlashState *s, unsigned address) } /* - * For bank erase only, if the data partition is selected, just the - * data partition is erased. If the info partition is selected, BOTH - * the data and info partitions are erased. - */ + * For bank erase only, if the data partition is selected, just the + * data partition is erased. If the info partition is selected, BOTH + * the data and info partitions are erased. 
+ */ unsigned bank_address = address - (address % bank_size); bank_address /= sizeof(uint32_t); /* convert to word address */ unsigned data_address, info_address = 0u; @@ -1819,7 +1819,8 @@ static void ot_flash_regs_write(void *opaque, hwaddr addr, uint64_t val64, s->op.info_part = part_sel; s->op.info_sel = info_sel; s->op.erase_sel = (bool)erase_sel; - /* Erase operations neither go through FIFOs nor use/require a word count */ + /* Erase operations neither go through FIFOs nor use/require a + * word count */ s->op.count = 0u; xtrace_ot_flash_info("Erase at", s->op.address); break; diff --git a/python/qemu/ot/eflash/__init__.py b/python/qemu/ot/eflash/__init__.py new file mode 100644 index 0000000000000..02c60c55c001c --- /dev/null +++ b/python/qemu/ot/eflash/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Embedded Flash tools.""" diff --git a/python/qemu/ot/eflash/gen.py b/python/qemu/ot/eflash/gen.py new file mode 100644 index 0000000000000..71d81324c58a8 --- /dev/null +++ b/python/qemu/ot/eflash/gen.py @@ -0,0 +1,594 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Create/update OpenTitan backend flash file. + + :author: Emmanuel Blot +""" + +from binascii import hexlify +from hashlib import sha256 +from itertools import repeat +from logging import getLogger +from os import SEEK_END, SEEK_SET, stat +from os.path import abspath, basename, exists, isfile +from struct import calcsize as scalc, pack as spack, unpack as sunpack +from typing import Any, BinaryIO, NamedTuple, Optional, Union +import re + +# pylint: disable=missing-function-docstring + +from ot.util.elf import ElfBlob + + +class BootLocation(NamedTuple): + """Boot location entry (always in two first pages of first info part) + """ + bank: int + page: int + seq: int + + +class RuntimeDescriptor(NamedTuple): + """Description of an executable binary. 
+ """ + code_start: int + code_end: int + size: int + entry_point: int + + +class FlashGen: + """Generate a flash image file. + + :param bl_offset: offset of the BL0 storage within the data partition. + if forced to 0, do not reserve any space for BL0, i.e. + dedicated all storage space to ROM_EXT section. + :discard_elf_check: whether to ignore mismatching binary/ELF files. + :accept_invalid: accept invalid input files (fully ignore content) + :discard_time_check: whether to ignore mismatching time between binary + and ELF files. + """ + + NUM_BANKS = 2 + PAGES_PER_BANK = 256 + NUM_REGIONS = 8 + INFOS = [10, 1, 2] + WORDS_PER_PAGE = 256 + BYTES_PER_WORD = 8 + BYTES_PER_PAGE = 2048 + BYTES_PER_BANK = 524288 + CHIP_ROM_EXT_SIZE_MAX = 0x10000 + + HEADER_FORMAT = { + 'magic': '4s', # "vFSH" + 'hlength': 'I', # count of header bytes after this point + 'version': 'I', # version of the header + 'bank': 'B', # count of bank + 'info': 'B', # count of info partitions per bank + 'page': 'H', # count of pages per bank + 'psize': 'I', # page size in bytes + 'ipp': '12s', # count of pages for each info partition (up to 12 parts) + } + + BOOT_HEADER_FORMAT = { + 'sha': '32s', # SHA-256 digest of boot data + 'valid': 'Q', # Invalidate a previously entry + 'identifier': 'I', # Boot data identifier (i.e. 
magic) + 'counter': 'I', # used to determine the newest entry + 'min_ver_rom_ext': 'I', # Minimum required security version for ROM_EXT + 'min_ver_bl0': 'I', # Minimum required security version for BL0 + 'padding': 'Q', # Padding to make the size of header a power of 2 + } + + MANIFEST_FORMAT = { + # SigverifyRsaBuffer + 'signature': '384s', # 96u32 + # ManifestUsageConstraints + 'selector_bits': 'I', + 'device_id': '32s', # 8u32 + 'manuf_state_creator': 'I', + 'manuf_state_owner': 'I', + 'life_cycle_state': 'I', + # SigverifyRsaBuffer + 'modulus': '384s', + 'address_translation': 'I', + 'identifier': '4s', + # ManifestVersion + 'manifest_version_minor': 'H', + 'manifest_version_major': 'H', + 'signed_region_end': 'I', + 'length': 'I', + 'version_major': 'I', + 'version_minor': 'I', + 'security_version': 'I', + # Timestamp + 'timestamp': '8s', # cannot use 'Q', no longer aligned on 64-bit type + # KeymgrBindingValue + 'binding_value': '32s', # 8u32 + 'max_key_version': 'I', + 'code_start': 'I', + 'code_end': 'I', + 'entry_point': 'I', + # ManifestExtTable + 'entries': '120s' # 15*(2u32) + } + + MANIFEST_SIZE = 1024 + MANIFEST_VERSION_MINOR1 = 0x6c47 + MANIFEST_VERSION_MAJOR1 = 0x71c3 + # Allow v2 manifests, the only difference is that signatures are ECDSA. 
+ MANIFEST_VERSION_MAJOR2 = 0x0002 + MANIFEST_EXT_TABLE_COUNT = 15 + + MANIFEST_TRUE = 0x739 # 'true' value for address_translation field + MANIFEST_FALSE = 0x1d4 # 'false' value for address_translation field + + IDENTIFIERS = { + None: b'\x00\x00\x00\x00', + 'rom_ext': b'OTRE', + 'bl0': b'OTB0', + } + + DEBUG_TRAILER_FORMAT = { + 'otre0': '256s', # optional path to the rom_ext filename in bank A + 'otb00': '256s', # optional path to the bl0 filename in bank A + 'otre1': '256s', # optional path to the rom_ext filename in bank B + 'otb01': '256s', # optional path to the bl0 filename in bank B + } + + BOOT_IDENTIFIER = 0x41444f42 + BOOT_INVALID = 0 + BOOT_VALID = (1 << 64) - 1 + BOOT_BANK = 1 + BOOT_PARTS = 2 + + def __init__(self, bl_offset: Optional[int] = None, + discard_elf_check: bool = False, + accept_invalid: bool = False, + discard_time_check: bool = False): + self._log = getLogger('flashgen') + self._check_manifest_size() + self._bl_offset = bl_offset if bl_offset is not None \ + else self.CHIP_ROM_EXT_SIZE_MAX + self._accept_invalid = accept_invalid + self._check_elf = not (discard_elf_check or self._accept_invalid) + self._check_time = not discard_time_check and self._check_elf + hfmt = ''.join(self.HEADER_FORMAT.values()) + header_size = scalc(hfmt) + assert header_size == 32 + self._header_size = header_size + bhfmt = ''.join(self.BOOT_HEADER_FORMAT.values()) + self._boot_header_size = scalc(bhfmt) + tfmt = ''.join(self.DEBUG_TRAILER_FORMAT.values()) + trailer_size = scalc(tfmt) + self._image_size = ((self.BYTES_PER_BANK + self.info_part_size()) * + self.NUM_BANKS + self._header_size + trailer_size) + self._ffp: Optional[BinaryIO] = None + + def open(self, path: str) -> None: + """Prepare flash content into a QEMU RAW stream. 
+ """ + mode = 'r+b' if exists(path) else 'w+b' + # cannot use a context manager here + # pylint: disable=consider-using-with + self._ffp = open(path, mode) + self._ffp.seek(0, SEEK_END) + vsize = self._ffp.tell() + if vsize < self._image_size: + if vsize and mode.startswith('r'): + self._log.info('File image too short, expanding') + else: + self._log.info('Creating new image file') + header = self._build_header() + self._write(0, header) + vsize += len(header) + self._write(len(header), + bytes(repeat(0xff, self._image_size-vsize))) + self._ffp.seek(0) + if vsize > self._image_size: + self._log.info('File image too long, truncating') + self._ffp.truncate(self._image_size) + + def close(self): + if self._ffp: + pos = self._ffp.seek(0, SEEK_END) + self._ffp.close() + self._ffp = None + if pos != self._image_size: + self._log.error('Invalid image size (%d bytes)', pos) + + @property + def logger(self): + return self._log + + @classmethod + def info_part_size(cls) -> int: + return sum(cls.INFOS) * cls.BYTES_PER_PAGE + + def read_boot_info(self) -> dict[BootLocation, + dict[str, Union[int, bytes]]]: + size = self._boot_header_size + fmt = ''.join(self.BOOT_HEADER_FORMAT.values()) + boot_entries = {} + boot_bank = 1 + for page in range(self.BOOT_PARTS): + base = page * self.BYTES_PER_PAGE + for offset in range(0, self.BYTES_PER_PAGE, size): + bdata = self.read_info_partition(boot_bank, base+offset, size) + if len(bdata) != size: + raise ValueError(f'Cannot read header: {len(bdata)} ' + f'bytes @ page {page} offset ' + f'{base+offset}') + values = sunpack(f'<{fmt}', bdata) + boot = dict(zip(self.BOOT_HEADER_FORMAT, values)) + if boot['identifier'] != self.BOOT_IDENTIFIER: + continue + if boot['valid'] != self.BOOT_VALID: + continue + boot_entries[BootLocation(boot_bank, page, offset//size)] = boot + offset += size + return boot_entries + + def read_info_partition(self, bank: int, offset: int, size: int) -> bytes: + offset += (self._header_size + self.NUM_BANKS * 
self.BYTES_PER_BANK + + bank * self.info_part_size()) + pos = self._ffp.tell() + self._ffp.seek(offset) + data = self._ffp.read(size) + self._ffp.seek(pos) + return data + + def store_rom_ext(self, bank: int, dfp: BinaryIO, + elfpath: Optional[str] = None, + no_header: bool = False) -> None: + if not 0 <= bank < self.NUM_BANKS: + raise ValueError(f'Invalid bank {bank}') + data = dfp.read() + if len(data) > self.BYTES_PER_BANK: + raise ValueError('Data too large') + bindesc = self._check_rom_ext(data) if not no_header else None + boot_entries = self.read_boot_info() + if not boot_entries: + next_loc = BootLocation(self.BOOT_BANK, 0, 0) + next_count = 5 + self._log.info('No pre-existing BootLocation') + else: + sorted_locs = sorted(boot_entries, + key=lambda e: boot_entries[e]['counter']) + mr_loc = sorted_locs[-1] + self._log.info('Last boot location %s', mr_loc) + mr_entry = boot_entries[mr_loc] + mr_bank = mr_loc.bank + next_op_bank = mr_bank + op_locs = [loc for loc in sorted_locs if loc.bank == next_op_bank] + if op_locs: + last_op_loc = op_locs[-1] + next_op_seq = last_op_loc.seq + 1 + next_op_page = last_op_loc.page + else: + next_op_seq = 0 + next_op_page = 0 + if next_op_seq >= self.BYTES_PER_PAGE/self._boot_header_size: + next_op_page += 1 + next_op_seq = 0 + if next_op_page >= self.BOOT_PARTS: + # erase the flash? 
+ raise ValueError('No more room to store boot location') + next_loc = BootLocation(next_op_bank, next_op_page, next_op_seq) + next_count = mr_entry['counter'] + 1 + self._write(self._header_size + bank * self.BYTES_PER_BANK, data) + boot_header = self._build_boot_header(next_count) + offset = self._get_boot_location_offset(next_loc) + self._write(offset, boot_header) + info_offset = (offset - self.NUM_BANKS * self.BYTES_PER_BANK - + self._header_size) + self._log.info('New %s stored @ abs:0x%06x / rel:0x%06x', + next_loc, offset, info_offset) + field_offset, field_data = self._build_field(self.BOOT_HEADER_FORMAT, + 'valid', self.BOOT_INVALID) + for loc, entry in boot_entries.items(): + if loc.bank != next_op_bank: + continue + if entry['valid'] != self.BOOT_INVALID: + offset = self._get_boot_location_offset(loc) + offset += field_offset + self._write(offset, field_data) + ename = f'otre{bank}' + if bindesc: + if not elfpath: + elfpath = self._get_elf_filename(dfp.name) + elif elfpath and not no_header: + self._log.warning('Discarding ELF as input binary file is invalid') + elfpath = None + if elfpath and not no_header: + elftime = stat(elfpath).st_mtime + bintime = stat(dfp.name).st_mtime + if bintime < elftime: + msg = 'Application binary file is older than ELF file' + if self._check_time: + raise RuntimeError(msg) + self._log.warning(msg) + be_match = self._compare_bin_elf(bindesc, elfpath) + if not be_match: + if be_match is None: + msg = 'Cannot verify ELF file (pyelftools not installed)' + else: + msg = 'Application ELF file does not match binary file' + if self._check_elf: + raise RuntimeError(msg) + self._log.warning(msg) + self._store_debug_info(ename, elfpath) + + def store_bootloader(self, bank: int, dfp: BinaryIO, + elfpath: Optional[str] = None, + no_header: bool = False) -> None: + if self._bl_offset == 0: + raise ValueError('Bootloader cannot be used') + if not 0 <= bank < self.NUM_BANKS: + raise ValueError(f'Invalid bank {bank}') + data = dfp.read() 
+ if len(data) > self.BYTES_PER_BANK: + raise ValueError('Data too large') + bindesc = self._check_bootloader(data) if not no_header else None + self._write(self._header_size + self._bl_offset, data) + ename = f'otb0{bank}' + if bindesc: + if not elfpath: + elfpath = self._get_elf_filename(dfp.name) + elif elfpath and not no_header: + self._log.warning('Discarding ELF as input binary file is invalid') + elfpath = None + if elfpath and not no_header: + elftime = stat(elfpath).st_mtime + bintime = stat(dfp.name).st_mtime + if bintime < elftime: + msg = 'Boot binary file is older than ELF file' + if self._check_time: + raise RuntimeError(msg) + self._log.warning(msg) + be_match = self._compare_bin_elf(bindesc, elfpath) + if not be_match: + if be_match is None: + msg = 'Cannot verify ELF file (pyelftools not installed)' + else: + msg = 'Boot ELF file does not match binary file' + if self._check_elf: + raise RuntimeError(msg) + self._log.warning(msg) + self._store_debug_info(ename, elfpath) + + def store_ot_files(self, otdescs: list[str]) -> None: + for dpos, otdesc in enumerate(otdescs, start=1): + parts = otdesc.rsplit(':', 1) + if len(parts) > 1: + otdesc = parts[0] + elf_filename = parts[1] + else: + elf_filename = None + parts = otdesc.split('@', 1) + if len(parts) < 2: + raise ValueError('Missing address in OT descriptor') + bin_filename = parts[0] + if not isfile(bin_filename): + raise ValueError(f'No such file {bin_filename}') + try: + address = int(parts[1], 16) + except ValueError as exc: + raise ValueError('Invalid address in OT descriptor') from exc + bank = address // self.BYTES_PER_BANK + address %= self.BYTES_PER_BANK + kind = 'rom_ext' if address < self.CHIP_ROM_EXT_SIZE_MAX else \ + 'bootloader' + self._log.info( + 'Handling file #%d as %s @ 0x%x in bank %d with%s ELF', + dpos, kind, address, bank, '' if elf_filename else 'out') + with open(bin_filename, 'rb') as bfp: + # func decode should never fail, so no error handling here + getattr(self, 
f'store_{kind}')(bank, bfp, elf_filename) + + def _compare_bin_elf(self, bindesc: RuntimeDescriptor, elfpath: str) \ + -> Optional[bool]: + if ElfBlob.ELF_ERROR: + return None + with open(elfpath, 'rb') as efp: + elfdesc = self._load_elf_info(efp) + if not elfdesc: + return False + binep = bindesc.entry_point & (self.CHIP_ROM_EXT_SIZE_MAX - 1) + elfep = elfdesc.entry_point & (self.CHIP_ROM_EXT_SIZE_MAX - 1) + if binep != elfep: + self._log.warning('Cannot compare bin vs. elf files') + return False + offset = elfdesc.entry_point - bindesc.entry_point + self._log.debug('ELF base offset 0x%08x', offset) + relfdesc = RuntimeDescriptor(elfdesc.code_start - offset, + elfdesc.code_end - offset, + elfdesc.size, + elfdesc.entry_point - offset) + match = bindesc == relfdesc + logfunc = self._log.debug if match else self._log.warning + logfunc('start bin %08x / elf %08x', + bindesc.code_start, relfdesc.code_start) + logfunc('end bin %08x / elf %08x', + bindesc.code_end, relfdesc.code_end) + logfunc('size bin %08x / elf %08x', + bindesc.size, relfdesc.size) + logfunc('entry bin %08x / elf %08x', + bindesc.entry_point, relfdesc.entry_point) + return match + + def _write(self, offset: Optional[int], data: bytes) -> None: + pos = self._ffp.tell() + if offset is None: + offset = pos + if offset + len(data) > self._image_size: + raise ValueError(f'Invalid offset {offset}+{len(data)}, ' + f'max {self._image_size}') + self._ffp.seek(offset, SEEK_SET) + self._ffp.write(data) + self._ffp.seek(pos, SEEK_SET) + + def _get_info_part_offset(self, part: int, info: int) -> int: + offset = self._header_size + self.NUM_BANKS * self.BYTES_PER_BANK + partition = 0 + while partition < part: + offset += self.INFOS[partition]*self.BYTES_PER_PAGE + partition += 1 + offset += info * self.BYTES_PER_PAGE + return offset + + def _get_boot_location_offset(self, loc: BootLocation) -> int: + return (loc.bank * self.info_part_size() + + self._get_info_part_offset(0, 0) + + loc.page * self.BYTES_PER_PAGE + + 
loc.seq * self._boot_header_size) + + def _build_field(self, fmtdict: dict[str, Any], field: str, value: Any) \ + -> tuple[int, bytes]: + offset = 0 + for name, fmt in fmtdict.items(): + if name == field: + return offset, spack(f'<{fmt}', value) + offset += scalc(fmt) + raise ValueError(f'No such field: {field}') + + def _build_header(self) -> bytes: + # hlength is the length of header minus the two first items (T, L) + hfmt = self.HEADER_FORMAT + fhfmt = ''.join(hfmt.values()) + shfmt = ''.join(hfmt[k] for k in list(hfmt)[:2]) + hlen = scalc(fhfmt) - scalc(shfmt) + ipp = bytearray(self.INFOS) + ipp.extend([0] * (12 - len(ipp))) + values = { + 'magic': b'vFSH', 'hlength': hlen, 'version': 1, + 'bank': self.NUM_BANKS, 'info': len(self.INFOS), + 'page': self.PAGES_PER_BANK, 'psize': self.BYTES_PER_PAGE, + 'ipp': bytes(ipp) + } + args = [values[k] for k in hfmt] + header = spack(f'<{fhfmt}', *args) + return header + + def _build_boot_header(self, counter) -> bytes: + min_sec_ver_rom_ext = 0 + min_sec_ver_bl0 = 0 + padding = 0 + fmts = list(self.BOOT_HEADER_FORMAT.values()) + sha_fmt, pld_fmt = fmts[0], ''.join(fmts[1:]) + payload = spack(f'<{pld_fmt}', self.BOOT_VALID, self.BOOT_IDENTIFIER, + counter, min_sec_ver_rom_ext, min_sec_ver_bl0, padding) + sha = spack(sha_fmt, sha256(payload).digest()) + header = b''.join((sha, payload)) + return header + + def _get_elf_filename(self, filename: str) -> str: + pathname = abspath(filename) + radix = re.sub(r'.[a-z_]+_0.signed.bin$', '', pathname) + elfname = f'{radix}.elf' + if not exists(elfname): + self._log.warning('No ELF debug info found') + return '' + self._log.info('Using ELF %s for %s', + basename(elfname), basename(filename)) + return elfname + + def _load_elf_info(self, efp: BinaryIO) \ + -> Optional[RuntimeDescriptor]: + if ElfBlob.ELF_ERROR: + # ELF tools are not available + self._log.warning('ELF file cannot be verified') + return None + elf = ElfBlob() + elf.load(efp) + if elf.address_size != 32: + raise 
ValueError('Spefified ELF file {} is not an ELF32 file') + elfstart, elfend = elf.code_span + return RuntimeDescriptor(elfstart, elfend, elf.size, elf.entry_point) + + def _store_debug_info(self, entryname: str, filename: Optional[str]) \ + -> None: + fnp = filename.encode('utf8') if filename else b'' + lfnp = len(fnp) + tfmt = ''.join(self.DEBUG_TRAILER_FORMAT.values()) + trailer_size = scalc(tfmt) + trailer_offset = self._image_size - trailer_size + for name, fmt in self.DEBUG_TRAILER_FORMAT.items(): + lfmt = scalc(fmt) + if name != entryname: + trailer_offset += lfmt + continue + if lfnp < lfmt: + fnp = b''.join((fnp, bytes(lfmt-lfnp))) + elif lfnp > lfmt: + self._log.warning('ELF pathname too long to store') + return + fnp = spack(fmt, fnp) # useless, used as sanity check + self._write(trailer_offset, fnp) + break + else: + self._log.warning('Unable to find a matching debug entry: %s', + entryname) + + def _check_rom_ext(self, data: bytes) -> Optional[RuntimeDescriptor]: + max_size = self._bl_offset or self.BYTES_PER_BANK + try: + return self._check_manifest(data, 'rom_ext', max_size) + except ValueError: + if self._accept_invalid: + return None + raise + + def _check_bootloader(self, data: bytes) -> Optional[RuntimeDescriptor]: + assert self._bl_offset + max_size = self.BYTES_PER_BANK - self._bl_offset + try: + return self._check_manifest(data, 'bl0', max_size) + except ValueError: + if self._accept_invalid: + return None + raise + + def _check_manifest(self, data: bytes, kind: str, max_size: int) \ + -> RuntimeDescriptor: + if len(data) > max_size: + raise ValueError(f'{kind} too large') + mfmt = ''.join(self.MANIFEST_FORMAT.values()) + slen = scalc(mfmt) + if len(data) <= slen: + raise ValueError(f'{kind} too short') + manifest = dict(zip(self.MANIFEST_FORMAT, + sunpack(f'<{mfmt}', data[:slen]))) + self._log_manifest(manifest) + if (manifest['manifest_version_major'] not in + (self.MANIFEST_VERSION_MAJOR1, self.MANIFEST_VERSION_MAJOR2) + or 
manifest['manifest_version_minor'] != + self.MANIFEST_VERSION_MINOR1): + raise ValueError('Unsupported manifest version') + self._log.info('%s code start 0x%05x, end 0x%05x, exec 0x%05x', + kind, manifest['code_start'], manifest['code_end'], + manifest['entry_point']) + if manifest['identifier'] != self.IDENTIFIERS[kind]: + if manifest['identifier'] != self.IDENTIFIERS[None]: + manifest_str = hexlify(manifest["identifier"]).decode().upper() + raise ValueError(f'Specified file is not a {kind} file: ' + f'{manifest_str}') + self._log.warning('Empty %s manifest, cannot verify', kind) + return RuntimeDescriptor(manifest['code_start'], manifest['code_end'], + manifest['length'], manifest['entry_point']) + + @classmethod + def _check_manifest_size(cls): + slen = scalc(''.join(cls.MANIFEST_FORMAT.values())) + assert cls.MANIFEST_SIZE == slen, 'Invalid Manifest size' + + def _log_manifest(self, manifest): + for item, value in manifest.items(): + if isinstance(value, int): + self._log.debug('%s: 0x%08x', item, value) + elif isinstance(value, bytes): + self._log.debug('%s: (%d) %s', item, len(value), + hexlify(value).decode()) + else: + self._log.debug('%s: (%d) %s', item, len(value), value) diff --git a/python/qemu/ot/pyot/__init__.py b/python/qemu/ot/pyot/__init__.py new file mode 100644 index 0000000000000..dc51ab95f57c6 --- /dev/null +++ b/python/qemu/ot/pyot/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""OpenTitan QEMU unit test sequencer. + + :author: Emmanuel Blot +""" + +DEFAULT_TIMEOUT = 60 # seconds +DEFAULT_TIMEOUT_FACTOR = 1.0 diff --git a/python/qemu/ot/pyot/context.py b/python/qemu/ot/pyot/context.py new file mode 100644 index 0000000000000..1af116f5e170f --- /dev/null +++ b/python/qemu/ot/pyot/context.py @@ -0,0 +1,172 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Test context for QEMU unit test sequencer. 
+ + :author: Emmanuel Blot +""" + +from logging import getLogger +from os import environ, pardir, sep +from os.path import basename, dirname, normpath, relpath +from subprocess import Popen, PIPE, TimeoutExpired +from threading import Event +from typing import Optional + +from .filemgr import QEMUFileManager +from .worker import QEMUContextWorker + + +class QEMUContext: + """Execution context for QEMU session. + + Execute commands before, while and after QEMU executes. + + :param test_name: the name of the test QEMU should execute + :param qfm: the file manager + :param qemu_cmd: the command and argument to execute QEMU + :param context: the contex configuration for the current test + """ + + def __init__(self, test_name: str, qfm: QEMUFileManager, + qemu_cmd: list[str], context: dict[str, list[str]], + env: Optional[dict[str, str]] = None): + # pylint: disable=too-many-arguments + self._clog = getLogger('pyot.ctx') + self._test_name = test_name + self._qfm = qfm + self._qemu_cmd = qemu_cmd + self._context = context + self._env = env or {} + self._workers: list[Popen] = [] + self._first_error: str = '' + + def execute(self, ctx_name: str, code: int = 0, + sync: Optional[Event] = None) -> None: + """Execute all commands, in order, for the selected context. + + Synchronous commands are executed in order. If one command fails, + subsequent commands are not executed. + + Background commands are started in order, but a failure does not + stop other commands. 
+ + :param ctx_name: the name of the execution context + :param code: a previous error completion code, if any + :param sync: an optional synchronisation event to start up the + execution + """ + ctx = self._context.get(ctx_name, None) + if ctx_name == 'post' and code: + self._clog.info("Discard execution of '%s' commands after failure " + "of '%s'", ctx_name, self._test_name) + return + env = dict(environ) + env.update(self._env) + if self._qemu_cmd: + env['PATH'] = ':'.join((env['PATH'], dirname(self._qemu_cmd[0]))) + if ctx: + for cmd in ctx: + bkgnd = ctx_name == 'with' + if cmd.endswith('!'): + bkgnd = False + cmd = cmd[:-1] + elif cmd.endswith('&'): + bkgnd = True + cmd = cmd[:-1] + cmd = normpath(cmd.rstrip()) + if bkgnd: + if ctx_name == 'post': + raise ValueError(f"Cannot execute background command " + f"in [{ctx_name}] context for " + f"'{self._test_name}'") + rcmd = relpath(cmd) + if rcmd.startswith(pardir): + rcmd = cmd + rcmd = ' '.join(p if not p.startswith(sep) else basename(p) + for p in rcmd.split(' ')) + self._clog.info('Execute "%s" in background for [%s] ' + 'context', rcmd, ctx_name) + worker = QEMUContextWorker(cmd, env, sync) + worker.run() + self._workers.append(worker) + else: + if sync: + self._clog.debug('Synchronization ignored') + cmd = normpath(cmd.rstrip()) + rcmd = relpath(cmd) + if rcmd.startswith(pardir): + rcmd = cmd + rcmd = ' '.join(p if not p.startswith(sep) else basename(p) + for p in rcmd.split(' ')) + self._clog.info('Execute "%s" in sync for [%s] context', + rcmd, ctx_name) + # pylint: disable=consider-using-with + proc = Popen(cmd, bufsize=1, stdout=PIPE, stderr=PIPE, + shell=True, env=env, encoding='utf-8', + errors='ignore', text=True) + ret = 0 + try: + outs, errs = proc.communicate(timeout=5) + ret = proc.returncode + except TimeoutExpired: + proc.kill() + outs, errs = proc.communicate() + ret = proc.returncode + if not self._first_error: + self._first_error = errs.split('\n', 1)[0] + for sfp, logger in zip( + (outs, 
errs), + (self._clog.debug, + self._clog.error if ret else self._clog.info)): + for line in sfp.split('\n'): + line = line.strip() + if line: + logger(line) + if ret: + self._clog.error("Fail to execute '%s' command for " + "'%s'", cmd, self._test_name) + errmsg = self._first_error or \ + f'Cannot execute [{ctx_name}] command' + raise OSError(ret, errmsg) + if ctx_name == 'post': + if not self._qfm.keep_temporary: + self._qfm.delete_default_dir(self._test_name) + + def check_error(self) -> int: + """Check if any background worker exited in error. + + :return: a non-zero value on error + """ + for worker in self._workers: + ret = worker.exit_code() + if not ret: + continue + if not self._first_error: + self._first_error = worker.first_error + self._clog.error("%s exited with %d", worker.command, ret) + return ret + return 0 + + @property + def first_error(self): + """Return the message of the first error, if any.""" + return self._first_error + + def finalize(self) -> int: + """Terminate any running background command, in reverse order. + + :return: a non-zero value if one or more workers have reported an + error + """ + rets = {0} + while self._workers: + worker = self._workers.pop() + ret = worker.stop() + rets.add(ret) + if ret: + self._clog.warning("Command '%s' has failed for '%s': %d", + worker.command, self._test_name, ret) + if not self._first_error: + self._first_error = worker.first_error + return max(rets) diff --git a/python/qemu/ot/pyot/executer.py b/python/qemu/ot/pyot/executer.py new file mode 100644 index 0000000000000..5a96c9c03eb30 --- /dev/null +++ b/python/qemu/ot/pyot/executer.py @@ -0,0 +1,694 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Test executer for QEMU unit test sequencer. 
+ + :author: Emmanuel Blot +""" + +from argparse import Namespace +from collections import defaultdict +from csv import writer as csv_writer +from fnmatch import fnmatchcase +from glob import glob +from logging import INFO as LOG_INFO, getLogger +from os import curdir, environ, getcwd, sep +from os.path import (basename, dirname, isabs, isfile, join as joinpath, + normpath) +from traceback import format_exc +from typing import Any, Iterator, Optional + +import re +import sys + +from ot.util.file import guess_test_type +from ot.util.log import flush_memory_loggers +from ot.util.misc import EasyDict + +from . import DEFAULT_TIMEOUT, DEFAULT_TIMEOUT_FACTOR +from .context import QEMUContext +from .filemgr import QEMUFileManager +from .util import TestResult +from .wrapper import QEMUWrapper + + +class QEMUExecuter: + """Test execution sequencer. + + :param qfm: file manager that tracks temporary files + :param config: configuration dictionary + :param args: parsed arguments + """ + + RESULT_MAP = { + 0: 'PASS', + 1: 'ERROR', + 6: 'ABORT', + 11: 'CRASH', + QEMUWrapper.GUEST_ERROR_OFFSET - 1: 'GUEST_ESC', + QEMUWrapper.GUEST_ERROR_OFFSET + 1: 'FAIL', + 98: 'UNEXP_SUCCESS', + 99: 'CONTEXT', + 124: 'TIMEOUT', + 125: 'DEADLOCK', + 126: 'CONTEXT', + QEMUWrapper.NO_MATCH_RETURN_CODE: 'UNKNOWN', + } + + DEFAULT_START_DELAY = 1.0 + """Default start up delay to let QEMU initialize before connecting the + virtual UART port. 
+ """ + + DEFAULT_SERIAL_PORT = 'serial0' + """Default VCP name.""" + + LOG_SHORTCUTS = { + 'A': 'in_asm', + 'E': 'exec', + 'G': 'guest_errors', + 'I': 'int', + 'U': 'unimp', + } + """Shortcut names for QEMU log sources.""" + + def __init__(self, qfm: QEMUFileManager, config: dict[str, any], + args: Namespace): + self._log = getLogger('pyot.exec') + self._qfm = qfm + self._config = config + self._args = args + self._argdict: dict[str, Any] = {} + self._qemu_cmd: list[str] = [] + self._suffixes = [] + self._virtual_tests: dict[str, str] = {} + if hasattr(self._args, 'opts'): + setattr(self._args, 'global_opts', getattr(self._args, 'opts')) + setattr(self._args, 'opts', []) + else: + setattr(self._args, 'global_opts', []) + + def build(self) -> None: + """Build initial QEMU arguments. + + :raise ValueError: if some argument is invalid + """ + exec_info = self._build_qemu_command(self._args) + self._qemu_cmd = exec_info.command + self._argdict = dict(self._args.__dict__) + self._suffixes = [] + suffixes = self._config.get('suffixes', []) + if not isinstance(suffixes, list): + raise ValueError('Invalid suffixes sub-section') + self._suffixes.extend(suffixes) + + def enumerate_tests(self) -> Iterator[str]: + """Enumerate tests to execute. + """ + self._argdict = dict(self._args.__dict__) + for tst in sorted(self._build_test_list()): + ttype = guess_test_type(tst) + yield f'{basename(tst)} ({ttype})' + + def run(self, debug: bool, allow_no_test: bool) -> int: + """Execute all requested tests. 
+ + :return: success or the code of the first encountered error + """ + log_classifiers = self._config.get('logclass', {}) + qot = QEMUWrapper(log_classifiers, debug) + ret = 0 + results = defaultdict(int) + result_file = self._argdict.get('result') + # pylint: disable=consider-using-with + cfp = open(result_file, 'wt', encoding='utf-8') if result_file else None + try: + csv = csv_writer(cfp) if cfp else None + if csv: + csv.writerow((x.title() for x in TestResult._fields)) + app = self._argdict.get('exec') + if app: + assert 'timeout' in self._argdict + timeout = int(float(self._argdict.get('timeout')) * + float(self._argdict.get('timeout_factor', + DEFAULT_TIMEOUT_FACTOR))) + self._log.debug('Execute %s', basename(self._argdict['exec'])) + adef = EasyDict(command=self._qemu_cmd, timeout=timeout, + start_delay=self.DEFAULT_START_DELAY) + ret, xtime, err = qot.run(adef) + results[ret] += 1 + sret = self.RESULT_MAP.get(ret, ret) + icount = self._argdict.get('icount') + if csv: + csv.writerow(TestResult(self.get_test_radix(app), sret, + xtime, icount, err)) + cfp.flush() + tests = self._build_test_list() + tcount = len(tests) + self._log.info('Found %d tests to execute', tcount) + if not tcount and not allow_no_test: + self._log.error('No test can be run') + return 1 + targs = None + temp_files = {} + for tpos, test in enumerate(tests, start=1): + self._log.info('[TEST %s] (%d/%d)', self.get_test_radix(test), + tpos, tcount) + try: + self._qfm.define_transient({ + 'UTPATH': test, + 'UTDIR': normpath(dirname(test)), + 'UTFILE': basename(test), + }) + test_name = self.get_test_radix(test) + exec_info = self._build_qemu_test_command(test) + exec_info.test_name = test_name + exec_info.context.execute('pre') + tret, xtime, err = qot.run(exec_info) + cret = exec_info.context.finalize() + if exec_info.expect_result != 0: + if tret == exec_info.expect_result: + self._log.info('QEMU failed with expected error, ' + 'assume success') + tret = 0 + elif tret == 0: + 
self._log.warning('QEMU success while expected ' + 'error %d, assume error', tret) + tret = 98 + if tret == 0 and cret != 0: + tret = 99 + if tret and not err: + err = exec_info.context.first_error + exec_info.context.execute('post', tret) + # pylint: disable=broad-except + except Exception as exc: + self._log.critical('%s', str(exc)) + if debug: + print(format_exc(chain=False), file=sys.stderr) + tret = 99 + xtime = 0.0 + err = str(exc) + finally: + self._qfm.cleanup_transient() + flush_memory_loggers(['pyot', 'pyot.vcp'], LOG_INFO) + results[tret] += 1 + sret = self.RESULT_MAP.get(tret, tret) + try: + targs = exec_info.args + icount = self.get_namespace_arg(targs, 'icount') + except (AttributeError, KeyError, UnboundLocalError): + icount = None + if csv: + csv.writerow(TestResult(test_name, sret, xtime, icount, + err)) + # want to commit result as soon as possible if some client + # is live-tracking progress on long test runs + cfp.flush() + else: + self._log.info('"%s" executed in %s (%s)', + test_name, xtime, sret) + self._cleanup_temp_files(temp_files) + finally: + if cfp: + cfp.close() + for kind in sorted(results): + self._log.info('%s count: %d', + self.RESULT_MAP.get(kind, kind), + results[kind]) + # sort by the largest occurence, discarding success + errors = sorted((x for x in results.items() if x[0]), + key=lambda x: -x[1]) + # overall return code is the most common error, or success otherwise + ret = errors[0][0] if errors else 0 + self._log.info('Total count: %d, overall result: %s', + sum(results.values()), + self.RESULT_MAP.get(ret, ret)) + return ret + + def get_test_radix(self, filename: str) -> str: + """Extract the radix name from a test pathname. 
+ + :param filename: the path to the test executable + :return: the test name + """ + test_name = basename(filename).split('.')[0] + for suffix in self._suffixes: + if not test_name.endswith(suffix): + continue + return test_name[:-len(suffix)] + return test_name + + @classmethod + def get_namespace_arg(cls, args: Namespace, name: str) -> Optional[str]: + """Extract a value from a namespace. + + :param args: the namespace + :param name: the value's key + :return: the value if any + """ + return args.__dict__.get(name) + + @staticmethod + def flatten(lst: list) -> list: + """Flatten a list. + """ + return [item for sublist in lst for item in sublist] + + @staticmethod + def abspath(path: str) -> str: + """Build absolute path""" + if isabs(path): + return normpath(path) + return normpath(joinpath(getcwd(), path)) + + def _cleanup_temp_files(self, storage: dict[str, set[str]]) -> None: + if self._qfm.keep_temporary: + return + for kind, files in storage.items(): + delete_file = getattr(self._qfm, f'delete_{kind}_image') + for filename in files: + delete_file(filename) + + def _build_qemu_fw_args(self, args: Namespace) \ + -> tuple[str, Optional[str], list[str], Optional[str]]: + rom_exec = bool(args.rom_exec) + roms = args.rom or [] + multi_rom = (len(roms) + int(rom_exec)) > 1 + # generate pre-application ROM option + fw_args: list[str] = [] + machine = args.machine + variant = args.variant + chiplet_count = 1 + if variant: + machine = f'{machine},variant={variant}' + try: + chiplet_count = sum(int(x) + for x in re.split(r'[A-Za-z]', variant) + if x) + except ValueError: + self._log.warning('Unknown variant syntax %s', variant) + rom_counts: list[int] = [0] + for chip_id in range(chiplet_count): + rom_count = 0 + for rom in roms: + rom_path = self._qfm.interpolate(rom) + if not isfile(rom_path): + raise ValueError(f'Unable to find ROM file {rom_path}') + rom_ids = [] + if args.first_soc: + if chiplet_count == 1: + rom_ids.append(f'{args.first_soc}.') + else: + 
rom_ids.append(f'{args.first_soc}{chip_id}.') + rom_ids.append('rom') + if multi_rom: + rom_ids.append(f'{rom_count}') + rom_id = ''.join(rom_ids) + rom_opt = f'ot-rom_img,id={rom_id},file={rom_path}' + fw_args.extend(('-object', rom_opt)) + rom_count += 1 + rom_counts.append(rom_count) + rom_count = max(rom_counts) + xtype = None + if args.exec: + exec_path = self._virtual_tests.get(args.exec) + if not exec_path: + exec_path = self.abspath(args.exec) + xtype = guess_test_type(exec_path) + if xtype == 'spiflash': + fw_args.extend(('-drive', + f'if=mtd,id=spiflash,bus=0,format=raw,' + f'file={exec_path}')) + elif xtype == 'bin': + if args.embedded_flash is None: + raise ValueError(f'{xtype} test type not supported without ' + f'embedded-flash option') + else: + if xtype != 'elf': + raise ValueError(f'No support for test type: ' + f'{xtype.upper()}') + if rom_exec: + # generate ROM option(s) for the application itself + for chip in range(chiplet_count): + rom_id_parts = [] + if args.first_soc: + if chiplet_count == 1: + rom_id_parts.append(f'{args.first_soc}.') + else: + rom_id_parts.append(f'{args.first_soc}{chip}.') + rom_id_parts.append('rom') + if multi_rom: + rom_id_parts.append(f'{rom_count}') + rom_id = ''.join(rom_id_parts) + rom_opt = f'ot-rom_img,id={rom_id},file={exec_path}' + fw_args.extend(('-object', rom_opt)) + rom_count += 1 + else: + if args.embedded_flash is None: + fw_args.extend(('-kernel', exec_path)) + else: + exec_path = None + return machine, xtype, fw_args, exec_path + + def _build_qemu_log_sources(self, args: Namespace) -> list[str]: + if not args.log: + return [] + log_args = [] + for arg in args.log: + if arg.lower() == arg: + log_args.append(arg) + continue + for upch in arg: + try: + logname = self.LOG_SHORTCUTS[upch] + except KeyError as exc: + raise ValueError(f"Unknown log name '{upch}'") from exc + log_args.append(logname) + return ['-d', ','.join(log_args)] + + def _build_qemu_vcp_args(self, args: Namespace) -> \ + tuple[list[str], 
dict[str, tuple[str, int]]]: + device = args.device + devdesc = device.split(':') + host = devdesc[0] + try: + port = int(devdesc[1]) + if not 0 < port < 65536: + raise ValueError(f'Invalid serial TCP port: {port}') + except IndexError as exc: + raise ValueError(f'TCP port not specified: {device}') from exc + except TypeError as exc: + raise ValueError(f'Invalid TCP serial device: {device}') from exc + mux = f'mux={"on" if args.muxserial else "off"}' + vcps = args.vcp or [self.DEFAULT_SERIAL_PORT] + vcp_args = ['-display', 'none'] + vcp_map = {} + for vix, vcp in enumerate(vcps): + vcp_map[vcp] = (host, port+vix) + vcp_args.extend(('-chardev', + f'socket,id={vcp},host={host},port={port+vix},' + f'{mux},server=on,wait=on')) + if vcp == self.DEFAULT_SERIAL_PORT: + vcp_args.extend(('-serial', 'chardev:serial0')) + return vcp_args, vcp_map + + def _build_qemu_command(self, args: Namespace, + opts: Optional[list[str]] = None) \ + -> EasyDict[str, Any]: + """Build QEMU command line from argparser values. 
+ + :param args: the parsed arguments + :param opts: any QEMU-specific additional options + :return: a dictionary defining how to execute the command + """ + if args.qemu is None: + raise ValueError('QEMU path is not defined') + machine, xtype, fw_args, xexec = self._build_qemu_fw_args(args) + qemu_args = [args.qemu, '-M', machine] + if args.otcfg: + qemu_args.extend(('-readconfig', self.abspath(args.otcfg))) + qemu_args.extend(fw_args) + temp_files = defaultdict(set) + if all((args.otp, args.otp_raw)): + raise ValueError('OTP VMEM and RAW options are mutually exclusive') + if args.otp: + if not isfile(args.otp): + raise ValueError(f'No such OTP file: {args.otp}') + otp_file = self._qfm.create_otp_image(args.otp) + temp_files['otp'].add(otp_file) + qemu_args.extend(('-drive', + f'if=pflash,file={otp_file},format=raw')) + elif args.otp_raw: + otp_raw_path = self.abspath(args.otp_raw) + qemu_args.extend(('-drive', + f'if=pflash,file={otp_raw_path},format=raw')) + if args.flash: + if xtype == 'spiflash': + raise ValueError('Cannot use a flash file with a flash test') + if not isfile(args.flash): + raise ValueError(f'No such flash file: {args.flash}') + if any((args.exec, args.boot)): + raise ValueError('Flash file argument is mutually exclusive ' + 'with bootloader or rom extension') + flash_path = self.abspath(args.flash) + if args.embedded_flash is None: + raise ValueError('Embedded flash bus not defined') + qemu_args.extend(('-drive', f'if=mtd,id=eflash,' + f'bus={args.embedded_flash},' + f'file={flash_path},format=raw')) + elif any((xexec, args.boot)): + if xexec and not isfile(xexec): + raise ValueError(f'No such exec file: {xexec}') + if args.boot and not isfile(args.boot): + raise ValueError(f'No such bootloader file: {args.boot}') + if args.embedded_flash is not None: + no_flash_header = args.no_flash_header + flash_file = self._qfm.create_eflash_image(xexec, args.boot, + no_flash_header) + temp_files['flash'].add(flash_file) + qemu_args.extend(('-drive', 
f'if=mtd,id=eflash,' + f'bus={args.embedded_flash},' + f'file={flash_file},format=raw')) + if args.log_file: + qemu_args.extend(('-D', self.abspath(args.log_file))) + if args.trace: + # use a FileType to let argparser validate presence and type + args.trace.close() + qemu_args.extend(('-trace', + f'events={self.abspath(args.trace.name)}')) + qemu_args.extend(self._build_qemu_log_sources(args)) + if args.singlestep: + qemu_args.extend(('-accel', 'tcg,one-insn-per-tb=on')) + if 'icount' in args: + if args.icount is not None: + qemu_args.extend(('-icount', f'shift={args.icount}')) + try: + start_delay = float(getattr(args, 'start_delay') or + self.DEFAULT_START_DELAY) + except ValueError as exc: + raise ValueError(f'Invalid start up delay {args.start_delay}') \ + from exc + start_delay *= args.timeout_factor + trigger = getattr(args, 'trigger', '') + validate = getattr(args, 'validate', '') + if trigger and validate: + raise ValueError(f"{getattr(args, 'exec', '?')}: 'trigger' and " + f"'validate' are mutually exclusive") + vcp_args, vcp_map = self._build_qemu_vcp_args(args) + qemu_args.extend(vcp_args) + qemu_args.extend(args.global_opts or []) + if opts: + qemu_args.extend((str(o) for o in opts)) + return EasyDict(command=qemu_args, vcp_map=vcp_map, + tmpfiles=temp_files, start_delay=start_delay, + trigger=trigger, validate=validate) + + def _build_qemu_test_command(self, filename: str) -> EasyDict[str, Any]: + test_name = self.get_test_radix(filename) + args, opts, timeout, texp = self._build_test_args(test_name) + setattr(args, 'exec', filename) + exec_info = self._build_qemu_command(args, opts) + exec_info.pop('connection', None) + exec_info.args = args + exec_info.context = self._build_test_context(test_name) + exec_info.timeout = timeout + exec_info.expect_result = texp + return exec_info + + def _build_test_list(self, alphasort: bool = True) -> list[str]: + pathnames = set() + testdir = normpath(self._qfm.interpolate(self._config.get('testdir', + curdir))) + 
self._qfm.define({'testdir': testdir}) + cfilters = self._args.filter or [] + pfilters = [f for f in cfilters if not f.startswith('!')] + if not pfilters: + cfilters = ['*'] + cfilters + tfilters = ['*'] + pfilters + else: + tfilters = list(pfilters) + virttests = self._config.get('virtual', {}) + if not isinstance(virttests, dict): + raise ValueError('Invalid virtual tests definition') + vtests = {} + for vname, vpath in virttests.items(): + if not isinstance(vname, str): + raise ValueError(f"Invalid virtual test definition '{vname}'") + if sep in vname: + raise ValueError(f"Virtual test name cannot contain directory " + f"specifier: '{vname}'") + rpath = normpath(self._qfm.interpolate(vpath)) + if not isfile(rpath): + raise ValueError(f"Invalid virtual test '{vname}': " + f"missing file '{rpath}'") + vtests[vname] = rpath + self._virtual_tests.update(vtests) + inc_filters = self._build_config_list('include') + if inc_filters: + self._log.debug('Searching for tests from %s dir', testdir) + for path_filter in filter(None, inc_filters): + if testdir: + path_filter = joinpath(testdir, path_filter) + paths = set(glob(path_filter, recursive=True)) + for path in paths: + if isfile(path): + for tfilter in tfilters: + if fnmatchcase(self.get_test_radix(path), tfilter): + pathnames.add(path) + break + for vpath in vtests: + for tfilter in tfilters: + if fnmatchcase(self.get_test_radix(vpath), tfilter): + pathnames.add(vpath) + break + for testfile in self._enumerate_from('include_from'): + if not isfile(testfile): + raise ValueError(f'Unable to locate test file ' + f'"{testfile}"') + for tfilter in tfilters: + if fnmatchcase(self.get_test_radix(testfile), tfilter): + pathnames.add(testfile) + if not pathnames: + return [] + roms = self._argdict.get('rom', []) + pathnames -= {normpath(rom) for rom in roms} + xtfilters = [f[1:].strip() for f in cfilters if f.startswith('!')] + exc_filters = self._build_config_list('exclude') + xtfilters.extend(exc_filters) + if xtfilters: + 
for path_filter in filter(None, xtfilters): + if testdir: + path_filter = joinpath(testdir, path_filter) + paths = set(glob(path_filter, recursive=True)) + pathnames -= paths + vdiscards: set[str] = set() + for vpath in vtests: + if fnmatchcase(vpath, basename(path_filter)): + vdiscards.add(vpath) + pathnames -= vdiscards + pathnames -= set(self._enumerate_from('exclude_from')) + if alphasort: + return sorted(pathnames, key=basename) + return list(pathnames) + + def _enumerate_from(self, config_entry: str) -> Iterator[str]: + incf_filters = self._build_config_list(config_entry) + if incf_filters: + for incf in incf_filters: + incf = normpath(self._qfm.interpolate(incf)) + if not isfile(incf): + raise ValueError(f'Invalid test file: "{incf}"') + self._log.debug('Loading test list from %s', incf) + incf_dir = dirname(incf) + with open(incf, 'rt', encoding='utf-8') as ifp: + for testfile in ifp: + testfile = re.sub('#.*$', '', testfile).strip() + if not testfile: + continue + testfile = self._qfm.interpolate(testfile) + if not testfile.startswith(sep): + testfile = joinpath(incf_dir, testfile) + yield normpath(testfile) + + def _build_config_list(self, config_entry: str) -> list: + cfglist = [] + items = self._config.get(config_entry) + if not items: + return cfglist + if not isinstance(items, list): + raise ValueError(f'Invalid configuration file: ' + f'"{config_entry}" is not a list') + for item in items: + if isinstance(item, str): + cfglist.append(item) + continue + if isinstance(item, dict): + for dname, dval in item.items(): + try: + cond = bool(int(environ.get(dname, '0'))) + except (ValueError, TypeError): + cond = False + if not cond: + continue + if isinstance(dval, str): + dval = [dval] + if isinstance(dval, list): + for sitem in dval: + if isinstance(sitem, str): + cfglist.append(sitem) + return cfglist + + def _build_test_args(self, test_name: str) \ + -> tuple[Namespace, list[str], int, int]: + tests_cfg = self._config.get('tests', {}) + if not 
isinstance(tests_cfg, dict): + raise ValueError('Invalid tests sub-section') + kwargs = dict(self._args.__dict__) + test_cfg = tests_cfg.get(test_name, {}) + if test_cfg is None: + # does not default to an empty dict to differenciate empty from + # inexistent test configuration + self._log.debug('No configuration for test %s', test_name) + opts = None + else: + test_cfg = {k: v for k, v in test_cfg.items() + if k not in ('pre', 'post', 'with')} + self._log.debug('Using custom test config for %s', test_name) + discards = {k for k, v in test_cfg.items() if v == ''} + if discards: + test_cfg = dict(test_cfg) + for discard in discards: + del test_cfg[discard] + if discard in kwargs: + del kwargs[discard] + kwargs.update(test_cfg) + opts = kwargs.get('opts') + if opts and not isinstance(opts, list): + raise ValueError('fInvalid QEMU options for {test_name}') + opts = self.flatten([opt.split(' ') for opt in opts]) + opts = [self._qfm.interpolate(opt) for opt in opts] + opts = self.flatten([opt.split(' ') for opt in opts]) + opts = [self._qfm.interpolate_dirs(opt, test_name) for opt in opts] + timeout = float(kwargs.get('timeout', DEFAULT_TIMEOUT)) + tmfactor = float(kwargs.get('timeout_factor', DEFAULT_TIMEOUT_FACTOR)) + itimeout = int(timeout * tmfactor) + texpect = kwargs.get('expect', 0) + try: + texp = int(texpect) + except ValueError: + result_map = {v: k for k, v in self.RESULT_MAP.items()} + try: + texp = result_map[texpect.upper()] + except KeyError as exc: + raise ValueError(f'Unsupported expect: {texpect}') from exc + return Namespace(**kwargs), opts or [], itimeout, texp + + def _build_test_context(self, test_name: str) -> QEMUContext: + context = defaultdict(list) + tests_cfg = self._config.get('tests', {}) + test_cfg = tests_cfg.get(test_name, {}) + test_env = None + if test_cfg: + for ctx_name in ('pre', 'with', 'post'): + if ctx_name not in test_cfg: + continue + ctx = test_cfg[ctx_name] + if not isinstance(ctx, list): + raise ValueError(f'Invalid context 
"{ctx_name}" ' + f'for test {test_name}') + for pos, cmd in enumerate(ctx, start=1): + if not isinstance(cmd, str): + raise ValueError(f'Invalid command #{pos} in ' + f'"{ctx_name}" for test {test_name}') + cmd = re.sub(r'[\n\r]', ' ', cmd.strip()) + cmd = re.sub(r'\s{2,}', ' ', cmd) + cmd = self._qfm.interpolate(cmd) + cmd = self._qfm.interpolate_dirs(cmd, test_name) + context[ctx_name].append(cmd) + env = test_cfg.get('env') + if env: + if not isinstance(env, dict): + raise ValueError('Invalid context environment') + test_env = {k: self._qfm.interpolate(v) for k, v in env.items()} + return QEMUContext(test_name, self._qfm, self._qemu_cmd, dict(context), + test_env) diff --git a/python/qemu/ot/pyot/filemgr.py b/python/qemu/ot/pyot/filemgr.py new file mode 100644 index 0000000000000..ea1c1e09ecdd1 --- /dev/null +++ b/python/qemu/ot/pyot/filemgr.py @@ -0,0 +1,336 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""File manager for QEMU unit test sequencer. + + :author: Emmanuel Blot +""" + +from atexit import register +from logging import getLogger +from io import BytesIO +from os import close, environ, sep, unlink +from os.path import abspath, basename, exists, isdir, isfile, normpath, splitext +from shutil import rmtree +from tempfile import mkdtemp, mkstemp +from typing import Any, Optional + +import re + +from ot.util.elf import ElfBlob +from ot.util.file import guess_test_type + + +class QEMUFileManager: + """Simple file manager to generate and track temporary files. 
+ + :param keep_temp: do not automatically discard generated files on exit + """ + + DEFAULT_OTP_ECC_BITS = 6 + + def __init__(self, keep_temp: bool = False): + self._log = getLogger('pyot.file') + self._keep_temp = keep_temp + self._in_fly: set[str] = set() + self._otp_files: dict[str, tuple[str, int]] = {} + self._env: dict[str, str] = {} + self._transient_vars: set[str] = set() + self._dirs: dict[str, str] = {} + register(self._cleanup) + + @property + def keep_temporary(self) -> bool: + """Tell whether temporary files and directories should be preserved or + not. + + :return: True if temporary items should not be suppressed + """ + return self._keep_temp + + def set_qemu_src_dir(self, path: str) -> None: + """set the QEMU "source" directory. + + :param path: the path to the QEMU source directory + """ + self._env['QEMU_SRC_DIR'] = abspath(path) + + def set_qemu_bin_dir(self, path: str) -> None: + """set the QEMU executable directory. + + :param path: the path to the QEMU binary directory + """ + self._env['QEMU_BIN_DIR'] = abspath(path) + + def set_config_dir(self, path: str) -> None: + """Assign the configuration directory. + + :param path: the directory that contains the input configuration + file + """ + self._env['CONFIG'] = abspath(path) + + def set_udp_log_port(self, port: int) -> None: + """Assign the UDP logger port. + + :param port: the UDP logger port + """ + self._env['UDPLOG'] = f'{port}' + + def interpolate(self, value: Any) -> str: + """Interpolate a ${...} marker with shell substitutions or local + substitution. 
+ + :param value: input value + :return: interpolated value as a string + """ + def replace(smo: re.Match) -> str: + name = smo.group(1) + val = self._env[name] if name in self._env \ + else environ.get(name, '') + if not val: + getLogger('pyot.file').warning("Unknown placeholder '%s'", + name) + return val + svalue = str(value) + nvalue = re.sub(r'\$\{(\w+)\}', replace, svalue) + if nvalue != svalue: + self._log.debug('Interpolate %s with %s', value, nvalue) + return nvalue + + def define(self, aliases: dict[str, Any]) -> None: + """Store interpolation variables into a local dictionary. + + Variable values are interpolated before being stored. + + :param aliases: an alias JSON (sub-)tree + """ + def replace(smo: re.Match) -> str: + name = smo.group(1) + val = self._env[name] if name in self._env \ + else environ.get(name, '') + return val + for name in aliases: + value = str(aliases[name]) + value = re.sub(r'\$\{(\w+)\}', replace, value) + if exists(value): + value = normpath(value) + aliases[name] = value + self._env[name.upper()] = value + self._log.debug('Store %s as %s', name.upper(), value) + + def define_transient(self, aliases: dict[str, Any]) -> None: + """Add short-lived aliases that are all discarded when cleanup_transient + is called. + + :param aliases: a dict of aliases + """ + for name in aliases: + name = name.upper() + # be sure not to make an existing non-transient variable transient + if name not in self._env: + self._transient_vars.add(name) + self.define(aliases) + + def cleanup_transient(self) -> None: + """Remove all transient variables.""" + for name in self._transient_vars: + if name in self._env: + del self._env[name] + self._transient_vars.clear() + + def interpolate_dirs(self, value: str, default: str) -> str: + """Resolve temporary directories, creating ones whenever required. 
+ + :param value: the string with optional directory placeholders + :param default: the default name to use if the placeholder contains + none + :return: the interpolated string + """ + def replace(smo: re.Match) -> str: + name = smo.group(1) + if name == '': + name = default + if name not in self._dirs: + tmp_dir = mkdtemp(prefix='qemu_ot_dir_') + self._dirs[name] = tmp_dir + else: + tmp_dir = self._dirs[name] + if not tmp_dir.endswith(sep): + tmp_dir = f'{tmp_dir}{sep}' + return tmp_dir + nvalue = re.sub(r'\@\{(\w*)\}/', replace, value) + if nvalue != value: + self._log.debug('Interpolate %s with %s', value, nvalue) + return nvalue + + def delete_default_dir(self, name: str) -> None: + """Delete a temporary directory, if has been referenced. + + :param name: the name of the directory reference + """ + if name not in self._dirs: + return + if not isdir(self._dirs[name]): + return + try: + self._log.debug('Removing tree %s for %s', self._dirs[name], name) + rmtree(self._dirs[name]) + del self._dirs[name] + except OSError: + self._log.error('Cannot be removed dir %s for %s', self._dirs[name], + name) + + def create_eflash_image(self, app: Optional[str] = None, + bootloader: Optional[str] = None, + no_flash_header: bool = False) -> str: + """Generate a temporary flash image file. + + :param app: optional path to the application or the rom extension + :param bootloader: optional path to a bootloader + :param no_flash_header: input binary file do not contain an OpenTitan + application header (i.e. 
regular files) + :return: the full path to the temporary flash file + """ + # pylint: disable=import-outside-toplevel + from ot.eflash.gen import FlashGen + gen = FlashGen(FlashGen.CHIP_ROM_EXT_SIZE_MAX if bool(bootloader) + else 0, True) + flash_fd, flash_file = mkstemp(suffix='.raw', prefix='qemu_ot_flash_') + self._in_fly.add(flash_file) + close(flash_fd) + self._log.debug('Create %s', basename(flash_file)) + try: + gen.open(flash_file) + xfiles = ((app, 'rom_ext'), (bootloader, 'bootloader')) + for xpath, xhdlr in xfiles: + if not xpath: + continue + xtype = guess_test_type(xpath) + xstore = getattr(gen, f'store_{xhdlr}') + xname = basename(xpath) + if xtype == 'elf': + with open(xpath, 'rb') as efp: + elf = ElfBlob() + elf.load(efp) + if elf.address_size != 32: + raise RuntimeError(f'{xname}: not an ELF32 file') + xfp = BytesIO(elf.blob) + xfp.name = f'{splitext(xname)[0]}.bin' + xstore(0, xfp, xpath, no_header=no_flash_header) + elif xtype == 'bin': + with open(xpath, 'rb') as xfp: + xstore(0, xfp, no_header=no_flash_header) + else: + raise RuntimeError(f'{xname} format {xtype.upper()} is ' + f'not supported') + finally: + gen.close() + return flash_file + + def create_otp_image(self, vmem: str) -> str: + """Generate a temporary OTP image file. + + If a temporary file has already been generated for the input VMEM + file, use it instead. 
+ + :param vmem: path to the VMEM source file + :return: the full path to the temporary OTP file + """ + # pylint: disable=import-outside-toplevel + if vmem in self._otp_files: + otp_file, ref_count = self._otp_files[vmem] + self._log.debug('Use existing %s', basename(otp_file)) + self._otp_files[vmem] = (otp_file, ref_count + 1) + return otp_file + from otptool import OtpImage + otp = OtpImage() + with open(vmem, 'rt', encoding='utf-8') as vfp: + otp.load_vmem(vfp, 'otp') + otp_fd, otp_file = mkstemp(suffix='.raw', prefix='qemu_ot_otp_') + self._log.debug('Create %s', basename(otp_file)) + self._in_fly.add(otp_file) + close(otp_fd) + with open(otp_file, 'wb') as rfp: + otp.save_raw(rfp) + self._otp_files[vmem] = (otp_file, 1) + return otp_file + + def delete_flash_image(self, filename: str) -> None: + """Delete a previously generated flash image file. + + :param filename: full path to the file to delete + """ + if not isfile(filename): + self._log.warning('No such flash image file %s', basename(filename)) + return + self._log.debug('Delete flash image file %s', basename(filename)) + unlink(filename) + self._in_fly.discard(filename) + + def delete_otp_image(self, filename: str) -> None: + """Delete a previously generated OTP image file. + + The file may be used by other tests, it is only deleted if it not + useful anymore. + + :param filename: full path to the file to delete + """ + if not isfile(filename): + self._log.warning('No such OTP image file %s', basename(filename)) + return + for vmem, (raw, count) in self._otp_files.items(): + if raw != filename: + continue + count -= 1 + if not count: + self._log.debug('Delete OTP image file %s', basename(filename)) + unlink(filename) + self._in_fly.discard(filename) + del self._otp_files[vmem] + else: + self._log.debug('Keep OTP image file %s', basename(filename)) + self._otp_files[vmem] = (raw, count) + break + + def _cleanup(self) -> None: + """Remove a generated, temporary flash image file. 
+ """ + removed: set[str] = set() + for tmpfile in self._in_fly: + if not isfile(tmpfile): + removed.add(tmpfile) + continue + if not self._keep_temp: + self._log.debug('Delete %s', basename(tmpfile)) + try: + unlink(tmpfile) + removed.add(tmpfile) + except OSError: + self._log.error('Cannot delete %s', basename(tmpfile)) + self._in_fly -= removed + if self._in_fly: + if not self._keep_temp: + raise OSError(f'{len(self._in_fly)} temp. files cannot be ' + f'removed') + for tmpfile in self._in_fly: + self._log.warning('Temporary file %s not suppressed', tmpfile) + removed: set[str] = set() + if not self._keep_temp: + for tmpname, tmpdir in self._dirs.items(): + if not isdir(tmpdir): + removed.add(tmpname) + continue + self._log.debug('Delete dir %s', tmpdir) + try: + rmtree(tmpdir) + removed.add(tmpname) + except OSError as exc: + self._log.error('Cannot delete %s: %s', tmpdir, exc) + for tmpname in removed: + del self._dirs[tmpname] + if self._dirs: + if not self._keep_temp: + raise OSError(f'{len(self._dirs)} temp. dirs cannot be removed') + for tmpdir in self._dirs.values(): + self._log.warning('Temporary dir %s not suppressed', tmpdir) diff --git a/python/qemu/ot/pyot/util.py b/python/qemu/ot/pyot/util.py new file mode 100644 index 0000000000000..35152b89e0977 --- /dev/null +++ b/python/qemu/ot/pyot/util.py @@ -0,0 +1,130 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Utilities for QEMU unit test sequencer. + + :author: Emmanuel Blot +""" + +from csv import reader as csv_reader +from textwrap import shorten +from typing import NamedTuple, Optional + +import logging +import re + + +class ExecTime(float): + """Float with hardcoded formatter. + """ + + def __repr__(self) -> str: + return f'{self*1000:.0f} ms' + + +class TestResult(NamedTuple): + """Test result. 
class ResultFormatter:
    """Render a result CSV file as a plain ASCII table."""

    def __init__(self):
        self._results = []

    def load(self, csvpath: str) -> None:
        """Load a CSV file (generated with QEMUExecuter) and parse it.

        :param csvpath: the path to the CSV file.
        """
        with open(csvpath, 'rt', encoding='utf-8') as cfp:
            self._results.extend(csv_reader(cfp))

    def show(self, spacing: bool = False) -> None:
        """Print a simple formatted ASCII table with loaded CSV results.

        :param spacing: add an empty line before and after the table
        """
        # the last column (error message) may be arbitrarily long: clip it
        rows = [[*row[:-1], shorten(row[-1], width=100)]
                for row in self._results]
        if not rows:
            return
        if spacing:
            print('')
        widths = [max(map(len, column)) for column in zip(*rows)]
        self._show_line(widths, '-')
        self._show_row(widths, rows[0])
        self._show_line(widths, '=')
        for row in rows[1:]:
            self._show_row(widths, row)
        self._show_line(widths, '-')
        if spacing:
            print('')

    def _show_line(self, widths: list[int], csep: str) -> None:
        # horizontal rule: one cell per column, 2 chars of padding
        cells = (csep * (width + 2) for width in widths)
        print(f'+{"+".join(cells)}+')

    def _show_row(self, widths: list[int], cols: list[str]) -> None:
        # first column is left-aligned, all others right-aligned
        cells = []
        for idx, (width, col) in enumerate(zip(widths, cols)):
            align = '>' if idx else '<'
            cells.append(f' {col:{align}{width}s} ')
        print(f'|{"|".join(cells)}|')
+ + :param classifiers: a map of loglevel, list of RE-compatible strings + to match messages + :param qemux: the QEMU executable name, to filter out useless messages + """ + + def __init__(self, classifiers: Optional[dict[str, list[str]]] = None, + qemux: Optional[str] = None): + self._qemux = qemux + if classifiers is None: + classifiers = {} + self._regexes: dict[int, re.Pattern] = {} + for klv in 'error warning info debug'.split(): + uklv = klv.upper() + cstrs = classifiers.get(klv, []) + if not isinstance(cstrs, list): + raise ValueError(f'Invalid log classifiers for {klv}') + regexes = [f'{klv}: ', f'^{uklv} ', f' {uklv} '] + for cstr in cstrs: + try: + # only sanity-check pattern, do not use result + re.compile(cstr) + except re.error as exc: + raise ValueError(f"Invalid log classifier '{cstr}' for " + f"{klv}: {exc}") from exc + regexes.append(cstr) + if regexes: + lvl = getattr(logging, uklv) + self._regexes[lvl] = re.compile(f"({'|'.join(regexes)})") + else: + lvl = getattr(logging, 'NOTSET') + # never match RE + self._regexes[lvl] = re.compile(r'\A(?!x)x') + + def classify(self, line: str, default: int = logging.ERROR) -> int: + """Classify log level of a line depending on its content. + + :param line: line to classify + :param default: defaut log level in no classification is found + :return: the logger log level to use + """ + if self._qemux and line.startswith(self._qemux): + # discard QEMU internal messages that cannot be disable from the VM + if line.find("QEMU waiting") > 0: + return logging.NOTSET + for lvl, pattern in self._regexes.items(): + if pattern.search(line): + return lvl + return default diff --git a/python/qemu/ot/pyot/worker.py b/python/qemu/ot/pyot/worker.py new file mode 100644 index 0000000000000..0ea6638d65a35 --- /dev/null +++ b/python/qemu/ot/pyot/worker.py @@ -0,0 +1,140 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Test executer for QEMU unit test sequencer. 
class QEMUContextWorker:
    """Background task for QEMU context.

    Execute a shell command in a daemon thread and log its output, so a
    test context (e.g. a companion tool) may run concurrently with the
    QEMU process.

    :param cmd: the command line to execute (run through a shell)
    :param env: the environment for the subprocess
    :param sync: an optional event to wait for before starting the command
    """

    def __init__(self, cmd: str, env: dict[str, str],
                 sync: Optional[Event] = None):
        self._log = getLogger('pyot.cmd')
        self._cmd = cmd
        self._env = env
        self._sync = sync
        self._log_q = deque()
        self._resume = False
        self._thread: Optional[Thread] = None
        self._ret = None
        self._first_error = ''

    def run(self):
        """Start the worker.
        """
        self._thread = Thread(target=self._run, daemon=True)
        self._thread.start()

    def stop(self) -> int:
        """Stop the worker.

        :return: the exit code of the worker
        :raise ValueError: if the worker has never been started
        """
        if self._thread is None:
            raise ValueError('Cannot stop idle worker')
        self._resume = False
        self._thread.join()
        return self._ret

    def exit_code(self) -> Optional[int]:
        """Return the exit code of the worker.

        :return: the exit code or None if the worked has not yet completed.
        """
        return self._ret

    @property
    def command(self) -> str:
        """Return the executed command name.
        """
        return normpath(self._cmd.split(' ', 1)[0])

    @property
    def first_error(self):
        """Return the message of the first error, if any."""
        return self._first_error

    def _run(self):
        # worker thread: execute the command and pump its log messages
        self._resume = True
        if self._sync and not self._sync.is_set():
            self._log.info('Waiting for sync')
            while self._resume:
                if self._sync.wait(0.1):
                    self._log.debug('Synchronized')
                    break
            self._sync.clear()
        # pylint: disable=consider-using-with
        proc = Popen(self._cmd, bufsize=1, stdout=PIPE, stderr=PIPE,
                     shell=True, env=self._env, encoding='utf-8',
                     errors='ignore', text=True)
        Thread(target=self._logger, args=(proc, True), daemon=True).start()
        Thread(target=self._logger, args=(proc, False), daemon=True).start()
        # self._cmd is a string: use the command basename, not self._cmd[0]
        # which is a single character and yielded a bogus prefix
        qemu_exec = f'{basename(self.command)}: '
        classifier = LogMessageClassifier(qemux=qemu_exec)
        while self._resume:
            while self._log_q:
                err, qline = self._log_q.popleft()
                if err:
                    if not self._first_error:
                        self._first_error = qline
                    loglevel = classifier.classify(qline)
                    self._log.log(loglevel, qline)
                else:
                    self._log.debug(qline)
            if proc.poll() is not None:
                # worker has exited on its own
                self._resume = False
                break
        try:
            # give some time for the process to complete on its own
            proc.wait(0.2)
            self._ret = proc.returncode
            self._log.debug("'%s' completed with '%d'", self.command,
                            self._ret)
        except TimeoutExpired:
            # still executing
            proc.terminate()
            try:
                # leave 1 second for QEMU to cleanly complete...
                proc.wait(1.0)
                # deliberate stop: report success
                self._ret = 0
            except TimeoutExpired:
                # otherwise kill it
                self._log.error("Force-killing command '%s'", self.command)
                proc.kill()
                self._ret = proc.returncode
        # retrieve the remaining log messages
        stdlog = self._log.info if self._ret else self._log.debug
        try:
            outs, errs = proc.communicate(timeout=0.1)
            if not self._first_error:
                self._first_error = errs.split('\n', 1)[0]
            for sfp, logger in zip((outs, errs), (stdlog, self._log.error)):
                for line in sfp.split('\n'):
                    line = line.strip()
                    if line:
                        logger(line)
        except TimeoutExpired:
            proc.kill()
            if self._ret is None:
                self._ret = proc.returncode

    def _logger(self, proc: Popen, err: bool):
        # worker thread, blocking on VM stdout/stderr
        stream = proc.stderr if err else proc.stdout
        while proc.poll() is None:
            line = stream.readline().strip()
            if line:
                self._log_q.append((err, line))
+ + :param tcpdev: a host, port pair that defines how to access the TCP + Virtual Com Port of QEMU first UART + :param debug: whether running in debug mode + """ + # pylint: disable=too-few-public-methods + + EXIT_ON = rb'(PASS|FAIL)!\r' + """Matching strings to search for in guest output. + + The return code of the script is the position plus the GUEST_ERROR_OFFSET + in the above RE group when matched, except first item which is always 0. + This offset is used to differentiate from QEMU own return codes. QEMU may + return negative values, which are the negative value of POSIX signals, + such as SIGABRT. + """ + + ANSI_CRE = re.compile(rb'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]') + """ANSI escape sequences.""" + + GUEST_ERROR_OFFSET = 40 + """Offset for guest errors. Should be larger than the host max signal value. + """ + + NO_MATCH_RETURN_CODE = 100 + """Return code when no matching string is found in guest output.""" + + def __init__(self, log_classifiers: dict[str, list[str]], debug: bool): + self._log_classifiers = log_classifiers + self._debug = debug + self._log = logging.getLogger('pyot') + self._qlog = logging.getLogger('pyot.qemu') + + def run(self, tdef: EasyDict[str, Any]) -> tuple[int, ExecTime, str]: + """Execute the specified QEMU command, aborting execution if QEMU does + not exit after the specified timeout. + + :param tdef: test definition and parameters + - command, a list of strings defining the QEMU command to + execute with all its options + - vcp_map: how to connect to QEMU virtual communication ports + - timeout, the allowed time for the command to execute, + specified as a real number + - expect_result, the expected outcome of QEMU (exit code). Some + tests may expect that QEMU terminates with a non-zero + exit code + - context, an option QEMUContextWorker instance, to execute + concurrently with the QEMU process. Many tests + expect to communicate with the QEMU process. 
+ - trigger, a string to match on the QEMU virtual comm port + output to trigger the context execution. It may be + defined as a regular expression. + - validate, a string to match on the QEMU virtual comm port + output to early exit. It may be defined as a regular + expression. + - start_delay, the delay to wait before starting the execution + of the context once QEMU command has been started. + :return: a 3-uple of exit code, execution time, and last guest error + """ + # stdout and stderr belongs to QEMU VM + # OT's UART0 is redirected to a TCP stream that can be accessed through + # self._device. The VM pauses till the TCP socket is connected + xre = re.compile(self.EXIT_ON) + sync_event = None + if tdef.trigger: + sync_event = Event() + match_pattern = tdef.trigger or tdef.validate + if match_pattern: + if match_pattern.startswith("r'") and match_pattern.endswith("'"): + try: + tmo = re.compile(match_pattern[2:-1].encode()) + except re.error as exc: + raise ValueError('Invalid regex: {exc}') from exc + + def trig_match(bline): + return tmo.match(bline) + else: + btrigger = match_pattern.encode() + + def trig_match(bline): + return bline.find(btrigger) >= 0 + else: + trig_match = None + ret = None + proc = None + xstart = None + xend = None + log = self._log + last_error = '' + vcp_map = tdef.vcp_map + vcp_ctxs: dict[int, list[str, socket, bytearray, logging.Logger]] = {} + try: + workdir = dirname(tdef.command[0]) + log.debug('Executing QEMU as %s', ' '.join(tdef.command)) + # pylint: disable=consider-using-with + proc = Popen(tdef.command, bufsize=1, cwd=workdir, stdout=PIPE, + stderr=PIPE, encoding='utf-8', errors='ignore', + text=True) + try: + proc.wait(0.1) + except TimeoutExpired: + pass + else: + ret = proc.returncode + log.error('QEMU bailed out: %d for "%s"', ret, tdef.test_name) + raise OSError() + log.debug('Execute QEMU for %.0f secs', tdef.timeout) + # unfortunately, subprocess's stdout calls are blocking, so the + # only way to get near real-time 
output from QEMU is to use a + # dedicated thread that may block whenever no output is available + # from the VM. This thread reads and pushes back lines to a local + # queue, which is popped and logged to the local logger on each + # loop. Note that Popen's communicate() also relies on threads to + # perform stdout/stderr read out. + log_q = deque() + Thread(target=self._qemu_logger, name='qemu_out_logger', + args=(proc, log_q, True), daemon=True).start() + Thread(target=self._qemu_logger, name='qemu_err_logger', + args=(proc, log_q, False), daemon=True).start() + poller = spoll() + connect_map = vcp_map.copy() + timeout = now() + tdef.start_delay + # ensure that QEMU starts and give some time for it to set up + # when multiple VCPs are set to 'wait', one VCP can be connected at + # a time, i.e. QEMU does not open all connections at once. + vcp_lognames = [] + vcplogname = 'pyot.vcp' + while connect_map: + if now() > timeout: + minfo = ', '.join(f'{d} @ {r[0]}:{r[1]}' + for d, r in connect_map.items()) + raise TimeoutError(f'Cannot connect to QEMU VCPs: {minfo}') + connected = [] + for vcpid, (host, port) in connect_map.items(): + try: + # timeout for connecting to VCP + sock = socket() + sock.settimeout(1) + sock.connect((host, port)) + connected.append(vcpid) + vcp_name = re.sub(r'^.*[-\.+]', '', vcpid) + vcp_lognames.append(vcp_name) + vcp_log = logging.getLogger(f'{vcplogname}.{vcp_name}') + vcp_ctxs[sock.fileno()] = [vcpid, sock, bytearray(), + vcp_log] + # remove timeout for VCP comm, as poll is used + sock.settimeout(None) + poller.register(sock, POLLIN | POLLERR | POLLHUP) + except ConnectionRefusedError: + continue + except OSError as exc: + log.error('Cannot setup QEMU VCP connection %s: %s', + vcpid, exc) + print(format_exc(chain=False), file=sys.stderr) + raise + # removal from dictionary cannot be done while iterating it + for vcpid in connected: + del connect_map[vcpid] + self._colorize_vcp_log(vcplogname, vcp_lognames) + xstart = now() + if 
tdef.context: + try: + tdef.context.execute('with', sync=sync_event) + except OSError as exc: + ret = exc.errno + last_error = exc.strerror + raise + # pylint: disable=broad-except + except Exception as exc: + ret = 126 + last_error = str(exc) + raise + qemu_exec = f'{basename(tdef.command[0])}: ' + classifier = LogMessageClassifier(classifiers=self._log_classifiers, + qemux=qemu_exec) + abstimeout = float(tdef.timeout) + now() + qemu_default_log = logging.ERROR + vcp_default_log = logging.DEBUG + while now() < abstimeout: + while log_q: + err, qline = log_q.popleft() + if err: + level = classifier.classify(qline, qemu_default_log) + if level == logging.INFO and \ + qline.find('QEMU waiting for connection') >= 0: + level = logging.DEBUG + else: + level = logging.INFO + self._qlog.log(level, qline) + if tdef.context: + wret = tdef.context.check_error() + if wret: + ret = wret + last_error = tdef.context.first_error or \ + 'Fail to execute worker' + raise OSError(wret, last_error) + xret = proc.poll() + if xret is not None: + if xend is None: + xend = now() + ret = xret + if ret != 0: + if ret != tdef.expect_result: + logfn = getattr(log, 'critical') + else: + logfn = getattr(log, 'warning') + logfn('Abnormal QEMU termination: %d for "%s"', + ret, tdef.test_name) + break + for vfd, event in poller.poll(0.01): + if event in (POLLERR, POLLHUP): + poller.modify(vfd, 0) + continue + vcpid, vcp, vcp_buf, vcp_log = vcp_ctxs[vfd] + try: + data = vcp.recv(4096) + except (TimeoutError, LegacyTimeoutError): + log.error('Unexpected timeout w/ poll on %s', vcp) + continue + vcp_buf += data + if not vcp_buf: + continue + lines = vcp_buf.split(b'\n') + vcp_buf[:] = bytearray(lines[-1]) + for line in lines[:-1]: + line = self.ANSI_CRE.sub(b'', line) + if trig_match and trig_match(line): + # reset timeout from this event + abstimeout = float(tdef.timeout) + now() + trig_match = None + if sync_event: + log.info('Trigger pattern detected, resuming ' + 'for %.0f secs', tdef.timeout) + 
sync_event.set() + else: + log.info('Validation pattern detected, exiting') + ret = 0 + break + xmo = xre.search(line) + if xmo: + xend = now() + exit_word = xmo.group(1).decode('utf-8', + errors='ignore') + ret = self._get_exit_code(xmo) + log.info("Exit sequence detected: '%s' -> %d", + exit_word, ret) + if ret == 0: + last_error = '' + break + sline = line.decode('utf-8', errors='ignore').rstrip() + level = classifier.classify(sline, vcp_default_log) + if level == logging.ERROR: + err = re.sub(r'^.*:\d+]', '', sline).lstrip() + # be sure not to preserve comma as this char is + # used as a CSV separator. + last_error = err.strip('"').replace(',', ';') + vcp_log.log(level, sline) + else: + # no match for exit sequence on current VCP + continue + if ret is not None: + # match for exit sequence on current VCP + break + if ret is not None: + # match for exit sequence on last VCP + break + if ret is None: + log.warning('Execution timed out for "%s"', tdef.test_name) + ret = 124 # timeout + except (OSError, ValueError) as exc: + if ret is None: + log.error('Unable to execute QEMU: %s', exc) + ret = proc.returncode if proc.poll() is not None else 125 + finally: + if xend is None: + xend = now() + for _, sock, _, _ in vcp_ctxs.values(): + sock.close() + vcp_ctxs.clear() + if proc: + if xend is None: + xend = now() + proc.terminate() + try: + # leave 1 second for QEMU to cleanly complete... 
+ proc.wait(1.0) + except TimeoutExpired: + # otherwise kill it + log.error('Force-killing QEMU') + proc.kill() + if ret is None: + ret = proc.returncode + # retrieve the remaining log messages + stdlog = self._qlog.info if ret else self._qlog.debug + for msg, logger in zip(proc.communicate(timeout=0.1), + (stdlog, self._qlog.error)): + for line in msg.split('\n'): + line = line.strip() + if line: + logger(line) + xtime = ExecTime(xend-xstart) if xstart and xend else 0.0 + return abs(ret) or 0, xtime, last_error + + @classmethod + def classify_log(cls, line: str, default: int = logging.ERROR, + qemux: Optional[str] = None) -> int: + """Classify log level of a line depending on its content. + + :param line: line to classify + :param default: defaut log level in no classification is found + :return: the logger log level to use + """ + if qemux and line.startswith(qemux): + # discard QEMU internal messages that cannot be disable from the VM + return logging.NOTSET + if (line.find('info: ') >= 0 or + line.startswith('INFO ') or + line.find(' INFO ') >= 0): # noqa + return logging.INFO + if (line.find('warning: ') >= 0 or + line.startswith('WARNING ') or + line.find(' WARNING ') >= 0): # noqa + return logging.WARNING + if (line.find('debug: ') >= 0 or + line.startswith('DEBUG ') or + line.find(' DEBUG ') >= 0): # noqa + return logging.DEBUG + return default + + def _colorize_vcp_log(self, vcplogname: str, lognames: list[str]) -> None: + vlog = logging.getLogger(vcplogname) + clr_fmt = None + while vlog: + for hdlr in vlog.handlers: + if isinstance(hdlr.formatter, ColorLogFormatter): + clr_fmt = hdlr.formatter + break + vlog = vlog.parent + if not clr_fmt: + return + for color, logname in enumerate(sorted(lognames)): + clr_fmt.add_logger_colors(f'{vcplogname}.{logname}', color) + + def _qemu_logger(self, proc: Popen, queue: deque, err: bool): + # worker thread, blocking on VM stdout/stderr + stream = proc.stderr if err else proc.stdout + while proc.poll() is None: + line 
= stream.readline().strip() + if line: + queue.append((err, line)) + + def _get_exit_code(self, xmo: re.Match) -> int: + groups = xmo.groups() + if not groups: + self._log.debug('No matching group, using defaut code') + return self.NO_MATCH_RETURN_CODE + match = groups[0] + try: + # try to match an integer value + return int(match) + except ValueError: + pass + # try to find in the regular expression whether the match is one of + # the alternative in the first group + alts = re.sub(rb'^.*\((.*?)\).*$', rb'\1', xmo.re.pattern).split(b'|') + try: + pos = alts.index(match) + if pos: + pos += self.GUEST_ERROR_OFFSET + return pos + except ValueError as exc: + self._log.error('Invalid match: %s with %s', exc, alts) + return len(alts) + # any other case + self._log.debug('No match, using defaut code') + return self.NO_MATCH_RETURN_CODE diff --git a/python/qemu/ot/util/file.py b/python/qemu/ot/util/file.py new file mode 100644 index 0000000000000..14f71dd75d1c0 --- /dev/null +++ b/python/qemu/ot/util/file.py @@ -0,0 +1,87 @@ +# Copyright (c) 2023-2025 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""File utilities. + + :author: Emmanuel Blot +""" + +from os import stat +from os.path import relpath +from time import localtime, strftime +from typing import BinaryIO, TextIO, Union + +import re + +from .elf import ElfBlob +from .recfmt import RecordSegment, VMemBuilder + + +def guess_test_type(file_path: str) -> str: + """Guess a test file type from its contents. 
+ + :return: identified content + """ + with open(file_path, 'rb') as bfp: + header = bfp.read(1024) + if header[:4] == b'\x7fELF': + return 'elf' + if header[:4] == b'OTPT': + return 'spiflash' + vmem_re = rb'(?i)^@[0-9A-F]{4,}\s[0-9A-F]{6,}' + for line in header.split(b'\n'): + if line.startswith(b'/*') or line.startswith(b'#'): + continue + if re.match(vmem_re, line): + return 'vmem' + return 'bin' + + +def make_vmem_from_elf(elf_file: Union[str, BinaryIO], + vmem_file: Union[str, TextIO], + offset: int = 0, + chunksize: int = 4, + offsetsize: int = 0, + close_elf: bool = True) -> None: + """Create a VMEM file from an ELF file. + + :param elf_file: ELF to convert, as either a file path or a binary + file object + :param vmem_file: ELF to convert, either a file path or a text file + object + :param offset: optional offset in bytes within the VMEM file to store + the ELF blob + :param chunksize: how many bytes per VMEM chunk to generate + :param offsetsize: how many chars to use to emit the chunk offset + :param close_elf: whether to close the ELF input file object + """ + elf = ElfBlob() + if isinstance(elf_file, str): + with open(elf_file, 'rb') as efp: + elfstat = stat(efp.fileno()) + elf.load(efp) + elfname = relpath(elf_file) + else: + elf.load(elf_file) + elfname = relpath(elf_file.name) + elfstat = stat(elf_file.fileno()) + if close_elf: + elf_file.close() + + seg = RecordSegment() + seg.write(elf.blob, offset=offset) + + vmem = VMemBuilder(byteorder='little', chunksize=chunksize, + offsetsize=offsetsize) + vmem.build([seg]) + + elftime = localtime(elfstat.st_mtime) + is_path = isinstance(vmem_file, str) + with open(vmem_file, 'wt') if is_path else vmem_file as vfp: + print(f'// name: {elfname}', file=vfp) + print(f'// built: {strftime("%Y/%m/%d %H:%M:%S", elftime)}', file=vfp) + print(f'// size: {elfstat.st_size}', file=vfp) + print(f'// rawsize: {elf.size}', file=vfp) + print(f'// entrypoint: 0x{elf.entry_point:08x}', file=vfp) + print(file=vfp) + 
class RecordSegment:
    """Data container for a consecutive sequence of bytes.

    ..note:: RecordSegment methods are extensively called, so the code has
             been optimized to decrease Python call overhead.

    :param baseaddr: absolute address of the first byte of the segment
    """

    def __init__(self, baseaddr=0):
        self._baseaddr = baseaddr
        self._size = 0
        self._buffer = BytesIO()

    @property
    def size(self):
        """Return the size in bytes of a segment."""
        return self._size

    @property
    def baseaddr(self):
        """Return the address of the first byte of the segment."""
        return self._baseaddr

    @property
    def absaddr(self):
        """Return the absolute address of this segment.

        Note: this is the address right past the last recorded byte,
        i.e. baseaddr + size.
        """
        return self._baseaddr + self._size

    @property
    def reladdr(self):
        """Return the relative address of this segment."""
        return self._size

    @property
    def data(self) -> bytes:
        """Return the segment payload."""
        return self._buffer.getvalue()

    def __str__(self):
        return f'Data segment @ {self._baseaddr:08x} {self._size} bytes'

    def write(self, data: bytes, offset=None):
        """Write new data to the segment, at the specified offset."""
        self.write_with_size(data, len(data), offset)

    def write_with_size(self, data, size, offset):
        """Write new data to the segment, at the specified offset, with a
           specific size.

        :param data: the bytes to store
        :param size: the byte length of data
        :param offset: optional absolute address; when omitted or
                       contiguous, data is appended
        """
        if offset is not None and (offset - self._baseaddr) != self._size:
            # non-contiguous write: seek to the relative position;
            # BytesIO zero-fills any gap past the current end
            offset -= self._baseaddr
            self._buffer.seek(offset, SEEK_SET)
            self._buffer.write(data)
            # the segment may grow, but an overlapping rewrite of existing
            # bytes must never shrink it (the former size computation could
            # produce a negative delta and corrupt the recorded size)
            self._size = max(self._size, offset + size)
        else:
            # contiguous or unspecified offset: plain append
            self._buffer.write(data)
            self._size += size
+ + :param src: file object for sourcing stream + :param offset: byte offset to substract to encoded address + :param min_addr: lowest address to consider + :param max_addr: highest address to consider + :param segment_gap: distance between non-consecutive address to trigger + a new segment + :param verbose: emit extra information while processing the stream + :param verify: verify the checksum with calculated one + """ + + (INFO, DATA, EXECUTE, EOF) = range(1, 5) + + def __init__(self, src: TextIO, offset: int = 0, min_addr: int = 0x0, + max_addr: int = 0xffffffff, segment_gap: int = 16, + verbose: bool = False, verify: bool = True): + if segment_gap < 1: + raise ValueError("Invalid segment gap") + self._src: TextIO = src + self._offset: int = offset + self._min_addr: int = min_addr + self._max_addr: int = max_addr + self._verbose: bool = verbose + self._exec_addr: Optional[int] = None + self._segments: list[RecordSegment] = [] + self._info: Optional[RecordSegment] = None + self._gap: int = segment_gap + self._seg: Optional[RecordSegment] = None + self._bytes: int = 0 + self._verify: bool = verify + + def parse(self): + """Parse the stream""" + for (record, address, value) in self: + if record == RecordParser.DATA: + addr = address - self._offset + if self._seg and (abs(addr - self._seg.absaddr) >= self._gap): + self._store_segment() + if not self._seg: + self._seg = RecordSegment(addr) + self._seg.write(value, addr) + elif record == RecordParser.EXECUTE: + self._exec_addr = address + elif record == RecordParser.INFO: + if not self._info: + self._info = RecordSegment(address) + self._info.write(value, address) + elif record == RecordParser.EOF: + pass + else: + raise RuntimeError("Internal error") + self._store_segment() + + def __iter__(self): + return self._get_next_chunk() + + def _get_next_chunk(self): + raise NotImplementedError() + + def get_data_segments(self) -> list[RecordSegment]: + """Return all segments.""" + return self._segments + + def 
get_info(self) -> bytes: + """Return the info segment data, if there is such a segment.""" + return bytes(self._info.data) if self._info else bytes() + + def getexec(self) -> Optional[int]: + """Return the execution address (entry point) if any.""" + return self._exec_addr + + @classmethod + def is_valid_syntax(cls, file): + """Tell whether the file contains a valid syntax. + + :param file: either a filepath or a file-like object + :return: True if the file content looks valid + """ + cre = getattr(cls, 'SYNTAX_CRE', None) + if not cre: + return False + last = False + with isinstance(file, str) and open(file, 'rt') or file as hfp: + try: + for line in hfp: + line = line.strip() + if not line: + last = True + continue + if not cre.match(line) or last: + # there should be no empty line but the last one(s) + return False + except Exception: + return False + return True + + def _verify_address(self, address): + if (address < self._min_addr) or (address > self._max_addr): + raise RecordError(f"Address out of range [0x{self._min_addr:08x}.." + f"0x{self._max_addr:08x}: 0x{address:08x}") + if address < self._offset: + raise RecordError(f"Invalid address in file: 0x{address:08x}") + + def _store_segment(self): + if self._seg and self._seg.size: + self._segments.append(self._seg) + self._seg = None + + +class SRecParser(RecordParser): + """S-record file parser. + """ + + SYNTAX_CRE = re_compile('(?i)^S([0-9])((?:[0-9A-F]{2})+)$') + + def _get_next_chunk(self): + # test if the file size can be found... + try: + self._bytes = stat(self._src.name)[6] + except Exception: + pass + bc = 0 + try: + for (lno, line) in enumerate(self._src, start=1): + line = line.strip() + if self._verbose and self._bytes: + opc = (50 * bc) // self._bytes + bc += len(line) + pc = (50 * bc) // self._bytes + if pc > opc: + info = f"\rAnalysing SREC file [{2*pc:3d}%%] {'.' 
class IHexParser(RecordParser):
    """Intel Hex record file parser.
    """

    SYNTAX_CRE = re_compile('(?i)^:[0-9A-F]+$')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # current address extension (record types 02/04), added to the
        # 16-bit address of each data record
        self._offset_addr = 0

    def _get_next_chunk(self):
        # test if the file size can be found...
        try:
            self._bytes = stat(self._src.name)[6]
        except Exception:
            pass
        bc = 0
        try:
            for (lno, line) in enumerate(self._src, start=1):
                line = line.strip()
                if self._verbose and self._bytes:
                    opc = (50 * bc) // self._bytes
                    bc += len(line)
                    pc = (50 * bc) // self._bytes
                    if pc > opc:
                        # single '%': f-strings do not use %-escaping, the
                        # former '%%' printed a literal double percent
                        info = f"\rAnalysing iHEX file [{2*pc:3d}%] {'.' * pc}"
                        sys.stdout.write(info)
                        sys.stdout.flush()
                try:
                    if len(line) < 5:
                        continue
                    if line[0] != ':':
                        raise IHexError("Invalid IHEX header")
                    size = int(line[1:3], 16)
                    address = int(line[3:7], 16)
                    # the record type field is hexadecimal like every other
                    # field; base-10 parsing only worked by accident for
                    # types 00..09 and raised an uncaught ValueError for
                    # hex-letter types
                    record = int(line[7:9], 16)
                    if record == 0:
                        type_ = RecordParser.DATA
                    elif record == 1:
                        type_ = RecordParser.EOF
                        if address != 0:
                            print(f"Unexpected non-zero address in EOF: "
                                  f"0x{address:04x}", file=sys.stderr)
                    elif record == 2:
                        # extended segment address (paragraph)
                        self._offset_addr &= ~((1 << 20) - 1)
                        self._offset_addr |= int(line[9:-2], 16) << 4
                        continue
                    elif record == 4:
                        # extended linear address (upper 16 bits)
                        self._offset_addr = int(line[9:-2], 16) << 16
                        continue
                    elif record == 3:
                        # start segment address: CS:IP entry point
                        type_ = RecordParser.EXECUTE
                        cs = int(line[9:13], 16)
                        ip = int(line[13:-2], 16)
                        address = (cs << 4) + ip
                    else:
                        raise IHexError(f"Unsupported IHEX record: {record}")
                    try:
                        bytes_ = unhexlify(line[9:-2])
                    except TypeError as exc:
                        raise IHexError(f"{exc} @ line {lno}") from exc
                    effsize = len(bytes_)
                    if size != effsize:
                        raise IHexError(f"Expected {size} bytes, got {effsize} "
                                        f"@ line {lno}")
                    if self._verify:
                        # two's complement of the byte sum of all fields
                        csum = sum(Array('B', unhexlify(line[1:-2])))
                        csum = (-csum) & 0xff
                        rsum = int(line[-2:], 16)
                        if rsum != csum:
                            raise IHexError(f"Invalid checksum: 0x{rsum:02x} / "
                                            f"0x{csum:02x}")
                    if type_ == RecordParser.DATA:
                        address += self._offset_addr
                    if self._verify and record:
                        self._verify_address(address)
                    yield (type_, address, bytes_)
                except RecordError as exc:
                    raise exc.__class__(f"{exc} @ line {lno}:'{line}'") from exc
        finally:
            if self._verbose:
                print('')
parser. + + Faster implementation than IHexParser, but less readable. + """ + + # pylint: disable=abstract-method + + HEXCRE = re_compile(r'(?aim)^:((?:[0-9A-F][0-9A-F]){5,})$') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._offset_addr = 0 + + @classmethod + def is_valid_syntax(cls, file): + """Tell whether the file contains a valid HEX syntax. + + :param file: either a filepath or a file-like object + :return: True if the file content looks valid + """ + valid = False + with isinstance(file, str) and open(file, 'rt') or file as hfp: + try: + data = hfp.read() + # it seems there is no easy way to full match a multiline re + # so compare the count of line vs, the count of matching ihex + # valid lines as a quick workaround. This could give false + # positive or negative, but this approximation is for now + # sufficient to fast match a file. + ihex_count = len(cls.HEXCRE.findall(data)) + lf_count = data.count('\n') + valid = ihex_count == lf_count + except Exception: + pass + return valid + + def parse(self): + for pos, mo in enumerate(self.HEXCRE.finditer(self._src.read()), + start=1): + bvalues = unhexlify(mo.group(0)[1:]) + size = bvalues[0] + rsum = bvalues[-1] + data = bvalues[4:-1] + if len(data) != size: + raise IHexError(f'Invalid line @ {pos} in HEX file') + if self._verify: + csum = sum(bvalues[:-1]) + csum = (-csum) & 0xff + if rsum != csum: + raise IHexError(f"Invalid checksum: 0x{rsum:02x} /" + f" 0x{csum:02x}") + address = (bvalues[1] << 8) + bvalues[2] + record = bvalues[3] + if self._verify and record: + self._verify_address(address) + if record == 0: + # RecordParser.DATA + address += self._offset_addr + addr = address - self._offset + if self._seg: + gap = addr - self._seg.absaddr + if gap < 0: + gap = -gap + if gap >= self._gap: + self._store_segment() + if not self._seg: + self._seg = RecordSegment(addr) + self._seg.write_with_size(data, size, addr) + elif record == 1: + # RecordParser.EOF + if address != 0: 
+ print(f"Unexpected non-zero address @ EOF: " + f"0x:{address:04x}", file=sys.stderr) + elif record == 2: + if size != 2: + raise IHexError('Invalid segment address') + self._offset_addr &= ~((1 << 20) - 1) + self._offset_addr |= ((data[0] << 8) + data[1]) << 4 + continue + elif record == 4: + if size != 2: + raise IHexError('Invalid linear address') + self._offset_addr = ((data[0] << 8) + data[1]) << 16 + continue + elif record == 3: + # RecordParser.EXECUTE + if size != 4: + raise IHexError('Invalid start address') + cs = (data[0] << 8) + data[1] + ip = (data[2] << 8) + data[3] + address = (cs << 4) + ip + self._exec_addr = address + self._store_segment() + + +class TItxtParser(RecordParser): + """TI txt record file parser. + """ + + def _get_next_chunk(self): + # test if the file size can be found... + try: + self._bytes = stat(self._src.name)[6] + except Exception: + pass + bc = 0 + try: + for (lno, line) in enumerate(self._src, start=1): + line = line.strip() + if self._verbose and self._bytes: + opc = (50 * bc) // self._bytes + bc += len(line) + pc = (50 * bc) // self._bytes + if pc > opc: + info = (f"\rAnalysing TItxt file [{2*pc:3d}%%] " + f"{'.' * pc}") + sys.stdout.write(info) + sys.stdout.flush() + try: + if line.startswith('@'): + address = int(line[1:], 16) + continue + if line == 'q': + yield (RecordParser.EOF, 0, b'') + break + try: + bytes_ = unhexlify(line) + except TypeError as exc: + raise IHexError(f'{exc} @ line {lno}') from exc + self._verify_address(address) + yield (RecordParser.DATA, address, bytes_) + address += len(bytes_) + except RecordError as exc: + raise exc.__class__(f"{exc} @ line {lno}:'{line}'") from exc + finally: + if self._verbose: + print('') + + +class VMemParser(RecordParser): + """VMEM record file parser. 
+ + Additional named arguments: + + :param eccbits: count of trailing bits in each data chunk + :param byteorder: either 'little' or 'big', default to big + """ + + def __init__(self, src, *args, **kwargs): + if 'eccbits' in kwargs: + try: + self._eccbits = int(kwargs.pop('eccbits')) + except ValueError as exc: + raise ValueError('Invalid ecc bit count') from exc + else: + self._eccbits = 0 + self._ecc_bytes = (self._eccbits + 7) // 8 + if 'byteorder' in kwargs: + byteorder = kwargs.pop('byteorder') + try: + self._reverse = {'little': True, 'big': False}[byteorder] + except KeyError as exc: + raise ValueError('Invalid byte order') from exc + else: + self._reverse = False + super().__init__(src, *args, **kwargs) + + def _get_next_chunk(self): + cmt_re = re_compile(r'(#|//).*$') + address = 0 + block_count = 0 + if self._ecc_bytes: + bs = slice(0, -self._ecc_bytes) + be = slice(-self._ecc_bytes, None) + else: + bs, be = slice(None), slice(0) + + if self._reverse: + def conv(data): + return bytes(reversed(data)) + else: + def conv(data): + return data + + for lno, line in enumerate(self._src, start=1): + line = cmt_re.sub('', line).rstrip() + if not line: + continue + if not line.startswith('@'): + raise VMemError(f'Invalid line @ {lno}') + parts = line[1:].split(' ') + part_count = len(parts) + try: + block = int(parts[0], 16) + except ValueError as exc: + raise VMemError(f"Invalid address @ line {lno}: {exc}") from exc + if block != block_count: + raise VMemError(f"Unexpected block {block} @ line {lno}") + try: + bmap = map(unhexlify, parts[1:]) + except (TypeError, ValueError) as exc: + raise VMemError(f"{exc} @ line {lno}") from exc + blocks = ((conv(b[bs]), conv(b[be])) for b in bmap) + # _ecc is not yet managed/verified, only discarded + data, _ecc = (b''.join(x) for x in zip(*blocks)) + self._verify_address(address) + yield (RecordParser.DATA, address, data) + address += len(data) + block_count += part_count - 1 + + +class RecordBuilder: + """Abstract record 
generator. + + :param crlf: whether to force CRLF line terminators or use host default + """ + + def __init__(self, crlf=False): + self._linesep = '\r\n' if crlf else linesep + self._buffer = StringIO() + + def build(self, datasegs, infoseg=None, execaddr=None, offset=0): + """Build the SREC stream from a binary stream""" + if infoseg: + self._buffer.write(self._create_info(infoseg)) + self._buffer.write(self._linesep) + for dataseg in datasegs: + for line in self._create_data(offset, dataseg): + self._buffer.write(line) + self._buffer.write(self._linesep) + if execaddr is not None: + self._buffer.write(self._create_exec(execaddr)) + self._buffer.write(self._linesep) + eof = self._create_eof() + if eof: + self._buffer.write(eof) + self._buffer.write(self._linesep) + + def getvalue(self) -> str: + """Return the record as as string.""" + return self._buffer.getvalue() + + def _create_info(self, segment): + raise NotImplementedError() + + def _create_data(self, offset, segment): + raise NotImplementedError() + + def _create_exec(self, address): + raise NotImplementedError() + + def _create_eof(self): + raise NotImplementedError() + + +class SRecBuilder(RecordBuilder): + """Intel Hex generator. 
+ """ + + @classmethod + def checksum(cls, hexastr: str) -> int: + """Compute the checksum of an hexa string.""" + dsum = sum(Array('B', unhexlify(hexastr))) + dsum &= 0xff + return dsum ^ 0xff + + def _create_info(self, segment): + msg = segment.data[:16] + line = f'S0{len(msg)+2+1:02x}{0:04x}' + line += hexlify(msg) + line += f'{SRecBuilder.checksum(line[2:]):02x}' + return line.upper() + + def _create_data(self, offset, segment): + data = segment.data + upaddr = segment.baseaddr+len(data) + if upaddr < (1 << 16): + prefix = 'S1%02x%04x' + elif upaddr < (1 << 24): + prefix = 'S2%02x%06x' + else: + prefix = 'S3%02x%08x' + for pos in range(0, len(data), 16): + chunk = data[pos:pos+16] + hexachunk = hexlify(chunk).decode() + line = prefix % (len(chunk) + int(prefix[1]) + 1 + 1, + offset + segment.baseaddr) + hexachunk + line += f'{SRecBuilder.checksum(line[2:]):02x}' + yield line.upper() + offset += 16 + + def _create_exec(self, address): + if address < (1 << 16): + prefix = 'S903%04x' + elif address < (1 << 24): + prefix = 'S804%06x' + else: + prefix = 'S705%08x' + line = prefix % address + line += f'{SRecBuilder.checksum(line[2:]):02x}' + return line.upper() + + def _create_eof(self): + return '' + + +class IHexBuilder(RecordBuilder): + """S-record generator. 
+ """ + + @classmethod + def checksum(cls, hexastr: str) -> int: + """Compute the checksum of an hexa string.""" + csum = sum(Array('B', unhexlify(hexastr))) + csum = (-csum) & 0xff + return csum + + def _create_info(self, segment): + return '' + + def _create_data(self, offset, segment): + data = segment.data + address = offset + segment.baseaddr + high_addr = None + for pos in range(0, len(data), 16): + high = address >> 16 + if high != high_addr: + hi_bytes = spack('>H', high) + yield self._create_line(4, 0, hi_bytes) + high_addr = high + chunk = data[pos:pos+16] + yield self._create_line(0, address & 0xffff, chunk) + address += 16 + + def _create_exec(self, address): + if address < (1 << 20): + cs = address >> 4 + ip = address & 0xFFFF + addr = (cs << 16) | ip + addr = spack('>I', addr) + return self._create_line(3, 0, addr) + addr = spack('>I', address) + return self._create_line(5, 0, addr) + + def _create_eof(self): + return self._create_line(1) + + def _create_line(self, type_, address=0, data=None): + if not data: + data = b'' + hexdat = hexlify(data).decode() + length = len(data) + datastr = f'{length:02X}{address:04X}{type_:02X}{hexdat}' + checksum = self.checksum(datastr) + line = f':{datastr}{checksum:02X}' + return line.upper() + + +class TItxtBuilder(RecordBuilder): + """TI-txt generator. + """ + + # pylint: disable=abstract-method + + def _create_data(self, offset, segment): + data = segment.data + if data: + yield f'@{segment.baseaddr + offset:04x}' + for pos in range(0, len(data), 16): + chunk = data[pos:pos+16] + line = ' '.join((f'{b:02x}' for b in chunk)) + yield line.upper() + + def _create_eof(self): + return 'q' + + +class VMemBuilder(RecordBuilder): + """VMEM generator. 
+ + :param crlf: whether to force CRLF line terminators or use host default + :param byteorder: either 'little' or 'big', default to big + :param chunksize: how many bytes to encode per chunk + :param offsetize: how many chars to encode the VMEM offset (0: auto) + :param linewidth: maximum character per output line + """ + + # pylint: disable=abstract-method + + def __init__(self, crlf=False, byteorder: Optional[str] = None, + chunksize: int = 4, offsetsize: int = 0, linewidth: int = 80): + super().__init__(crlf) + self._chunk_size = chunksize + self._offset_size = offsetsize + self._line_width = linewidth + try: + self._reverse = {'little': True, 'big': False}[byteorder] + except KeyError as exc: + raise ValueError('Invalid byte order') from exc + + def _create_data(self, offset, segment): + if offset: + raise ValueError('VMEM format does not support offsets') + data = segment.data + if not data: + return + total_chunk_count = ((segment.size + self._chunk_size - 1) // + self._chunk_size) + max_bit_count = total_chunk_count.bit_length() + off_char_len = 2 * ((max_bit_count + 7) // 8) + if self._offset_size: + if self._offset_size < off_char_len: + raise ValueError('Not enough char to encode VMEM offset') + off_char_len = self._offset_size + off_len = len(f'@{0:0{off_char_len}X} ') + chunk_len = 2 * self._chunk_size + 1 + chunk_per_line = (self._line_width - off_len) // chunk_len + line_byte_count = self._chunk_size * chunk_per_line + chunk_indices = [x * self._chunk_size for x in range(0, chunk_per_line)] + offpos = 0 + cksize = self._chunk_size + + if self._reverse: + def conv(data): + return bytes(reversed(data)) + else: + def conv(data): + return data + + for pos in range(0, len(data), line_byte_count): + chunks = (hexlify(conv(data[pos+ck:pos+ck+cksize])).upper().decode() + for ck in chunk_indices) + yield f'@{offpos:0{off_char_len}X} {" ".join(chunks)}' + offpos += chunk_per_line + + def _create_eof(self): + return '' + + +class BinaryBuilder: + """Raw binary 
generator. + """ + + # pylint: disable=missing-function-docstring + + def __init__(self, maxsize): + self._iofp = BytesIO() + self._maxsize = maxsize + + def build(self, datasegs): + addr_offset = None + for segment in sorted(datasegs, key=lambda seg: seg.baseaddr): + if addr_offset is None: + addr_offset = segment.baseaddr + offset = segment.baseaddr-addr_offset + if not 0 <= offset < self._maxsize: + # segment cannot start outside flash area + raise ValueError('Invalid HEX file') + if offset + segment.size > self._maxsize: + raise ValueError('Invalid HEX file') + self._iofp.seek(segment.baseaddr-addr_offset) + self._iofp.write(segment.data) + self._iofp.seek(0, SEEK_SET) + + def getvalue(self) -> bytes: + return self._iofp.getvalue() + + @property + def io(self) -> BinaryIO: + return self._iofp diff --git a/scripts/opentitan/flashgen.py b/scripts/opentitan/flashgen.py index 860ef74501e71..b2bb7b0f54d02 100755 --- a/scripts/opentitan/flashgen.py +++ b/scripts/opentitan/flashgen.py @@ -9,602 +9,20 @@ """ from argparse import ArgumentParser, FileType -from binascii import hexlify -from hashlib import sha256 -from itertools import repeat -from logging import getLogger -from os import SEEK_END, SEEK_SET, rename, stat -from os.path import (abspath, basename, dirname, exists, isfile, - join as joinpath, normpath) -from struct import calcsize as scalc, pack as spack, unpack as sunpack +from os import rename +from os.path import dirname, exists, isfile, join as joinpath, normpath from traceback import format_exc -from typing import Any, BinaryIO, NamedTuple, Optional, Union -import re import sys QEMU_PYPATH = joinpath(dirname(dirname(dirname(normpath(__file__)))), 'python', 'qemu') sys.path.append(QEMU_PYPATH) -from ot.util.elf import ElfBlob +from ot.eflash.gen import FlashGen from ot.util.log import configure_loggers from ot.util.misc import HexInt -# pylint: disable=missing-function-docstring - - -class BootLocation(NamedTuple): - """Boot location entry (always in two 
first pages of first info part) - """ - bank: int - page: int - seq: int - - -class RuntimeDescriptor(NamedTuple): - """Description of an executable binary. - """ - code_start: int - code_end: int - size: int - entry_point: int - - -class FlashGen: - """Generate a flash image file. - - :param bl_offset: offset of the BL0 storage within the data partition. - if forced to 0, do not reserve any space for BL0, i.e. - dedicated all storage space to ROM_EXT section. - :discard_elf_check: whether to ignore mismatching binary/ELF files. - :accept_invalid: accept invalid input files (fully ignore content) - :discard_time_check: whether to ignore mismatching time between binary - and ELF files. - """ - - NUM_BANKS = 2 - PAGES_PER_BANK = 256 - NUM_REGIONS = 8 - INFOS = [10, 1, 2] - WORDS_PER_PAGE = 256 - BYTES_PER_WORD = 8 - BYTES_PER_PAGE = 2048 - BYTES_PER_BANK = 524288 - CHIP_ROM_EXT_SIZE_MAX = 0x10000 - - HEADER_FORMAT = { - 'magic': '4s', # "vFSH" - 'hlength': 'I', # count of header bytes after this point - 'version': 'I', # version of the header - 'bank': 'B', # count of bank - 'info': 'B', # count of info partitions per bank - 'page': 'H', # count of pages per bank - 'psize': 'I', # page size in bytes - 'ipp': '12s', # count of pages for each info partition (up to 12 parts) - } - - BOOT_HEADER_FORMAT = { - 'sha': '32s', # SHA-256 digest of boot data - 'valid': 'Q', # Invalidate a previously entry - 'identifier': 'I', # Boot data identifier (i.e. 
magic) - 'counter': 'I', # used to determine the newest entry - 'min_ver_rom_ext': 'I', # Minimum required security version for ROM_EXT - 'min_ver_bl0': 'I', # Minimum required security version for BL0 - 'padding': 'Q', # Padding to make the size of header a power of 2 - } - - MANIFEST_FORMAT = { - # SigverifyRsaBuffer - 'signature': '384s', # 96u32 - # ManifestUsageConstraints - 'selector_bits': 'I', - 'device_id': '32s', # 8u32 - 'manuf_state_creator': 'I', - 'manuf_state_owner': 'I', - 'life_cycle_state': 'I', - # SigverifyRsaBuffer - 'modulus': '384s', - 'address_translation': 'I', - 'identifier': '4s', - # ManifestVersion - 'manifest_version_minor': 'H', - 'manifest_version_major': 'H', - 'signed_region_end': 'I', - 'length': 'I', - 'version_major': 'I', - 'version_minor': 'I', - 'security_version': 'I', - # Timestamp - 'timestamp': '8s', # cannot use 'Q', no longer aligned on 64-bit type - # KeymgrBindingValue - 'binding_value': '32s', # 8u32 - 'max_key_version': 'I', - 'code_start': 'I', - 'code_end': 'I', - 'entry_point': 'I', - # ManifestExtTable - 'entries': '120s' # 15*(2u32) - } - - MANIFEST_SIZE = 1024 - MANIFEST_VERSION_MINOR1 = 0x6c47 - MANIFEST_VERSION_MAJOR1 = 0x71c3 - # Allow v2 manifests, the only difference is that signatures are ECDSA. 
- MANIFEST_VERSION_MAJOR2 = 0x0002 - MANIFEST_EXT_TABLE_COUNT = 15 - - MANIFEST_TRUE = 0x739 # 'true' value for address_translation field - MANIFEST_FALSE = 0x1d4 # 'false' value for address_translation field - - IDENTIFIERS = { - None: b'\x00\x00\x00\x00', - 'rom_ext': b'OTRE', - 'bl0': b'OTB0', - } - - DEBUG_TRAILER_FORMAT = { - 'otre0': '256s', # optional path to the rom_ext filename in bank A - 'otb00': '256s', # optional path to the bl0 filename in bank A - 'otre1': '256s', # optional path to the rom_ext filename in bank B - 'otb01': '256s', # optional path to the bl0 filename in bank B - } - - BOOT_IDENTIFIER = 0x41444f42 - BOOT_INVALID = 0 - BOOT_VALID = (1 << 64) - 1 - BOOT_BANK = 1 - BOOT_PARTS = 2 - - def __init__(self, bl_offset: Optional[int] = None, - discard_elf_check: bool = False, - accept_invalid: bool = False, - discard_time_check: bool = False): - self._log = getLogger('flashgen') - self._check_manifest_size() - self._bl_offset = bl_offset if bl_offset is not None \ - else self.CHIP_ROM_EXT_SIZE_MAX - self._accept_invalid = accept_invalid - self._check_elf = not (discard_elf_check or self._accept_invalid) - self._check_time = not discard_time_check and self._check_elf - hfmt = ''.join(self.HEADER_FORMAT.values()) - header_size = scalc(hfmt) - assert header_size == 32 - self._header_size = header_size - bhfmt = ''.join(self.BOOT_HEADER_FORMAT.values()) - self._boot_header_size = scalc(bhfmt) - tfmt = ''.join(self.DEBUG_TRAILER_FORMAT.values()) - trailer_size = scalc(tfmt) - self._image_size = ((self.BYTES_PER_BANK + self.info_part_size()) * - self.NUM_BANKS + self._header_size + trailer_size) - self._ffp: Optional[BinaryIO] = None - - def open(self, path: str) -> None: - """Prepare flash content into a QEMU RAW stream. 
- """ - mode = 'r+b' if exists(path) else 'w+b' - # cannot use a context manager here - # pylint: disable=consider-using-with - self._ffp = open(path, mode) - self._ffp.seek(0, SEEK_END) - vsize = self._ffp.tell() - if vsize < self._image_size: - if vsize and mode.startswith('r'): - self._log.info('File image too short, expanding') - else: - self._log.info('Creating new image file') - header = self._build_header() - self._write(0, header) - vsize += len(header) - self._write(len(header), - bytes(repeat(0xff, self._image_size-vsize))) - self._ffp.seek(0) - if vsize > self._image_size: - self._log.info('File image too long, truncating') - self._ffp.truncate(self._image_size) - - def close(self): - if self._ffp: - pos = self._ffp.seek(0, SEEK_END) - self._ffp.close() - self._ffp = None - if pos != self._image_size: - self._log.error('Invalid image size (%d bytes)', pos) - - @property - def logger(self): - return self._log - - @classmethod - def info_part_size(cls) -> int: - return sum(cls.INFOS) * cls.BYTES_PER_PAGE - - def read_boot_info(self) -> dict[BootLocation, - dict[str, Union[int, bytes]]]: - size = self._boot_header_size - fmt = ''.join(self.BOOT_HEADER_FORMAT.values()) - boot_entries = {} - boot_bank = 1 - for page in range(self.BOOT_PARTS): - base = page * self.BYTES_PER_PAGE - for offset in range(0, self.BYTES_PER_PAGE, size): - bdata = self.read_info_partition(boot_bank, base+offset, size) - if len(bdata) != size: - raise ValueError(f'Cannot read header: {len(bdata)} ' - f'bytes @ page {page} offset ' - f'{base+offset}') - values = sunpack(f'<{fmt}', bdata) - boot = dict(zip(self.BOOT_HEADER_FORMAT, values)) - if boot['identifier'] != self.BOOT_IDENTIFIER: - continue - if boot['valid'] != self.BOOT_VALID: - continue - boot_entries[BootLocation(boot_bank, page, offset//size)] = boot - offset += size - return boot_entries - - def read_info_partition(self, bank: int, offset: int, size: int) -> bytes: - offset += (self._header_size + self.NUM_BANKS * 
self.BYTES_PER_BANK + - bank * self.info_part_size()) - pos = self._ffp.tell() - self._ffp.seek(offset) - data = self._ffp.read(size) - self._ffp.seek(pos) - return data - - def store_rom_ext(self, bank: int, dfp: BinaryIO, - elfpath: Optional[str] = None) -> None: - if not 0 <= bank < self.NUM_BANKS: - raise ValueError(f'Invalid bank {bank}') - data = dfp.read() - if len(data) > self.BYTES_PER_BANK: - raise ValueError('Data too large') - bindesc = self._check_rom_ext(data) - boot_entries = self.read_boot_info() - if not boot_entries: - next_loc = BootLocation(self.BOOT_BANK, 0, 0) - next_count = 5 - self._log.info('No pre-existing BootLocation') - else: - sorted_locs = sorted(boot_entries, - key=lambda e: boot_entries[e]['counter']) - mr_loc = sorted_locs[-1] - self._log.info('Last boot location %s', mr_loc) - mr_entry = boot_entries[mr_loc] - mr_bank = mr_loc.bank - next_op_bank = mr_bank - op_locs = [loc for loc in sorted_locs if loc.bank == next_op_bank] - if op_locs: - last_op_loc = op_locs[-1] - next_op_seq = last_op_loc.seq + 1 - next_op_page = last_op_loc.page - else: - next_op_seq = 0 - next_op_page = 0 - if next_op_seq >= self.BYTES_PER_PAGE/self._boot_header_size: - next_op_page += 1 - next_op_seq = 0 - if next_op_page >= self.BOOT_PARTS: - # erase the flash? 
- raise ValueError('No more room to store boot location') - next_loc = BootLocation(next_op_bank, next_op_page, next_op_seq) - next_count = mr_entry['counter'] + 1 - self._write(self._header_size + bank * self.BYTES_PER_BANK, data) - boot_header = self._build_boot_header(next_count) - offset = self._get_boot_location_offset(next_loc) - self._write(offset, boot_header) - info_offset = (offset - self.NUM_BANKS * self.BYTES_PER_BANK - - self._header_size) - self._log.info('New %s stored @ abs:0x%06x / rel:0x%06x', - next_loc, offset, info_offset) - field_offset, field_data = self._build_field(self.BOOT_HEADER_FORMAT, - 'valid', self.BOOT_INVALID) - for loc, entry in boot_entries.items(): - if loc.bank != next_op_bank: - continue - if entry['valid'] != self.BOOT_INVALID: - offset = self._get_boot_location_offset(loc) - offset += field_offset - self._write(offset, field_data) - ename = f'otre{bank}' - if bindesc: - if not elfpath: - elfpath = self._get_elf_filename(dfp.name) - elif elfpath: - self._log.warning('Discarding ELF as input binary file is invalid') - elfpath = None - if elfpath: - elftime = stat(elfpath).st_mtime - bintime = stat(dfp.name).st_mtime - if bintime < elftime: - msg = 'Application binary file is older than ELF file' - if self._check_time: - raise RuntimeError(msg) - self._log.warning(msg) - be_match = self._compare_bin_elf(bindesc, elfpath) - if not be_match: - if be_match is None: - msg = 'Cannot verify ELF file (pyelftools not installed)' - else: - msg = 'Application ELF file does not match binary file' - if self._check_elf: - raise RuntimeError(msg) - self._log.warning(msg) - self._store_debug_info(ename, elfpath) - - def store_bootloader(self, bank: int, dfp: BinaryIO, - elfpath: Optional[str] = None) -> None: - if self._bl_offset == 0: - raise ValueError('Bootloader cannot be used') - if not 0 <= bank < self.NUM_BANKS: - raise ValueError(f'Invalid bank {bank}') - data = dfp.read() - if len(data) > self.BYTES_PER_BANK: - raise ValueError('Data 
too large') - bindesc = self._check_bootloader(data) - self._write(self._header_size + self._bl_offset, data) - ename = f'otb0{bank}' - if bindesc: - if not elfpath: - elfpath = self._get_elf_filename(dfp.name) - elif elfpath: - self._log.warning('Discarding ELF as input binary file is invalid') - elfpath = None - if elfpath: - elftime = stat(elfpath).st_mtime - bintime = stat(dfp.name).st_mtime - if bintime < elftime: - msg = 'Boot binary file is older than ELF file' - if self._check_time: - raise RuntimeError(msg) - self._log.warning(msg) - be_match = self._compare_bin_elf(bindesc, elfpath) - if not be_match: - if be_match is None: - msg = 'Cannot verify ELF file (pyelftools not installed)' - else: - msg = 'Boot ELF file does not match binary file' - if self._check_elf: - raise RuntimeError(msg) - self._log.warning(msg) - self._store_debug_info(ename, elfpath) - - def store_ot_files(self, otdescs: list[str]) -> None: - for dpos, otdesc in enumerate(otdescs, start=1): - parts = otdesc.rsplit(':', 1) - if len(parts) > 1: - otdesc = parts[0] - elf_filename = parts[1] - else: - elf_filename = None - parts = otdesc.split('@', 1) - if len(parts) < 2: - raise ValueError('Missing address in OT descriptor') - bin_filename = parts[0] - if not isfile(bin_filename): - raise ValueError(f'No such file {bin_filename}') - try: - address = int(parts[1], 16) - except ValueError as exc: - raise ValueError('Invalid address in OT descriptor') from exc - bank = address // self.BYTES_PER_BANK - address %= self.BYTES_PER_BANK - kind = 'rom_ext' if address < self.CHIP_ROM_EXT_SIZE_MAX else \ - 'bootloader' - self._log.info( - 'Handling file #%d as %s @ 0x%x in bank %d with%s ELF', - dpos, kind, address, bank, '' if elf_filename else 'out') - with open(bin_filename, 'rb') as bfp: - # func decode should never fail, so no error handling here - getattr(self, f'store_{kind}')(bank, bfp, elf_filename) - - def _compare_bin_elf(self, bindesc: RuntimeDescriptor, elfpath: str) \ - -> 
Optional[bool]: - if ElfBlob.ELF_ERROR: - return None - with open(elfpath, 'rb') as efp: - elfdesc = self._load_elf_info(efp) - if not elfdesc: - return False - binep = bindesc.entry_point & (self.CHIP_ROM_EXT_SIZE_MAX - 1) - elfep = elfdesc.entry_point & (self.CHIP_ROM_EXT_SIZE_MAX - 1) - if binep != elfep: - self._log.warning('Cannot compare bin vs. elf files') - return False - offset = elfdesc.entry_point - bindesc.entry_point - self._log.debug('ELF base offset 0x%08x', offset) - relfdesc = RuntimeDescriptor(elfdesc.code_start - offset, - elfdesc.code_end - offset, - elfdesc.size, - elfdesc.entry_point - offset) - match = bindesc == relfdesc - logfunc = self._log.debug if match else self._log.warning - logfunc('start bin %08x / elf %08x', - bindesc.code_start, relfdesc.code_start) - logfunc('end bin %08x / elf %08x', - bindesc.code_end, relfdesc.code_end) - logfunc('size bin %08x / elf %08x', - bindesc.size, relfdesc.size) - logfunc('entry bin %08x / elf %08x', - bindesc.entry_point, relfdesc.entry_point) - return match - - def _write(self, offset: Optional[int], data: bytes) -> None: - pos = self._ffp.tell() - if offset is None: - offset = pos - if offset + len(data) > self._image_size: - raise ValueError(f'Invalid offset {offset}+{len(data)}, ' - f'max {self._image_size}') - self._ffp.seek(offset, SEEK_SET) - self._ffp.write(data) - self._ffp.seek(pos, SEEK_SET) - - def _get_info_part_offset(self, part: int, info: int) -> int: - offset = self._header_size + self.NUM_BANKS * self.BYTES_PER_BANK - partition = 0 - while partition < part: - offset += self.INFOS[partition]*self.BYTES_PER_PAGE - partition += 1 - offset += info * self.BYTES_PER_PAGE - return offset - - def _get_boot_location_offset(self, loc: BootLocation) -> int: - return (loc.bank * self.info_part_size() + - self._get_info_part_offset(0, 0) + - loc.page * self.BYTES_PER_PAGE + - loc.seq * self._boot_header_size) - - def _build_field(self, fmtdict: dict[str, Any], field: str, value: Any) \ - -> 
tuple[int, bytes]: - offset = 0 - for name, fmt in fmtdict.items(): - if name == field: - return offset, spack(f'<{fmt}', value) - offset += scalc(fmt) - raise ValueError(f'No such field: {field}') - - def _build_header(self) -> bytes: - # hlength is the length of header minus the two first items (T, L) - hfmt = self.HEADER_FORMAT - fhfmt = ''.join(hfmt.values()) - shfmt = ''.join(hfmt[k] for k in list(hfmt)[:2]) - hlen = scalc(fhfmt) - scalc(shfmt) - ipp = bytearray(self.INFOS) - ipp.extend([0] * (12 - len(ipp))) - values = { - 'magic': b'vFSH', 'hlength': hlen, 'version': 1, - 'bank': self.NUM_BANKS, 'info': len(self.INFOS), - 'page': self.PAGES_PER_BANK, 'psize': self.BYTES_PER_PAGE, - 'ipp': bytes(ipp) - } - args = [values[k] for k in hfmt] - header = spack(f'<{fhfmt}', *args) - return header - - def _build_boot_header(self, counter) -> bytes: - min_sec_ver_rom_ext = 0 - min_sec_ver_bl0 = 0 - padding = 0 - fmts = list(self.BOOT_HEADER_FORMAT.values()) - sha_fmt, pld_fmt = fmts[0], ''.join(fmts[1:]) - payload = spack(f'<{pld_fmt}', self.BOOT_VALID, self.BOOT_IDENTIFIER, - counter, min_sec_ver_rom_ext, min_sec_ver_bl0, padding) - sha = spack(sha_fmt, sha256(payload).digest()) - header = b''.join((sha, payload)) - return header - - def _get_elf_filename(self, filename: str) -> str: - pathname = abspath(filename) - radix = re.sub(r'.[a-z_]+_0.signed.bin$', '', pathname) - elfname = f'{radix}.elf' - if not exists(elfname): - self._log.warning('No ELF debug info found') - return '' - self._log.info('Using ELF %s for %s', - basename(elfname), basename(filename)) - return elfname - - def _load_elf_info(self, efp: BinaryIO) \ - -> Optional[RuntimeDescriptor]: - if ElfBlob.ELF_ERROR: - # ELF tools are not available - self._log.warning('ELF file cannot be verified') - return None - elf = ElfBlob() - elf.load(efp) - if elf.address_size != 32: - raise ValueError('Spefified ELF file {} is not an ELF32 file') - elfstart, elfend = elf.code_span - return 
RuntimeDescriptor(elfstart, elfend, elf.size, elf.entry_point) - - def _store_debug_info(self, entryname: str, filename: Optional[str]) \ - -> None: - fnp = filename.encode('utf8') if filename else b'' - lfnp = len(fnp) - tfmt = ''.join(self.DEBUG_TRAILER_FORMAT.values()) - trailer_size = scalc(tfmt) - trailer_offset = self._image_size - trailer_size - for name, fmt in self.DEBUG_TRAILER_FORMAT.items(): - lfmt = scalc(fmt) - if name != entryname: - trailer_offset += lfmt - continue - if lfnp < lfmt: - fnp = b''.join((fnp, bytes(lfmt-lfnp))) - elif lfnp > lfmt: - self._log.warning('ELF pathname too long to store') - return - fnp = spack(fmt, fnp) # useless, used as sanity check - self._write(trailer_offset, fnp) - break - else: - self._log.warning('Unable to find a matching debug entry: %s', - entryname) - - def _check_rom_ext(self, data: bytes) -> Optional[RuntimeDescriptor]: - max_size = self._bl_offset or self.BYTES_PER_BANK - try: - return self._check_manifest(data, 'rom_ext', max_size) - except ValueError: - if self._accept_invalid: - return None - raise - - def _check_bootloader(self, data: bytes) -> Optional[RuntimeDescriptor]: - assert self._bl_offset - max_size = self.BYTES_PER_BANK - self._bl_offset - try: - return self._check_manifest(data, 'bl0', max_size) - except ValueError: - if self._accept_invalid: - return None - raise - - def _check_manifest(self, data: bytes, kind: str, max_size: int) \ - -> RuntimeDescriptor: - if len(data) > max_size: - raise ValueError(f'{kind} too large') - mfmt = ''.join(self.MANIFEST_FORMAT.values()) - slen = scalc(mfmt) - if len(data) <= slen: - raise ValueError(f'{kind} too short') - manifest = dict(zip(self.MANIFEST_FORMAT, - sunpack(f'<{mfmt}', data[:slen]))) - self._log_manifest(manifest) - if (manifest['manifest_version_major'] not in - (self.MANIFEST_VERSION_MAJOR1, self.MANIFEST_VERSION_MAJOR2) - or manifest['manifest_version_minor'] != - self.MANIFEST_VERSION_MINOR1): - raise ValueError('Unsupported manifest 
version') - self._log.info('%s code start 0x%05x, end 0x%05x, exec 0x%05x', - kind, manifest['code_start'], manifest['code_end'], - manifest['entry_point']) - if manifest['identifier'] != self.IDENTIFIERS[kind]: - if manifest['identifier'] != self.IDENTIFIERS[None]: - manifest_str = hexlify(manifest["identifier"]).decode().upper() - raise ValueError(f'Specified file is not a {kind} file: ' - f'{manifest_str}') - self._log.warning('Empty %s manifest, cannot verify', kind) - return RuntimeDescriptor(manifest['code_start'], manifest['code_end'], - manifest['length'], manifest['entry_point']) - - @classmethod - def _check_manifest_size(cls): - slen = scalc(''.join(cls.MANIFEST_FORMAT.values())) - assert cls.MANIFEST_SIZE == slen, 'Invalid Manifest size' - - def _log_manifest(self, manifest): - for item, value in manifest.items(): - if isinstance(value, int): - self._log.debug('%s: 0x%08x', item, value) - elif isinstance(value, bytes): - self._log.debug('%s: (%d) %s', item, len(value), - hexlify(value).decode()) - else: - self._log.debug('%s: (%d) %s', item, len(value), value) - - def main(): """Main routine""" debug = True diff --git a/scripts/opentitan/pyot.py b/scripts/opentitan/pyot.py index 9da71bb814c38..e4d8e4a8f2b16 100755 --- a/scripts/opentitan/pyot.py +++ b/scripts/opentitan/pyot.py @@ -9,11 +9,6 @@ """ from argparse import ArgumentParser, FileType, Namespace -from atexit import register -from collections import defaultdict, deque -from csv import reader as csv_reader, writer as csv_writer -from fnmatch import fnmatchcase -from glob import glob try: _HJSON_ERROR = None from hjson import load as jload @@ -22,22 +17,14 @@ def hjload(*_, **__): # noqa: E301 """dummy func if HJSON module is not available""" return {} -from os import close, curdir, environ, getcwd, linesep, pardir, sep, unlink -from os.path import (abspath, basename, dirname, exists, isabs, isdir, isfile, - join as joinpath, normpath, relpath) -from select import POLLIN, POLLERR, POLLHUP, poll as 
spoll -from shutil import rmtree -from socket import socket, timeout as LegacyTimeoutError -from subprocess import Popen, PIPE, TimeoutExpired -from threading import Event, Thread -from tempfile import mkdtemp, mkstemp -from textwrap import shorten -from time import sleep, time as now +from os import close, linesep, unlink +from os.path import (basename, dirname, isfile, join as joinpath, normpath, + relpath) +from tempfile import mkstemp +from time import sleep from traceback import format_exc -from typing import Any, Iterator, NamedTuple, Optional +from typing import Optional -import logging -import re import sys QEMU_PYPATH = joinpath(dirname(dirname(dirname(normpath(__file__)))), @@ -48,1771 +35,14 @@ def hjload(*_, **__): # noqa: E301 # pylint: disable=wrong-import-order # pylint: disable=import-error -from ot.util.log import (ColorLogFormatter, RemoteLogService, configure_loggers, - flush_memory_loggers) -from ot.util.misc import EasyDict - +from ot.pyot import DEFAULT_TIMEOUT, DEFAULT_TIMEOUT_FACTOR +from ot.pyot.executer import QEMUExecuter +from ot.pyot.filemgr import QEMUFileManager +from ot.pyot.util import ResultFormatter +from ot.util.log import ColorLogFormatter, RemoteLogService, configure_loggers DEFAULT_MACHINE = 'ot-earlgrey' DEFAULT_DEVICE = 'localhost:8000' -DEFAULT_TIMEOUT = 60 # seconds -DEFAULT_TIMEOUT_FACTOR = 1.0 - -getLogger = logging.getLogger - - -class ExecTime(float): - """Float with hardcoded formatter. - """ - - def __repr__(self) -> str: - return f'{self*1000:.0f} ms' - - -class TestResult(NamedTuple): - """Test result. - """ - name: str - result: str - time: ExecTime - icount: Optional[str] - error: str - - -class ResultFormatter: - """Format a result CSV file as a simple result table.""" - - def __init__(self): - self._results = [] - - def load(self, csvpath: str) -> None: - """Load a CSV file (generated with QEMUExecuter) and parse it. - - :param csvpath: the path to the CSV file. 
- """ - with open(csvpath, 'rt', encoding='utf-8') as cfp: - csv = csv_reader(cfp) - for row in csv: - self._results.append(row) - - def show(self, spacing: bool = False) -> None: - """Print a simple formatted ASCII table with loaded CSV results. - - :param spacing: add an empty line before and after the table - """ - results = [r[:-1] + [shorten(r[-1], width=100)] for r in self._results] - if not results: - return - if spacing: - print('') - widths = [max(len(x) for x in col) for col in zip(*results)] - self._show_line(widths, '-') - self._show_row(widths, results[0]) - self._show_line(widths, '=') - for row in results[1:]: - self._show_row(widths, row) - self._show_line(widths, '-') - if spacing: - print('') - - def _show_line(self, widths: list[int], csep: str) -> None: - print(f'+{"+".join([csep * (w+2) for w in widths])}+') - - def _show_row(self, widths: list[int], cols: list[str]) -> None: - line = '|'.join([f' {c:{">" if p else "<"}{w}s} ' - for p, (w, c) in enumerate(zip(widths, cols))]) - print(f'|{line}|') - - -class LogMessageClassifier: - """Log level classifier for log messages. 
- - :param classifiers: a map of loglevel, list of RE-compatible strings - to match messages - :param qemux: the QEMU executable name, to filter out useless messages - """ - - def __init__(self, classifiers: Optional[dict[str, list[str]]] = None, - qemux: Optional[str] = None): - self._qemux = qemux - if classifiers is None: - classifiers = {} - self._regexes: dict[int, re.Pattern] = {} - for klv in 'error warning info debug'.split(): - uklv = klv.upper() - cstrs = classifiers.get(klv, []) - if not isinstance(cstrs, list): - raise ValueError(f'Invalid log classifiers for {klv}') - regexes = [f'{klv}: ', f'^{uklv} ', f' {uklv} '] - for cstr in cstrs: - try: - # only sanity-check pattern, do not use result - re.compile(cstr) - except re.error as exc: - raise ValueError(f"Invalid log classifier '{cstr}' for " - f"{klv}: {exc}") from exc - regexes.append(cstr) - if regexes: - lvl = getattr(logging, uklv) - self._regexes[lvl] = re.compile(f"({'|'.join(regexes)})") - else: - lvl = getattr(logging, 'NOTSET') - # never match RE - self._regexes[lvl] = re.compile(r'\A(?!x)x') - - def classify(self, line: str, default: int = logging.ERROR) -> int: - """Classify log level of a line depending on its content. - - :param line: line to classify - :param default: defaut log level in no classification is found - :return: the logger log level to use - """ - if self._qemux and line.startswith(self._qemux): - # discard QEMU internal messages that cannot be disable from the VM - if line.find("QEMU waiting") > 0: - return logging.NOTSET - for lvl, pattern in self._regexes.items(): - if pattern.search(line): - return lvl - return default - - -class QEMUWrapper: - """A small engine to run tests with QEMU. 
- - :param tcpdev: a host, port pair that defines how to access the TCP - Virtual Com Port of QEMU first UART - :param debug: whether running in debug mode - """ - # pylint: disable=too-few-public-methods - - EXIT_ON = rb'(PASS|FAIL)!\r' - """Matching strings to search for in guest output. - - The return code of the script is the position plus the GUEST_ERROR_OFFSET - in the above RE group when matched, except first item which is always 0. - This offset is used to differentiate from QEMU own return codes. QEMU may - return negative values, which are the negative value of POSIX signals, - such as SIGABRT. - """ - - ANSI_CRE = re.compile(rb'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]') - """ANSI escape sequences.""" - - GUEST_ERROR_OFFSET = 40 - """Offset for guest errors. Should be larger than the host max signal value. - """ - - NO_MATCH_RETURN_CODE = 100 - """Return code when no matching string is found in guest output.""" - - def __init__(self, log_classifiers: dict[str, list[str]], debug: bool): - self._log_classifiers = log_classifiers - self._debug = debug - self._log = getLogger('pyot') - self._qlog = getLogger('pyot.qemu') - - def run(self, tdef: EasyDict[str, Any]) -> tuple[int, ExecTime, str]: - """Execute the specified QEMU command, aborting execution if QEMU does - not exit after the specified timeout. - - :param tdef: test definition and parameters - - command, a list of strings defining the QEMU command to - execute with all its options - - vcp_map: how to connect to QEMU virtual communication ports - - timeout, the allowed time for the command to execute, - specified as a real number - - expect_result, the expected outcome of QEMU (exit code). Some - tests may expect that QEMU terminates with a non-zero - exit code - - context, an option QEMUContextWorker instance, to execute - concurrently with the QEMU process. Many tests - expect to communicate with the QEMU process. 
- - trigger, a string to match on the QEMU virtual comm port - output to trigger the context execution. It may be - defined as a regular expression. - - validate, a string to match on the QEMU virtual comm port - output to early exit. It may be defined as a regular - expression. - - start_delay, the delay to wait before starting the execution - of the context once QEMU command has been started. - :return: a 3-uple of exit code, execution time, and last guest error - """ - # stdout and stderr belongs to QEMU VM - # OT's UART0 is redirected to a TCP stream that can be accessed through - # self._device. The VM pauses till the TCP socket is connected - xre = re.compile(self.EXIT_ON) - sync_event = None - if tdef.trigger: - sync_event = Event() - match_pattern = tdef.trigger or tdef.validate - if match_pattern: - if match_pattern.startswith("r'") and match_pattern.endswith("'"): - try: - tmo = re.compile(match_pattern[2:-1].encode()) - except re.error as exc: - raise ValueError('Invalid regex: {exc}') from exc - - def trig_match(bline): - return tmo.match(bline) - else: - btrigger = match_pattern.encode() - - def trig_match(bline): - return bline.find(btrigger) >= 0 - else: - trig_match = None - ret = None - proc = None - xstart = None - xend = None - log = self._log - last_error = '' - vcp_map = tdef.vcp_map - vcp_ctxs: dict[int, tuple[str, socket, bytearray]] = {} - try: - workdir = dirname(tdef.command[0]) - log.debug('Executing QEMU as %s', ' '.join(tdef.command)) - # pylint: disable=consider-using-with - proc = Popen(tdef.command, bufsize=1, cwd=workdir, stdout=PIPE, - stderr=PIPE, encoding='utf-8', errors='ignore', - text=True) - try: - proc.wait(0.1) - except TimeoutExpired: - pass - else: - ret = proc.returncode - log.error('QEMU bailed out: %d for "%s"', ret, tdef.test_name) - raise OSError() - log.debug('Execute QEMU for %.0f secs', tdef.timeout) - # unfortunately, subprocess's stdout calls are blocking, so the - # only way to get near real-time output from 
QEMU is to use a - # dedicated thread that may block whenever no output is available - # from the VM. This thread reads and pushes back lines to a local - # queue, which is popped and logged to the local logger on each - # loop. Note that Popen's communicate() also relies on threads to - # perform stdout/stderr read out. - log_q = deque() - Thread(target=self._qemu_logger, name='qemu_out_logger', - args=(proc, log_q, True), daemon=True).start() - Thread(target=self._qemu_logger, name='qemu_err_logger', - args=(proc, log_q, False), daemon=True).start() - poller = spoll() - connect_map = vcp_map.copy() - timeout = now() + tdef.start_delay - # ensure that QEMU starts and give some time for it to set up - # when multiple VCPs are set to 'wait', one VCP can be connected at - # a time, i.e. QEMU does not open all connections at once. - vcp_lognames = [] - vcplogname = 'pyot.vcp' - while connect_map: - if now() > timeout: - minfo = ', '.join(f'{d} @ {r[0]}:{r[1]}' - for d, r in connect_map.items()) - raise TimeoutError(f'Cannot connect to QEMU VCPs: {minfo}') - connected = [] - for vcpid, (host, port) in connect_map.items(): - try: - # timeout for connecting to VCP - sock = socket() - sock.settimeout(1) - sock.connect((host, port)) - connected.append(vcpid) - vcp_name = re.sub(r'^.*[-\.+]', '', vcpid) - vcp_lognames.append(vcp_name) - vcp_log = getLogger(f'{vcplogname}.{vcp_name}') - vcp_ctxs[sock.fileno()] = [vcpid, sock, bytearray(), - vcp_log] - # remove timeout for VCP comm, as poll is used - sock.settimeout(None) - poller.register(sock, POLLIN | POLLERR | POLLHUP) - except ConnectionRefusedError: - continue - except OSError as exc: - log.error('Cannot setup QEMU VCP connection %s: %s', - vcpid, exc) - print(format_exc(chain=False), file=sys.stderr) - raise - # removal from dictionary cannot be done while iterating it - for vcpid in connected: - del connect_map[vcpid] - self._colorize_vcp_log(vcplogname, vcp_lognames) - xstart = now() - if tdef.context: - try: - 
tdef.context.execute('with', sync=sync_event) - except OSError as exc: - ret = exc.errno - last_error = exc.strerror - raise - # pylint: disable=broad-except - except Exception as exc: - ret = 126 - last_error = str(exc) - raise - qemu_exec = f'{basename(tdef.command[0])}: ' - classifier = LogMessageClassifier(classifiers=self._log_classifiers, - qemux=qemu_exec) - abstimeout = float(tdef.timeout) + now() - qemu_default_log = logging.ERROR - vcp_default_log = logging.DEBUG - while now() < abstimeout: - while log_q: - err, qline = log_q.popleft() - if err: - level = classifier.classify(qline, qemu_default_log) - if level == logging.INFO and \ - qline.find('QEMU waiting for connection') >= 0: - level = logging.DEBUG - else: - level = logging.INFO - self._qlog.log(level, qline) - if tdef.context: - wret = tdef.context.check_error() - if wret: - ret = wret - last_error = tdef.context.first_error or \ - 'Fail to execute worker' - raise OSError(wret, last_error) - xret = proc.poll() - if xret is not None: - if xend is None: - xend = now() - ret = xret - if ret != 0: - if ret != tdef.expect_result: - logfn = getattr(log, 'critical') - else: - logfn = getattr(log, 'warning') - logfn('Abnormal QEMU termination: %d for "%s"', - ret, tdef.test_name) - break - for vfd, event in poller.poll(0.01): - if event in (POLLERR, POLLHUP): - poller.modify(vfd, 0) - continue - vcpid, vcp, vcp_buf, vcp_log = vcp_ctxs[vfd] - try: - data = vcp.recv(4096) - except (TimeoutError, LegacyTimeoutError): - log.error('Unexpected timeout w/ poll on %s', vcp) - continue - vcp_buf += data - if not vcp_buf: - continue - lines = vcp_buf.split(b'\n') - vcp_buf[:] = bytearray(lines[-1]) - for line in lines[:-1]: - line = self.ANSI_CRE.sub(b'', line) - if trig_match and trig_match(line): - # reset timeout from this event - abstimeout = float(tdef.timeout) + now() - trig_match = None - if sync_event: - log.info('Trigger pattern detected, resuming ' - 'for %.0f secs', tdef.timeout) - sync_event.set() - 
else: - log.info('Validation pattern detected, exiting') - ret = 0 - break - xmo = xre.search(line) - if xmo: - xend = now() - exit_word = xmo.group(1).decode('utf-8', - errors='ignore') - ret = self._get_exit_code(xmo) - log.info("Exit sequence detected: '%s' -> %d", - exit_word, ret) - if ret == 0: - last_error = '' - break - sline = line.decode('utf-8', errors='ignore').rstrip() - level = classifier.classify(sline, vcp_default_log) - if level == logging.ERROR: - err = re.sub(r'^.*:\d+]', '', sline).lstrip() - # be sure not to preserve comma as this char is - # used as a CSV separator. - last_error = err.strip('"').replace(',', ';') - vcp_log.log(level, sline) - else: - # no match for exit sequence on current VCP - continue - if ret is not None: - # match for exit sequence on current VCP - break - if ret is not None: - # match for exit sequence on last VCP - break - if ret is None: - log.warning('Execution timed out for "%s"', tdef.test_name) - ret = 124 # timeout - except (OSError, ValueError) as exc: - if ret is None: - log.error('Unable to execute QEMU: %s', exc) - ret = proc.returncode if proc.poll() is not None else 125 - finally: - if xend is None: - xend = now() - for _, sock, _, _ in vcp_ctxs.values(): - sock.close() - vcp_ctxs.clear() - if proc: - if xend is None: - xend = now() - proc.terminate() - try: - # leave 1 second for QEMU to cleanly complete... 
- proc.wait(1.0) - except TimeoutExpired: - # otherwise kill it - log.error('Force-killing QEMU') - proc.kill() - if ret is None: - ret = proc.returncode - # retrieve the remaining log messages - stdlog = self._qlog.info if ret else self._qlog.debug - for msg, logger in zip(proc.communicate(timeout=0.1), - (stdlog, self._qlog.error)): - for line in msg.split('\n'): - line = line.strip() - if line: - logger(line) - xtime = ExecTime(xend-xstart) if xstart and xend else 0.0 - return abs(ret) or 0, xtime, last_error - - @classmethod - def classify_log(cls, line: str, default: int = logging.ERROR, - qemux: Optional[str] = None) -> int: - """Classify log level of a line depending on its content. - - :param line: line to classify - :param default: defaut log level in no classification is found - :return: the logger log level to use - """ - if qemux and line.startswith(qemux): - # discard QEMU internal messages that cannot be disable from the VM - return logging.NOTSET - if (line.find('info: ') >= 0 or - line.startswith('INFO ') or - line.find(' INFO ') >= 0): # noqa - return logging.INFO - if (line.find('warning: ') >= 0 or - line.startswith('WARNING ') or - line.find(' WARNING ') >= 0): # noqa - return logging.WARNING - if (line.find('debug: ') >= 0 or - line.startswith('DEBUG ') or - line.find(' DEBUG ') >= 0): # noqa - return logging.DEBUG - return default - - def _colorize_vcp_log(self, vcplogname: str, lognames: list[str]) -> None: - vlog = getLogger(vcplogname) - clr_fmt = None - while vlog: - for hdlr in vlog.handlers: - if isinstance(hdlr.formatter, ColorLogFormatter): - clr_fmt = hdlr.formatter - break - vlog = vlog.parent - if not clr_fmt: - return - for color, logname in enumerate(sorted(lognames)): - clr_fmt.add_logger_colors(f'{vcplogname}.{logname}', color) - - def _qemu_logger(self, proc: Popen, queue: deque, err: bool): - # worker thread, blocking on VM stdout/stderr - stream = proc.stderr if err else proc.stdout - while proc.poll() is None: - line = 
stream.readline().strip() - if line: - queue.append((err, line)) - - def _get_exit_code(self, xmo: re.Match) -> int: - groups = xmo.groups() - if not groups: - self._log.debug('No matching group, using defaut code') - return self.NO_MATCH_RETURN_CODE - match = groups[0] - try: - # try to match an integer value - return int(match) - except ValueError: - pass - # try to find in the regular expression whether the match is one of - # the alternative in the first group - alts = re.sub(rb'^.*\((.*?)\).*$', rb'\1', xmo.re.pattern).split(b'|') - try: - pos = alts.index(match) - if pos: - pos += self.GUEST_ERROR_OFFSET - return pos - except ValueError as exc: - self._log.error('Invalid match: %s with %s', exc, alts) - return len(alts) - # any other case - self._log.debug('No match, using defaut code') - return self.NO_MATCH_RETURN_CODE - - -class QEMUFileManager: - """Simple file manager to generate and track temporary files. - - :param keep_temp: do not automatically discard generated files on exit - """ - - DEFAULT_OTP_ECC_BITS = 6 - - def __init__(self, keep_temp: bool = False): - self._log = getLogger('pyot.file') - self._keep_temp = keep_temp - self._in_fly: set[str] = set() - self._otp_files: dict[str, tuple[str, int]] = {} - self._env: dict[str, str] = {} - self._transient_vars: set[str] = set() - self._dirs: dict[str, str] = {} - register(self._cleanup) - - @property - def keep_temporary(self) -> bool: - """Tell whether temporary files and directories should be preserved or - not. - - :return: True if temporary items should not be suppressed - """ - return self._keep_temp - - def set_qemu_src_dir(self, path: str) -> None: - """set the QEMU "source" directory. - - :param path: the path to the QEMU source directory - """ - self._env['QEMU_SRC_DIR'] = abspath(path) - - def set_qemu_bin_dir(self, path: str) -> None: - """set the QEMU executable directory. 
- - :param path: the path to the QEMU binary directory - """ - self._env['QEMU_BIN_DIR'] = abspath(path) - - def set_config_dir(self, path: str) -> None: - """Assign the configuration directory. - - :param path: the directory that contains the input configuration - file - """ - self._env['CONFIG'] = abspath(path) - - def set_udp_log_port(self, port: int) -> None: - """Assign the UDP logger port. - - :param port: the UDP logger port - """ - self._env['UDPLOG'] = f'{port}' - - def interpolate(self, value: Any) -> str: - """Interpolate a ${...} marker with shell substitutions or local - substitution. - - :param value: input value - :return: interpolated value as a string - """ - def replace(smo: re.Match) -> str: - name = smo.group(1) - val = self._env[name] if name in self._env \ - else environ.get(name, '') - if not val: - getLogger('pyot.file').warning("Unknown placeholder '%s'", - name) - return val - svalue = str(value) - nvalue = re.sub(r'\$\{(\w+)\}', replace, svalue) - if nvalue != svalue: - self._log.debug('Interpolate %s with %s', value, nvalue) - return nvalue - - def define(self, aliases: dict[str, Any]) -> None: - """Store interpolation variables into a local dictionary. - - Variable values are interpolated before being stored. - - :param aliases: an alias JSON (sub-)tree - """ - def replace(smo: re.Match) -> str: - name = smo.group(1) - val = self._env[name] if name in self._env \ - else environ.get(name, '') - return val - for name in aliases: - value = str(aliases[name]) - value = re.sub(r'\$\{(\w+)\}', replace, value) - if exists(value): - value = normpath(value) - aliases[name] = value - self._env[name.upper()] = value - self._log.debug('Store %s as %s', name.upper(), value) - - def define_transient(self, aliases: dict[str, Any]) -> None: - """Add short-lived aliases that are all discarded when cleanup_transient - is called. 
- - :param aliases: a dict of aliases - """ - for name in aliases: - name = name.upper() - # be sure not to make an existing non-transient variable transient - if name not in self._env: - self._transient_vars.add(name) - self.define(aliases) - - def cleanup_transient(self) -> None: - """Remove all transient variables.""" - for name in self._transient_vars: - if name in self._env: - del self._env[name] - self._transient_vars.clear() - - def interpolate_dirs(self, value: str, default: str) -> str: - """Resolve temporary directories, creating ones whenever required. - - :param value: the string with optional directory placeholders - :param default: the default name to use if the placeholder contains - none - :return: the interpolated string - """ - def replace(smo: re.Match) -> str: - name = smo.group(1) - if name == '': - name = default - if name not in self._dirs: - tmp_dir = mkdtemp(prefix='qemu_ot_dir_') - self._dirs[name] = tmp_dir - else: - tmp_dir = self._dirs[name] - if not tmp_dir.endswith(sep): - tmp_dir = f'{tmp_dir}{sep}' - return tmp_dir - nvalue = re.sub(r'\@\{(\w*)\}/', replace, value) - if nvalue != value: - self._log.debug('Interpolate %s with %s', value, nvalue) - return nvalue - - def delete_default_dir(self, name: str) -> None: - """Delete a temporary directory, if has been referenced. - - :param name: the name of the directory reference - """ - if name not in self._dirs: - return - if not isdir(self._dirs[name]): - return - try: - self._log.debug('Removing tree %s for %s', self._dirs[name], name) - rmtree(self._dirs[name]) - del self._dirs[name] - except OSError: - self._log.error('Cannot be removed dir %s for %s', self._dirs[name], - name) - - def create_eflash_image(self, app: Optional[str] = None, - bootloader: Optional[str] = None) -> str: - """Generate a temporary flash image file. 
- - :param app: optional path to the application or the rom extension - :param bootloader: optional path to a bootloader - :return: the full path to the temporary flash file - """ - # pylint: disable=import-outside-toplevel - from flashgen import FlashGen - gen = FlashGen(FlashGen.CHIP_ROM_EXT_SIZE_MAX if bool(bootloader) - else 0, True) - self._configure_logger(gen) - flash_fd, flash_file = mkstemp(suffix='.raw', prefix='qemu_ot_flash_') - self._in_fly.add(flash_file) - close(flash_fd) - self._log.debug('Create %s', basename(flash_file)) - try: - gen.open(flash_file) - if app: - with open(app, 'rb') as afp: - gen.store_rom_ext(0, afp) - if bootloader: - with open(bootloader, 'rb') as bfp: - gen.store_bootloader(0, bfp) - finally: - gen.close() - return flash_file - - def create_otp_image(self, vmem: str) -> str: - """Generate a temporary OTP image file. - - If a temporary file has already been generated for the input VMEM - file, use it instead. - - :param vmem: path to the VMEM source file - :return: the full path to the temporary OTP file - """ - # pylint: disable=import-outside-toplevel - if vmem in self._otp_files: - otp_file, ref_count = self._otp_files[vmem] - self._log.debug('Use existing %s', basename(otp_file)) - self._otp_files[vmem] = (otp_file, ref_count + 1) - return otp_file - from otptool import OtpImage - otp = OtpImage() - self._configure_logger(otp) - with open(vmem, 'rt', encoding='utf-8') as vfp: - otp.load_vmem(vfp, 'otp') - otp_fd, otp_file = mkstemp(suffix='.raw', prefix='qemu_ot_otp_') - self._log.debug('Create %s', basename(otp_file)) - self._in_fly.add(otp_file) - close(otp_fd) - with open(otp_file, 'wb') as rfp: - otp.save_raw(rfp) - self._otp_files[vmem] = (otp_file, 1) - return otp_file - - def delete_flash_image(self, filename: str) -> None: - """Delete a previously generated flash image file. 
- - :param filename: full path to the file to delete - """ - if not isfile(filename): - self._log.warning('No such flash image file %s', basename(filename)) - return - self._log.debug('Delete flash image file %s', basename(filename)) - unlink(filename) - self._in_fly.discard(filename) - - def delete_otp_image(self, filename: str) -> None: - """Delete a previously generated OTP image file. - - The file may be used by other tests, it is only deleted if it not - useful anymore. - - :param filename: full path to the file to delete - """ - if not isfile(filename): - self._log.warning('No such OTP image file %s', basename(filename)) - return - for vmem, (raw, count) in self._otp_files.items(): - if raw != filename: - continue - count -= 1 - if not count: - self._log.debug('Delete OTP image file %s', basename(filename)) - unlink(filename) - self._in_fly.discard(filename) - del self._otp_files[vmem] - else: - self._log.debug('Keep OTP image file %s', basename(filename)) - self._otp_files[vmem] = (raw, count) - break - - def _configure_logger(self, tool) -> None: - log = getLogger('pyot') - flog = tool.logger - # sub-tool get one logging level down to reduce log messages - floglevel = min(logging.CRITICAL, log.getEffectiveLevel() + 10) - flog.setLevel(floglevel) - for hdlr in log.handlers: - flog.addHandler(hdlr) - - def _cleanup(self) -> None: - """Remove a generated, temporary flash image file. - """ - removed: set[str] = set() - for tmpfile in self._in_fly: - if not isfile(tmpfile): - removed.add(tmpfile) - continue - if not self._keep_temp: - self._log.debug('Delete %s', basename(tmpfile)) - try: - unlink(tmpfile) - removed.add(tmpfile) - except OSError: - self._log.error('Cannot delete %s', basename(tmpfile)) - self._in_fly -= removed - if self._in_fly: - if not self._keep_temp: - raise OSError(f'{len(self._in_fly)} temp. 
files cannot be ' - f'removed') - for tmpfile in self._in_fly: - self._log.warning('Temporary file %s not suppressed', tmpfile) - removed: set[str] = set() - if not self._keep_temp: - for tmpname, tmpdir in self._dirs.items(): - if not isdir(tmpdir): - removed.add(tmpname) - continue - self._log.debug('Delete dir %s', tmpdir) - try: - rmtree(tmpdir) - removed.add(tmpname) - except OSError as exc: - self._log.error('Cannot delete %s: %s', tmpdir, exc) - for tmpname in removed: - del self._dirs[tmpname] - if self._dirs: - if not self._keep_temp: - raise OSError(f'{len(self._dirs)} temp. dirs cannot be removed') - for tmpdir in self._dirs.values(): - self._log.warning('Temporary dir %s not suppressed', tmpdir) - - -class QEMUContextWorker: - - """Background task for QEMU context. - """ - - def __init__(self, cmd: str, env: dict[str, str], - sync: Optional[Event] = None): - self._log = getLogger('pyot.cmd') - self._cmd = cmd - self._env = env - self._sync = sync - self._log_q = deque() - self._resume = False - self._thread: Optional[Thread] = None - self._ret = None - self._first_error = '' - - def run(self): - """Start the worker. - """ - self._thread = Thread(target=self._run, daemon=True) - self._thread.start() - - def stop(self) -> int: - """Stop the worker. - """ - if self._thread is None: - raise ValueError('Cannot stop idle worker') - self._resume = False - self._thread.join() - return self._ret - - def exit_code(self) -> Optional[int]: - """Return the exit code of the worker. - - :return: the exit code or None if the worked has not yet completed. - """ - return self._ret - - @property - def command(self) -> str: - """Return the executed command name. 
- """ - return normpath(self._cmd.split(' ', 1)[0]) - - @property - def first_error(self): - """Return the message of the first error, if any.""" - return self._first_error - - def _run(self): - self._resume = True - if self._sync and not self._sync.is_set(): - self._log.info('Waiting for sync') - while self._resume: - if self._sync.wait(0.1): - self._log.debug('Synchronized') - break - self._sync.clear() - # pylint: disable=consider-using-with - proc = Popen(self._cmd, bufsize=1, stdout=PIPE, stderr=PIPE, - shell=True, env=self._env, encoding='utf-8', - errors='ignore', text=True) - Thread(target=self._logger, args=(proc, True), daemon=True).start() - Thread(target=self._logger, args=(proc, False), daemon=True).start() - qemu_exec = f'{basename(self._cmd[0])}: ' - classifier = LogMessageClassifier(qemux=qemu_exec) - while self._resume: - while self._log_q: - err, qline = self._log_q.popleft() - if err: - if not self._first_error: - self._first_error = qline - loglevel = classifier.classify(qline) - self._log.log(loglevel, qline) - else: - self._log.debug(qline) - if proc.poll() is not None: - # worker has exited on its own - self._resume = False - break - try: - # give some time for the process to complete on its own - proc.wait(0.2) - self._ret = proc.returncode - self._log.debug("'%s' completed with '%d'", self.command, self._ret) - except TimeoutExpired: - # still executing - proc.terminate() - try: - # leave 1 second for QEMU to cleanly complete... 
- proc.wait(1.0) - self._ret = 0 - except TimeoutExpired: - # otherwise kill it - self._log.error("Force-killing command '%s'", self.command) - proc.kill() - self._ret = proc.returncode - # retrieve the remaining log messages - stdlog = self._log.info if self._ret else self._log.debug - try: - outs, errs = proc.communicate(timeout=0.1) - if not self._first_error: - self._first_error = errs.split('\n', 1)[0] - for sfp, logger in zip((outs, errs), (stdlog, self._log.error)): - for line in sfp.split('\n'): - line = line.strip() - if line: - logger(line) - except TimeoutExpired: - proc.kill() - if self._ret is None: - self._ret = proc.returncode - - def _logger(self, proc: Popen, err: bool): - # worker thread, blocking on VM stdout/stderr - stream = proc.stderr if err else proc.stdout - while proc.poll() is None: - line = stream.readline().strip() - if line: - self._log_q.append((err, line)) - - -class QEMUContext: - """Execution context for QEMU session. - - Execute commands before, while and after QEMU executes. - - :param test_name: the name of the test QEMU should execute - :param qfm: the file manager - :param qemu_cmd: the command and argument to execute QEMU - :param context: the contex configuration for the current test - """ - - def __init__(self, test_name: str, qfm: QEMUFileManager, - qemu_cmd: list[str], context: dict[str, list[str]], - env: Optional[dict[str, str]] = None): - # pylint: disable=too-many-arguments - self._clog = getLogger('pyot.ctx') - self._test_name = test_name - self._qfm = qfm - self._qemu_cmd = qemu_cmd - self._context = context - self._env = env or {} - self._workers: list[Popen] = [] - self._first_error: str = '' - - def execute(self, ctx_name: str, code: int = 0, - sync: Optional[Event] = None) -> None: - """Execute all commands, in order, for the selected context. - - Synchronous commands are executed in order. If one command fails, - subsequent commands are not executed. 
- - Background commands are started in order, but a failure does not - stop other commands. - - :param ctx_name: the name of the execution context - :param code: a previous error completion code, if any - :param sync: an optional synchronisation event to start up the - execution - """ - ctx = self._context.get(ctx_name, None) - if ctx_name == 'post' and code: - self._clog.info("Discard execution of '%s' commands after failure " - "of '%s'", ctx_name, self._test_name) - return - env = dict(environ) - env.update(self._env) - if self._qemu_cmd: - env['PATH'] = ':'.join((env['PATH'], dirname(self._qemu_cmd[0]))) - if ctx: - for cmd in ctx: - bkgnd = ctx_name == 'with' - if cmd.endswith('!'): - bkgnd = False - cmd = cmd[:-1] - elif cmd.endswith('&'): - bkgnd = True - cmd = cmd[:-1] - cmd = normpath(cmd.rstrip()) - if bkgnd: - if ctx_name == 'post': - raise ValueError(f"Cannot execute background command " - f"in [{ctx_name}] context for " - f"'{self._test_name}'") - rcmd = relpath(cmd) - if rcmd.startswith(pardir): - rcmd = cmd - rcmd = ' '.join(p if not p.startswith(sep) else basename(p) - for p in rcmd.split(' ')) - self._clog.info('Execute "%s" in background for [%s] ' - 'context', rcmd, ctx_name) - worker = QEMUContextWorker(cmd, env, sync) - worker.run() - self._workers.append(worker) - else: - if sync: - self._clog.debug('Synchronization ignored') - cmd = normpath(cmd.rstrip()) - rcmd = relpath(cmd) - if rcmd.startswith(pardir): - rcmd = cmd - rcmd = ' '.join(p if not p.startswith(sep) else basename(p) - for p in rcmd.split(' ')) - self._clog.info('Execute "%s" in sync for [%s] context', - rcmd, ctx_name) - # pylint: disable=consider-using-with - proc = Popen(cmd, bufsize=1, stdout=PIPE, stderr=PIPE, - shell=True, env=env, encoding='utf-8', - errors='ignore', text=True) - ret = 0 - try: - outs, errs = proc.communicate(timeout=5) - ret = proc.returncode - except TimeoutExpired: - proc.kill() - outs, errs = proc.communicate() - ret = proc.returncode - if not 
self._first_error: - self._first_error = errs.split('\n', 1)[0] - for sfp, logger in zip( - (outs, errs), - (self._clog.debug, - self._clog.error if ret else self._clog.info)): - for line in sfp.split('\n'): - line = line.strip() - if line: - logger(line) - if ret: - self._clog.error("Fail to execute '%s' command for " - "'%s'", cmd, self._test_name) - errmsg = self._first_error or \ - f'Cannot execute [{ctx_name}] command' - raise OSError(ret, errmsg) - if ctx_name == 'post': - if not self._qfm.keep_temporary: - self._qfm.delete_default_dir(self._test_name) - - def check_error(self) -> int: - """Check if any background worker exited in error. - - :return: a non-zero value on error - """ - for worker in self._workers: - ret = worker.exit_code() - if not ret: - continue - if not self._first_error: - self._first_error = worker.first_error - self._clog.error("%s exited with %d", worker.command, ret) - return ret - return 0 - - @property - def first_error(self): - """Return the message of the first error, if any.""" - return self._first_error - - def finalize(self) -> int: - """Terminate any running background command, in reverse order. - - :return: a non-zero value if one or more workers have reported an - error - """ - rets = {0} - while self._workers: - worker = self._workers.pop() - ret = worker.stop() - rets.add(ret) - if ret: - self._clog.warning("Command '%s' has failed for '%s': %d", - worker.command, self._test_name, ret) - if not self._first_error: - self._first_error = worker.first_error - return max(rets) - - -class QEMUExecuter: - """Test execution sequencer. 
- - :param qfm: file manager that tracks temporary files - :param config: configuration dictionary - :param args: parsed arguments - """ - - RESULT_MAP = { - 0: 'PASS', - 1: 'ERROR', - 6: 'ABORT', - 11: 'CRASH', - QEMUWrapper.GUEST_ERROR_OFFSET - 1: 'GUEST_ESC', - QEMUWrapper.GUEST_ERROR_OFFSET + 1: 'FAIL', - 98: 'UNEXP_SUCCESS', - 99: 'CONTEXT', - 124: 'TIMEOUT', - 125: 'DEADLOCK', - 126: 'CONTEXT', - QEMUWrapper.NO_MATCH_RETURN_CODE: 'UNKNOWN', - } - - DEFAULT_START_DELAY = 1.0 - """Default start up delay to let QEMU initialize before connecting the - virtual UART port. - """ - - DEFAULT_SERIAL_PORT = 'serial0' - """Default VCP name.""" - - LOG_SHORTCUTS = { - 'A': 'in_asm', - 'E': 'exec', - 'G': 'guest_errors', - 'I': 'int', - 'U': 'unimp', - } - """Shortcut names for QEMU log sources.""" - - def __init__(self, qfm: QEMUFileManager, config: dict[str, any], - args: Namespace): - self._log = getLogger('pyot.exec') - self._qfm = qfm - self._config = config - self._args = args - self._argdict: dict[str, Any] = {} - self._qemu_cmd: list[str] = [] - self._suffixes = [] - self._virtual_tests: dict[str, str] = {} - if hasattr(self._args, 'opts'): - setattr(self._args, 'global_opts', getattr(self._args, 'opts')) - setattr(self._args, 'opts', []) - else: - setattr(self._args, 'global_opts', []) - - def build(self) -> None: - """Build initial QEMU arguments. - - :raise ValueError: if some argument is invalid - """ - exec_info = self._build_qemu_command(self._args) - self._qemu_cmd = exec_info.command - self._argdict = dict(self._args.__dict__) - self._suffixes = [] - suffixes = self._config.get('suffixes', []) - if not isinstance(suffixes, list): - raise ValueError('Invalid suffixes sub-section') - self._suffixes.extend(suffixes) - - def enumerate_tests(self) -> Iterator[str]: - """Enumerate tests to execute. 
- """ - self._argdict = dict(self._args.__dict__) - for tst in sorted(self._build_test_list()): - ttype = self.guess_test_type(tst) - yield f'{basename(tst)} ({ttype})' - - def run(self, debug: bool, allow_no_test: bool) -> int: - """Execute all requested tests. - - :return: success or the code of the first encountered error - """ - log_classifiers = self._config.get('logclass', {}) - qot = QEMUWrapper(log_classifiers, debug) - ret = 0 - results = defaultdict(int) - result_file = self._argdict.get('result') - # pylint: disable=consider-using-with - cfp = open(result_file, 'wt', encoding='utf-8') if result_file else None - try: - csv = csv_writer(cfp) if cfp else None - if csv: - csv.writerow((x.title() for x in TestResult._fields)) - app = self._argdict.get('exec') - if app: - assert 'timeout' in self._argdict - timeout = int(float(self._argdict.get('timeout') * - float(self._argdict.get('timeout_factor', - DEFAULT_TIMEOUT_FACTOR)))) - self._log.debug('Execute %s', basename(self._argdict['exec'])) - adef = EasyDict(command=self._qemu_cmd, timeout=timeout, - start_delay=self.DEFAULT_START_DELAY) - ret, xtime, err = qot.run(adef) - results[ret] += 1 - sret = self.RESULT_MAP.get(ret, ret) - icount = self._argdict.get('icount') - if csv: - csv.writerow(TestResult(self.get_test_radix(app), sret, - xtime, icount, err)) - cfp.flush() - tests = self._build_test_list() - tcount = len(tests) - self._log.info('Found %d tests to execute', tcount) - if not tcount and not allow_no_test: - self._log.error('No test can be run') - return 1 - targs = None - temp_files = {} - for tpos, test in enumerate(tests, start=1): - self._log.info('[TEST %s] (%d/%d)', self.get_test_radix(test), - tpos, tcount) - try: - self._qfm.define_transient({ - 'UTPATH': test, - 'UTDIR': normpath(dirname(test)), - 'UTFILE': basename(test), - }) - test_name = self.get_test_radix(test) - exec_info = self._build_qemu_test_command(test) - exec_info.test_name = test_name - exec_info.context.execute('pre') - 
tret, xtime, err = qot.run(exec_info) - cret = exec_info.context.finalize() - if exec_info.expect_result != 0: - if tret == exec_info.expect_result: - self._log.info('QEMU failed with expected error, ' - 'assume success') - tret = 0 - elif tret == 0: - self._log.warning('QEMU success while expected ' - 'error %d, assume error', tret) - tret = 98 - if tret == 0 and cret != 0: - tret = 99 - if tret and not err: - err = exec_info.context.first_error - exec_info.context.execute('post', tret) - # pylint: disable=broad-except - except Exception as exc: - self._log.critical('%s', str(exc)) - if debug: - print(format_exc(chain=False), file=sys.stderr) - tret = 99 - xtime = 0.0 - err = str(exc) - finally: - self._qfm.cleanup_transient() - flush_memory_loggers(['pyot', 'pyot.vcp'], logging.INFO) - results[tret] += 1 - sret = self.RESULT_MAP.get(tret, tret) - try: - targs = exec_info.args - icount = self.get_namespace_arg(targs, 'icount') - except (AttributeError, KeyError): - icount = None - if csv: - csv.writerow(TestResult(test_name, sret, xtime, icount, - err)) - # want to commit result as soon as possible if some client - # is live-tracking progress on long test runs - cfp.flush() - else: - self._log.info('"%s" executed in %s (%s)', - test_name, xtime, sret) - self._cleanup_temp_files(temp_files) - finally: - if cfp: - cfp.close() - for kind in sorted(results): - self._log.info('%s count: %d', - self.RESULT_MAP.get(kind, kind), - results[kind]) - # sort by the largest occurence, discarding success - errors = sorted((x for x in results.items() if x[0]), - key=lambda x: -x[1]) - # overall return code is the most common error, or success otherwise - ret = errors[0][0] if errors else 0 - self._log.info('Total count: %d, overall result: %s', - sum(results.values()), - self.RESULT_MAP.get(ret, ret)) - return ret - - def get_test_radix(self, filename: str) -> str: - """Extract the radix name from a test pathname. 
- - :param filename: the path to the test executable - :return: the test name - """ - test_name = basename(filename).split('.')[0] - for suffix in self._suffixes: - if not test_name.endswith(suffix): - continue - return test_name[:-len(suffix)] - return test_name - - @classmethod - def get_namespace_arg(cls, args: Namespace, name: str) -> Optional[str]: - """Extract a value from a namespace. - - :param args: the namespace - :param name: the value's key - :return: the value if any - """ - return args.__dict__.get(name) - - @staticmethod - def flatten(lst: list) -> list: - """Flatten a list. - """ - return [item for sublist in lst for item in sublist] - - @staticmethod - def abspath(path: str) -> str: - """Build absolute path""" - if isabs(path): - return normpath(path) - return normpath(joinpath(getcwd(), path)) - - @staticmethod - def guess_test_type(filepath: str) -> str: - """Guess a test file type from its contents. - - :return: identified content - """ - with open(filepath, 'rb') as bfp: - header = bfp.read(4) - if header == b'\x7fELF': - return 'elf' - if header == b'OTPT': - return 'spiflash' - return 'bin' - - def _cleanup_temp_files(self, storage: dict[str, set[str]]) -> None: - if self._qfm.keep_temporary: - return - for kind, files in storage.items(): - delete_file = getattr(self._qfm, f'delete_{kind}_image') - for filename in files: - delete_file(filename) - - def _build_qemu_fw_args(self, args: Namespace) \ - -> tuple[str, Optional[str], list[str], Optional[str]]: - rom_exec = bool(args.rom_exec) - roms = args.rom or [] - multi_rom = (len(roms) + int(rom_exec)) > 1 - # generate pre-application ROM option - fw_args: list[str] = [] - machine = args.machine - variant = args.variant - chiplet_count = 1 - if variant: - machine = f'{machine},variant={variant}' - try: - chiplet_count = sum(int(x) - for x in re.split(r'[A-Za-z]', variant) - if x) - except ValueError: - self._log.warning('Unknown variant syntax %s', variant) - rom_counts: list[int] = [0] - for 
chip_id in range(chiplet_count): - rom_count = 0 - for rom in roms: - rom_path = self._qfm.interpolate(rom) - if not isfile(rom_path): - raise ValueError(f'Unable to find ROM file {rom_path}') - rom_ids = [] - if args.first_soc: - if chiplet_count == 1: - rom_ids.append(f'{args.first_soc}.') - else: - rom_ids.append(f'{args.first_soc}{chip_id}.') - rom_ids.append('rom') - if multi_rom: - rom_ids.append(f'{rom_count}') - rom_id = ''.join(rom_ids) - rom_opt = f'ot-rom_img,id={rom_id},file={rom_path}' - fw_args.extend(('-object', rom_opt)) - rom_count += 1 - rom_counts.append(rom_count) - rom_count = max(rom_counts) - xtype = None - if args.exec: - exec_path = self._virtual_tests.get(args.exec) - if not exec_path: - exec_path = self.abspath(args.exec) - xtype = self.guess_test_type(exec_path) - if xtype == 'spiflash': - fw_args.extend(('-drive', - f'if=mtd,id=spiflash,bus=0,format=raw,' - f'file={exec_path}')) - elif xtype == 'bin': - if args.embedded_flash is None: - raise ValueError(f'{xtype} test type not supported without ' - f'embedded-flash option') - else: - if xtype != 'elf': - raise ValueError(f'No support for test type: {xtype} ' - f'({basename(exec_path)})') - if rom_exec: - # generate ROM option(s) for the application itself - for chip in range(chiplet_count): - rom_id_parts = [] - if args.first_soc: - if chiplet_count == 1: - rom_id_parts.append(f'{args.first_soc}.') - else: - rom_id_parts.append(f'{args.first_soc}{chip}.') - rom_id_parts.append('rom') - if multi_rom: - rom_id_parts.append(f'{rom_count}') - rom_id = ''.join(rom_id_parts) - rom_opt = f'ot-rom_img,id={rom_id},file={exec_path}' - fw_args.extend(('-object', rom_opt)) - rom_count += 1 - else: - fw_args.extend(('-kernel', exec_path)) - else: - exec_path = None - return machine, xtype, fw_args, exec_path - - def _build_qemu_log_sources(self, args: Namespace) -> list[str]: - if not args.log: - return [] - log_args = [] - for arg in args.log: - if arg.lower() == arg: - log_args.append(arg) - 
continue - for upch in arg: - try: - logname = self.LOG_SHORTCUTS[upch] - except KeyError as exc: - raise ValueError(f"Unknown log name '{upch}'") from exc - log_args.append(logname) - return ['-d', ','.join(log_args)] - - def _build_qemu_vcp_args(self, args: Namespace) -> \ - tuple[list[str], dict[str, tuple[str, int]]]: - device = args.device - devdesc = device.split(':') - host = devdesc[0] - try: - port = int(devdesc[1]) - if not 0 < port < 65536: - raise ValueError(f'Invalid serial TCP port: {port}') - except IndexError as exc: - raise ValueError(f'TCP port not specified: {device}') from exc - except TypeError as exc: - raise ValueError(f'Invalid TCP serial device: {device}') from exc - mux = f'mux={"on" if args.muxserial else "off"}' - vcps = args.vcp or [self.DEFAULT_SERIAL_PORT] - vcp_args = ['-display', 'none'] - vcp_map = {} - for vix, vcp in enumerate(vcps): - vcp_map[vcp] = (host, port+vix) - vcp_args.extend(('-chardev', - f'socket,id={vcp},host={host},port={port+vix},' - f'{mux},server=on,wait=on')) - if vcp == self.DEFAULT_SERIAL_PORT: - vcp_args.extend(('-serial', 'chardev:serial0')) - return vcp_args, vcp_map - - def _build_qemu_command(self, args: Namespace, - opts: Optional[list[str]] = None) \ - -> EasyDict[str, Any]: - """Build QEMU command line from argparser values. 
- - :param args: the parsed arguments - :param opts: any QEMU-specific additional options - :return: a dictionary defining how to execute the command - """ - if args.qemu is None: - raise ValueError('QEMU path is not defined') - machine, xtype, fw_args, xexec = self._build_qemu_fw_args(args) - qemu_args = [args.qemu, '-M', machine] - if args.otcfg: - qemu_args.extend(('-readconfig', self.abspath(args.otcfg))) - qemu_args.extend(fw_args) - temp_files = defaultdict(set) - if all((args.otp, args.otp_raw)): - raise ValueError('OTP VMEM and RAW options are mutually exclusive') - if args.otp: - if not isfile(args.otp): - raise ValueError(f'No such OTP file: {args.otp}') - otp_file = self._qfm.create_otp_image(args.otp) - temp_files['otp'].add(otp_file) - qemu_args.extend(('-drive', - f'if=pflash,file={otp_file},format=raw')) - elif args.otp_raw: - otp_raw_path = self.abspath(args.otp_raw) - qemu_args.extend(('-drive', - f'if=pflash,file={otp_raw_path},format=raw')) - if args.flash: - if xtype == 'spiflash': - raise ValueError('Cannot use a flash file with a flash test') - if not isfile(args.flash): - raise ValueError(f'No such flash file: {args.flash}') - if any((args.exec, args.boot)): - raise ValueError('Flash file argument is mutually exclusive ' - 'with bootloader or rom extension') - flash_path = self.abspath(args.flash) - if args.embedded_flash is None: - raise ValueError('Embedded flash bus not defined') - qemu_args.extend(('-drive', f'if=mtd,id=eflash,' - f'bus={args.embedded_flash},' - f'file={flash_path},format=raw')) - elif any((xexec, args.boot)): - if xexec and not isfile(xexec): - raise ValueError(f'No such exec file: {xexec}') - if args.boot and not isfile(args.boot): - raise ValueError(f'No such bootloader file: {args.boot}') - if args.embedded_flash is not None: - flash_file = self._qfm.create_eflash_image(xexec, args.boot) - temp_files['flash'].add(flash_file) - qemu_args.extend(('-drive', f'if=mtd,id=eflash,' - f'bus={args.embedded_flash},' - 
f'file={flash_file},format=raw')) - if args.log_file: - qemu_args.extend(('-D', self.abspath(args.log_file))) - if args.trace: - # use a FileType to let argparser validate presence and type - args.trace.close() - qemu_args.extend(('-trace', - f'events={self.abspath(args.trace.name)}')) - qemu_args.extend(self._build_qemu_log_sources(args)) - if args.singlestep: - qemu_args.extend(('-accel', 'tcg,one-insn-per-tb=on')) - if 'icount' in args: - if args.icount is not None: - qemu_args.extend(('-icount', f'shift={args.icount}')) - try: - start_delay = float(getattr(args, 'start_delay') or - self.DEFAULT_START_DELAY) - except ValueError as exc: - raise ValueError(f'Invalid start up delay {args.start_delay}') \ - from exc - start_delay *= args.timeout_factor - trigger = getattr(args, 'trigger', '') - validate = getattr(args, 'validate', '') - if trigger and validate: - raise ValueError(f"{getattr(args, 'exec', '?')}: 'trigger' and " - f"'validate' are mutually exclusive") - vcp_args, vcp_map = self._build_qemu_vcp_args(args) - qemu_args.extend(vcp_args) - qemu_args.extend(args.global_opts or []) - if opts: - qemu_args.extend((str(o) for o in opts)) - return EasyDict(command=qemu_args, vcp_map=vcp_map, - tmpfiles=temp_files, start_delay=start_delay, - trigger=trigger, validate=validate) - - def _build_qemu_test_command(self, filename: str) -> EasyDict[str, Any]: - test_name = self.get_test_radix(filename) - args, opts, timeout, texp = self._build_test_args(test_name) - setattr(args, 'exec', filename) - exec_info = self._build_qemu_command(args, opts) - exec_info.pop('connection', None) - exec_info.args = args - exec_info.context = self._build_test_context(test_name) - exec_info.timeout = timeout - exec_info.expect_result = texp - return exec_info - - def _build_test_list(self, alphasort: bool = True) -> list[str]: - pathnames = set() - testdir = normpath(self._qfm.interpolate(self._config.get('testdir', - curdir))) - self._qfm.define({'testdir': testdir}) - cfilters = 
self._args.filter or [] - pfilters = [f for f in cfilters if not f.startswith('!')] - if not pfilters: - cfilters = ['*'] + cfilters - tfilters = ['*'] + pfilters - else: - tfilters = list(pfilters) - virttests = self._config.get('virtual', {}) - if not isinstance(virttests, dict): - raise ValueError('Invalid virtual tests definition') - vtests = {} - for vname, vpath in virttests.items(): - if not isinstance(vname, str): - raise ValueError(f"Invalid virtual test definition '{vname}'") - if sep in vname: - raise ValueError(f"Virtual test name cannot contain directory " - f"specifier: '{vname}'") - rpath = normpath(self._qfm.interpolate(vpath)) - if not isfile(rpath): - raise ValueError(f"Invalid virtual test '{vname}': " - f"missing file '{rpath}'") - vtests[vname] = rpath - self._virtual_tests.update(vtests) - inc_filters = self._build_config_list('include') - if inc_filters: - self._log.debug('Searching for tests from %s dir', testdir) - for path_filter in filter(None, inc_filters): - if testdir: - path_filter = joinpath(testdir, path_filter) - paths = set(glob(path_filter, recursive=True)) - for path in paths: - if isfile(path): - for tfilter in tfilters: - if fnmatchcase(self.get_test_radix(path), tfilter): - pathnames.add(path) - break - for vpath in vtests: - for tfilter in tfilters: - if fnmatchcase(self.get_test_radix(vpath), tfilter): - pathnames.add(vpath) - break - for testfile in self._enumerate_from('include_from'): - if not isfile(testfile): - raise ValueError(f'Unable to locate test file ' - f'"{testfile}"') - for tfilter in tfilters: - if fnmatchcase(self.get_test_radix(testfile), tfilter): - pathnames.add(testfile) - if not pathnames: - return [] - roms = self._argdict.get('rom', []) - pathnames -= {normpath(rom) for rom in roms} - xtfilters = [f[1:].strip() for f in cfilters if f.startswith('!')] - exc_filters = self._build_config_list('exclude') - xtfilters.extend(exc_filters) - if xtfilters: - for path_filter in filter(None, xtfilters): - if 
testdir: - path_filter = joinpath(testdir, path_filter) - paths = set(glob(path_filter, recursive=True)) - pathnames -= paths - vdiscards: set[str] = set() - for vpath in vtests: - if fnmatchcase(vpath, basename(path_filter)): - vdiscards.add(vpath) - pathnames -= vdiscards - pathnames -= set(self._enumerate_from('exclude_from')) - if alphasort: - return sorted(pathnames, key=basename) - return list(pathnames) - - def _enumerate_from(self, config_entry: str) -> Iterator[str]: - incf_filters = self._build_config_list(config_entry) - if incf_filters: - for incf in incf_filters: - incf = normpath(self._qfm.interpolate(incf)) - if not isfile(incf): - raise ValueError(f'Invalid test file: "{incf}"') - self._log.debug('Loading test list from %s', incf) - incf_dir = dirname(incf) - with open(incf, 'rt', encoding='utf-8') as ifp: - for testfile in ifp: - testfile = re.sub('#.*$', '', testfile).strip() - if not testfile: - continue - testfile = self._qfm.interpolate(testfile) - if not testfile.startswith(sep): - testfile = joinpath(incf_dir, testfile) - yield normpath(testfile) - - def _build_config_list(self, config_entry: str) -> list: - cfglist = [] - items = self._config.get(config_entry) - if not items: - return cfglist - if not isinstance(items, list): - raise ValueError(f'Invalid configuration file: ' - f'"{config_entry}" is not a list') - for item in items: - if isinstance(item, str): - cfglist.append(item) - continue - if isinstance(item, dict): - for dname, dval in item.items(): - try: - cond = bool(int(environ.get(dname, '0'))) - except (ValueError, TypeError): - cond = False - if not cond: - continue - if isinstance(dval, str): - dval = [dval] - if isinstance(dval, list): - for sitem in dval: - if isinstance(sitem, str): - cfglist.append(sitem) - return cfglist - - def _build_test_args(self, test_name: str) \ - -> tuple[Namespace, list[str], int, int]: - tests_cfg = self._config.get('tests', {}) - if not isinstance(tests_cfg, dict): - raise ValueError('Invalid 
tests sub-section') - kwargs = dict(self._args.__dict__) - test_cfg = tests_cfg.get(test_name, {}) - if test_cfg is None: - # does not default to an empty dict to differenciate empty from - # inexistent test configuration - self._log.debug('No configuration for test %s', test_name) - opts = None - else: - test_cfg = {k: v for k, v in test_cfg.items() - if k not in ('pre', 'post', 'with')} - self._log.debug('Using custom test config for %s', test_name) - discards = {k for k, v in test_cfg.items() if v == ''} - if discards: - test_cfg = dict(test_cfg) - for discard in discards: - del test_cfg[discard] - if discard in kwargs: - del kwargs[discard] - kwargs.update(test_cfg) - opts = kwargs.get('opts') - if opts and not isinstance(opts, list): - raise ValueError('fInvalid QEMU options for {test_name}') - opts = self.flatten([opt.split(' ') for opt in opts]) - opts = [self._qfm.interpolate(opt) for opt in opts] - opts = self.flatten([opt.split(' ') for opt in opts]) - opts = [self._qfm.interpolate_dirs(opt, test_name) for opt in opts] - timeout = float(kwargs.get('timeout', DEFAULT_TIMEOUT)) - tmfactor = float(kwargs.get('timeout_factor', DEFAULT_TIMEOUT_FACTOR)) - itimeout = int(timeout * tmfactor) - texpect = kwargs.get('expect', 0) - try: - texp = int(texpect) - except ValueError: - result_map = {v: k for k, v in self.RESULT_MAP.items()} - try: - texp = result_map[texpect.upper()] - except KeyError as exc: - raise ValueError(f'Unsupported expect: {texpect}') from exc - return Namespace(**kwargs), opts or [], itimeout, texp - - def _build_test_context(self, test_name: str) -> QEMUContext: - context = defaultdict(list) - tests_cfg = self._config.get('tests', {}) - test_cfg = tests_cfg.get(test_name, {}) - test_env = None - if test_cfg: - for ctx_name in ('pre', 'with', 'post'): - if ctx_name not in test_cfg: - continue - ctx = test_cfg[ctx_name] - if not isinstance(ctx, list): - raise ValueError(f'Invalid context "{ctx_name}" ' - f'for test {test_name}') - for pos, cmd 
in enumerate(ctx, start=1): - if not isinstance(cmd, str): - raise ValueError(f'Invalid command #{pos} in ' - f'"{ctx_name}" for test {test_name}') - cmd = re.sub(r'[\n\r]', ' ', cmd.strip()) - cmd = re.sub(r'\s{2,}', ' ', cmd) - cmd = self._qfm.interpolate(cmd) - cmd = self._qfm.interpolate_dirs(cmd, test_name) - context[ctx_name].append(cmd) - env = test_cfg.get('env') - if env: - if not isinstance(env, dict): - raise ValueError('Invalid context environment') - test_env = {k: self._qfm.interpolate(v) for k, v in env.items()} - return QEMUContext(test_name, self._qfm, self._qemu_cmd, dict(context), - test_env) def main(): @@ -1883,6 +113,10 @@ def main(): help='SPI flash image file') files.add_argument('-g', '--otcfg', metavar='file', help='configuration options for OpenTitan devices') + files.add_argument('-H', '--no-flash-header', action='store_const', + const=True, + help='application and/or bootloader files contain ' + 'no OT header') files.add_argument('-K', '--keep-tmp', action='store_true', help='Do not automatically remove temporary files ' 'and dirs on exit') @@ -1961,6 +195,7 @@ def main(): result_file = args.result log = configure_loggers(args.verbose, 'pyot', + -1, 'flashgen', 'elf', 'otp', 1, args.vcp_verbose or 0, 'pyot.vcp', name_width=30, ms=args.log_time, quiet=args.quiet,