From 27c76f251720b97e0899987a09e27f4ad63e7d9c Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Mon, 22 Sep 2025 17:03:23 -0500 Subject: [PATCH 01/15] stream benchmarks --- poetry.lock | 35 ++- pyproject.toml | 1 + tests/benchmark/__init__.py | 1 + tests/benchmark/conftest.py | 4 + tests/benchmark/test_analog_stream_readers.py | 88 +++++++ tests/benchmark/test_analog_stream_writers.py | 85 +++++++ .../benchmark/test_digital_stream_readers.py | 222 ++++++++++++++++++ .../benchmark/test_digital_stream_writers.py | 210 +++++++++++++++++ 8 files changed, 645 insertions(+), 1 deletion(-) create mode 100644 tests/benchmark/__init__.py create mode 100644 tests/benchmark/conftest.py create mode 100644 tests/benchmark/test_analog_stream_readers.py create mode 100644 tests/benchmark/test_analog_stream_writers.py create mode 100644 tests/benchmark/test_digital_stream_readers.py create mode 100644 tests/benchmark/test_digital_stream_writers.py diff --git a/poetry.lock b/poetry.lock index 44fff0b9..ecc09167 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2362,6 +2362,18 @@ files = [ ] markers = {main = "python_version >= \"3.13\" and extra == \"grpc\"", codegen = "python_version >= \"3.13\""} +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +description = "Get CPU info with pure Python" +optional = false +python-versions = "*" +groups = ["test"] +files = [ + {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, + {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, +] + [[package]] name = "pycodestyle" version = "2.9.1" @@ -2521,6 +2533,27 @@ tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-benchmark" +version = "5.1.0" +description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." 
+optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "pytest-benchmark-5.1.0.tar.gz", hash = "sha256:9ea661cdc292e8231f7cd4c10b0319e56a2118e2c09d9f50e1b3d150d2aca105"}, + {file = "pytest_benchmark-5.1.0-py3-none-any.whl", hash = "sha256:922de2dfa3033c227c96da942d1878191afa135a29485fb942e85dff1c592c89"}, +] + +[package.dependencies] +py-cpuinfo = "*" +pytest = ">=8.1" + +[package.extras] +aspect = ["aspectlib"] +elasticsearch = ["elasticsearch"] +histogram = ["pygal", "pygaljs", "setuptools"] + [[package]] name = "pytest-cov" version = "7.0.0" @@ -3243,4 +3276,4 @@ grpc = ["grpcio", "protobuf"] [metadata] lock-version = "2.1" python-versions = ">=3.9,<4.0" -content-hash = "0be294ec7ba2a497d90e43221ebf50c790c066089bc838b17d0c7de7b43c4997" +content-hash = "c6e65f2267f438dede07c5b61377556442bdc24ce68bb299e3b468c0583062d7" diff --git a/pyproject.toml b/pyproject.toml index 435d894b..ac2f31de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,6 +107,7 @@ types-grpcio = ">=1.0" [tool.poetry.group.test.dependencies] pytest = ">=7.2" +pytest-benchmark = ">=5.1" pytest-cov = ">=4.0" pytest-mock = ">=3.0" pykka = ">=3.0" diff --git a/tests/benchmark/__init__.py b/tests/benchmark/__init__.py new file mode 100644 index 00000000..60ac1715 --- /dev/null +++ b/tests/benchmark/__init__.py @@ -0,0 +1 @@ +"""Benchmarks for the nidaqmx package.""" diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py new file mode 100644 index 00000000..d28efb9a --- /dev/null +++ b/tests/benchmark/conftest.py @@ -0,0 +1,4 @@ +"""Import fixtures from component tests for benchmark tests.""" + +# Import all fixtures from component conftest.py to make them available for benchmark tests +from tests.component.conftest import * # noqa: F403, F401 diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py new file mode 100644 index 00000000..c148af01 --- /dev/null +++ b/tests/benchmark/test_analog_stream_readers.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import math + +import numpy +import pytest +from nitypes.waveform import AnalogWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +import nidaqmx +from nidaqmx.stream_readers._analog_multi_channel_reader import AnalogMultiChannelReader +from nidaqmx.stream_readers._analog_single_channel_reader import ( + AnalogSingleChannelReader, +) + + +@pytest.mark.benchmark(group="analog_stream_readers") +def test___analog_single_channel_reader___read_one_sample___1_sample( + benchmark: BenchmarkFixture, + ai_single_channel_task: nidaqmx.Task, +) -> None: + reader = AnalogSingleChannelReader(ai_single_channel_task.in_stream) + + benchmark(reader.read_one_sample) + + +@pytest.mark.benchmark(group="analog_stream_readers") +def test___analog_single_channel_reader___read_many_sample___1000_samples( + benchmark: BenchmarkFixture, + ai_single_channel_task: nidaqmx.Task, +) -> None: + reader = AnalogSingleChannelReader(ai_single_channel_task.in_stream) + samples_to_read = 1000 + data = numpy.full(samples_to_read, math.inf, dtype=numpy.float64) + + benchmark(reader.read_many_sample, data, samples_to_read) + + +@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC") +def test___analog_single_channel_reader___read_waveform___1000_samples( + benchmark: BenchmarkFixture, + ai_single_channel_task: nidaqmx.Task, +) -> None: + reader = AnalogSingleChannelReader(ai_single_channel_task.in_stream) + 
samples_to_read = 1000 + waveform = AnalogWaveform(samples_to_read) + + benchmark(reader.read_waveform, waveform, samples_to_read) + + +@pytest.mark.benchmark(group="analog_stream_readers") +def test___analog_multi_channel_reader___read_one_sample___1_sample( + benchmark: BenchmarkFixture, + ai_multi_channel_task: nidaqmx.Task, +) -> None: + reader = AnalogMultiChannelReader(ai_multi_channel_task.in_stream) + num_channels = 3 + data = numpy.full(num_channels, math.inf, dtype=numpy.float64) + + benchmark(reader.read_one_sample, data) + + +@pytest.mark.benchmark(group="analog_stream_readers") +def test___analog_multi_channel_reader___read_many_sample___1000_samples( + benchmark: BenchmarkFixture, + ai_multi_channel_task: nidaqmx.Task, +) -> None: + reader = AnalogMultiChannelReader(ai_multi_channel_task.in_stream) + num_channels = 3 + samples_to_read = 1000 + data = numpy.full((num_channels, samples_to_read), math.inf, dtype=numpy.float64) + + benchmark(reader.read_many_sample, data, samples_to_read) + + +@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") +def test___analog_multi_channel_reader___read_waveform___1000_samples( + benchmark: BenchmarkFixture, + ai_multi_channel_task: nidaqmx.Task, +) -> None: + reader = AnalogMultiChannelReader(ai_multi_channel_task.in_stream) + num_channels = 3 + samples_to_read = 1000 + waveforms = [AnalogWaveform(samples_to_read) for _ in range(num_channels)] + + benchmark(reader.read_waveforms, waveforms, samples_to_read) diff --git a/tests/benchmark/test_analog_stream_writers.py b/tests/benchmark/test_analog_stream_writers.py new file mode 100644 index 00000000..96b1a326 --- /dev/null +++ b/tests/benchmark/test_analog_stream_writers.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +import numpy +import pytest +from nitypes.waveform import AnalogWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +import nidaqmx +from nidaqmx.stream_writers._analog_multi_channel_writer import AnalogMultiChannelWriter +from nidaqmx.stream_writers._analog_single_channel_writer import ( + AnalogSingleChannelWriter, +) + + +@pytest.mark.benchmark(group="analog_stream_writers") +def test___analog_single_channel_writer___write_one_sample___1_sample( + benchmark: BenchmarkFixture, + ao_single_channel_task: nidaqmx.Task, +) -> None: + writer = AnalogSingleChannelWriter(ao_single_channel_task.out_stream) + + benchmark(writer.write_one_sample, 1.0) + + +@pytest.mark.benchmark(group="analog_stream_writers") +def test___analog_single_channel_writer___write_many_sample___100_samples( + benchmark: BenchmarkFixture, + ao_single_channel_task: nidaqmx.Task, +) -> None: + writer = AnalogSingleChannelWriter(ao_single_channel_task.out_stream) + samples_to_write = 100 + data = numpy.linspace(0.0, 1.0, num=samples_to_write, dtype=numpy.float64) + + benchmark(writer.write_many_sample, data) + + +@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") +def test___analog_single_channel_writer___write_waveform___100_samples( + benchmark: BenchmarkFixture, + ao_single_channel_task: nidaqmx.Task, +) -> None: + writer = AnalogSingleChannelWriter(ao_single_channel_task.out_stream) + num_samples = 100 + waveform = AnalogWaveform(num_samples) + + benchmark(writer.write_waveform, waveform) + + +@pytest.mark.benchmark(group="analog_stream_writers") +def test___analog_multi_channel_writer___write_one_sample___1_sample( + 
benchmark: BenchmarkFixture, + ao_multi_channel_task: nidaqmx.Task, +) -> None: + writer = AnalogMultiChannelWriter(ao_multi_channel_task.out_stream) + expected = [1.0, 1.0] + data = numpy.asarray(expected, dtype=numpy.float64) + + benchmark(writer.write_one_sample, data) + + +@pytest.mark.benchmark(group="analog_stream_writers") +def test___analog_multi_channel_writer___write_many_sample___100_samples( + benchmark: BenchmarkFixture, + ao_multi_channel_task: nidaqmx.Task, +) -> None: + writer = AnalogMultiChannelWriter(ao_multi_channel_task.out_stream) + num_channels = 2 + samples_to_write = 100 + data = numpy.full((num_channels, samples_to_write), 1.0, dtype=numpy.float64) + + benchmark(writer.write_many_sample, data) + + +@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") +def test___analog_multi_channel_writer___write_waveform___100_samples( + benchmark: BenchmarkFixture, + ao_multi_channel_task: nidaqmx.Task, +) -> None: + writer = AnalogMultiChannelWriter(ao_multi_channel_task.out_stream) + num_samples = 100 + waveform = [AnalogWaveform(num_samples), AnalogWaveform(num_samples)] + + benchmark(writer.write_waveforms, waveform) diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py new file mode 100644 index 00000000..805e2fc8 --- /dev/null +++ b/tests/benchmark/test_digital_stream_readers.py @@ -0,0 +1,222 @@ +from __future__ import annotations + +import numpy +import pytest +from nitypes.waveform import DigitalWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +import nidaqmx +from nidaqmx.stream_readers._digital_multi_channel_reader import ( + DigitalMultiChannelReader, +) +from nidaqmx.stream_readers._digital_single_channel_reader import ( + DigitalSingleChannelReader, +) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_single_channel_reader___read_one_sample_one_line___1_sample( + benchmark: BenchmarkFixture, + di_single_line_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_line_task.in_stream) + + benchmark(reader.read_one_sample_one_line) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_single_channel_reader___read_one_sample_multi_line___1_sample( + benchmark: BenchmarkFixture, + di_single_channel_multi_line_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_channel_multi_line_task.in_stream) + num_lines = 8 + sample = numpy.full(num_lines, False, dtype=numpy.bool_) + + benchmark(reader.read_one_sample_multi_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_single_channel_reader___read_many_sample_port_byte___256_samples( + benchmark: BenchmarkFixture, + di_single_channel_port_byte_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_channel_port_byte_task.in_stream) + samples_to_read = 256 + data = numpy.full(samples_to_read, numpy.iinfo(numpy.uint8).min, dtype=numpy.uint8) + + benchmark( + reader.read_many_sample_port_byte, data, number_of_samples_per_channel=samples_to_read + ) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_single_channel_reader___read_many_sample_port_uint32___256_samples( + benchmark: BenchmarkFixture, + di_single_channel_port_uint32_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_channel_port_uint32_task.in_stream) + samples_to_read = 256 + data = 
numpy.full(samples_to_read, numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) + + benchmark( + reader.read_many_sample_port_uint32, data, number_of_samples_per_channel=samples_to_read + ) + + +@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_single_line_reader___read_waveform___256_samples( + benchmark: BenchmarkFixture, + di_single_line_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_line_task.in_stream) + samples_to_read = 256 + waveform = DigitalWaveform(samples_to_read) + + benchmark(reader.read_waveform, waveform, samples_to_read) + + +@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_single_channel_multi_line_reader___read_waveform___256_samples( + benchmark: BenchmarkFixture, + di_single_channel_multi_line_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_channel_multi_line_task.in_stream) + samples_to_read = 256 + num_lines = 8 + waveform = DigitalWaveform(samples_to_read, num_lines) + + benchmark(reader.read_waveform, waveform, samples_to_read) + + +@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_single_channel_reader___read_waveform_port_byte___256_samples( + benchmark: BenchmarkFixture, + di_single_channel_port_byte_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_channel_port_byte_task.in_stream) + samples_to_read = 256 + num_lines = 8 + waveform = DigitalWaveform(samples_to_read, num_lines) + + benchmark(reader.read_waveform, waveform, samples_to_read) + + +@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_single_channel_reader___read_waveform_port_uint32___256_samples( + benchmark: BenchmarkFixture, + di_single_channel_port_uint32_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_single_channel_port_uint32_task.in_stream) + samples_to_read = 256 + num_lines = 32 + waveform = DigitalWaveform(samples_to_read, num_lines) + + benchmark(reader.read_waveform, waveform, samples_to_read) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_multi_channel_reader___read_one_sample_one_line___1_sample( + benchmark: BenchmarkFixture, + di_single_line_task: nidaqmx.Task, +) -> None: + reader = DigitalMultiChannelReader(di_single_line_task.in_stream) + sample = numpy.full(1, False, dtype=numpy.bool_) + + benchmark(reader.read_one_sample_one_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_multi_channel_reader___read_one_sample_multi_line___1_sample( + benchmark: BenchmarkFixture, + di_multi_channel_multi_line_task: nidaqmx.Task, +) -> None: + reader = DigitalMultiChannelReader(di_multi_channel_multi_line_task.in_stream) + num_channels = di_multi_channel_multi_line_task.number_of_channels + sample = numpy.full((num_channels, 1), False, dtype=numpy.bool_) + + benchmark(reader.read_one_sample_multi_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_multi_channel_reader___read_many_sample_port_byte___256_samples( + benchmark: BenchmarkFixture, + di_multi_channel_port_byte_task: nidaqmx.Task, +) -> None: + reader = 
DigitalMultiChannelReader(di_multi_channel_port_byte_task.in_stream) + num_channels = 2 + samples_to_read = 256 + data = numpy.full( + (num_channels, samples_to_read), numpy.iinfo(numpy.uint8).min, dtype=numpy.uint8 + ) + + benchmark( + reader.read_many_sample_port_byte, data, number_of_samples_per_channel=samples_to_read + ) + + +@pytest.mark.benchmark(group="digital_stream_readers") +def test___digital_multi_channel_reader___read_many_sample_port_uint32___256_samples( + benchmark: BenchmarkFixture, + di_multi_channel_port_uint32_task: nidaqmx.Task, +) -> None: + reader = DigitalMultiChannelReader(di_multi_channel_port_uint32_task.in_stream) + num_channels = 3 + samples_to_read = 256 + data = numpy.full( + (num_channels, samples_to_read), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32 + ) + + benchmark( + reader.read_many_sample_port_uint32, data, number_of_samples_per_channel=samples_to_read + ) + + +@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_multi_line_reader___read_waveform___256_samples( + benchmark: BenchmarkFixture, + di_multi_channel_multi_line_task: nidaqmx.Task, +) -> None: + reader = DigitalMultiChannelReader(di_multi_channel_multi_line_task.in_stream) + num_channels = 8 + samples_to_read = 2561 + waveforms = [DigitalWaveform(samples_to_read) for _ in range(num_channels)] + + benchmark(reader.read_waveforms, waveforms, samples_to_read) + + +@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_reader___read_waveform_port_byte___256_samples( + benchmark: BenchmarkFixture, + di_multi_channel_port_byte_task: nidaqmx.Task, +) -> None: + reader = DigitalMultiChannelReader(di_multi_channel_port_byte_task.in_stream) + num_channels = 2 + samples_to_read = 256 + num_lines = 8 + waveforms = [DigitalWaveform(samples_to_read, num_lines) for _ in range(num_channels)] + + benchmark(reader.read_waveforms, waveforms, samples_to_read) + + +@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_reader___read_waveform_port_uint32___256_samples( + benchmark: BenchmarkFixture, + di_multi_channel_port_uint32_task: nidaqmx.Task, +) -> None: + reader = DigitalMultiChannelReader(di_multi_channel_port_uint32_task.in_stream) + samples_to_read = 256 + waveforms = [ + DigitalWaveform(samples_to_read, 32), + DigitalWaveform(samples_to_read, 8), + DigitalWaveform(samples_to_read, 8), + ] + + benchmark(reader.read_waveforms, waveforms, samples_to_read) diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py new file mode 100644 index 00000000..fa8a0f56 --- /dev/null +++ b/tests/benchmark/test_digital_stream_writers.py @@ -0,0 +1,210 @@ +from __future__ import annotations + +import numpy +import nidaqmx +from nidaqmx.stream_writers._digital_single_channel_writer import DigitalSingleChannelWriter +from nidaqmx.stream_writers._digital_multi_channel_writer import DigitalMultiChannelWriter +import pytest +from pytest_benchmark.fixture import BenchmarkFixture + +from nitypes.waveform import DigitalWaveform + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_single_channel_writer___write_one_sample_one_line___1_sample( + benchmark: BenchmarkFixture, + do_single_line_task: nidaqmx.Task, 
+) -> None: + writer = DigitalSingleChannelWriter(do_single_line_task.out_stream) + + benchmark(writer.write_one_sample_one_line, True) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_single_channel_writer___write_one_sample_multi_line___1_sample( + benchmark: BenchmarkFixture, + do_single_channel_multi_line_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_single_channel_multi_line_task.out_stream) + num_lines = 8 + sample = numpy.full(num_lines, True, dtype=numpy.bool_) + + benchmark(writer.write_one_sample_multi_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_single_channel_writer___write_one_sample_port_byte___1_sample( + benchmark: BenchmarkFixture, + do_port1_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_port1_task.out_stream) + sample = numpy.uint8(1) + + benchmark(writer.write_one_sample_port_byte, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_single_channel_writer___write_one_sample_port_uint32___1_sample( + benchmark: BenchmarkFixture, + do_port0_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_port0_task.out_stream) + sample = numpy.uint32(1) + + benchmark(writer.write_one_sample_port_uint32, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_single_channel_writer___write_many_sample_port_byte___256_samples( + benchmark: BenchmarkFixture, + do_multi_channel_port_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_multi_channel_port_task.out_stream) + samples_to_write = 256 + data = numpy.full(samples_to_write, numpy.uint8(1), dtype=numpy.uint8) + + benchmark(writer.write_many_sample_port_byte, data) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_single_channel_writer___write_many_sample_port_uint32___256_samples( + benchmark: BenchmarkFixture, + do_port1_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_port1_task.out_stream) + samples_to_write = 256 + data = numpy.full(samples_to_write, numpy.uint32(1), dtype=numpy.uint32) + + benchmark(writer.write_many_sample_port_uint32, data) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_one_sample_one_line___1_sample( + benchmark: BenchmarkFixture, + do_single_line_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_single_line_task.out_stream) + sample = numpy.array([True], dtype=numpy.bool_) + + benchmark(writer.write_one_sample_one_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_one_sample_multi_line___1_sample( + benchmark: BenchmarkFixture, + do_multi_channel_multi_line_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream) + sample = numpy.full((2, 1), True, dtype=numpy.bool_) + + benchmark(writer.write_one_sample_multi_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_one_sample_port_byte___1_sample( + benchmark: BenchmarkFixture, + do_multi_channel_port_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) + sample = numpy.array([numpy.uint8(1), numpy.uint8(1)], dtype=numpy.uint8) + + benchmark(writer.write_one_sample_port_byte, sample) + + 
+@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_many_sample_port_byte___256_samples( + benchmark: BenchmarkFixture, + do_multi_channel_port_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) + num_channels = 2 + samples_to_write = 256 + data = numpy.full( + (num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8 + ) + + benchmark(writer.write_many_sample_port_byte, data) + + +@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_single_line_writer___write_waveform___256_samples( + benchmark: BenchmarkFixture, + do_single_line_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_single_line_task.out_stream) + samples_to_write = 256 + waveform = DigitalWaveform(samples_to_write) + + benchmark(writer.write_waveform, waveform) + + +@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_single_channel_multi_line_writer___write_waveform___256_samples( + benchmark: BenchmarkFixture, + do_single_channel_multi_line_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_single_channel_multi_line_task.out_stream) + samples_to_write = 256 + num_lines = 8 + waveform = DigitalWaveform(samples_to_write, num_lines) + + benchmark(writer.write_waveform, waveform) + + +@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_single_channel_writer___write_waveform_port_byte___256_samples( + benchmark: BenchmarkFixture, + do_port1_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_port1_task.out_stream) + samples_to_write = 256 + num_lines = 8 + waveform = DigitalWaveform(samples_to_write, num_lines) + + benchmark(writer.write_waveform, waveform) + + +@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_single_channel_writer___write_waveform_port_uint32___256_samples( + benchmark: BenchmarkFixture, + do_port0_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_port0_task.out_stream) + samples_to_write = 256 + num_lines = 32 + waveform = DigitalWaveform(samples_to_write, num_lines) + + benchmark(writer.write_waveform, waveform) + + +@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_multi_line_writer___write_waveforms___256_samples( + benchmark: BenchmarkFixture, + do_multi_channel_multi_line_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream) + num_channels = 8 + samples_to_write = 256 + waveforms = [DigitalWaveform(samples_to_write) for _ in range(num_channels)] + + benchmark(writer.write_waveforms, waveforms) + + +@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_writer___write_waveforms_port_byte___256_samples( + benchmark: BenchmarkFixture, + do_multi_channel_port_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) + num_channels = 2 + samples_to_write = 
256 + num_lines = 8 + waveforms = [DigitalWaveform(samples_to_write, num_lines) for _ in range(num_channels)] + + benchmark(writer.write_waveforms, waveforms) \ No newline at end of file From 0316ebdf18e5686c1be6c46b3e1cc3cecbf44412 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Tue, 23 Sep 2025 09:43:32 -0500 Subject: [PATCH 02/15] cleanup --- CONTRIBUTING.md | 13 ++++++++++++ pyproject.toml | 4 +++- .../benchmark/test_digital_stream_writers.py | 20 ++++++++++--------- 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a2fc3dfd..5f9f6a07 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -75,6 +75,19 @@ $ poetry run tox This requires you to have all the Python interpreters supported by **nidaqmx** installed on your machine. +# Benchmarks + +Benchmark tests are not run by default when you run pytest. To run the benchmarks, use this command: + +```sh +# Run the benchmarks +# Compare benchmark before/after a change +# see https://pytest-benchmark.readthedocs.io/en/latest/comparing.html +# Run 1: --benchmark-save=some-name +# Run N: --benchmark-compare=0001 +$ poetry run pytest -v tests/benchmark +``` + # Building Documentation To build the documentation install the optional docs packages and run sphinx. For example: diff --git a/pyproject.toml b/pyproject.toml index ac2f31de..e427d0f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -136,7 +136,7 @@ application-import-names = "nidaqmx" [tool.pytest.ini_options] addopts = "--doctest-modules --strict-markers" filterwarnings = ["always::ImportWarning", "always::ResourceWarning"] -testpaths = ["tests"] +testpaths = ["tests/acceptance", "tests/component", "tests/legacy", "tests/unit"] markers = [ # Defines custom markers used by nidaqmx tests. Prevents PytestUnknownMarkWarning. 
"library_only(reason=...): run the test with only the library interpreter implementation.", @@ -185,6 +185,8 @@ module = [ "importlib_metadata", "mako.*", "nidaqmx.*", + # https://github.com/ionelmc/pytest-benchmark/issues/212 - Add type annotations + "pytest_benchmark.*", ] ignore_missing_imports = true diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py index fa8a0f56..aef57f8c 100644 --- a/tests/benchmark/test_digital_stream_writers.py +++ b/tests/benchmark/test_digital_stream_writers.py @@ -1,13 +1,17 @@ from __future__ import annotations import numpy -import nidaqmx -from nidaqmx.stream_writers._digital_single_channel_writer import DigitalSingleChannelWriter -from nidaqmx.stream_writers._digital_multi_channel_writer import DigitalMultiChannelWriter import pytest +from nitypes.waveform import DigitalWaveform from pytest_benchmark.fixture import BenchmarkFixture -from nitypes.waveform import DigitalWaveform +import nidaqmx +from nidaqmx.stream_writers._digital_multi_channel_writer import ( + DigitalMultiChannelWriter, +) +from nidaqmx.stream_writers._digital_single_channel_writer import ( + DigitalSingleChannelWriter, +) @pytest.mark.benchmark(group="digital_stream_writers") @@ -95,7 +99,7 @@ def test___digital_multi_channel_writer___write_one_sample_multi_line___1_sample do_multi_channel_multi_line_task: nidaqmx.Task, ) -> None: writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream) - sample = numpy.full((2, 1), True, dtype=numpy.bool_) + sample = numpy.full((8, 1), True, dtype=numpy.bool_) benchmark(writer.write_one_sample_multi_line, sample) @@ -119,9 +123,7 @@ def test___digital_multi_channel_writer___write_many_sample_port_byte___256_samp writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) num_channels = 2 samples_to_write = 256 - data = numpy.full( - (num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8 - ) + data = numpy.full((num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8) benchmark(writer.write_many_sample_port_byte, data) @@ -207,4 +209,4 @@ def test___digital_multi_channel_writer___write_waveforms_port_byte___256_sample num_lines = 8 waveforms = [DigitalWaveform(samples_to_write, num_lines) for _ in range(num_channels)] - benchmark(writer.write_waveforms, waveforms) \ No newline at end of file + benchmark(writer.write_waveforms, waveforms) From 0b7c6d2843fb4527debeda0235f8e44786dc4759 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Tue, 23 Sep 2025 10:06:41 -0500 Subject: [PATCH 03/15] cleanup --- .../benchmark/test_digital_stream_readers.py | 4 +- .../benchmark/test_digital_stream_writers.py | 98 +++++++++---------- 2 files changed, 51 insertions(+), 51 deletions(-) diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py index 805e2fc8..32f928a1 100644 --- a/tests/benchmark/test_digital_stream_readers.py +++ b/tests/benchmark/test_digital_stream_readers.py @@ -66,7 +66,7 @@ def test___digital_single_channel_reader___read_many_sample_port_uint32___256_sa @pytest.mark.benchmark(group="digital_stream_readers") @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_line_reader___read_waveform___256_samples( +def test___digital_single_channel_reader___read_waveform___256_samples( benchmark: BenchmarkFixture, di_single_line_task: nidaqmx.Task, ) -> None: @@ -178,7 +178,7 @@ def 
test___digital_multi_channel_reader___read_many_sample_port_uint32___256_sam @pytest.mark.benchmark(group="digital_stream_readers") @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_multi_line_reader___read_waveform___256_samples( +def test___digital_multi_channel_reader___read_waveform_multi_line___256_samples( benchmark: BenchmarkFixture, di_multi_channel_multi_line_task: nidaqmx.Task, ) -> None: diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py index aef57f8c..1d5054d6 100644 --- a/tests/benchmark/test_digital_stream_writers.py +++ b/tests/benchmark/test_digital_stream_writers.py @@ -82,55 +82,9 @@ def test___digital_single_channel_writer___write_many_sample_port_uint32___256_s benchmark(writer.write_many_sample_port_uint32, data) -@pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_multi_channel_writer___write_one_sample_one_line___1_sample( - benchmark: BenchmarkFixture, - do_single_line_task: nidaqmx.Task, -) -> None: - writer = DigitalMultiChannelWriter(do_single_line_task.out_stream) - sample = numpy.array([True], dtype=numpy.bool_) - - benchmark(writer.write_one_sample_one_line, sample) - - -@pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_multi_channel_writer___write_one_sample_multi_line___1_sample( - benchmark: BenchmarkFixture, - do_multi_channel_multi_line_task: nidaqmx.Task, -) -> None: - writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream) - sample = numpy.full((8, 1), True, dtype=numpy.bool_) - - benchmark(writer.write_one_sample_multi_line, sample) - - -@pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_multi_channel_writer___write_one_sample_port_byte___1_sample( - benchmark: BenchmarkFixture, - do_multi_channel_port_task: nidaqmx.Task, -) -> None: - writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) - sample = numpy.array([numpy.uint8(1), numpy.uint8(1)], dtype=numpy.uint8) - - benchmark(writer.write_one_sample_port_byte, sample) - - -@pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_multi_channel_writer___write_many_sample_port_byte___256_samples( - benchmark: BenchmarkFixture, - do_multi_channel_port_task: nidaqmx.Task, -) -> None: - writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) - num_channels = 2 - samples_to_write = 256 - data = numpy.full((num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8) - - benchmark(writer.write_many_sample_port_byte, data) - - @pytest.mark.benchmark(group="digital_stream_writers") @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_line_writer___write_waveform___256_samples( +def test___digital_single_channel_writer___write_waveform___256_samples( benchmark: BenchmarkFixture, do_single_line_task: nidaqmx.Task, ) -> None: @@ -143,7 +97,7 @@ def test___digital_single_line_writer___write_waveform___256_samples( @pytest.mark.benchmark(group="digital_stream_writers") @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_multi_line_writer___write_waveform___256_samples( +def test___digital_single_channel_writer___write_waveform_multi_line___256_samples( benchmark: BenchmarkFixture, do_single_channel_multi_line_task: nidaqmx.Task, ) -> None: @@ -183,9 +137,55 @@ def 
test___digital_single_channel_writer___write_waveform_port_uint32___256_samp benchmark(writer.write_waveform, waveform) +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_one_sample_one_line___1_sample( + benchmark: BenchmarkFixture, + do_single_line_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_single_line_task.out_stream) + sample = numpy.array([True], dtype=numpy.bool_) + + benchmark(writer.write_one_sample_one_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_one_sample_multi_line___1_sample( + benchmark: BenchmarkFixture, + do_multi_channel_multi_line_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream) + sample = numpy.full((8, 1), True, dtype=numpy.bool_) + + benchmark(writer.write_one_sample_multi_line, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_one_sample_port_byte___1_sample( + benchmark: BenchmarkFixture, + do_multi_channel_port_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) + sample = numpy.array([numpy.uint8(1), numpy.uint8(1)], dtype=numpy.uint8) + + benchmark(writer.write_one_sample_port_byte, sample) + + +@pytest.mark.benchmark(group="digital_stream_writers") +def test___digital_multi_channel_writer___write_many_sample_port_byte___256_samples( + benchmark: BenchmarkFixture, + do_multi_channel_port_task: nidaqmx.Task, +) -> None: + writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) + num_channels = 2 + samples_to_write = 256 + data = numpy.full((num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8) + + benchmark(writer.write_many_sample_port_byte, data) + + @pytest.mark.benchmark(group="digital_stream_writers") @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_multi_line_writer___write_waveforms___256_samples( +def test___digital_multi_channel_writer___write_waveform_multi_lines___256_samples( benchmark: BenchmarkFixture, do_multi_channel_multi_line_task: nidaqmx.Task, ) -> None: From 2d428cd3cc37ff9348851b9f97780784bff27251 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Tue, 23 Sep 2025 10:38:04 -0500 Subject: [PATCH 04/15] fix sample count --- tests/benchmark/test_digital_stream_readers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py index 32f928a1..72557fa9 100644 --- a/tests/benchmark/test_digital_stream_readers.py +++ b/tests/benchmark/test_digital_stream_readers.py @@ -184,7 +184,7 @@ def test___digital_multi_channel_reader___read_waveform_multi_line___256_samples ) -> None: reader = DigitalMultiChannelReader(di_multi_channel_multi_line_task.in_stream) num_channels = 8 - samples_to_read = 2561 + samples_to_read = 256 waveforms = [DigitalWaveform(samples_to_read) for _ in range(num_channels)] benchmark(reader.read_waveforms, waveforms, samples_to_read) From 482e1d7288484ff0d652e40960bf8bd701182735 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Tue, 23 Sep 2025 16:47:38 -0500 Subject: [PATCH 05/15] rework analog reader benchmarks --- tests/benchmark/test_analog_stream_readers.py | 114 ++++++++++++++---- 1 file changed, 88 insertions(+), 26 deletions(-) diff --git 
a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py index c148af01..76f867a4 100644 --- a/tests/benchmark/test_analog_stream_readers.py +++ b/tests/benchmark/test_analog_stream_readers.py @@ -7,67 +7,120 @@ from nitypes.waveform import AnalogWaveform from pytest_benchmark.fixture import BenchmarkFixture -import nidaqmx +from nidaqmx import Task +from nidaqmx.constants import AcquisitionType, ReadRelativeTo, Edge, WaveformAttributeMode from nidaqmx.stream_readers._analog_multi_channel_reader import AnalogMultiChannelReader from nidaqmx.stream_readers._analog_single_channel_reader import ( AnalogSingleChannelReader, ) +from nidaqmx.system import Device + +def configure_ai_task( + task: Task, + sim_6363_device: Device, + num_channels: int, + num_samples: int, +) -> None: + """Configure an AI task for benchmarking.""" + channel_names = [chan.name for chan in sim_6363_device.ai_physical_chans[:num_channels]] + physical_channel_string = ",".join(channel_names) + task.ai_channels.add_ai_voltage_chan( + physical_channel_string, + min_val=-5.0, + max_val=5.0, + ) + task.timing.cfg_samp_clk_timing( + rate=25000.0, active_edge=Edge.RISING, sample_mode=AcquisitionType.FINITE, samps_per_chan=num_channels * num_samples * 2 + ) + task.start() + task.wait_until_done(timeout=10.0) + task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE @pytest.mark.benchmark(group="analog_stream_readers") -def test___analog_single_channel_reader___read_one_sample___1_sample( +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1]) +def test___analog_single_channel_reader___read_one_sample( benchmark: BenchmarkFixture, - ai_single_channel_task: nidaqmx.Task, + task: Task, + sim_6363_device: Device, + num_channels: int, + num_samples: int ) -> None: - reader = AnalogSingleChannelReader(ai_single_channel_task.in_stream) + configure_ai_task(task, sim_6363_device, num_channels, num_samples) + reader = AnalogSingleChannelReader(task.in_stream) benchmark(reader.read_one_sample) @pytest.mark.benchmark(group="analog_stream_readers") -def test___analog_single_channel_reader___read_many_sample___1000_samples( +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [2, 1000]) +def test___analog_single_channel_reader___read_many_sample( benchmark: BenchmarkFixture, - ai_single_channel_task: nidaqmx.Task, + task: Task, + sim_6363_device: Device, + num_channels: int, + num_samples: int ) -> None: - reader = AnalogSingleChannelReader(ai_single_channel_task.in_stream) - samples_to_read = 1000 - data = numpy.full(samples_to_read, math.inf, dtype=numpy.float64) + configure_ai_task(task, sim_6363_device, num_channels, num_samples) + reader = AnalogSingleChannelReader(task.in_stream) + data = numpy.full(num_samples, math.inf, dtype=numpy.float64) - benchmark(reader.read_many_sample, data, samples_to_read) + benchmark(reader.read_many_sample, data, num_samples) @pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) @pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC") -def test___analog_single_channel_reader___read_waveform___1000_samples( +def test___analog_single_channel_reader___read_waveform( benchmark: BenchmarkFixture, - ai_single_channel_task: nidaqmx.Task, + task: Task, + sim_6363_device: Device, + num_channels: int, + 
num_samples: int, + waveform_attribute_mode: WaveformAttributeMode ) -> None: - reader = AnalogSingleChannelReader(ai_single_channel_task.in_stream) - samples_to_read = 1000 - waveform = AnalogWaveform(samples_to_read) + configure_ai_task(task, sim_6363_device, num_channels, num_samples) + task.in_stream.waveform_attribute_mode = waveform_attribute_mode + reader = AnalogSingleChannelReader(task.in_stream) + waveform = AnalogWaveform(num_samples) - benchmark(reader.read_waveform, waveform, samples_to_read) + benchmark(reader.read_waveform, waveform, num_samples) @pytest.mark.benchmark(group="analog_stream_readers") -def test___analog_multi_channel_reader___read_one_sample___1_sample( +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_multi_channel_reader___read_one_sample( benchmark: BenchmarkFixture, - ai_multi_channel_task: nidaqmx.Task, + task: Task, + sim_6363_device: Device, + num_channels: int, + num_samples: int ) -> None: - reader = AnalogMultiChannelReader(ai_multi_channel_task.in_stream) - num_channels = 3 + configure_ai_task(task, sim_6363_device, num_channels, num_samples) + reader = AnalogMultiChannelReader(task.in_stream) data = numpy.full(num_channels, math.inf, dtype=numpy.float64) benchmark(reader.read_one_sample, data) @pytest.mark.benchmark(group="analog_stream_readers") -def test___analog_multi_channel_reader___read_many_sample___1000_samples( +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_multi_channel_reader___read_many_sample( benchmark: BenchmarkFixture, - ai_multi_channel_task: nidaqmx.Task, + task: Task, + sim_6363_device: Device, + num_channels: int, + num_samples: int ) -> None: - reader = AnalogMultiChannelReader(ai_multi_channel_task.in_stream) num_channels = 3 + configure_ai_task(task, sim_6363_device, num_channels, num_samples) + reader = AnalogMultiChannelReader(task.in_stream) samples_to_read = 1000 data = numpy.full((num_channels, samples_to_read), math.inf, dtype=numpy.float64) @@ -75,13 +128,22 @@ def test___analog_multi_channel_reader___read_many_sample___1000_samples( @pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) @pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") -def test___analog_multi_channel_reader___read_waveform___1000_samples( +def test___analog_multi_channel_reader___read_waveform( benchmark: BenchmarkFixture, - ai_multi_channel_task: nidaqmx.Task, + task: Task, + sim_6363_device: Device, + num_channels: int, + num_samples: int, + waveform_attribute_mode: WaveformAttributeMode ) -> None: - reader = AnalogMultiChannelReader(ai_multi_channel_task.in_stream) num_channels = 3 + configure_ai_task(task, sim_6363_device, num_channels, num_samples) + task.in_stream.waveform_attribute_mode = waveform_attribute_mode + reader = AnalogMultiChannelReader(task.in_stream) samples_to_read = 1000 waveforms = [AnalogWaveform(samples_to_read) for _ in range(num_channels)] From a5c033c16fec225f3defd030a91bb6f7f012f858 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Wed, 24 Sep 2025 16:26:51 -0500 Subject: [PATCH 06/15] rework all stream benchmarks --- pyproject.toml | 1 + tests/benchmark/conftest.py | 290 +++++++++++++++++- tests/benchmark/test_analog_stream_readers.py | 82 +---- 
tests/benchmark/test_analog_stream_writers.py | 70 +++-- .../benchmark/test_digital_stream_readers.py | 215 ++++++------- .../benchmark/test_digital_stream_writers.py | 260 +++++++++------- 6 files changed, 605 insertions(+), 313 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e427d0f2..71162162 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -199,6 +199,7 @@ warn_unused_ignores = false typeCheckingMode = "basic" reportArgumentType = false reportAttributeAccessIssue = false +reportGeneralTypeIssues = false reportInvalidTypeForm = false reportOperatorIssue = false reportOptionalIterable = false diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py index d28efb9a..189862bc 100644 --- a/tests/benchmark/conftest.py +++ b/tests/benchmark/conftest.py @@ -1,4 +1,288 @@ -"""Import fixtures from component tests for benchmark tests.""" +"""Fixtures for benchmark tests.""" -# Import all fixtures from component conftest.py to make them available for benchmark tests -from tests.component.conftest import * # noqa: F403, F401 +from __future__ import annotations + +import pytest + +from nidaqmx import Task +from nidaqmx.constants import ( + AcquisitionType, + Edge, + LineGrouping, + ReadRelativeTo, + TaskMode, +) +from nidaqmx.system import Device + + +@pytest.fixture +def ai_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure an AI task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + + channel_names = [chan.name for chan in sim_6363_device.ai_physical_chans[:num_channels]] + physical_channel_string = ",".join(channel_names) + task.ai_channels.add_ai_voltage_chan( + physical_channel_string, + min_val=-5.0, + max_val=5.0, + ) + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_channels * num_samples * 2, + ) + task.start() + task.wait_until_done(timeout=10.0) + task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + return task + + +@pytest.fixture +def ao_single_sample_benchmark_task( + task: Task, + real_x_series_multiplexed_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a single-sample AO task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + for chan in range(num_channels): + task.ao_channels.add_ao_voltage_chan( + real_x_series_multiplexed_device.ao_physical_chans[chan].name, + min_val=-10.0, + max_val=10.0, + ) + + return task + + +@pytest.fixture +def ao_multi_sample_benchmark_task( + task: Task, + real_x_series_multiplexed_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered AO task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + for chan in range(num_channels): + task.ao_channels.add_ao_voltage_chan( + real_x_series_multiplexed_device.ao_physical_chans[chan].name, + min_val=-10.0, + max_val=10.0, + ) + + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_channels * num_samples * 2, + ) + task.out_stream.output_buf_size = num_channels * num_samples * 2 + task.control(TaskMode.TASK_COMMIT) + task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + return task + + +@pytest.fixture +def 
di_single_sample_single_channel_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a single-sample DI task for benchmarking.""" + num_lines = request.node.callspec.params.get("num_lines", 1) + + channel_names = [chan.name for chan in sim_6363_device.di_lines[:num_lines]] + physical_channel_string = ",".join(channel_names) + task.di_channels.add_di_chan( + physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + task.start() + task.wait_until_done(timeout=10.0) + + return task + + +@pytest.fixture +def di_single_sample_single_line_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a single-sample DI task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + + channel_names = [chan.name for chan in sim_6363_device.di_lines[:num_channels]] + physical_channel_string = ",".join(channel_names) + task.di_channels.add_di_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) + task.start() + task.wait_until_done(timeout=10.0) + + return task + + +@pytest.fixture +def di_multi_sample_port_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DI task for benchmarking.""" + num_samples = request.node.callspec.params.get("num_samples", 1) + + # port 0 is the only port that supports buffered operations + task.di_channels.add_di_chan( + sim_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_samples * 2, + ) + task.start() + task.wait_until_done(timeout=10.0) + task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + return task + + +@pytest.fixture +def di_multi_sample_lines_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DI task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + num_lines = request.node.callspec.params.get("num_lines", 1) + + for chan in range(num_channels): + line_names = [ + chan.name + for chan in sim_6363_device.di_lines[chan * num_lines : (chan + 1) * num_lines] + ] + physical_channel_string = ",".join(line_names) + task.di_channels.add_di_chan( + physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_channels * num_samples * 2, + ) + task.start() + task.wait_until_done(timeout=10.0) + task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + return task + + +@pytest.fixture +def do_single_sample_single_channel_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a single-sample DO task for benchmarking.""" + num_lines = request.node.callspec.params.get("num_lines", 1) + + channel_names = [chan.name for chan in sim_6363_device.do_lines[:num_lines]] + physical_channel_string = ",".join(channel_names) + task.do_channels.add_do_chan( + physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + task.start() + task.wait_until_done(timeout=10.0) + + return task + + +@pytest.fixture +def 
do_single_sample_single_line_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a single-sample DO task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + + channel_names = [chan.name for chan in sim_6363_device.do_lines[:num_channels]] + physical_channel_string = ",".join(channel_names) + task.do_channels.add_do_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) + task.start() + task.wait_until_done(timeout=10.0) + + return task + + +@pytest.fixture +def do_multi_sample_port_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DO task for benchmarking.""" + num_samples = request.node.callspec.params.get("num_samples", 1) + + # port 0 is the only port that supports buffered operations + task.do_channels.add_do_chan( + sim_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_samples * 2, + ) + task.out_stream.output_buf_size = num_samples * 2 + task.control(TaskMode.TASK_COMMIT) + task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + return task + + +@pytest.fixture +def do_multi_sample_lines_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DO task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + num_lines = request.node.callspec.params.get("num_lines", 1) + + for chan in range(num_channels): + line_names = [ + chan.name + for chan in sim_6363_device.do_lines[chan * num_lines : (chan + 1) * num_lines] + ] + physical_channel_string = ",".join(line_names) + task.do_channels.add_do_chan( + physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_channels * num_samples * 2, + ) + task.out_stream.output_buf_size = num_samples * 2 + task.control(TaskMode.TASK_COMMIT) + task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + return task diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py index 76f867a4..3daed764 100644 --- a/tests/benchmark/test_analog_stream_readers.py +++ b/tests/benchmark/test_analog_stream_readers.py @@ -8,47 +8,20 @@ from pytest_benchmark.fixture import BenchmarkFixture from nidaqmx import Task -from nidaqmx.constants import AcquisitionType, ReadRelativeTo, Edge, WaveformAttributeMode +from nidaqmx.constants import WaveformAttributeMode from nidaqmx.stream_readers._analog_multi_channel_reader import AnalogMultiChannelReader from nidaqmx.stream_readers._analog_single_channel_reader import ( AnalogSingleChannelReader, ) -from nidaqmx.system import Device - -def configure_ai_task( - task: Task, - sim_6363_device: Device, - num_channels: int, - num_samples: int, -) -> None: - """Configure an AI task for benchmarking.""" - channel_names = [chan.name for chan in sim_6363_device.ai_physical_chans[:num_channels]] - physical_channel_string = ",".join(channel_names) - task.ai_channels.add_ai_voltage_chan( - physical_channel_string, - min_val=-5.0, - max_val=5.0, - ) - 
task.timing.cfg_samp_clk_timing( - rate=25000.0, active_edge=Edge.RISING, sample_mode=AcquisitionType.FINITE, samps_per_chan=num_channels * num_samples * 2 - ) - task.start() - task.wait_until_done(timeout=10.0) - task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE @pytest.mark.benchmark(group="analog_stream_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1]) def test___analog_single_channel_reader___read_one_sample( - benchmark: BenchmarkFixture, - task: Task, - sim_6363_device: Device, - num_channels: int, - num_samples: int + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int ) -> None: - configure_ai_task(task, sim_6363_device, num_channels, num_samples) - reader = AnalogSingleChannelReader(task.in_stream) + reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) benchmark(reader.read_one_sample) @@ -57,14 +30,9 @@ def test___analog_single_channel_reader___read_one_sample( @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [2, 1000]) def test___analog_single_channel_reader___read_many_sample( - benchmark: BenchmarkFixture, - task: Task, - sim_6363_device: Device, - num_channels: int, - num_samples: int + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int ) -> None: - configure_ai_task(task, sim_6363_device, num_channels, num_samples) - reader = AnalogSingleChannelReader(task.in_stream) + reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) data = numpy.full(num_samples, math.inf, dtype=numpy.float64) benchmark(reader.read_many_sample, data, num_samples) @@ -77,15 +45,13 @@ def test___analog_single_channel_reader___read_many_sample( @pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC") def test___analog_single_channel_reader___read_waveform( benchmark: BenchmarkFixture, - task: Task, - sim_6363_device: Device, + ai_benchmark_task: Task, num_channels: int, num_samples: int, - waveform_attribute_mode: WaveformAttributeMode + waveform_attribute_mode: WaveformAttributeMode, ) -> None: - configure_ai_task(task, sim_6363_device, num_channels, num_samples) - task.in_stream.waveform_attribute_mode = waveform_attribute_mode - reader = AnalogSingleChannelReader(task.in_stream) + ai_benchmark_task.in_stream.waveform_attribute_mode = waveform_attribute_mode + reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) waveform = AnalogWaveform(num_samples) benchmark(reader.read_waveform, waveform, num_samples) @@ -95,14 +61,9 @@ def test___analog_single_channel_reader___read_waveform( @pytest.mark.parametrize("num_channels", [1, 2, 8]) @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_multi_channel_reader___read_one_sample( - benchmark: BenchmarkFixture, - task: Task, - sim_6363_device: Device, - num_channels: int, - num_samples: int + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int ) -> None: - configure_ai_task(task, sim_6363_device, num_channels, num_samples) - reader = AnalogMultiChannelReader(task.in_stream) + reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) data = numpy.full(num_channels, math.inf, dtype=numpy.float64) benchmark(reader.read_one_sample, data) @@ -112,15 +73,9 @@ def test___analog_multi_channel_reader___read_one_sample( @pytest.mark.parametrize("num_channels", [1, 2, 8]) @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_multi_channel_reader___read_many_sample( - benchmark: 
BenchmarkFixture, - task: Task, - sim_6363_device: Device, - num_channels: int, - num_samples: int + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int ) -> None: - num_channels = 3 - configure_ai_task(task, sim_6363_device, num_channels, num_samples) - reader = AnalogMultiChannelReader(task.in_stream) + reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) samples_to_read = 1000 data = numpy.full((num_channels, samples_to_read), math.inf, dtype=numpy.float64) @@ -134,16 +89,13 @@ def test___analog_multi_channel_reader___read_many_sample( @pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") def test___analog_multi_channel_reader___read_waveform( benchmark: BenchmarkFixture, - task: Task, - sim_6363_device: Device, + ai_benchmark_task: Task, num_channels: int, num_samples: int, - waveform_attribute_mode: WaveformAttributeMode + waveform_attribute_mode: WaveformAttributeMode, ) -> None: - num_channels = 3 - configure_ai_task(task, sim_6363_device, num_channels, num_samples) - task.in_stream.waveform_attribute_mode = waveform_attribute_mode - reader = AnalogMultiChannelReader(task.in_stream) + ai_benchmark_task.in_stream.waveform_attribute_mode = waveform_attribute_mode + reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) samples_to_read = 1000 waveforms = [AnalogWaveform(samples_to_read) for _ in range(num_channels)] diff --git a/tests/benchmark/test_analog_stream_writers.py b/tests/benchmark/test_analog_stream_writers.py index 96b1a326..b0d6dedb 100644 --- a/tests/benchmark/test_analog_stream_writers.py +++ b/tests/benchmark/test_analog_stream_writers.py @@ -13,73 +13,87 @@ @pytest.mark.benchmark(group="analog_stream_writers") -def test___analog_single_channel_writer___write_one_sample___1_sample( +@pytest.mark.parametrize("num_channels", [1]) +def test___analog_single_channel_writer___write_one_sample( benchmark: BenchmarkFixture, - ao_single_channel_task: nidaqmx.Task, + ao_single_sample_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - writer = AnalogSingleChannelWriter(ao_single_channel_task.out_stream) + writer = AnalogSingleChannelWriter(ao_single_sample_benchmark_task.out_stream) benchmark(writer.write_one_sample, 1.0) @pytest.mark.benchmark(group="analog_stream_writers") -def test___analog_single_channel_writer___write_many_sample___100_samples( +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_single_channel_writer___write_many_sample( benchmark: BenchmarkFixture, - ao_single_channel_task: nidaqmx.Task, + ao_multi_sample_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, ) -> None: - writer = AnalogSingleChannelWriter(ao_single_channel_task.out_stream) - samples_to_write = 100 - data = numpy.linspace(0.0, 1.0, num=samples_to_write, dtype=numpy.float64) + writer = AnalogSingleChannelWriter(ao_multi_sample_benchmark_task.out_stream) + data = numpy.linspace(0.0, 1.0, num=num_samples, dtype=numpy.float64) benchmark(writer.write_many_sample, data) @pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1, 1000]) @pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") -def test___analog_single_channel_writer___write_waveform___100_samples( +def test___analog_single_channel_writer___write_waveform( benchmark: BenchmarkFixture, - ao_single_channel_task: nidaqmx.Task, + 
ao_multi_sample_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, ) -> None: - writer = AnalogSingleChannelWriter(ao_single_channel_task.out_stream) - num_samples = 100 + writer = AnalogSingleChannelWriter(ao_multi_sample_benchmark_task.out_stream) waveform = AnalogWaveform(num_samples) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="analog_stream_writers") -def test___analog_multi_channel_writer___write_one_sample___1_sample( +@pytest.mark.parametrize("num_channels", [1, 2]) +def test___analog_multi_channel_writer___write_one_sample( benchmark: BenchmarkFixture, - ao_multi_channel_task: nidaqmx.Task, + ao_single_sample_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - writer = AnalogMultiChannelWriter(ao_multi_channel_task.out_stream) - expected = [1.0, 1.0] - data = numpy.asarray(expected, dtype=numpy.float64) + writer = AnalogMultiChannelWriter(ao_single_sample_benchmark_task.out_stream) + data = numpy.asarray([1.0] * num_channels, dtype=numpy.float64) benchmark(writer.write_one_sample, data) @pytest.mark.benchmark(group="analog_stream_writers") -def test___analog_multi_channel_writer___write_many_sample___100_samples( +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_multi_channel_writer___write_many_sample( benchmark: BenchmarkFixture, - ao_multi_channel_task: nidaqmx.Task, + ao_multi_sample_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, ) -> None: - writer = AnalogMultiChannelWriter(ao_multi_channel_task.out_stream) - num_channels = 2 - samples_to_write = 100 - data = numpy.full((num_channels, samples_to_write), 1.0, dtype=numpy.float64) + writer = AnalogMultiChannelWriter(ao_multi_sample_benchmark_task.out_stream) + data = numpy.full((num_channels, num_samples), 1.0, dtype=numpy.float64) benchmark(writer.write_many_sample, data) @pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) @pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") -def test___analog_multi_channel_writer___write_waveform___100_samples( +def test___analog_multi_channel_writer___write_waveform( benchmark: BenchmarkFixture, - ao_multi_channel_task: nidaqmx.Task, + ao_multi_sample_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, ) -> None: - writer = AnalogMultiChannelWriter(ao_multi_channel_task.out_stream) - num_samples = 100 - waveform = [AnalogWaveform(num_samples), AnalogWaveform(num_samples)] + writer = AnalogMultiChannelWriter(ao_multi_sample_benchmark_task.out_stream) + waveform = [AnalogWaveform(num_samples) for _ in range(num_channels)] benchmark(writer.write_waveforms, waveform) diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py index 72557fa9..0cf93c4b 100644 --- a/tests/benchmark/test_digital_stream_readers.py +++ b/tests/benchmark/test_digital_stream_readers.py @@ -15,208 +15,197 @@ @pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_single_channel_reader___read_one_sample_one_line___1_sample( +@pytest.mark.parametrize("num_channels", [1]) +def test___digital_single_channel_reader___read_one_sample_one_line( benchmark: BenchmarkFixture, - di_single_line_task: nidaqmx.Task, + di_single_sample_single_line_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - reader = 
DigitalSingleChannelReader(di_single_line_task.in_stream) + reader = DigitalSingleChannelReader(di_single_sample_single_line_benchmark_task.in_stream) benchmark(reader.read_one_sample_one_line) @pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_single_channel_reader___read_one_sample_multi_line___1_sample( +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___digital_single_channel_reader___read_one_sample_multi_line( benchmark: BenchmarkFixture, - di_single_channel_multi_line_task: nidaqmx.Task, + di_single_sample_single_channel_benchmark_task: nidaqmx.Task, + num_lines: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_channel_multi_line_task.in_stream) - num_lines = 8 + reader = DigitalSingleChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) sample = numpy.full(num_lines, False, dtype=numpy.bool_) benchmark(reader.read_one_sample_multi_line, sample) @pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_single_channel_reader___read_many_sample_port_byte___256_samples( +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___digital_single_channel_reader___read_many_sample_port_uint32( benchmark: BenchmarkFixture, - di_single_channel_port_byte_task: nidaqmx.Task, + di_multi_sample_port_benchmark_task: nidaqmx.Task, + num_samples: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_channel_port_byte_task.in_stream) - samples_to_read = 256 - data = numpy.full(samples_to_read, numpy.iinfo(numpy.uint8).min, dtype=numpy.uint8) + reader = DigitalSingleChannelReader(di_multi_sample_port_benchmark_task.in_stream) + data = numpy.full(num_samples, numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) - benchmark( - reader.read_many_sample_port_byte, data, number_of_samples_per_channel=samples_to_read - ) - - -@pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_single_channel_reader___read_many_sample_port_uint32___256_samples( - benchmark: BenchmarkFixture, - di_single_channel_port_uint32_task: nidaqmx.Task, -) -> None: - reader = DigitalSingleChannelReader(di_single_channel_port_uint32_task.in_stream) - samples_to_read = 256 - data = numpy.full(samples_to_read, numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) - - benchmark( - reader.read_many_sample_port_uint32, data, number_of_samples_per_channel=samples_to_read - ) + benchmark(reader.read_many_sample_port_uint32, data, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_channel_reader___read_waveform___256_samples( +def test___digital_single_channel_reader___read_waveform_single_sample_single_line( benchmark: BenchmarkFixture, - di_single_line_task: nidaqmx.Task, + di_single_sample_single_line_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_line_task.in_stream) - samples_to_read = 256 - waveform = DigitalWaveform(samples_to_read) + reader = DigitalSingleChannelReader(di_single_sample_single_line_benchmark_task.in_stream) + waveform = DigitalWaveform(1, 1) - benchmark(reader.read_waveform, waveform, samples_to_read) + benchmark(reader.read_waveform, waveform, 1) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def 
test___digital_single_channel_multi_line_reader___read_waveform___256_samples( +def test___digital_single_channel_reader___read_waveform_single_sample_multi_line( benchmark: BenchmarkFixture, - di_single_channel_multi_line_task: nidaqmx.Task, + di_single_sample_single_channel_benchmark_task: nidaqmx.Task, + num_lines: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_channel_multi_line_task.in_stream) - samples_to_read = 256 - num_lines = 8 - waveform = DigitalWaveform(samples_to_read, num_lines) + reader = DigitalSingleChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) + waveform = DigitalWaveform(1, num_lines) - benchmark(reader.read_waveform, waveform, samples_to_read) + benchmark(reader.read_waveform, waveform, 1) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_channel_reader___read_waveform_port_byte___256_samples( +def test___digital_single_channel_reader___read_waveform_many_sample_port_uint32( benchmark: BenchmarkFixture, - di_single_channel_port_byte_task: nidaqmx.Task, + di_multi_sample_port_benchmark_task: nidaqmx.Task, + num_samples: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_channel_port_byte_task.in_stream) - samples_to_read = 256 - num_lines = 8 - waveform = DigitalWaveform(samples_to_read, num_lines) + reader = DigitalSingleChannelReader(di_multi_sample_port_benchmark_task.in_stream) + waveform = DigitalWaveform(num_samples, signal_count=32) - benchmark(reader.read_waveform, waveform, samples_to_read) + benchmark(reader.read_waveform, waveform, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_channel_reader___read_waveform_port_uint32___256_samples( +def test___digital_single_channel_reader___read_waveform_many_sample_lines( benchmark: BenchmarkFixture, - di_single_channel_port_uint32_task: nidaqmx.Task, + di_multi_sample_lines_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, + num_lines: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_channel_port_uint32_task.in_stream) - samples_to_read = 256 - num_lines = 32 - waveform = DigitalWaveform(samples_to_read, num_lines) + reader = DigitalSingleChannelReader(di_multi_sample_lines_benchmark_task.in_stream) + waveform = DigitalWaveform(num_samples, num_lines) - benchmark(reader.read_waveform, waveform, samples_to_read) + benchmark(reader.read_waveform, waveform, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_multi_channel_reader___read_one_sample_one_line___1_sample( +@pytest.mark.parametrize("num_channels", [1, 2]) +def test___digital_multi_channel_reader___read_one_sample_one_line( benchmark: BenchmarkFixture, - di_single_line_task: nidaqmx.Task, + di_single_sample_single_line_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - reader = DigitalMultiChannelReader(di_single_line_task.in_stream) - sample = numpy.full(1, False, dtype=numpy.bool_) + reader = DigitalMultiChannelReader(di_single_sample_single_line_benchmark_task.in_stream) + sample = numpy.full(num_channels, False, dtype=numpy.bool_) benchmark(reader.read_one_sample_one_line, sample) 
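+
+# NOTE: pytest-benchmark invokes the callable handed to ``benchmark`` repeatedly in
+# calibrated rounds and records the timing statistics, so every test in this module
+# builds its reader and numpy buffers up front and passes only the DAQmx read call
+# into the measured section.
+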
@pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_multi_channel_reader___read_one_sample_multi_line___1_sample( +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___digital_multi_channel_reader___read_one_sample_multi_line( benchmark: BenchmarkFixture, - di_multi_channel_multi_line_task: nidaqmx.Task, + di_single_sample_single_channel_benchmark_task: nidaqmx.Task, + num_lines: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_channel_multi_line_task.in_stream) - num_channels = di_multi_channel_multi_line_task.number_of_channels - sample = numpy.full((num_channels, 1), False, dtype=numpy.bool_) + reader = DigitalMultiChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) + sample = numpy.full((1, num_lines), False, dtype=numpy.bool_) benchmark(reader.read_one_sample_multi_line, sample) @pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_multi_channel_reader___read_many_sample_port_byte___256_samples( +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___digital_multi_channel_reader___read_many_sample_port_uint32( benchmark: BenchmarkFixture, - di_multi_channel_port_byte_task: nidaqmx.Task, + di_multi_sample_port_benchmark_task: nidaqmx.Task, + num_samples: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_channel_port_byte_task.in_stream) - num_channels = 2 - samples_to_read = 256 - data = numpy.full( - (num_channels, samples_to_read), numpy.iinfo(numpy.uint8).min, dtype=numpy.uint8 - ) + reader = DigitalMultiChannelReader(di_multi_sample_port_benchmark_task.in_stream) + data = numpy.full((1, num_samples), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) - benchmark( - reader.read_many_sample_port_byte, data, number_of_samples_per_channel=samples_to_read - ) + benchmark(reader.read_many_sample_port_uint32, data, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") -def test___digital_multi_channel_reader___read_many_sample_port_uint32___256_samples( +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_reader___read_waveform_single_sample_single_line( benchmark: BenchmarkFixture, - di_multi_channel_port_uint32_task: nidaqmx.Task, + di_single_sample_single_line_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_channel_port_uint32_task.in_stream) - num_channels = 3 - samples_to_read = 256 - data = numpy.full( - (num_channels, samples_to_read), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32 - ) + reader = DigitalMultiChannelReader(di_single_sample_single_line_benchmark_task.in_stream) + waveforms = [DigitalWaveform(1, 1) for _ in range(num_channels)] - benchmark( - reader.read_many_sample_port_uint32, data, number_of_samples_per_channel=samples_to_read - ) + benchmark(reader.read_waveforms, waveforms, 1) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_reader___read_waveform_multi_line___256_samples( +def test___digital_multi_channel_reader___read_waveform_single_sample_multi_line( benchmark: BenchmarkFixture, - di_multi_channel_multi_line_task: nidaqmx.Task, + di_single_sample_single_channel_benchmark_task: nidaqmx.Task, + num_lines: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_channel_multi_line_task.in_stream) - 
num_channels = 8 - samples_to_read = 256 - waveforms = [DigitalWaveform(samples_to_read) for _ in range(num_channels)] + reader = DigitalMultiChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) + waveforms = [DigitalWaveform(1, num_lines)] - benchmark(reader.read_waveforms, waveforms, samples_to_read) + benchmark(reader.read_waveforms, waveforms, 1) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_reader___read_waveform_port_byte___256_samples( +def test___digital_multi_channel_reader___read_waveform_many_sample_port_uint32( benchmark: BenchmarkFixture, - di_multi_channel_port_byte_task: nidaqmx.Task, + di_multi_sample_port_benchmark_task: nidaqmx.Task, + num_samples: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_channel_port_byte_task.in_stream) - num_channels = 2 - samples_to_read = 256 - num_lines = 8 - waveforms = [DigitalWaveform(samples_to_read, num_lines) for _ in range(num_channels)] + reader = DigitalMultiChannelReader(di_multi_sample_port_benchmark_task.in_stream) + waveforms = [DigitalWaveform(num_samples, signal_count=32)] - benchmark(reader.read_waveforms, waveforms, samples_to_read) + benchmark(reader.read_waveforms, waveforms, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_reader___read_waveform_port_uint32___256_samples( +def test___digital_multi_channel_reader___read_waveform_many_sample_lines( benchmark: BenchmarkFixture, - di_multi_channel_port_uint32_task: nidaqmx.Task, + di_multi_sample_lines_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, + num_lines: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_channel_port_uint32_task.in_stream) - samples_to_read = 256 - waveforms = [ - DigitalWaveform(samples_to_read, 32), - DigitalWaveform(samples_to_read, 8), - DigitalWaveform(samples_to_read, 8), - ] - - benchmark(reader.read_waveforms, waveforms, samples_to_read) + reader = DigitalMultiChannelReader(di_multi_sample_lines_benchmark_task.in_stream) + waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] + + benchmark(reader.read_waveforms, waveforms, num_samples) diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py index 1d5054d6..4bbe71a0 100644 --- a/tests/benchmark/test_digital_stream_writers.py +++ b/tests/benchmark/test_digital_stream_writers.py @@ -15,198 +15,250 @@ @pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_single_channel_writer___write_one_sample_one_line___1_sample( +@pytest.mark.parametrize("num_channels", [1]) +def test___digital_single_channel_writer___write_one_sample_one_line( benchmark: BenchmarkFixture, - do_single_line_task: nidaqmx.Task, + do_single_sample_single_line_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - writer = DigitalSingleChannelWriter(do_single_line_task.out_stream) + writer = DigitalSingleChannelWriter(do_single_sample_single_line_benchmark_task.out_stream) benchmark(writer.write_one_sample_one_line, True) @pytest.mark.benchmark(group="digital_stream_writers") -def 
test___digital_single_channel_writer___write_one_sample_multi_line___1_sample( +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___digital_single_channel_writer___write_one_sample_multi_line( benchmark: BenchmarkFixture, - do_single_channel_multi_line_task: nidaqmx.Task, + do_single_sample_single_channel_benchmark_task: nidaqmx.Task, + num_lines: int, ) -> None: - writer = DigitalSingleChannelWriter(do_single_channel_multi_line_task.out_stream) - num_lines = 8 + writer = DigitalSingleChannelWriter(do_single_sample_single_channel_benchmark_task.out_stream) sample = numpy.full(num_lines, True, dtype=numpy.bool_) benchmark(writer.write_one_sample_multi_line, sample) @pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_single_channel_writer___write_one_sample_port_byte___1_sample( +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___digital_single_channel_writer___write_many_sample_port_uint32( benchmark: BenchmarkFixture, - do_port1_task: nidaqmx.Task, + do_multi_sample_port_benchmark_task: nidaqmx.Task, + num_samples: int, ) -> None: - writer = DigitalSingleChannelWriter(do_port1_task.out_stream) - sample = numpy.uint8(1) - - benchmark(writer.write_one_sample_port_byte, sample) - - -@pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_single_channel_writer___write_one_sample_port_uint32___1_sample( - benchmark: BenchmarkFixture, - do_port0_task: nidaqmx.Task, -) -> None: - writer = DigitalSingleChannelWriter(do_port0_task.out_stream) - sample = numpy.uint32(1) - - benchmark(writer.write_one_sample_port_uint32, sample) - - -@pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_single_channel_writer___write_many_sample_port_byte___256_samples( - benchmark: BenchmarkFixture, - do_multi_channel_port_task: nidaqmx.Task, -) -> None: - writer = DigitalSingleChannelWriter(do_multi_channel_port_task.out_stream) - samples_to_write = 256 - data = numpy.full(samples_to_write, numpy.uint8(1), dtype=numpy.uint8) - - benchmark(writer.write_many_sample_port_byte, data) - - -@pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_single_channel_writer___write_many_sample_port_uint32___256_samples( - benchmark: BenchmarkFixture, - do_port1_task: nidaqmx.Task, -) -> None: - writer = DigitalSingleChannelWriter(do_port1_task.out_stream) - samples_to_write = 256 - data = numpy.full(samples_to_write, numpy.uint32(1), dtype=numpy.uint32) + writer = DigitalSingleChannelWriter(do_multi_sample_port_benchmark_task.out_stream) + data = numpy.full(num_samples, numpy.uint32(1), dtype=numpy.uint32) benchmark(writer.write_many_sample_port_uint32, data) @pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform___256_samples( +def test___digital_single_channel_writer___write_waveform_single_sample_single_line( benchmark: BenchmarkFixture, - do_single_line_task: nidaqmx.Task, + do_single_sample_single_line_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - writer = DigitalSingleChannelWriter(do_single_line_task.out_stream) - samples_to_write = 256 - waveform = DigitalWaveform(samples_to_write) + writer = DigitalSingleChannelWriter(do_single_sample_single_line_benchmark_task.out_stream) + waveform = DigitalWaveform(1) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="digital_stream_writers") 
+@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_multi_line___256_samples( +def test___digital_single_channel_writer___write_waveform_single_sample_multi_line( benchmark: BenchmarkFixture, - do_single_channel_multi_line_task: nidaqmx.Task, + do_single_sample_single_channel_benchmark_task: nidaqmx.Task, + num_lines: int, ) -> None: - writer = DigitalSingleChannelWriter(do_single_channel_multi_line_task.out_stream) - samples_to_write = 256 - num_lines = 8 - waveform = DigitalWaveform(samples_to_write, num_lines) + writer = DigitalSingleChannelWriter(do_single_sample_single_channel_benchmark_task.out_stream) + waveform = DigitalWaveform(1, num_lines) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_port_byte___256_samples( +def test___digital_single_channel_writer___write_waveform_many_sample_port_uint32( benchmark: BenchmarkFixture, - do_port1_task: nidaqmx.Task, + do_multi_sample_port_benchmark_task: nidaqmx.Task, + num_samples: int, ) -> None: - writer = DigitalSingleChannelWriter(do_port1_task.out_stream) - samples_to_write = 256 - num_lines = 8 - waveform = DigitalWaveform(samples_to_write, num_lines) + writer = DigitalSingleChannelWriter(do_multi_sample_port_benchmark_task.out_stream) + waveform = DigitalWaveform(num_samples, 32) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_port_uint32___256_samples( +def test___digital_single_channel_writer___waveform_many_sample_lines( benchmark: BenchmarkFixture, - do_port0_task: nidaqmx.Task, + do_multi_sample_lines_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, + num_lines: int, ) -> None: - writer = DigitalSingleChannelWriter(do_port0_task.out_stream) - samples_to_write = 256 - num_lines = 32 - waveform = DigitalWaveform(samples_to_write, num_lines) + writer = DigitalSingleChannelWriter(do_multi_sample_lines_benchmark_task.out_stream) + waveform = DigitalWaveform(num_samples, num_lines) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_multi_channel_writer___write_one_sample_one_line___1_sample( +@pytest.mark.parametrize("num_channels", [1, 2]) +def test___digital_multi_channel_writer___write_one_sample_one_line( benchmark: BenchmarkFixture, - do_single_line_task: nidaqmx.Task, + do_single_sample_single_line_benchmark_task: nidaqmx.Task, + num_channels: int, ) -> None: - writer = DigitalMultiChannelWriter(do_single_line_task.out_stream) - sample = numpy.array([True], dtype=numpy.bool_) + writer = DigitalMultiChannelWriter(do_single_sample_single_line_benchmark_task.out_stream) + sample = numpy.full(num_channels, False, dtype=numpy.bool_) benchmark(writer.write_one_sample_one_line, sample) @pytest.mark.benchmark(group="digital_stream_writers") -def test___digital_multi_channel_writer___write_one_sample_multi_line___1_sample( 
+@pytest.mark.parametrize("num_lines", [1, 2, 8])
+def test___digital_multi_channel_writer___write_one_sample_multi_line(
     benchmark: BenchmarkFixture,
-    do_multi_channel_multi_line_task: nidaqmx.Task,
+    do_single_sample_single_channel_benchmark_task: nidaqmx.Task,
+    num_lines: int,
 ) -> None:
-    writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream)
-    sample = numpy.full((8, 1), True, dtype=numpy.bool_)
+    writer = DigitalMultiChannelWriter(do_single_sample_single_channel_benchmark_task.out_stream)
+    sample = numpy.full((1, num_lines), False, dtype=numpy.bool_)
 
     benchmark(writer.write_one_sample_multi_line, sample)
 
 
 @pytest.mark.benchmark(group="digital_stream_writers")
-def test___digital_multi_channel_writer___write_one_sample_port_byte___1_sample(
+@pytest.mark.parametrize("num_samples", [1, 100])
+def test___digital_multi_channel_writer___write_many_sample_port_uint32(
     benchmark: BenchmarkFixture,
-    do_multi_channel_port_task: nidaqmx.Task,
+    do_multi_sample_port_benchmark_task: nidaqmx.Task,
+    num_samples: int,
 ) -> None:
-    writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream)
-    sample = numpy.array([numpy.uint8(1), numpy.uint8(1)], dtype=numpy.uint8)
+    writer = DigitalMultiChannelWriter(do_multi_sample_port_benchmark_task.out_stream)
+    data = numpy.full((1, num_samples), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32)
 
-    benchmark(writer.write_one_sample_port_byte, sample)
+    benchmark(writer.write_many_sample_port_uint32, data, num_samples)
 
 
 @pytest.mark.benchmark(group="digital_stream_writers")
-def test___digital_multi_channel_writer___write_many_sample_port_byte___256_samples(
+@pytest.mark.parametrize("num_channels", [1, 2])
+@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
+def test___digital_multi_channel_writer___write_waveform_single_sample_single_line(
     benchmark: BenchmarkFixture,
-    do_multi_channel_port_task: nidaqmx.Task,
+    do_single_sample_single_line_benchmark_task: nidaqmx.Task,
+    num_channels: int,
 ) -> None:
-    writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream)
-    num_channels = 2
-    samples_to_write = 256
-    data = numpy.full((num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8)
+    writer = DigitalMultiChannelWriter(do_single_sample_single_line_benchmark_task.out_stream)
+    waveforms = [DigitalWaveform(1, 1) for _ in range(num_channels)]
 
-    benchmark(writer.write_many_sample_port_byte, data)
+    benchmark(writer.write_waveforms, waveforms, 1)
 
 
 @pytest.mark.benchmark(group="digital_stream_writers")
+@pytest.mark.parametrize("num_lines", [1, 2, 8])
 @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
-def test___digital_multi_channel_writer___write_waveform_multi_lines___256_samples(
+def test___digital_multi_channel_writer___write_waveform_single_sample_multi_line(
     benchmark: BenchmarkFixture,
-    do_multi_channel_multi_line_task: nidaqmx.Task,
+    do_single_sample_single_channel_benchmark_task: nidaqmx.Task,
+    num_lines: int,
 ) -> None:
-    writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream)
-    num_channels = 8
-    samples_to_write = 256
-    waveforms = [DigitalWaveform(samples_to_write) for _ in range(num_channels)]
+    writer = DigitalMultiChannelWriter(do_single_sample_single_channel_benchmark_task.out_stream)
+    waveforms = [DigitalWaveform(1, num_lines)]
 
-    benchmark(writer.write_waveforms, waveforms)
+    benchmark(writer.write_waveforms, waveforms, 1)
 
 
 @pytest.mark.benchmark(group="digital_stream_writers")
+@pytest.mark.parametrize("num_samples", [1, 100])
 @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
-def test___digital_multi_channel_writer___write_waveforms_port_byte___256_samples(
+def test___digital_multi_channel_writer___write_waveform_many_sample_port_uint32(
     benchmark: BenchmarkFixture,
-    do_multi_channel_port_task: nidaqmx.Task,
+    do_multi_sample_port_benchmark_task: nidaqmx.Task,
+    num_samples: int,
 ) -> None:
-    writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream)
-    num_channels = 2
-    samples_to_write = 256
-    num_lines = 8
-    waveforms = [DigitalWaveform(samples_to_write, num_lines) for _ in range(num_channels)]
+    writer = DigitalMultiChannelWriter(do_multi_sample_port_benchmark_task.out_stream)
+    waveforms = [DigitalWaveform(num_samples, signal_count=32)]
 
-    benchmark(writer.write_waveforms, waveforms)
+    benchmark(writer.write_waveforms, waveforms, num_samples)
+
+
+@pytest.mark.benchmark(group="digital_stream_writers")
+@pytest.mark.parametrize("num_channels", [1, 2])
+@pytest.mark.parametrize("num_samples", [1, 100])
+@pytest.mark.parametrize("num_lines", [1, 2, 8])
+@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
+def test___digital_multi_channel_writer___write_waveform_many_sample_lines(
+    benchmark: BenchmarkFixture,
+    do_multi_sample_lines_benchmark_task: nidaqmx.Task,
+    num_channels: int,
+    num_samples: int,
+    num_lines: int,
+) -> None:
+    writer = DigitalMultiChannelWriter(do_multi_sample_lines_benchmark_task.out_stream)
+    waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)]
+
+    benchmark(writer.write_waveforms, waveforms, num_samples)
+
+
+# @pytest.mark.benchmark(group="digital_stream_writers")
+# def test___digital_multi_channel_writer___write_one_sample_port_byte___1_sample(
+#     benchmark: BenchmarkFixture,
+#     do_multi_channel_port_task: nidaqmx.Task,
+# ) -> None:
+#     writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream)
+#     sample = numpy.array([numpy.uint8(1), numpy.uint8(1)], dtype=numpy.uint8)
+
+#     benchmark(writer.write_one_sample_port_byte, sample)
+
+
+# @pytest.mark.benchmark(group="digital_stream_writers")
+# def test___digital_multi_channel_writer___write_many_sample_port_byte___256_samples(
+#     benchmark: BenchmarkFixture,
+#     do_multi_channel_port_task: nidaqmx.Task,
+# ) -> None:
+#     writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream)
+#     num_channels = 2
+#     samples_to_write = 256
+#     data = numpy.full((num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8)
+
+#     benchmark(writer.write_many_sample_port_byte, data)
+
+
+# @pytest.mark.benchmark(group="digital_stream_writers")
+# @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
+# def test___digital_multi_channel_writer___write_waveform_multi_lines___256_samples(
+#     benchmark: BenchmarkFixture,
+#     do_multi_channel_multi_line_task: nidaqmx.Task,
+# ) -> None:
+#     writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream)
+#     num_channels = 8
+#     samples_to_write = 256
+#     waveforms = [DigitalWaveform(samples_to_write) for _ in range(num_channels)]
+
+#     benchmark(writer.write_waveforms, waveforms)
+
+
+# @pytest.mark.benchmark(group="digital_stream_writers")
+# @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
+# def test___digital_multi_channel_writer___write_waveforms_port_byte___256_samples(
+#     benchmark: BenchmarkFixture,
+#     do_multi_channel_port_task: nidaqmx.Task,
+# ) -> 
None: +# writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) +# num_channels = 2 +# samples_to_write = 256 +# num_lines = 8 +# waveforms = [DigitalWaveform(samples_to_write, num_lines) for _ in range(num_channels)] + +# benchmark(writer.write_waveforms, waveforms) From 4c99db964ad73f92415efc95a5a198095d0b34ed Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Wed, 24 Sep 2025 16:31:13 -0500 Subject: [PATCH 07/15] cleanup --- tests/benchmark/test_analog_stream_readers.py | 2 +- .../benchmark/test_digital_stream_writers.py | 53 ------------------- 2 files changed, 1 insertion(+), 54 deletions(-) diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py index 3daed764..e0190bb1 100644 --- a/tests/benchmark/test_analog_stream_readers.py +++ b/tests/benchmark/test_analog_stream_readers.py @@ -59,7 +59,7 @@ def test___analog_single_channel_reader___read_waveform( @pytest.mark.benchmark(group="analog_stream_readers") @pytest.mark.parametrize("num_channels", [1, 2, 8]) -@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.parametrize("num_samples", [1]) def test___analog_multi_channel_reader___read_one_sample( benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int ) -> None: diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py index 4bbe71a0..df6f8efd 100644 --- a/tests/benchmark/test_digital_stream_writers.py +++ b/tests/benchmark/test_digital_stream_writers.py @@ -209,56 +209,3 @@ def test___digital_multi_channel_writer___write_waveform_many_sample_lines( waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] benchmark(writer.write_waveforms, waveforms, num_samples) - - -# @pytest.mark.benchmark(group="digital_stream_writers") -# def test___digital_multi_channel_writer___write_one_sample_port_byte___1_sample( -# benchmark: BenchmarkFixture, -# do_multi_channel_port_task: nidaqmx.Task, -# ) -> None: -# writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) -# sample = numpy.array([numpy.uint8(1), numpy.uint8(1)], dtype=numpy.uint8) - -# benchmark(writer.write_one_sample_port_byte, sample) - - -# @pytest.mark.benchmark(group="digital_stream_writers") -# def test___digital_multi_channel_writer___write_many_sample_port_byte___256_samples( -# benchmark: BenchmarkFixture, -# do_multi_channel_port_task: nidaqmx.Task, -# ) -> None: -# writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) -# num_channels = 2 -# samples_to_write = 256 -# data = numpy.full((num_channels, samples_to_write), numpy.uint8(1), dtype=numpy.uint8) - -# benchmark(writer.write_many_sample_port_byte, data) - - -# @pytest.mark.benchmark(group="digital_stream_writers") -# @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -# def test___digital_multi_channel_writer___write_waveform_multi_lines___256_samples( -# benchmark: BenchmarkFixture, -# do_multi_channel_multi_line_task: nidaqmx.Task, -# ) -> None: -# writer = DigitalMultiChannelWriter(do_multi_channel_multi_line_task.out_stream) -# num_channels = 8 -# samples_to_write = 256 -# waveforms = [DigitalWaveform(samples_to_write) for _ in range(num_channels)] - -# benchmark(writer.write_waveforms, waveforms) - - -# @pytest.mark.benchmark(group="digital_stream_writers") -# @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -# def 
test___digital_multi_channel_writer___write_waveforms_port_byte___256_samples( -# benchmark: BenchmarkFixture, -# do_multi_channel_port_task: nidaqmx.Task, -# ) -> None: -# writer = DigitalMultiChannelWriter(do_multi_channel_port_task.out_stream) -# num_channels = 2 -# samples_to_write = 256 -# num_lines = 8 -# waveforms = [DigitalWaveform(samples_to_write, num_lines) for _ in range(num_channels)] - -# benchmark(writer.write_waveforms, waveforms) From ee340004067aca4398e515da00f9fe88b60a891d Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Thu, 25 Sep 2025 10:05:25 -0500 Subject: [PATCH 08/15] auto_start=false and clean up fixtures --- tests/benchmark/conftest.py | 134 ++++++----------- tests/benchmark/test_analog_stream_readers.py | 2 +- tests/benchmark/test_analog_stream_writers.py | 24 ++-- .../benchmark/test_digital_stream_readers.py | 136 ++++++++---------- .../benchmark/test_digital_stream_writers.py | 136 ++++++++---------- 5 files changed, 183 insertions(+), 249 deletions(-) diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py index 189862bc..e0cda615 100644 --- a/tests/benchmark/conftest.py +++ b/tests/benchmark/conftest.py @@ -46,25 +46,7 @@ def ai_benchmark_task( @pytest.fixture -def ao_single_sample_benchmark_task( - task: Task, - real_x_series_multiplexed_device: Device, - request: pytest.FixtureRequest, -) -> Task: - """Configure a single-sample AO task for benchmarking.""" - num_channels = request.node.callspec.params.get("num_channels", 1) - for chan in range(num_channels): - task.ao_channels.add_ao_voltage_chan( - real_x_series_multiplexed_device.ao_physical_chans[chan].name, - min_val=-10.0, - max_val=10.0, - ) - - return task - - -@pytest.fixture -def ao_multi_sample_benchmark_task( +def ao_benchmark_task( task: Task, real_x_series_multiplexed_device: Device, request: pytest.FixtureRequest, @@ -93,56 +75,18 @@ def ao_multi_sample_benchmark_task( @pytest.fixture -def di_single_sample_single_channel_benchmark_task( - task: Task, - sim_6363_device: Device, - request: pytest.FixtureRequest, -) -> Task: - """Configure a single-sample DI task for benchmarking.""" - num_lines = request.node.callspec.params.get("num_lines", 1) - - channel_names = [chan.name for chan in sim_6363_device.di_lines[:num_lines]] - physical_channel_string = ",".join(channel_names) - task.di_channels.add_di_chan( - physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES - ) - task.start() - task.wait_until_done(timeout=10.0) - - return task - - -@pytest.fixture -def di_single_sample_single_line_benchmark_task( +def di_single_line_benchmark_task( task: Task, sim_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a single-sample DI task for benchmarking.""" + num_samples = request.node.callspec.params.get("num_samples", 1) num_channels = request.node.callspec.params.get("num_channels", 1) channel_names = [chan.name for chan in sim_6363_device.di_lines[:num_channels]] physical_channel_string = ",".join(channel_names) task.di_channels.add_di_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) - task.start() - task.wait_until_done(timeout=10.0) - - return task - - -@pytest.fixture -def di_multi_sample_port_benchmark_task( - task: Task, - sim_6363_device: Device, - request: pytest.FixtureRequest, -) -> Task: - """Configure a hardware-timed buffered DI task for benchmarking.""" - num_samples = request.node.callspec.params.get("num_samples", 1) - - # port 0 is the only port that supports buffered operations - 
task.di_channels.add_di_chan( - sim_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES - ) task.timing.cfg_samp_clk_timing( rate=25000.0, active_edge=Edge.RISING, @@ -157,7 +101,7 @@ def di_multi_sample_port_benchmark_task( @pytest.fixture -def di_multi_sample_lines_benchmark_task( +def di_multi_line_benchmark_task( task: Task, sim_6363_device: Device, request: pytest.FixtureRequest, @@ -191,56 +135,44 @@ def di_multi_sample_lines_benchmark_task( @pytest.fixture -def do_single_sample_single_channel_benchmark_task( +def di_port32_benchmark_task( task: Task, sim_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: - """Configure a single-sample DO task for benchmarking.""" - num_lines = request.node.callspec.params.get("num_lines", 1) + """Configure a hardware-timed buffered DI task for benchmarking.""" + num_samples = request.node.callspec.params.get("num_samples", 1) - channel_names = [chan.name for chan in sim_6363_device.do_lines[:num_lines]] - physical_channel_string = ",".join(channel_names) - task.do_channels.add_do_chan( - physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + # port 0 is the only port that supports buffered operations + task.di_channels.add_di_chan( + sim_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_samples * 2, ) task.start() task.wait_until_done(timeout=10.0) + task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE return task @pytest.fixture -def do_single_sample_single_line_benchmark_task( +def do_single_line_benchmark_task( task: Task, sim_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a single-sample DO task for benchmarking.""" num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) channel_names = [chan.name for chan in sim_6363_device.do_lines[:num_channels]] physical_channel_string = ",".join(channel_names) task.do_channels.add_do_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) - task.start() - task.wait_until_done(timeout=10.0) - - return task - - -@pytest.fixture -def do_multi_sample_port_benchmark_task( - task: Task, - sim_6363_device: Device, - request: pytest.FixtureRequest, -) -> Task: - """Configure a hardware-timed buffered DO task for benchmarking.""" - num_samples = request.node.callspec.params.get("num_samples", 1) - - # port 0 is the only port that supports buffered operations - task.do_channels.add_do_chan( - sim_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES - ) task.timing.cfg_samp_clk_timing( rate=25000.0, active_edge=Edge.RISING, @@ -255,7 +187,7 @@ def do_multi_sample_port_benchmark_task( @pytest.fixture -def do_multi_sample_lines_benchmark_task( +def do_multi_line_benchmark_task( task: Task, sim_6363_device: Device, request: pytest.FixtureRequest, @@ -286,3 +218,29 @@ def do_multi_sample_lines_benchmark_task( task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE return task + + +@pytest.fixture +def do_port32_benchmark_task( + task: Task, + sim_6363_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DO task for benchmarking.""" + num_samples = request.node.callspec.params.get("num_samples", 1) + + # port 0 is the only port that supports buffered operations + task.do_channels.add_do_chan( + 
sim_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_samples * 2, + ) + task.out_stream.output_buf_size = num_samples * 2 + task.control(TaskMode.TASK_COMMIT) + task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + return task diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py index e0190bb1..af2ac83c 100644 --- a/tests/benchmark/test_analog_stream_readers.py +++ b/tests/benchmark/test_analog_stream_readers.py @@ -28,7 +28,7 @@ def test___analog_single_channel_reader___read_one_sample( @pytest.mark.benchmark(group="analog_stream_readers") @pytest.mark.parametrize("num_channels", [1]) -@pytest.mark.parametrize("num_samples", [2, 1000]) +@pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_single_channel_reader___read_many_sample( benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int ) -> None: diff --git a/tests/benchmark/test_analog_stream_writers.py b/tests/benchmark/test_analog_stream_writers.py index b0d6dedb..d582d448 100644 --- a/tests/benchmark/test_analog_stream_writers.py +++ b/tests/benchmark/test_analog_stream_writers.py @@ -16,10 +16,10 @@ @pytest.mark.parametrize("num_channels", [1]) def test___analog_single_channel_writer___write_one_sample( benchmark: BenchmarkFixture, - ao_single_sample_benchmark_task: nidaqmx.Task, + ao_benchmark_task: nidaqmx.Task, num_channels: int, ) -> None: - writer = AnalogSingleChannelWriter(ao_single_sample_benchmark_task.out_stream) + writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False) benchmark(writer.write_one_sample, 1.0) @@ -29,11 +29,11 @@ def test___analog_single_channel_writer___write_one_sample( @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_single_channel_writer___write_many_sample( benchmark: BenchmarkFixture, - ao_multi_sample_benchmark_task: nidaqmx.Task, + ao_benchmark_task: nidaqmx.Task, num_channels: int, num_samples: int, ) -> None: - writer = AnalogSingleChannelWriter(ao_multi_sample_benchmark_task.out_stream) + writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False) data = numpy.linspace(0.0, 1.0, num=num_samples, dtype=numpy.float64) benchmark(writer.write_many_sample, data) @@ -45,11 +45,11 @@ def test___analog_single_channel_writer___write_many_sample( @pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") def test___analog_single_channel_writer___write_waveform( benchmark: BenchmarkFixture, - ao_multi_sample_benchmark_task: nidaqmx.Task, + ao_benchmark_task: nidaqmx.Task, num_channels: int, num_samples: int, ) -> None: - writer = AnalogSingleChannelWriter(ao_multi_sample_benchmark_task.out_stream) + writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False) waveform = AnalogWaveform(num_samples) benchmark(writer.write_waveform, waveform) @@ -59,10 +59,10 @@ def test___analog_single_channel_writer___write_waveform( @pytest.mark.parametrize("num_channels", [1, 2]) def test___analog_multi_channel_writer___write_one_sample( benchmark: BenchmarkFixture, - ao_single_sample_benchmark_task: nidaqmx.Task, + ao_benchmark_task: nidaqmx.Task, num_channels: int, ) -> None: - writer = AnalogMultiChannelWriter(ao_single_sample_benchmark_task.out_stream) + writer = AnalogMultiChannelWriter(ao_benchmark_task.out_stream, auto_start=False) 
data = numpy.asarray([1.0] * num_channels, dtype=numpy.float64) benchmark(writer.write_one_sample, data) @@ -73,11 +73,11 @@ def test___analog_multi_channel_writer___write_one_sample( @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_multi_channel_writer___write_many_sample( benchmark: BenchmarkFixture, - ao_multi_sample_benchmark_task: nidaqmx.Task, + ao_benchmark_task: nidaqmx.Task, num_channels: int, num_samples: int, ) -> None: - writer = AnalogMultiChannelWriter(ao_multi_sample_benchmark_task.out_stream) + writer = AnalogMultiChannelWriter(ao_benchmark_task.out_stream, auto_start=False) data = numpy.full((num_channels, num_samples), 1.0, dtype=numpy.float64) benchmark(writer.write_many_sample, data) @@ -89,11 +89,11 @@ def test___analog_multi_channel_writer___write_many_sample( @pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") def test___analog_multi_channel_writer___write_waveform( benchmark: BenchmarkFixture, - ao_multi_sample_benchmark_task: nidaqmx.Task, + ao_benchmark_task: nidaqmx.Task, num_channels: int, num_samples: int, ) -> None: - writer = AnalogMultiChannelWriter(ao_multi_sample_benchmark_task.out_stream) + writer = AnalogMultiChannelWriter(ao_benchmark_task.out_stream, auto_start=False) waveform = [AnalogWaveform(num_samples) for _ in range(num_channels)] benchmark(writer.write_waveforms, waveform) diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py index 0cf93c4b..b12ff4b2 100644 --- a/tests/benchmark/test_digital_stream_readers.py +++ b/tests/benchmark/test_digital_stream_readers.py @@ -16,24 +16,30 @@ @pytest.mark.benchmark(group="digital_stream_readers") @pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1]) def test___digital_single_channel_reader___read_one_sample_one_line( benchmark: BenchmarkFixture, - di_single_sample_single_line_benchmark_task: nidaqmx.Task, + di_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_sample_single_line_benchmark_task.in_stream) + reader = DigitalSingleChannelReader(di_single_line_benchmark_task.in_stream) benchmark(reader.read_one_sample_one_line) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___digital_single_channel_reader___read_one_sample_multi_line( benchmark: BenchmarkFixture, - di_single_sample_single_channel_benchmark_task: nidaqmx.Task, + di_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, num_lines: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) + reader = DigitalSingleChannelReader(di_multi_line_benchmark_task.in_stream) sample = numpy.full(num_lines, False, dtype=numpy.bool_) benchmark(reader.read_one_sample_multi_line, sample) @@ -43,10 +49,10 @@ def test___digital_single_channel_reader___read_one_sample_multi_line( @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_single_channel_reader___read_many_sample_port_uint32( benchmark: BenchmarkFixture, - di_multi_sample_port_benchmark_task: nidaqmx.Task, + di_port32_benchmark_task: nidaqmx.Task, num_samples: int, ) -> None: - reader = DigitalSingleChannelReader(di_multi_sample_port_benchmark_task.in_stream) + reader = 
DigitalSingleChannelReader(di_port32_benchmark_task.in_stream) data = numpy.full(num_samples, numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) benchmark(reader.read_many_sample_port_uint32, data, num_samples) @@ -54,86 +60,80 @@ def test___digital_single_channel_reader___read_many_sample_port_uint32( @pytest.mark.benchmark(group="digital_stream_readers") @pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_channel_reader___read_waveform_single_sample_single_line( +def test___digital_single_channel_reader___read_waveform_single_line( benchmark: BenchmarkFixture, - di_single_sample_single_line_benchmark_task: nidaqmx.Task, + di_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - reader = DigitalSingleChannelReader(di_single_sample_single_line_benchmark_task.in_stream) - waveform = DigitalWaveform(1, 1) - - benchmark(reader.read_waveform, waveform, 1) - - -@pytest.mark.benchmark(group="digital_stream_readers") -@pytest.mark.parametrize("num_lines", [1, 2, 8]) -@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_channel_reader___read_waveform_single_sample_multi_line( - benchmark: BenchmarkFixture, - di_single_sample_single_channel_benchmark_task: nidaqmx.Task, - num_lines: int, -) -> None: - reader = DigitalSingleChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) - waveform = DigitalWaveform(1, num_lines) + reader = DigitalSingleChannelReader(di_single_line_benchmark_task.in_stream) + waveform = DigitalWaveform(num_samples, signal_count=1) - benchmark(reader.read_waveform, waveform, 1) + benchmark(reader.read_waveform, waveform, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_channel_reader___read_waveform_many_sample_port_uint32( +def test___digital_single_channel_reader___read_waveform_multi_line( benchmark: BenchmarkFixture, - di_multi_sample_port_benchmark_task: nidaqmx.Task, + di_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, num_samples: int, + num_lines: int, ) -> None: - reader = DigitalSingleChannelReader(di_multi_sample_port_benchmark_task.in_stream) - waveform = DigitalWaveform(num_samples, signal_count=32) + reader = DigitalSingleChannelReader(di_multi_line_benchmark_task.in_stream) + waveform = DigitalWaveform(num_samples, num_lines) benchmark(reader.read_waveform, waveform, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") -@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) -@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_single_channel_reader___read_waveform_many_sample_lines( +def test___digital_single_channel_reader___read_waveform_port_uint32( benchmark: BenchmarkFixture, - di_multi_sample_lines_benchmark_task: nidaqmx.Task, - num_channels: int, + di_port32_benchmark_task: nidaqmx.Task, num_samples: int, - num_lines: int, ) -> None: - reader = DigitalSingleChannelReader(di_multi_sample_lines_benchmark_task.in_stream) - waveform = DigitalWaveform(num_samples, num_lines) 
+ reader = DigitalSingleChannelReader(di_port32_benchmark_task.in_stream) + waveform = DigitalWaveform(num_samples, signal_count=32) benchmark(reader.read_waveform, waveform, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") @pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1]) def test___digital_multi_channel_reader___read_one_sample_one_line( benchmark: BenchmarkFixture, - di_single_sample_single_line_benchmark_task: nidaqmx.Task, + di_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - reader = DigitalMultiChannelReader(di_single_sample_single_line_benchmark_task.in_stream) + reader = DigitalMultiChannelReader(di_single_line_benchmark_task.in_stream) sample = numpy.full(num_channels, False, dtype=numpy.bool_) benchmark(reader.read_one_sample_one_line, sample) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___digital_multi_channel_reader___read_one_sample_multi_line( benchmark: BenchmarkFixture, - di_single_sample_single_channel_benchmark_task: nidaqmx.Task, + di_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, num_lines: int, ) -> None: - reader = DigitalMultiChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) - sample = numpy.full((1, num_lines), False, dtype=numpy.bool_) + reader = DigitalMultiChannelReader(di_multi_line_benchmark_task.in_stream) + sample = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_) benchmark(reader.read_one_sample_multi_line, sample) @@ -142,10 +142,10 @@ def test___digital_multi_channel_reader___read_one_sample_multi_line( @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_multi_channel_reader___read_many_sample_port_uint32( benchmark: BenchmarkFixture, - di_multi_sample_port_benchmark_task: nidaqmx.Task, + di_port32_benchmark_task: nidaqmx.Task, num_samples: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_sample_port_benchmark_task.in_stream) + reader = DigitalMultiChannelReader(di_port32_benchmark_task.in_stream) data = numpy.full((1, num_samples), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) benchmark(reader.read_many_sample_port_uint32, data, num_samples) @@ -153,59 +153,47 @@ def test___digital_multi_channel_reader___read_many_sample_port_uint32( @pytest.mark.benchmark(group="digital_stream_readers") @pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_reader___read_waveform_single_sample_single_line( +def test___digital_multi_channel_reader___read_waveform_single_line( benchmark: BenchmarkFixture, - di_single_sample_single_line_benchmark_task: nidaqmx.Task, + di_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - reader = DigitalMultiChannelReader(di_single_sample_single_line_benchmark_task.in_stream) - waveforms = [DigitalWaveform(1, 1) for _ in range(num_channels)] - - benchmark(reader.read_waveforms, waveforms, 1) - - -@pytest.mark.benchmark(group="digital_stream_readers") -@pytest.mark.parametrize("num_lines", [1, 2, 8]) -@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_reader___read_waveform_single_sample_multi_line( - 
benchmark: BenchmarkFixture, - di_single_sample_single_channel_benchmark_task: nidaqmx.Task, - num_lines: int, -) -> None: - reader = DigitalMultiChannelReader(di_single_sample_single_channel_benchmark_task.in_stream) - waveforms = [DigitalWaveform(1, num_lines)] + reader = DigitalMultiChannelReader(di_single_line_benchmark_task.in_stream) + waveforms = [DigitalWaveform(num_samples, signal_count=1) for _ in range(num_channels)] benchmark(reader.read_waveforms, waveforms, 1) @pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_reader___read_waveform_many_sample_port_uint32( +def test___digital_multi_channel_reader___read_waveform_multi_line( benchmark: BenchmarkFixture, - di_multi_sample_port_benchmark_task: nidaqmx.Task, + di_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, num_samples: int, + num_lines: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_sample_port_benchmark_task.in_stream) - waveforms = [DigitalWaveform(num_samples, signal_count=32)] + reader = DigitalMultiChannelReader(di_multi_line_benchmark_task.in_stream) + waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] benchmark(reader.read_waveforms, waveforms, num_samples) @pytest.mark.benchmark(group="digital_stream_readers") -@pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) -@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_reader___read_waveform_many_sample_lines( +def test___digital_multi_channel_reader___read_waveform_port_uint32( benchmark: BenchmarkFixture, - di_multi_sample_lines_benchmark_task: nidaqmx.Task, - num_channels: int, + di_port32_benchmark_task: nidaqmx.Task, num_samples: int, - num_lines: int, ) -> None: - reader = DigitalMultiChannelReader(di_multi_sample_lines_benchmark_task.in_stream) - waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] + reader = DigitalMultiChannelReader(di_port32_benchmark_task.in_stream) + waveforms = [DigitalWaveform(num_samples, signal_count=32)] benchmark(reader.read_waveforms, waveforms, num_samples) diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py index df6f8efd..ab25b79c 100644 --- a/tests/benchmark/test_digital_stream_writers.py +++ b/tests/benchmark/test_digital_stream_writers.py @@ -16,24 +16,30 @@ @pytest.mark.benchmark(group="digital_stream_writers") @pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1]) def test___digital_single_channel_writer___write_one_sample_one_line( benchmark: BenchmarkFixture, - do_single_sample_single_line_benchmark_task: nidaqmx.Task, + do_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - writer = DigitalSingleChannelWriter(do_single_sample_single_line_benchmark_task.out_stream) + writer = DigitalSingleChannelWriter(do_single_line_benchmark_task.out_stream, auto_start=False) benchmark(writer.write_one_sample_one_line, True) @pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1]) 
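+# Single-channel, single-point write; only the number of lines per channel is swept.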
@pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___digital_single_channel_writer___write_one_sample_multi_line( benchmark: BenchmarkFixture, - do_single_sample_single_channel_benchmark_task: nidaqmx.Task, + do_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, num_lines: int, ) -> None: - writer = DigitalSingleChannelWriter(do_single_sample_single_channel_benchmark_task.out_stream) + writer = DigitalSingleChannelWriter(do_multi_line_benchmark_task.out_stream, auto_start=False) sample = numpy.full(num_lines, True, dtype=numpy.bool_) benchmark(writer.write_one_sample_multi_line, sample) @@ -43,10 +49,10 @@ def test___digital_single_channel_writer___write_one_sample_multi_line( @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_single_channel_writer___write_many_sample_port_uint32( benchmark: BenchmarkFixture, - do_multi_sample_port_benchmark_task: nidaqmx.Task, + do_port32_benchmark_task: nidaqmx.Task, num_samples: int, ) -> None: - writer = DigitalSingleChannelWriter(do_multi_sample_port_benchmark_task.out_stream) + writer = DigitalSingleChannelWriter(do_port32_benchmark_task.out_stream, auto_start=False) data = numpy.full(num_samples, numpy.uint32(1), dtype=numpy.uint32) benchmark(writer.write_many_sample_port_uint32, data) @@ -54,86 +60,80 @@ def test___digital_single_channel_writer___write_many_sample_port_uint32( @pytest.mark.benchmark(group="digital_stream_writers") @pytest.mark.parametrize("num_channels", [1]) +@pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_single_sample_single_line( +def test___digital_single_channel_writer___write_waveform_single_line( benchmark: BenchmarkFixture, - do_single_sample_single_line_benchmark_task: nidaqmx.Task, + do_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - writer = DigitalSingleChannelWriter(do_single_sample_single_line_benchmark_task.out_stream) - waveform = DigitalWaveform(1) - - benchmark(writer.write_waveform, waveform) - - -@pytest.mark.benchmark(group="digital_stream_writers") -@pytest.mark.parametrize("num_lines", [1, 2, 8]) -@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_single_sample_multi_line( - benchmark: BenchmarkFixture, - do_single_sample_single_channel_benchmark_task: nidaqmx.Task, - num_lines: int, -) -> None: - writer = DigitalSingleChannelWriter(do_single_sample_single_channel_benchmark_task.out_stream) - waveform = DigitalWaveform(1, num_lines) + writer = DigitalSingleChannelWriter(do_single_line_benchmark_task.out_stream, auto_start=False) + waveform = DigitalWaveform(num_samples) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_many_sample_port_uint32( +def test___digital_single_channel_writer___write_waveform_multi_line( benchmark: BenchmarkFixture, - do_multi_sample_port_benchmark_task: nidaqmx.Task, + do_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, num_samples: int, + num_lines: int, ) -> None: - writer = 
DigitalSingleChannelWriter(do_multi_sample_port_benchmark_task.out_stream) - waveform = DigitalWaveform(num_samples, 32) + writer = DigitalSingleChannelWriter(do_multi_line_benchmark_task.out_stream, auto_start=False) + waveform = DigitalWaveform(num_channels, num_lines) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="digital_stream_writers") -@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) -@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___waveform_many_sample_lines( +def test___digital_single_channel_writer___write_waveform_port_uint32( benchmark: BenchmarkFixture, - do_multi_sample_lines_benchmark_task: nidaqmx.Task, - num_channels: int, + do_port32_benchmark_task: nidaqmx.Task, num_samples: int, - num_lines: int, ) -> None: - writer = DigitalSingleChannelWriter(do_multi_sample_lines_benchmark_task.out_stream) - waveform = DigitalWaveform(num_samples, num_lines) + writer = DigitalSingleChannelWriter(do_port32_benchmark_task.out_stream, auto_start=False) + waveform = DigitalWaveform(num_samples, signal_count=32) benchmark(writer.write_waveform, waveform) @pytest.mark.benchmark(group="digital_stream_writers") @pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1]) def test___digital_multi_channel_writer___write_one_sample_one_line( benchmark: BenchmarkFixture, - do_single_sample_single_line_benchmark_task: nidaqmx.Task, + do_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - writer = DigitalMultiChannelWriter(do_single_sample_single_line_benchmark_task.out_stream) + writer = DigitalMultiChannelWriter(do_single_line_benchmark_task.out_stream, auto_start=False) sample = numpy.full(num_channels, False, dtype=numpy.bool_) benchmark(writer.write_one_sample_one_line, sample) @pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___digital_multi_channel_writer___write_one_sample_multi_line( benchmark: BenchmarkFixture, - do_single_sample_single_channel_benchmark_task: nidaqmx.Task, + do_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, num_lines: int, ) -> None: - writer = DigitalMultiChannelWriter(do_single_sample_single_channel_benchmark_task.out_stream) - sample = numpy.full((1, num_lines), False, dtype=numpy.bool_) + writer = DigitalMultiChannelWriter(do_multi_line_benchmark_task.out_stream, auto_start=False) + sample = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_) benchmark(writer.write_one_sample_multi_line, sample) @@ -142,10 +142,10 @@ def test___digital_multi_channel_writer___write_one_sample_multi_line( @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_multi_channel_writer___write_many_sample_port_uint32( benchmark: BenchmarkFixture, - do_multi_sample_port_benchmark_task: nidaqmx.Task, + do_port32_benchmark_task: nidaqmx.Task, num_samples: int, ) -> None: - writer = DigitalMultiChannelWriter(do_multi_sample_port_benchmark_task.in_stream) + writer = DigitalMultiChannelWriter(do_port32_benchmark_task.in_stream, auto_start=False) data = numpy.full((1, num_samples), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) benchmark(writer.write_many_sample_port_uint32, data, num_samples) @@ -153,59 
+153,47 @@ def test___digital_multi_channel_writer___write_many_sample_port_uint32( @pytest.mark.benchmark(group="digital_stream_writers") @pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_writer___write_waveform_single_sample_single_line( +def test___digital_multi_channel_writer___write_waveform_single_line( benchmark: BenchmarkFixture, - do_single_sample_single_line_benchmark_task: nidaqmx.Task, + do_single_line_benchmark_task: nidaqmx.Task, num_channels: int, + num_samples: int, ) -> None: - writer = DigitalMultiChannelWriter(do_single_sample_single_line_benchmark_task.in_stream) - waveforms = [DigitalWaveform(1, 1) for _ in range(num_channels)] - - benchmark(writer.write_waveforms, waveforms, 1) - - -@pytest.mark.benchmark(group="digital_stream_writers") -@pytest.mark.parametrize("num_lines", [1, 2, 8]) -@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_writer___write_waveform_single_sample_multi_line( - benchmark: BenchmarkFixture, - do_single_sample_single_channel_benchmark_task: nidaqmx.Task, - num_lines: int, -) -> None: - writer = DigitalMultiChannelWriter(do_single_sample_single_channel_benchmark_task.in_stream) - waveforms = [DigitalWaveform(1, num_lines)] + writer = DigitalMultiChannelWriter(do_single_line_benchmark_task.in_stream, auto_start=False) + waveforms = [DigitalWaveform(num_samples) for _ in range(num_channels)] - benchmark(writer.write_waveforms, waveforms, 1) + benchmark(writer.write_waveforms, waveforms, num_samples) @pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_writer___write_waveform_many_sample_port_uint32( +def test___digital_multi_channel_writer___write_waveform_multi_line( benchmark: BenchmarkFixture, - do_multi_sample_port_benchmark_task: nidaqmx.Task, + do_multi_line_benchmark_task: nidaqmx.Task, + num_channels: int, num_samples: int, + num_lines: int, ) -> None: - writer = DigitalMultiChannelWriter(do_multi_sample_port_benchmark_task.in_stream) - waveforms = [DigitalWaveform(num_samples, signal_count=32)] + writer = DigitalMultiChannelWriter(do_multi_line_benchmark_task.in_stream, auto_start=False) + waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] benchmark(writer.write_waveforms, waveforms, num_samples) @pytest.mark.benchmark(group="digital_stream_writers") -@pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) -@pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_writer___write_waveform_many_sample_lines( +def test___digital_multi_channel_writer___write_waveform_port_uint32( benchmark: BenchmarkFixture, - do_multi_sample_lines_benchmark_task: nidaqmx.Task, - num_channels: int, + do_port32_benchmark_task: nidaqmx.Task, num_samples: int, - num_lines: int, ) -> None: - writer = DigitalMultiChannelWriter(do_multi_sample_lines_benchmark_task.in_stream) - waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] + writer = 
DigitalMultiChannelWriter(do_port32_benchmark_task.in_stream, auto_start=False) + waveforms = [DigitalWaveform(num_samples, signal_count=32)] benchmark(writer.write_waveforms, waveforms, num_samples) From 96407ec7f65b16939c3b1edd2326bd4c2597c8ae Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Thu, 25 Sep 2025 10:30:18 -0500 Subject: [PATCH 09/15] cleanup conftest.py --- tests/benchmark/conftest.py | 136 ++++++++++++++---------------------- 1 file changed, 53 insertions(+), 83 deletions(-) diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py index e0cda615..abb736be 100644 --- a/tests/benchmark/conftest.py +++ b/tests/benchmark/conftest.py @@ -15,6 +15,27 @@ from nidaqmx.system import Device +def _configure_timing(task, num_channels, num_samples): + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_channels * num_samples * 2, + ) + + +def _start_input_task(task): + task.start() + task.wait_until_done(timeout=10.0) + task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + +def _commit_output_task(task, num_channels, num_samples): + task.out_stream.output_buf_size = num_channels * num_samples * 2 + task.control(TaskMode.TASK_COMMIT) + task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + @pytest.fixture def ai_benchmark_task( task: Task, @@ -25,22 +46,15 @@ def ai_benchmark_task( num_channels = request.node.callspec.params.get("num_channels", 1) num_samples = request.node.callspec.params.get("num_samples", 1) - channel_names = [chan.name for chan in sim_6363_device.ai_physical_chans[:num_channels]] - physical_channel_string = ",".join(channel_names) - task.ai_channels.add_ai_voltage_chan( - physical_channel_string, - min_val=-5.0, - max_val=5.0, - ) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_channels * num_samples * 2, - ) - task.start() - task.wait_until_done(timeout=10.0) - task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + for chan in range(num_channels): + task.ai_channels.add_ai_voltage_chan( + sim_6363_device.ai_physical_chans[chan].name, + min_val=-5.0, + max_val=5.0, + ) + + _configure_timing(task, num_channels, num_samples) + _start_input_task(task) return task @@ -54,6 +68,7 @@ def ao_benchmark_task( """Configure a hardware-timed buffered AO task for benchmarking.""" num_channels = request.node.callspec.params.get("num_channels", 1) num_samples = request.node.callspec.params.get("num_samples", 1) + for chan in range(num_channels): task.ao_channels.add_ao_voltage_chan( real_x_series_multiplexed_device.ao_physical_chans[chan].name, @@ -61,15 +76,8 @@ def ao_benchmark_task( max_val=10.0, ) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_channels * num_samples * 2, - ) - task.out_stream.output_buf_size = num_channels * num_samples * 2 - task.control(TaskMode.TASK_COMMIT) - task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + _configure_timing(task, num_channels, num_samples) + _commit_output_task(task, num_channels, num_samples) return task @@ -84,18 +92,12 @@ def di_single_line_benchmark_task( num_samples = request.node.callspec.params.get("num_samples", 1) num_channels = request.node.callspec.params.get("num_channels", 1) - channel_names = [chan.name for chan in sim_6363_device.di_lines[:num_channels]] - physical_channel_string = ",".join(channel_names) + line_names = 
[chan.name for chan in sim_6363_device.di_lines[:num_channels]] + physical_channel_string = ",".join(line_names) task.di_channels.add_di_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_samples * 2, - ) - task.start() - task.wait_until_done(timeout=10.0) - task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + _configure_timing(task, num_channels, num_samples) + _start_input_task(task) return task @@ -121,15 +123,8 @@ def di_multi_line_benchmark_task( physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_channels * num_samples * 2, - ) - task.start() - task.wait_until_done(timeout=10.0) - task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + _configure_timing(task, num_channels, num_samples) + _start_input_task(task) return task @@ -147,15 +142,9 @@ def di_port32_benchmark_task( task.di_channels.add_di_chan( sim_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_samples * 2, - ) - task.start() - task.wait_until_done(timeout=10.0) - task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + _configure_timing(task, 1, num_samples) + _start_input_task(task) return task @@ -170,18 +159,12 @@ def do_single_line_benchmark_task( num_channels = request.node.callspec.params.get("num_channels", 1) num_samples = request.node.callspec.params.get("num_samples", 1) - channel_names = [chan.name for chan in sim_6363_device.do_lines[:num_channels]] - physical_channel_string = ",".join(channel_names) + line_names = [chan.name for chan in sim_6363_device.do_lines[:num_channels]] + physical_channel_string = ",".join(line_names) task.do_channels.add_do_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_samples * 2, - ) - task.out_stream.output_buf_size = num_samples * 2 - task.control(TaskMode.TASK_COMMIT) - task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + _configure_timing(task, num_channels, num_samples) + _commit_output_task(task, num_channels, num_samples) return task @@ -207,15 +190,8 @@ def do_multi_line_benchmark_task( physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_channels * num_samples * 2, - ) - task.out_stream.output_buf_size = num_samples * 2 - task.control(TaskMode.TASK_COMMIT) - task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + _configure_timing(task, num_channels, num_samples) + _commit_output_task(task, num_channels, num_samples) return task @@ -233,14 +209,8 @@ def do_port32_benchmark_task( task.do_channels.add_do_chan( sim_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) - task.timing.cfg_samp_clk_timing( - rate=25000.0, - active_edge=Edge.RISING, - sample_mode=AcquisitionType.FINITE, - samps_per_chan=num_samples * 2, - ) - task.out_stream.output_buf_size = num_samples * 2 - task.control(TaskMode.TASK_COMMIT) - task.out_stream.relative_to = 
ReadRelativeTo.FIRST_SAMPLE + + _configure_timing(task, 1, num_samples) + _commit_output_task(task, 1, num_samples) return task From 93ec2e047320bb1c92a1f8264bed203dffdd5616 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Thu, 25 Sep 2025 13:26:56 -0500 Subject: [PATCH 10/15] task benchmarks --- tests/benchmark/test_analog_stream_readers.py | 22 +- tests/benchmark/test_analog_stream_writers.py | 16 +- .../benchmark/test_digital_stream_readers.py | 24 +-- .../benchmark/test_digital_stream_writers.py | 24 +-- tests/benchmark/test_task.py | 194 ++++++++++++++++++ 5 files changed, 236 insertions(+), 44 deletions(-) create mode 100644 tests/benchmark/test_task.py diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py index af2ac83c..223b3175 100644 --- a/tests/benchmark/test_analog_stream_readers.py +++ b/tests/benchmark/test_analog_stream_readers.py @@ -15,7 +15,7 @@ ) -@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1]) def test___analog_single_channel_reader___read_one_sample( @@ -26,7 +26,7 @@ def test___analog_single_channel_reader___read_one_sample( benchmark(reader.read_one_sample) -@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_single_channel_reader___read_many_sample( @@ -38,7 +38,7 @@ def test___analog_single_channel_reader___read_many_sample( benchmark(reader.read_many_sample, data, num_samples) -@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 1000]) @pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) @@ -57,7 +57,7 @@ def test___analog_single_channel_reader___read_waveform( benchmark(reader.read_waveform, waveform, num_samples) -@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1, 2, 8]) @pytest.mark.parametrize("num_samples", [1]) def test___analog_multi_channel_reader___read_one_sample( @@ -69,20 +69,19 @@ def test___analog_multi_channel_reader___read_one_sample( benchmark(reader.read_one_sample, data) -@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1, 2, 8]) @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_multi_channel_reader___read_many_sample( benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int ) -> None: reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) - samples_to_read = 1000 - data = numpy.full((num_channels, samples_to_read), math.inf, dtype=numpy.float64) + data = numpy.full((num_channels, num_samples), math.inf, dtype=numpy.float64) - benchmark(reader.read_many_sample, data, samples_to_read) + benchmark(reader.read_many_sample, data, num_samples) -@pytest.mark.benchmark(group="analog_stream_readers") +@pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1, 2, 8]) @pytest.mark.parametrize("num_samples", [1, 1000]) @pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) @@ -96,7 +95,6 @@ def 
test___analog_multi_channel_reader___read_waveform( ) -> None: ai_benchmark_task.in_stream.waveform_attribute_mode = waveform_attribute_mode reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) - samples_to_read = 1000 - waveforms = [AnalogWaveform(samples_to_read) for _ in range(num_channels)] + waveforms = [AnalogWaveform(num_samples) for _ in range(num_channels)] - benchmark(reader.read_waveforms, waveforms, samples_to_read) + benchmark(reader.read_waveforms, waveforms, num_samples) diff --git a/tests/benchmark/test_analog_stream_writers.py b/tests/benchmark/test_analog_stream_writers.py index d582d448..9968bb51 100644 --- a/tests/benchmark/test_analog_stream_writers.py +++ b/tests/benchmark/test_analog_stream_writers.py @@ -12,7 +12,7 @@ ) -@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.benchmark(group="analog_writers") @pytest.mark.parametrize("num_channels", [1]) def test___analog_single_channel_writer___write_one_sample( benchmark: BenchmarkFixture, @@ -24,7 +24,7 @@ def test___analog_single_channel_writer___write_one_sample( benchmark(writer.write_one_sample, 1.0) -@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.benchmark(group="analog_writers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_single_channel_writer___write_many_sample( @@ -39,7 +39,7 @@ def test___analog_single_channel_writer___write_many_sample( benchmark(writer.write_many_sample, data) -@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.benchmark(group="analog_writers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 1000]) @pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") @@ -55,7 +55,7 @@ def test___analog_single_channel_writer___write_waveform( benchmark(writer.write_waveform, waveform) -@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.benchmark(group="analog_writers") @pytest.mark.parametrize("num_channels", [1, 2]) def test___analog_multi_channel_writer___write_one_sample( benchmark: BenchmarkFixture, @@ -68,7 +68,7 @@ def test___analog_multi_channel_writer___write_one_sample( benchmark(writer.write_one_sample, data) -@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.benchmark(group="analog_writers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_multi_channel_writer___write_many_sample( @@ -83,7 +83,7 @@ def test___analog_multi_channel_writer___write_many_sample( benchmark(writer.write_many_sample, data) -@pytest.mark.benchmark(group="analog_stream_writers") +@pytest.mark.benchmark(group="analog_writers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 1000]) @pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") @@ -94,6 +94,6 @@ def test___analog_multi_channel_writer___write_waveform( num_samples: int, ) -> None: writer = AnalogMultiChannelWriter(ao_benchmark_task.out_stream, auto_start=False) - waveform = [AnalogWaveform(num_samples) for _ in range(num_channels)] + waveforms = [AnalogWaveform(num_samples) for _ in range(num_channels)] - benchmark(writer.write_waveforms, waveform) + benchmark(writer.write_waveforms, waveforms) diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py index b12ff4b2..78062bc2 100644 --- a/tests/benchmark/test_digital_stream_readers.py +++ 
b/tests/benchmark/test_digital_stream_readers.py @@ -14,7 +14,7 @@ ) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1]) def test___digital_single_channel_reader___read_one_sample_one_line( @@ -28,7 +28,7 @@ def test___digital_single_channel_reader___read_one_sample_one_line( benchmark(reader.read_one_sample_one_line) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -45,7 +45,7 @@ def test___digital_single_channel_reader___read_one_sample_multi_line( benchmark(reader.read_one_sample_multi_line, sample) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_single_channel_reader___read_many_sample_port_uint32( benchmark: BenchmarkFixture, @@ -58,7 +58,7 @@ def test___digital_single_channel_reader___read_many_sample_port_uint32( benchmark(reader.read_many_sample_port_uint32, data, num_samples) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") @@ -74,7 +74,7 @@ def test___digital_single_channel_reader___read_waveform_single_line( benchmark(reader.read_waveform, waveform, num_samples) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -92,7 +92,7 @@ def test___digital_single_channel_reader___read_waveform_multi_line( benchmark(reader.read_waveform, waveform, num_samples) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") def test___digital_single_channel_reader___read_waveform_port_uint32( @@ -106,7 +106,7 @@ def test___digital_single_channel_reader___read_waveform_port_uint32( benchmark(reader.read_waveform, waveform, num_samples) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1]) def test___digital_multi_channel_reader___read_one_sample_one_line( @@ -121,7 +121,7 @@ def test___digital_multi_channel_reader___read_one_sample_one_line( benchmark(reader.read_one_sample_one_line, sample) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -138,7 +138,7 @@ def test___digital_multi_channel_reader___read_one_sample_multi_line( benchmark(reader.read_one_sample_multi_line, sample) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_multi_channel_reader___read_many_sample_port_uint32( 
benchmark: BenchmarkFixture, @@ -151,7 +151,7 @@ def test___digital_multi_channel_reader___read_many_sample_port_uint32( benchmark(reader.read_many_sample_port_uint32, data, num_samples) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") @@ -167,7 +167,7 @@ def test___digital_multi_channel_reader___read_waveform_single_line( benchmark(reader.read_waveforms, waveforms, 1) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -185,7 +185,7 @@ def test___digital_multi_channel_reader___read_waveform_multi_line( benchmark(reader.read_waveforms, waveforms, num_samples) -@pytest.mark.benchmark(group="digital_stream_readers") +@pytest.mark.benchmark(group="digital_readers") @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") def test___digital_multi_channel_reader___read_waveform_port_uint32( diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py index ab25b79c..fe643dca 100644 --- a/tests/benchmark/test_digital_stream_writers.py +++ b/tests/benchmark/test_digital_stream_writers.py @@ -14,7 +14,7 @@ ) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1]) def test___digital_single_channel_writer___write_one_sample_one_line( @@ -28,7 +28,7 @@ def test___digital_single_channel_writer___write_one_sample_one_line( benchmark(writer.write_one_sample_one_line, True) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -45,7 +45,7 @@ def test___digital_single_channel_writer___write_one_sample_multi_line( benchmark(writer.write_one_sample_multi_line, sample) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_single_channel_writer___write_many_sample_port_uint32( benchmark: BenchmarkFixture, @@ -58,7 +58,7 @@ def test___digital_single_channel_writer___write_many_sample_port_uint32( benchmark(writer.write_many_sample_port_uint32, data) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") @@ -74,7 +74,7 @@ def test___digital_single_channel_writer___write_waveform_single_line( benchmark(writer.write_waveform, waveform) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -92,7 +92,7 @@ def test___digital_single_channel_writer___write_waveform_multi_line( benchmark(writer.write_waveform, 
waveform) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") def test___digital_single_channel_writer___write_waveform_port_uint32( @@ -106,7 +106,7 @@ def test___digital_single_channel_writer___write_waveform_port_uint32( benchmark(writer.write_waveform, waveform) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1]) def test___digital_multi_channel_writer___write_one_sample_one_line( @@ -121,7 +121,7 @@ def test___digital_multi_channel_writer___write_one_sample_one_line( benchmark(writer.write_one_sample_one_line, sample) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -138,7 +138,7 @@ def test___digital_multi_channel_writer___write_one_sample_multi_line( benchmark(writer.write_one_sample_multi_line, sample) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_samples", [1, 100]) def test___digital_multi_channel_writer___write_many_sample_port_uint32( benchmark: BenchmarkFixture, @@ -151,7 +151,7 @@ def test___digital_multi_channel_writer___write_many_sample_port_uint32( benchmark(writer.write_many_sample_port_uint32, data, num_samples) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") @@ -167,7 +167,7 @@ def test___digital_multi_channel_writer___write_waveform_single_line( benchmark(writer.write_waveforms, waveforms, num_samples) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @@ -185,7 +185,7 @@ def test___digital_multi_channel_writer___write_waveform_multi_line( benchmark(writer.write_waveforms, waveforms, num_samples) -@pytest.mark.benchmark(group="digital_stream_writers") +@pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") def test___digital_multi_channel_writer___write_waveform_port_uint32( diff --git a/tests/benchmark/test_task.py b/tests/benchmark/test_task.py new file mode 100644 index 00000000..dbbd21b3 --- /dev/null +++ b/tests/benchmark/test_task.py @@ -0,0 +1,194 @@ +from __future__ import annotations + +from typing import Any + +import numpy +import pytest +from nitypes.waveform import AnalogWaveform, DigitalWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +from nidaqmx import Task +from nidaqmx.constants import WaveformAttributeMode + + +def _create_analog_data(num_channels, num_samples): + if num_channels == 1: + if num_samples == 1: + return 1.0 + return numpy.full((num_samples), 1.0, numpy.float64) + else: + return numpy.full((num_channels, num_samples), 1.0, numpy.float64) + + +def 
_create_digital_data(num_channels, num_samples, num_lines): + if num_lines == 1: + dtype: Any = numpy.bool_ + value: Any = True + else: + dtype = numpy.uint32 + value = 1 + + if num_channels == 1: + if num_samples == 1: + return value + return numpy.full((num_samples), value, dtype) + else: + return numpy.full((num_channels, num_samples), value, dtype) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___task___read_analog( + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int +) -> None: + benchmark(ai_benchmark_task.read, num_samples) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) +@pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") +def test___task___read_analog_waveform( + benchmark: BenchmarkFixture, + ai_benchmark_task: Task, + num_channels: int, + num_samples: int, + waveform_attribute_mode: WaveformAttributeMode, +) -> None: + ai_benchmark_task.in_stream.waveform_attribute_mode = waveform_attribute_mode + benchmark(ai_benchmark_task.read_waveform, num_samples) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___task___write_analog( + benchmark: BenchmarkFixture, + ao_benchmark_task: Task, + num_channels: int, + num_samples: int, +) -> None: + data = _create_analog_data(num_channels, num_samples) + ao_benchmark_task.write(data, auto_start=False) + benchmark(ao_benchmark_task.write, data, auto_start=False) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") +def test___task___write_analog_waveform( + benchmark: BenchmarkFixture, + ao_benchmark_task: Task, + num_channels: int, + num_samples: int, +) -> None: + waveforms = [AnalogWaveform(num_samples) for _ in range(num_channels)] + + benchmark(ao_benchmark_task.write_waveform, waveforms, auto_start=False) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___task___read_digital_lines( + benchmark: BenchmarkFixture, + di_multi_line_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + benchmark(di_multi_line_benchmark_task.read, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___task___read_digital_port( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: Task, + num_samples: int, +) -> None: + benchmark(di_port32_benchmark_task.read, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___task___read_digital_lines_waveform( + benchmark: BenchmarkFixture, + di_multi_line_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: 
int, +) -> None: + benchmark(di_multi_line_benchmark_task.read_waveform, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___task___read_digital_port_waveform( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: Task, + num_samples: int, +) -> None: + benchmark(di_port32_benchmark_task.read_waveform, num_samples) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___task___write_digital_lines( + benchmark: BenchmarkFixture, + do_multi_line_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + data = _create_digital_data(num_channels, num_samples, num_lines) + benchmark(do_multi_line_benchmark_task.write, data, auto_start=False) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___task___write_digital_port( + benchmark: BenchmarkFixture, + do_port32_benchmark_task: Task, + num_samples: int, +) -> None: + data = _create_digital_data(1, num_samples, 32) + benchmark(do_port32_benchmark_task.write, data, auto_start=False) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___task___write_digital_lines_waveform( + benchmark: BenchmarkFixture, + do_multi_line_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] + benchmark(do_multi_line_benchmark_task.write_waveform, waveforms, auto_start=False) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___task___write_digital_port_waveform( + benchmark: BenchmarkFixture, + do_port32_benchmark_task: Task, + num_samples: int, +) -> None: + waveforms = [DigitalWaveform(num_samples, signal_count=32)] + benchmark(do_port32_benchmark_task.write_waveform, waveforms, auto_start=False) From 4eb85459dae673da3a1dd3dcf269e5d0aada196a Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Thu, 25 Sep 2025 13:58:47 -0500 Subject: [PATCH 11/15] cleanup --- tests/benchmark/conftest.py | 44 +-------- tests/benchmark/test_analog_stream_readers.py | 12 +-- tests/benchmark/test_analog_stream_writers.py | 6 -- .../benchmark/test_digital_stream_readers.py | 90 +++++------------- .../benchmark/test_digital_stream_writers.py | 92 +++++-------------- tests/benchmark/test_task.py | 16 ++-- 6 files changed, 58 insertions(+), 202 deletions(-) diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py index abb736be..6b4f7035 100644 --- a/tests/benchmark/conftest.py +++ b/tests/benchmark/conftest.py @@ -83,27 +83,7 @@ def ao_benchmark_task( @pytest.fixture -def di_single_line_benchmark_task( - task: Task, - sim_6363_device: Device, - request: pytest.FixtureRequest, -) -> Task: - """Configure a single-sample DI task for benchmarking.""" - num_samples = request.node.callspec.params.get("num_samples", 1) - num_channels = 
request.node.callspec.params.get("num_channels", 1) - - line_names = [chan.name for chan in sim_6363_device.di_lines[:num_channels]] - physical_channel_string = ",".join(line_names) - task.di_channels.add_di_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) - - _configure_timing(task, num_channels, num_samples) - _start_input_task(task) - - return task - - -@pytest.fixture -def di_multi_line_benchmark_task( +def di_lines_benchmark_task( task: Task, sim_6363_device: Device, request: pytest.FixtureRequest, @@ -150,27 +130,7 @@ def di_port32_benchmark_task( @pytest.fixture -def do_single_line_benchmark_task( - task: Task, - sim_6363_device: Device, - request: pytest.FixtureRequest, -) -> Task: - """Configure a single-sample DO task for benchmarking.""" - num_channels = request.node.callspec.params.get("num_channels", 1) - num_samples = request.node.callspec.params.get("num_samples", 1) - - line_names = [chan.name for chan in sim_6363_device.do_lines[:num_channels]] - physical_channel_string = ",".join(line_names) - task.do_channels.add_do_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE) - - _configure_timing(task, num_channels, num_samples) - _commit_output_task(task, num_channels, num_samples) - - return task - - -@pytest.fixture -def do_multi_line_benchmark_task( +def do_lines_benchmark_task( task: Task, sim_6363_device: Device, request: pytest.FixtureRequest, diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py index 223b3175..5f61f4ba 100644 --- a/tests/benchmark/test_analog_stream_readers.py +++ b/tests/benchmark/test_analog_stream_readers.py @@ -16,10 +16,8 @@ @pytest.mark.benchmark(group="analog_readers") -@pytest.mark.parametrize("num_channels", [1]) -@pytest.mark.parametrize("num_samples", [1]) def test___analog_single_channel_reader___read_one_sample( - benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int + benchmark: BenchmarkFixture, ai_benchmark_task: Task ) -> None: reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) @@ -27,10 +25,9 @@ def test___analog_single_channel_reader___read_one_sample( @pytest.mark.benchmark(group="analog_readers") -@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 1000]) def test___analog_single_channel_reader___read_many_sample( - benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_samples: int ) -> None: reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) data = numpy.full(num_samples, math.inf, dtype=numpy.float64) @@ -39,14 +36,12 @@ def test___analog_single_channel_reader___read_many_sample( @pytest.mark.benchmark(group="analog_readers") -@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 1000]) @pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) @pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC") def test___analog_single_channel_reader___read_waveform( benchmark: BenchmarkFixture, ai_benchmark_task: Task, - num_channels: int, num_samples: int, waveform_attribute_mode: WaveformAttributeMode, ) -> None: @@ -59,9 +54,8 @@ def test___analog_single_channel_reader___read_waveform( @pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1, 2, 8]) -@pytest.mark.parametrize("num_samples", [1]) def 
test___analog_multi_channel_reader___read_one_sample(
-    benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int
+    benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int
 ) -> None:
     reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream)
     data = numpy.full(num_channels, math.inf, dtype=numpy.float64)
diff --git a/tests/benchmark/test_analog_stream_writers.py b/tests/benchmark/test_analog_stream_writers.py
index 9968bb51..feb3b994 100644
--- a/tests/benchmark/test_analog_stream_writers.py
+++ b/tests/benchmark/test_analog_stream_writers.py
@@ -13,11 +13,9 @@
 
 
 @pytest.mark.benchmark(group="analog_writers")
-@pytest.mark.parametrize("num_channels", [1])
 def test___analog_single_channel_writer___write_one_sample(
     benchmark: BenchmarkFixture,
     ao_benchmark_task: nidaqmx.Task,
-    num_channels: int,
 ) -> None:
     writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False)
 
@@ -25,12 +23,10 @@ def test___analog_single_channel_writer___write_one_sample(
 
 
 @pytest.mark.benchmark(group="analog_writers")
-@pytest.mark.parametrize("num_channels", [1])
 @pytest.mark.parametrize("num_samples", [1, 1000])
 def test___analog_single_channel_writer___write_many_sample(
     benchmark: BenchmarkFixture,
     ao_benchmark_task: nidaqmx.Task,
-    num_channels: int,
     num_samples: int,
 ) -> None:
     writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False)
@@ -40,13 +36,11 @@ def test___analog_single_channel_writer___write_many_sample(
 
 
 @pytest.mark.benchmark(group="analog_writers")
-@pytest.mark.parametrize("num_channels", [1])
 @pytest.mark.parametrize("num_samples", [1, 1000])
 @pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC")
 def test___analog_single_channel_writer___write_waveform(
     benchmark: BenchmarkFixture,
     ao_benchmark_task: nidaqmx.Task,
-    num_channels: int,
     num_samples: int,
 ) -> None:
     writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False)
diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py
index 78062bc2..c6262393 100644
--- a/tests/benchmark/test_digital_stream_readers.py
+++ b/tests/benchmark/test_digital_stream_readers.py
@@ -15,34 +15,26 @@
 
 
 @pytest.mark.benchmark(group="digital_readers")
-@pytest.mark.parametrize("num_channels", [1])
-@pytest.mark.parametrize("num_samples", [1])
 def test___digital_single_channel_reader___read_one_sample_one_line(
     benchmark: BenchmarkFixture,
-    di_single_line_benchmark_task: nidaqmx.Task,
-    num_channels: int,
-    num_samples: int,
+    di_lines_benchmark_task: nidaqmx.Task,
 ) -> None:
-    reader = DigitalSingleChannelReader(di_single_line_benchmark_task.in_stream)
+    reader = DigitalSingleChannelReader(di_lines_benchmark_task.in_stream)
 
     benchmark(reader.read_one_sample_one_line)
 
 
 @pytest.mark.benchmark(group="digital_readers")
-@pytest.mark.parametrize("num_channels", [1])
-@pytest.mark.parametrize("num_samples", [1])
 @pytest.mark.parametrize("num_lines", [1, 2, 8])
 def test___digital_single_channel_reader___read_one_sample_multi_line(
     benchmark: BenchmarkFixture,
-    di_multi_line_benchmark_task: nidaqmx.Task,
-    num_channels: int,
-    num_samples: int,
+    di_lines_benchmark_task: nidaqmx.Task,
     num_lines: int,
 ) -> None:
-    reader = DigitalSingleChannelReader(di_multi_line_benchmark_task.in_stream)
-    sample = numpy.full(num_lines, False, dtype=numpy.bool_)
+    reader = DigitalSingleChannelReader(di_lines_benchmark_task.in_stream)
+    data = numpy.full(num_lines, False, dtype=numpy.bool_)
 
-    benchmark(reader.read_one_sample_multi_line, sample)
+    benchmark(reader.read_one_sample_multi_line, data)
 
 
 @pytest.mark.benchmark(group="digital_readers")
@@ -59,34 +51,16 @@ def test___digital_single_channel_reader___read_many_sample_port_uint32(
     benchmark(reader.read_many_sample_port_uint32, data, num_samples)
 
 
 @pytest.mark.benchmark(group="digital_readers")
-@pytest.mark.parametrize("num_channels", [1])
-@pytest.mark.parametrize("num_samples", [1, 100])
-@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC")
-def test___digital_single_channel_reader___read_waveform_single_line(
-    benchmark: BenchmarkFixture,
-    di_single_line_benchmark_task: nidaqmx.Task,
-    num_channels: int,
-    num_samples: int,
-) -> None:
-    reader = DigitalSingleChannelReader(di_single_line_benchmark_task.in_stream)
-    waveform = DigitalWaveform(num_samples, signal_count=1)
-
-    benchmark(reader.read_waveform, waveform, num_samples)
-
-
-@pytest.mark.benchmark(group="digital_readers")
-@pytest.mark.parametrize("num_channels", [1])
 @pytest.mark.parametrize("num_samples", [1, 100])
 @pytest.mark.parametrize("num_lines", [1, 2, 8])
 @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC")
-def test___digital_single_channel_reader___read_waveform_multi_line(
+def test___digital_single_channel_reader___read_waveform_lines(
     benchmark: BenchmarkFixture,
-    di_multi_line_benchmark_task: nidaqmx.Task,
-    num_channels: int,
+    di_lines_benchmark_task: nidaqmx.Task,
     num_samples: int,
     num_lines: int,
 ) -> None:
-    reader = DigitalSingleChannelReader(di_multi_line_benchmark_task.in_stream)
+    reader = DigitalSingleChannelReader(di_lines_benchmark_task.in_stream)
     waveform = DigitalWaveform(num_samples, num_lines)
 
     benchmark(reader.read_waveform, waveform, num_samples)
@@ -95,7 +69,7 @@ def test___digital_single_channel_reader___read_waveform_multi_line(
 @pytest.mark.benchmark(group="digital_readers")
 @pytest.mark.parametrize("num_samples", [1, 100])
 @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC")
-def test___digital_single_channel_reader___read_waveform_port_uint32(
+def test___digital_single_channel_reader___read_waveform_port(
     benchmark: BenchmarkFixture,
     di_port32_benchmark_task: nidaqmx.Task,
     num_samples: int,
@@ -108,34 +82,30 @@ def test___digital_single_channel_reader___read_waveform_port_uint32(
 
 @pytest.mark.benchmark(group="digital_readers")
 @pytest.mark.parametrize("num_channels", [1, 2])
-@pytest.mark.parametrize("num_samples", [1])
 def test___digital_multi_channel_reader___read_one_sample_one_line(
     benchmark: BenchmarkFixture,
-    di_single_line_benchmark_task: nidaqmx.Task,
+    di_lines_benchmark_task: nidaqmx.Task,
     num_channels: int,
-    num_samples: int,
 ) -> None:
-    reader = DigitalMultiChannelReader(di_single_line_benchmark_task.in_stream)
-    sample = numpy.full(num_channels, False, dtype=numpy.bool_)
+    reader = DigitalMultiChannelReader(di_lines_benchmark_task.in_stream)
+    data = numpy.full(num_channels, False, dtype=numpy.bool_)
 
-    benchmark(reader.read_one_sample_one_line, sample)
+    benchmark(reader.read_one_sample_one_line, data)
 
 
 @pytest.mark.benchmark(group="digital_readers")
 @pytest.mark.parametrize("num_channels", [1, 2])
-@pytest.mark.parametrize("num_samples", [1])
 @pytest.mark.parametrize("num_lines", [1, 2, 8])
 def test___digital_multi_channel_reader___read_one_sample_multi_line(
     benchmark: BenchmarkFixture,
-    di_multi_line_benchmark_task: nidaqmx.Task,
+    di_lines_benchmark_task: nidaqmx.Task,
     num_channels: int,
-    num_samples: int,
     num_lines: int,
 ) -> None:
-    reader = DigitalMultiChannelReader(di_multi_line_benchmark_task.in_stream)
-    sample = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_)
+    reader = DigitalMultiChannelReader(di_lines_benchmark_task.in_stream)
+    data = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_)
 
-    benchmark(reader.read_one_sample_multi_line, sample)
+    benchmark(reader.read_one_sample_multi_line, data)
 
 
 @pytest.mark.benchmark(group="digital_readers")
@@ -151,35 +121,19 @@ def test___digital_multi_channel_reader___read_many_sample_port_uint32(
     benchmark(reader.read_many_sample_port_uint32, data, num_samples)
 
 
-@pytest.mark.benchmark(group="digital_readers")
-@pytest.mark.parametrize("num_channels", [1, 2])
-@pytest.mark.parametrize("num_samples", [1, 100])
-@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC")
-def test___digital_multi_channel_reader___read_waveform_single_line(
-    benchmark: BenchmarkFixture,
-    di_single_line_benchmark_task: nidaqmx.Task,
-    num_channels: int,
-    num_samples: int,
-) -> None:
-    reader = DigitalMultiChannelReader(di_single_line_benchmark_task.in_stream)
-    waveforms = [DigitalWaveform(num_samples, signal_count=1) for _ in range(num_channels)]
-
-    benchmark(reader.read_waveforms, waveforms, 1)
-
-
 @pytest.mark.benchmark(group="digital_readers")
 @pytest.mark.parametrize("num_channels", [1, 2])
 @pytest.mark.parametrize("num_samples", [1, 100])
 @pytest.mark.parametrize("num_lines", [1, 2, 8])
 @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC")
-def test___digital_multi_channel_reader___read_waveform_multi_line(
+def test___digital_multi_channel_reader___read_waveform_lines(
     benchmark: BenchmarkFixture,
-    di_multi_line_benchmark_task: nidaqmx.Task,
+    di_lines_benchmark_task: nidaqmx.Task,
     num_channels: int,
     num_samples: int,
     num_lines: int,
 ) -> None:
-    reader = DigitalMultiChannelReader(di_multi_line_benchmark_task.in_stream)
+    reader = DigitalMultiChannelReader(di_lines_benchmark_task.in_stream)
     waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)]
 
     benchmark(reader.read_waveforms, waveforms, num_samples)
@@ -188,7 +142,7 @@ def test___digital_multi_channel_reader___read_waveform_multi_line(
 @pytest.mark.benchmark(group="digital_readers")
 @pytest.mark.parametrize("num_samples", [1, 100])
 @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC")
-def test___digital_multi_channel_reader___read_waveform_port_uint32(
+def test___digital_multi_channel_reader___read_waveform_port(
     benchmark: BenchmarkFixture,
     di_port32_benchmark_task: nidaqmx.Task,
     num_samples: int,
diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py
index fe643dca..14081326 100644
--- a/tests/benchmark/test_digital_stream_writers.py
+++ b/tests/benchmark/test_digital_stream_writers.py
@@ -15,34 +15,26 @@
 
 
 @pytest.mark.benchmark(group="digital_writers")
-@pytest.mark.parametrize("num_channels", [1])
-@pytest.mark.parametrize("num_samples", [1])
 def test___digital_single_channel_writer___write_one_sample_one_line(
     benchmark: BenchmarkFixture,
-    do_single_line_benchmark_task: nidaqmx.Task,
-    num_channels: int,
-    num_samples: int,
+    do_lines_benchmark_task: nidaqmx.Task,
 ) -> None:
-    writer = DigitalSingleChannelWriter(do_single_line_benchmark_task.out_stream, auto_start=False)
+    writer = DigitalSingleChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False)
 
     benchmark(writer.write_one_sample_one_line, True)
 
 
 @pytest.mark.benchmark(group="digital_writers")
-@pytest.mark.parametrize("num_channels", [1]) -@pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___digital_single_channel_writer___write_one_sample_multi_line( benchmark: BenchmarkFixture, - do_multi_line_benchmark_task: nidaqmx.Task, - num_channels: int, - num_samples: int, + do_lines_benchmark_task: nidaqmx.Task, num_lines: int, ) -> None: - writer = DigitalSingleChannelWriter(do_multi_line_benchmark_task.out_stream, auto_start=False) - sample = numpy.full(num_lines, True, dtype=numpy.bool_) + writer = DigitalSingleChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False) + data = numpy.full(num_lines, True, dtype=numpy.bool_) - benchmark(writer.write_one_sample_multi_line, sample) + benchmark(writer.write_one_sample_multi_line, data) @pytest.mark.benchmark(group="digital_writers") @@ -59,35 +51,17 @@ def test___digital_single_channel_writer___write_many_sample_port_uint32( @pytest.mark.benchmark(group="digital_writers") -@pytest.mark.parametrize("num_channels", [1]) -@pytest.mark.parametrize("num_samples", [1, 100]) -@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_single_line( - benchmark: BenchmarkFixture, - do_single_line_benchmark_task: nidaqmx.Task, - num_channels: int, - num_samples: int, -) -> None: - writer = DigitalSingleChannelWriter(do_single_line_benchmark_task.out_stream, auto_start=False) - waveform = DigitalWaveform(num_samples) - - benchmark(writer.write_waveform, waveform) - - -@pytest.mark.benchmark(group="digital_writers") -@pytest.mark.parametrize("num_channels", [1]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_multi_line( +def test___digital_single_channel_writer___write_waveform_lines( benchmark: BenchmarkFixture, - do_multi_line_benchmark_task: nidaqmx.Task, - num_channels: int, + do_lines_benchmark_task: nidaqmx.Task, num_samples: int, num_lines: int, ) -> None: - writer = DigitalSingleChannelWriter(do_multi_line_benchmark_task.out_stream, auto_start=False) - waveform = DigitalWaveform(num_channels, num_lines) + writer = DigitalSingleChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False) + waveform = DigitalWaveform(num_samples, num_lines) benchmark(writer.write_waveform, waveform) @@ -95,7 +69,7 @@ def test___digital_single_channel_writer___write_waveform_multi_line( @pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_single_channel_writer___write_waveform_port_uint32( +def test___digital_single_channel_writer___write_waveform_port( benchmark: BenchmarkFixture, do_port32_benchmark_task: nidaqmx.Task, num_samples: int, @@ -108,34 +82,30 @@ def test___digital_single_channel_writer___write_waveform_port_uint32( @pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1, 2]) -@pytest.mark.parametrize("num_samples", [1]) def test___digital_multi_channel_writer___write_one_sample_one_line( benchmark: BenchmarkFixture, - do_single_line_benchmark_task: nidaqmx.Task, + do_lines_benchmark_task: nidaqmx.Task, num_channels: int, - num_samples: int, ) -> None: - writer = DigitalMultiChannelWriter(do_single_line_benchmark_task.out_stream, 
auto_start=False) - sample = numpy.full(num_channels, False, dtype=numpy.bool_) + writer = DigitalMultiChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False) + data = numpy.full(num_channels, False, dtype=numpy.bool_) - benchmark(writer.write_one_sample_one_line, sample) + benchmark(writer.write_one_sample_one_line, data) @pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1, 2]) -@pytest.mark.parametrize("num_samples", [1]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___digital_multi_channel_writer___write_one_sample_multi_line( benchmark: BenchmarkFixture, - do_multi_line_benchmark_task: nidaqmx.Task, + do_lines_benchmark_task: nidaqmx.Task, num_channels: int, - num_samples: int, num_lines: int, ) -> None: - writer = DigitalMultiChannelWriter(do_multi_line_benchmark_task.out_stream, auto_start=False) - sample = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_) + writer = DigitalMultiChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False) + data = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_) - benchmark(writer.write_one_sample_multi_line, sample) + benchmark(writer.write_one_sample_multi_line, data) @pytest.mark.benchmark(group="digital_writers") @@ -151,35 +121,19 @@ def test___digital_multi_channel_writer___write_many_sample_port_uint32( benchmark(writer.write_many_sample_port_uint32, data, num_samples) -@pytest.mark.benchmark(group="digital_writers") -@pytest.mark.parametrize("num_channels", [1, 2]) -@pytest.mark.parametrize("num_samples", [1, 100]) -@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_writer___write_waveform_single_line( - benchmark: BenchmarkFixture, - do_single_line_benchmark_task: nidaqmx.Task, - num_channels: int, - num_samples: int, -) -> None: - writer = DigitalMultiChannelWriter(do_single_line_benchmark_task.in_stream, auto_start=False) - waveforms = [DigitalWaveform(num_samples) for _ in range(num_channels)] - - benchmark(writer.write_waveforms, waveforms, num_samples) - - @pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_channels", [1, 2]) @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.parametrize("num_lines", [1, 2, 8]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_writer___write_waveform_multi_line( +def test___digital_multi_channel_writer___write_waveform_lines( benchmark: BenchmarkFixture, - do_multi_line_benchmark_task: nidaqmx.Task, + do_lines_benchmark_task: nidaqmx.Task, num_channels: int, num_samples: int, num_lines: int, ) -> None: - writer = DigitalMultiChannelWriter(do_multi_line_benchmark_task.in_stream, auto_start=False) + writer = DigitalMultiChannelWriter(do_lines_benchmark_task.in_stream, auto_start=False) waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] benchmark(writer.write_waveforms, waveforms, num_samples) @@ -188,7 +142,7 @@ def test___digital_multi_channel_writer___write_waveform_multi_line( @pytest.mark.benchmark(group="digital_writers") @pytest.mark.parametrize("num_samples", [1, 100]) @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") -def test___digital_multi_channel_writer___write_waveform_port_uint32( +def test___digital_multi_channel_writer___write_waveform_port( benchmark: BenchmarkFixture, do_port32_benchmark_task: nidaqmx.Task, num_samples: int, diff --git a/tests/benchmark/test_task.py 
b/tests/benchmark/test_task.py index dbbd21b3..66bd7db8 100644 --- a/tests/benchmark/test_task.py +++ b/tests/benchmark/test_task.py @@ -96,12 +96,12 @@ def test___task___write_analog_waveform( @pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___task___read_digital_lines( benchmark: BenchmarkFixture, - di_multi_line_benchmark_task: Task, + di_lines_benchmark_task: Task, num_channels: int, num_samples: int, num_lines: int, ) -> None: - benchmark(di_multi_line_benchmark_task.read, num_samples) + benchmark(di_lines_benchmark_task.read, num_samples) @pytest.mark.benchmark(group="digital_readers") @@ -121,12 +121,12 @@ def test___task___read_digital_port( @pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") def test___task___read_digital_lines_waveform( benchmark: BenchmarkFixture, - di_multi_line_benchmark_task: Task, + di_lines_benchmark_task: Task, num_channels: int, num_samples: int, num_lines: int, ) -> None: - benchmark(di_multi_line_benchmark_task.read_waveform, num_samples) + benchmark(di_lines_benchmark_task.read_waveform, num_samples) @pytest.mark.benchmark(group="digital_readers") @@ -146,13 +146,13 @@ def test___task___read_digital_port_waveform( @pytest.mark.parametrize("num_lines", [1, 2, 8]) def test___task___write_digital_lines( benchmark: BenchmarkFixture, - do_multi_line_benchmark_task: Task, + do_lines_benchmark_task: Task, num_channels: int, num_samples: int, num_lines: int, ) -> None: data = _create_digital_data(num_channels, num_samples, num_lines) - benchmark(do_multi_line_benchmark_task.write, data, auto_start=False) + benchmark(do_lines_benchmark_task.write, data, auto_start=False) @pytest.mark.benchmark(group="digital_writers") @@ -173,13 +173,13 @@ def test___task___write_digital_port( @pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") def test___task___write_digital_lines_waveform( benchmark: BenchmarkFixture, - do_multi_line_benchmark_task: Task, + do_lines_benchmark_task: Task, num_channels: int, num_samples: int, num_lines: int, ) -> None: waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] - benchmark(do_multi_line_benchmark_task.write_waveform, waveforms, auto_start=False) + benchmark(do_lines_benchmark_task.write_waveform, waveforms, auto_start=False) @pytest.mark.benchmark(group="digital_writers") From 3b6495a8dd9385c38f0ceade29a5eafc6fbd78af Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Thu, 25 Sep 2025 14:44:49 -0500 Subject: [PATCH 12/15] any_6363_device --- tests/benchmark/conftest.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py index 6b4f7035..32fb8064 100644 --- a/tests/benchmark/conftest.py +++ b/tests/benchmark/conftest.py @@ -12,7 +12,8 @@ ReadRelativeTo, TaskMode, ) -from nidaqmx.system import Device +from nidaqmx.system import Device, System +from tests.conftest import DeviceType, _device_by_product_type def _configure_timing(task, num_channels, num_samples): @@ -36,10 +37,16 @@ def _commit_output_task(task, num_channels, num_samples): task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE +@pytest.fixture +def any_6363_device(system: System) -> Device: + """Gets a 6363 device, either real or simulated.""" + return _device_by_product_type("PCIe-6363", DeviceType.ANY, system) + + @pytest.fixture def ai_benchmark_task( task: Task, - sim_6363_device: Device, + any_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure an 
AI task for benchmarking.""" @@ -48,7 +55,7 @@ def ai_benchmark_task( for chan in range(num_channels): task.ai_channels.add_ai_voltage_chan( - sim_6363_device.ai_physical_chans[chan].name, + any_6363_device.ai_physical_chans[chan].name, min_val=-5.0, max_val=5.0, ) @@ -85,7 +92,7 @@ def ao_benchmark_task( @pytest.fixture def di_lines_benchmark_task( task: Task, - sim_6363_device: Device, + any_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DI task for benchmarking.""" @@ -96,7 +103,7 @@ def di_lines_benchmark_task( for chan in range(num_channels): line_names = [ chan.name - for chan in sim_6363_device.di_lines[chan * num_lines : (chan + 1) * num_lines] + for chan in any_6363_device.di_lines[chan * num_lines : (chan + 1) * num_lines] ] physical_channel_string = ",".join(line_names) task.di_channels.add_di_chan( @@ -112,7 +119,7 @@ def di_lines_benchmark_task( @pytest.fixture def di_port32_benchmark_task( task: Task, - sim_6363_device: Device, + any_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DI task for benchmarking.""" @@ -120,7 +127,7 @@ def di_port32_benchmark_task( # port 0 is the only port that supports buffered operations task.di_channels.add_di_chan( - sim_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + any_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) _configure_timing(task, 1, num_samples) @@ -132,7 +139,7 @@ def di_port32_benchmark_task( @pytest.fixture def do_lines_benchmark_task( task: Task, - sim_6363_device: Device, + any_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DO task for benchmarking.""" @@ -143,7 +150,7 @@ def do_lines_benchmark_task( for chan in range(num_channels): line_names = [ chan.name - for chan in sim_6363_device.do_lines[chan * num_lines : (chan + 1) * num_lines] + for chan in any_6363_device.do_lines[chan * num_lines : (chan + 1) * num_lines] ] physical_channel_string = ",".join(line_names) task.do_channels.add_do_chan( @@ -159,7 +166,7 @@ def do_lines_benchmark_task( @pytest.fixture def do_port32_benchmark_task( task: Task, - sim_6363_device: Device, + any_6363_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DO task for benchmarking.""" @@ -167,7 +174,7 @@ def do_port32_benchmark_task( # port 0 is the only port that supports buffered operations task.do_channels.add_do_chan( - sim_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + any_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) _configure_timing(task, 1, num_samples) From 30d90f381ec59140058e855dd50ddf6f99daecd8 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Thu, 25 Sep 2025 15:26:28 -0500 Subject: [PATCH 13/15] add a tox for benchmark --- CONTRIBUTING.md | 5 +++++ tox.ini | 11 ++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5f9f6a07..35f1785a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,6 +88,11 @@ Benchmark tests are not run by default when you run pytest. To run the benchmark $ poetry run pytest -v tests/benchmark ``` +Or you can use tox (which skips the gRPC variants): +``` +poetry run tox -e benchmark +``` + # Building Documentation To build the documentation install the optional docs packages and run sphinx. 
For example: diff --git a/tox.ini b/tox.ini index d055ebd2..ee87c26c 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ [tox] isolated_build = true -envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, report, docs +envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, report, docs, benchmark [testenv] skip_install = true @@ -42,3 +42,12 @@ commands = poetry install -v --only main,docs # Use -W to treat warnings as errors. poetry run sphinx-build -b html -W docs docs/_build + +[testenv:benchmark] +base_python = python3.11 +skip_install = true +allowlist_externals = poetry +commands = + poetry run python --version + poetry install -v --only main,test + poetry run pytest tests/benchmark/ --quiet -k "library" --benchmark-only {posargs} From 001235e733e324f8e3709e9dd7ba463c733cbe61 Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Mon, 29 Sep 2025 10:30:50 -0500 Subject: [PATCH 14/15] address feedback from brad --- CONTRIBUTING.md | 8 ++- tests/benchmark/conftest.py | 49 +++++++++++++------ tests/benchmark/test_analog_stream_readers.py | 12 ++++- tests/benchmark/test_task.py | 8 ++- tox.ini | 16 ++---- 5 files changed, 61 insertions(+), 32 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 35f1785a..efe0f17b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -85,14 +85,18 @@ Benchmark tests are not run by default when you run pytest. To run the benchmark # see https://pytest-benchmark.readthedocs.io/en/latest/comparing.html # Run 1: --benchmark-save=some-name # Run N: --benchmark-compare=0001 -$ poetry run pytest -v tests/benchmark +$ poetry run pytest -v tests/benchmark --device Dev1 ``` Or you can use tox (which skips the gRPC variants): ``` -poetry run tox -e benchmark +poetry run -- tox -e py39-base-benchmark -- --device Dev1 ``` +The benchmarks are designed to run on a 6363 device. If you don't spcecify a specific +device using `--device`, then it will automatically use any real or simulated 6363 +that can be found. + # Building Documentation To build the documentation install the optional docs packages and run sphinx. 
For example: diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py index 32fb8064..4223c3cd 100644 --- a/tests/benchmark/conftest.py +++ b/tests/benchmark/conftest.py @@ -11,12 +11,22 @@ LineGrouping, ReadRelativeTo, TaskMode, + WaveformAttributeMode, ) from nidaqmx.system import Device, System from tests.conftest import DeviceType, _device_by_product_type -def _configure_timing(task, num_channels, num_samples): +_WAVEFORM_BENCHMARK_MODES = [ + WaveformAttributeMode.NONE, + WaveformAttributeMode.TIMING, + WaveformAttributeMode.TIMING | WaveformAttributeMode.EXTENDED_PROPERTIES, +] + +_WAVEFORM_BENCHMARK_MODE_IDS = ["NONE", "TIMING", "ALL"] + + +def _configure_timing(task: Task, num_channels: int, num_samples: int) -> None: task.timing.cfg_samp_clk_timing( rate=25000.0, active_edge=Edge.RISING, @@ -25,28 +35,37 @@ def _configure_timing(task, num_channels, num_samples): ) -def _start_input_task(task): +def _start_input_task(task: Task) -> None: task.start() task.wait_until_done(timeout=10.0) task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE -def _commit_output_task(task, num_channels, num_samples): +def _commit_output_task(task: Task, num_channels: int, num_samples: int) -> None: task.out_stream.output_buf_size = num_channels * num_samples * 2 task.control(TaskMode.TASK_COMMIT) task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE +def pytest_addoption(parser: pytest.Parser) -> None: + """Add command line options to pytest.""" + parser.addoption("--device", action="store", default=None, help="Device name for benchmarks") + + @pytest.fixture -def any_6363_device(system: System) -> Device: - """Gets a 6363 device, either real or simulated.""" +def benchmark_device(system: System, request: pytest.FixtureRequest) -> Device: + """Get device for benchmarking.""" + device: str | None = request.config.getoption("--device") + if device is not None: + return system.devices[device] + return _device_by_product_type("PCIe-6363", DeviceType.ANY, system) @pytest.fixture def ai_benchmark_task( task: Task, - any_6363_device: Device, + benchmark_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure an AI task for benchmarking.""" @@ -55,7 +74,7 @@ def ai_benchmark_task( for chan in range(num_channels): task.ai_channels.add_ai_voltage_chan( - any_6363_device.ai_physical_chans[chan].name, + benchmark_device.ai_physical_chans[chan].name, min_val=-5.0, max_val=5.0, ) @@ -92,7 +111,7 @@ def ao_benchmark_task( @pytest.fixture def di_lines_benchmark_task( task: Task, - any_6363_device: Device, + benchmark_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DI task for benchmarking.""" @@ -103,7 +122,7 @@ def di_lines_benchmark_task( for chan in range(num_channels): line_names = [ chan.name - for chan in any_6363_device.di_lines[chan * num_lines : (chan + 1) * num_lines] + for chan in benchmark_device.di_lines[chan * num_lines : (chan + 1) * num_lines] ] physical_channel_string = ",".join(line_names) task.di_channels.add_di_chan( @@ -119,7 +138,7 @@ def di_lines_benchmark_task( @pytest.fixture def di_port32_benchmark_task( task: Task, - any_6363_device: Device, + benchmark_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DI task for benchmarking.""" @@ -127,7 +146,7 @@ def di_port32_benchmark_task( # port 0 is the only port that supports buffered operations task.di_channels.add_di_chan( - any_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + 
benchmark_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) _configure_timing(task, 1, num_samples) @@ -139,7 +158,7 @@ def di_port32_benchmark_task( @pytest.fixture def do_lines_benchmark_task( task: Task, - any_6363_device: Device, + benchmark_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DO task for benchmarking.""" @@ -150,7 +169,7 @@ def do_lines_benchmark_task( for chan in range(num_channels): line_names = [ chan.name - for chan in any_6363_device.do_lines[chan * num_lines : (chan + 1) * num_lines] + for chan in benchmark_device.do_lines[chan * num_lines : (chan + 1) * num_lines] ] physical_channel_string = ",".join(line_names) task.do_channels.add_do_chan( @@ -166,7 +185,7 @@ def do_lines_benchmark_task( @pytest.fixture def do_port32_benchmark_task( task: Task, - any_6363_device: Device, + benchmark_device: Device, request: pytest.FixtureRequest, ) -> Task: """Configure a hardware-timed buffered DO task for benchmarking.""" @@ -174,7 +193,7 @@ def do_port32_benchmark_task( # port 0 is the only port that supports buffered operations task.do_channels.add_do_chan( - any_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + benchmark_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES ) _configure_timing(task, 1, num_samples) diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py index 5f61f4ba..746336dd 100644 --- a/tests/benchmark/test_analog_stream_readers.py +++ b/tests/benchmark/test_analog_stream_readers.py @@ -13,6 +13,10 @@ from nidaqmx.stream_readers._analog_single_channel_reader import ( AnalogSingleChannelReader, ) +from tests.benchmark.conftest import ( + _WAVEFORM_BENCHMARK_MODE_IDS, + _WAVEFORM_BENCHMARK_MODES, +) @pytest.mark.benchmark(group="analog_readers") @@ -37,7 +41,9 @@ def test___analog_single_channel_reader___read_many_sample( @pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_samples", [1, 1000]) -@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) +@pytest.mark.parametrize( + "waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS +) @pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC") def test___analog_single_channel_reader___read_waveform( benchmark: BenchmarkFixture, @@ -78,7 +84,9 @@ def test___analog_multi_channel_reader___read_many_sample( @pytest.mark.benchmark(group="analog_readers") @pytest.mark.parametrize("num_channels", [1, 2, 8]) @pytest.mark.parametrize("num_samples", [1, 1000]) -@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) +@pytest.mark.parametrize( + "waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS +) @pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") def test___analog_multi_channel_reader___read_waveform( benchmark: BenchmarkFixture, diff --git a/tests/benchmark/test_task.py b/tests/benchmark/test_task.py index 66bd7db8..90ad446e 100644 --- a/tests/benchmark/test_task.py +++ b/tests/benchmark/test_task.py @@ -9,6 +9,10 @@ from nidaqmx import Task from nidaqmx.constants import WaveformAttributeMode +from tests.benchmark.conftest import ( + _WAVEFORM_BENCHMARK_MODE_IDS, + _WAVEFORM_BENCHMARK_MODES, +) def _create_analog_data(num_channels, num_samples): @@ -48,7 +52,9 @@ def test___task___read_analog( @pytest.mark.benchmark(group="analog_readers") 
@pytest.mark.parametrize("num_channels", [1, 2, 8]) @pytest.mark.parametrize("num_samples", [1, 1000]) -@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode)) +@pytest.mark.parametrize( + "waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS +) @pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") def test___task___read_analog_waveform( benchmark: BenchmarkFixture, diff --git a/tox.ini b/tox.ini index ee87c26c..1304eb5e 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ [tox] isolated_build = true -envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, report, docs, benchmark +envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, py39-base-benchmark, report, docs [testenv] skip_install = true @@ -13,7 +13,7 @@ allowlist_externals = poetry setenv = base: INSTALL_OPTS=--only main,test grpc: INSTALL_OPTS=--only main,test --extras grpc - base: PYTEST_OPTS=-k "not grpc" + base: PYTEST_OPTS=-k "library" grpc: PYTEST_OPTS= nicaiu: NIDAQMX_C_LIBRARY=nicaiu nicai_utf8: NIDAQMX_C_LIBRARY=nicai_utf8 @@ -24,7 +24,8 @@ commands = poetry run python --version poetry install -v {env:INSTALL_OPTS} poetry run python -c "from nidaqmx._lib import lib_importer; print(f'Library: {lib_importer.windll._library._name}\nLibrary encoding: {lib_importer.encoding}')" - poetry run pytest --quiet --cov=generated/nidaqmx --cov-append --cov-report= --junitxml=test_results/system-{envname}.xml {env:PYTEST_OPTS} {posargs} + !benchmark: poetry run pytest --quiet --cov=generated/nidaqmx --cov-append --cov-report= --junitxml=test_results/system-{envname}.xml {env:PYTEST_OPTS} {posargs} + benchmark: poetry run pytest tests/benchmark/ --quiet --junitxml=test_results/benchmark-{envname}.xml {env:PYTEST_OPTS} {posargs} [testenv:clean] commands = poetry run coverage erase @@ -42,12 +43,3 @@ commands = poetry install -v --only main,docs # Use -W to treat warnings as errors. poetry run sphinx-build -b html -W docs docs/_build - -[testenv:benchmark] -base_python = python3.11 -skip_install = true -allowlist_externals = poetry -commands = - poetry run python --version - poetry install -v --only main,test - poetry run pytest tests/benchmark/ --quiet -k "library" --benchmark-only {posargs} From e9e4c9f8a6645bc267a795cb4a5ea53adbefe5be Mon Sep 17 00:00:00 2001 From: Mike Prosser Date: Tue, 30 Sep 2025 10:04:38 -0500 Subject: [PATCH 15/15] fix typo --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index efe0f17b..26f886b9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -93,7 +93,7 @@ Or you can use tox (which skips the gRPC variants): poetry run -- tox -e py39-base-benchmark -- --device Dev1 ``` -The benchmarks are designed to run on a 6363 device. If you don't spcecify a specific +The benchmarks are designed to run on a 6363 device. If you don't specify a specific device using `--device`, then it will automatically use any real or simulated 6363 that can be found.