diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a2fc3dfd..26f886b9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -75,6 +75,28 @@ $ poetry run tox
 
 This requires you to have all the Python interpreters supported by **nidaqmx** installed on your machine.
 
+# Benchmarks
+
+Benchmark tests are not run by default when you run pytest. To run the benchmarks, use this command:
+
+```sh
+# Run the benchmarks
+$ poetry run pytest -v tests/benchmark --device Dev1
+
+# To compare benchmarks before/after a change, save a baseline on the first run
+# (--benchmark-save=some-name) and compare against it on later runs
+# (--benchmark-compare=0001). See
+# https://pytest-benchmark.readthedocs.io/en/latest/comparing.html
+```
+
+Or you can use tox (which skips the gRPC variants):
+```sh
+$ poetry run -- tox -e py39-base-benchmark -- --device Dev1
+```
+
+The benchmarks are designed to run on a 6363 device. If you don't specify a device using
+`--device`, the benchmarks automatically use any real or simulated 6363 that can be found.
+
 # Building Documentation
 
 To build the documentation install the optional docs packages and run sphinx. For example:
diff --git a/poetry.lock b/poetry.lock
index 44fff0b9..ecc09167 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2362,6 +2362,18 @@ files = [
 ]
 markers = {main = "python_version >= \"3.13\" and extra == \"grpc\"", codegen = "python_version >= \"3.13\""}
 
+[[package]]
+name = "py-cpuinfo"
+version = "9.0.0"
+description = "Get CPU info with pure Python"
+optional = false
+python-versions = "*"
+groups = ["test"]
+files = [
+    {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"},
+    {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"},
+]
+
 [[package]]
 name = "pycodestyle"
 version = "2.9.1"
@@ -2521,6 +2533,27 @@ tomli = {version = ">=1", markers = "python_version < \"3.11\""}
 [package.extras]
 dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"]
 
+[[package]]
+name = "pytest-benchmark"
+version = "5.1.0"
+description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer."
+optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "pytest-benchmark-5.1.0.tar.gz", hash = "sha256:9ea661cdc292e8231f7cd4c10b0319e56a2118e2c09d9f50e1b3d150d2aca105"}, + {file = "pytest_benchmark-5.1.0-py3-none-any.whl", hash = "sha256:922de2dfa3033c227c96da942d1878191afa135a29485fb942e85dff1c592c89"}, +] + +[package.dependencies] +py-cpuinfo = "*" +pytest = ">=8.1" + +[package.extras] +aspect = ["aspectlib"] +elasticsearch = ["elasticsearch"] +histogram = ["pygal", "pygaljs", "setuptools"] + [[package]] name = "pytest-cov" version = "7.0.0" @@ -3243,4 +3276,4 @@ grpc = ["grpcio", "protobuf"] [metadata] lock-version = "2.1" python-versions = ">=3.9,<4.0" -content-hash = "0be294ec7ba2a497d90e43221ebf50c790c066089bc838b17d0c7de7b43c4997" +content-hash = "c6e65f2267f438dede07c5b61377556442bdc24ce68bb299e3b468c0583062d7" diff --git a/pyproject.toml b/pyproject.toml index 435d894b..71162162 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,6 +107,7 @@ types-grpcio = ">=1.0" [tool.poetry.group.test.dependencies] pytest = ">=7.2" +pytest-benchmark = ">=5.1" pytest-cov = ">=4.0" pytest-mock = ">=3.0" pykka = ">=3.0" @@ -135,7 +136,7 @@ application-import-names = "nidaqmx" [tool.pytest.ini_options] addopts = "--doctest-modules --strict-markers" filterwarnings = ["always::ImportWarning", "always::ResourceWarning"] -testpaths = ["tests"] +testpaths = ["tests/acceptance", "tests/component", "tests/legacy", "tests/unit"] markers = [ # Defines custom markers used by nidaqmx tests. Prevents PytestUnknownMarkWarning. "library_only(reason=...): run the test with only the library interpreter implementation.", @@ -184,6 +185,8 @@ module = [ "importlib_metadata", "mako.*", "nidaqmx.*", + # https://github.com/ionelmc/pytest-benchmark/issues/212 - Add type annotations + "pytest_benchmark.*", ] ignore_missing_imports = true @@ -196,6 +199,7 @@ warn_unused_ignores = false typeCheckingMode = "basic" reportArgumentType = false reportAttributeAccessIssue = false +reportGeneralTypeIssues = false reportInvalidTypeForm = false reportOperatorIssue = false reportOptionalIterable = false diff --git a/tests/benchmark/__init__.py b/tests/benchmark/__init__.py new file mode 100644 index 00000000..60ac1715 --- /dev/null +++ b/tests/benchmark/__init__.py @@ -0,0 +1 @@ +"""Benchmarks for the nidaqmx package.""" diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py new file mode 100644 index 00000000..4223c3cd --- /dev/null +++ b/tests/benchmark/conftest.py @@ -0,0 +1,202 @@ +"""Fixtures for benchmark tests.""" + +from __future__ import annotations + +import pytest + +from nidaqmx import Task +from nidaqmx.constants import ( + AcquisitionType, + Edge, + LineGrouping, + ReadRelativeTo, + TaskMode, + WaveformAttributeMode, +) +from nidaqmx.system import Device, System +from tests.conftest import DeviceType, _device_by_product_type + + +_WAVEFORM_BENCHMARK_MODES = [ + WaveformAttributeMode.NONE, + WaveformAttributeMode.TIMING, + WaveformAttributeMode.TIMING | WaveformAttributeMode.EXTENDED_PROPERTIES, +] + +_WAVEFORM_BENCHMARK_MODE_IDS = ["NONE", "TIMING", "ALL"] + + +def _configure_timing(task: Task, num_channels: int, num_samples: int) -> None: + task.timing.cfg_samp_clk_timing( + rate=25000.0, + active_edge=Edge.RISING, + sample_mode=AcquisitionType.FINITE, + samps_per_chan=num_channels * num_samples * 2, + ) + + +def _start_input_task(task: Task) -> None: + task.start() + task.wait_until_done(timeout=10.0) + task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + 
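+# Note (editorial comment): _start_input_task and _commit_output_task rewind
+# relative_to to FIRST_SAMPLE so that each benchmark iteration re-reads, or
+# overwrites, the same region of the task buffer instead of waiting for fresh
+# samples or filling the buffer across thousands of timed iterations.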
+def _commit_output_task(task: Task, num_channels: int, num_samples: int) -> None: + task.out_stream.output_buf_size = num_channels * num_samples * 2 + task.control(TaskMode.TASK_COMMIT) + task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE + + +def pytest_addoption(parser: pytest.Parser) -> None: + """Add command line options to pytest.""" + parser.addoption("--device", action="store", default=None, help="Device name for benchmarks") + + +@pytest.fixture +def benchmark_device(system: System, request: pytest.FixtureRequest) -> Device: + """Get device for benchmarking.""" + device: str | None = request.config.getoption("--device") + if device is not None: + return system.devices[device] + + return _device_by_product_type("PCIe-6363", DeviceType.ANY, system) + + +@pytest.fixture +def ai_benchmark_task( + task: Task, + benchmark_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure an AI task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + + for chan in range(num_channels): + task.ai_channels.add_ai_voltage_chan( + benchmark_device.ai_physical_chans[chan].name, + min_val=-5.0, + max_val=5.0, + ) + + _configure_timing(task, num_channels, num_samples) + _start_input_task(task) + + return task + + +@pytest.fixture +def ao_benchmark_task( + task: Task, + real_x_series_multiplexed_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered AO task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + + for chan in range(num_channels): + task.ao_channels.add_ao_voltage_chan( + real_x_series_multiplexed_device.ao_physical_chans[chan].name, + min_val=-10.0, + max_val=10.0, + ) + + _configure_timing(task, num_channels, num_samples) + _commit_output_task(task, num_channels, num_samples) + + return task + + +@pytest.fixture +def di_lines_benchmark_task( + task: Task, + benchmark_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DI task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + num_lines = request.node.callspec.params.get("num_lines", 1) + + for chan in range(num_channels): + line_names = [ + chan.name + for chan in benchmark_device.di_lines[chan * num_lines : (chan + 1) * num_lines] + ] + physical_channel_string = ",".join(line_names) + task.di_channels.add_di_chan( + physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + + _configure_timing(task, num_channels, num_samples) + _start_input_task(task) + + return task + + +@pytest.fixture +def di_port32_benchmark_task( + task: Task, + benchmark_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DI task for benchmarking.""" + num_samples = request.node.callspec.params.get("num_samples", 1) + + # port 0 is the only port that supports buffered operations + task.di_channels.add_di_chan( + benchmark_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + + _configure_timing(task, 1, num_samples) + _start_input_task(task) + + return task + + +@pytest.fixture +def do_lines_benchmark_task( + task: Task, + benchmark_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DO 
task for benchmarking.""" + num_channels = request.node.callspec.params.get("num_channels", 1) + num_samples = request.node.callspec.params.get("num_samples", 1) + num_lines = request.node.callspec.params.get("num_lines", 1) + + for chan in range(num_channels): + line_names = [ + chan.name + for chan in benchmark_device.do_lines[chan * num_lines : (chan + 1) * num_lines] + ] + physical_channel_string = ",".join(line_names) + task.do_channels.add_do_chan( + physical_channel_string, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + + _configure_timing(task, num_channels, num_samples) + _commit_output_task(task, num_channels, num_samples) + + return task + + +@pytest.fixture +def do_port32_benchmark_task( + task: Task, + benchmark_device: Device, + request: pytest.FixtureRequest, +) -> Task: + """Configure a hardware-timed buffered DO task for benchmarking.""" + num_samples = request.node.callspec.params.get("num_samples", 1) + + # port 0 is the only port that supports buffered operations + task.do_channels.add_do_chan( + benchmark_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES + ) + + _configure_timing(task, 1, num_samples) + _commit_output_task(task, 1, num_samples) + + return task diff --git a/tests/benchmark/test_analog_stream_readers.py b/tests/benchmark/test_analog_stream_readers.py new file mode 100644 index 00000000..746336dd --- /dev/null +++ b/tests/benchmark/test_analog_stream_readers.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import math + +import numpy +import pytest +from nitypes.waveform import AnalogWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +from nidaqmx import Task +from nidaqmx.constants import WaveformAttributeMode +from nidaqmx.stream_readers._analog_multi_channel_reader import AnalogMultiChannelReader +from nidaqmx.stream_readers._analog_single_channel_reader import ( + AnalogSingleChannelReader, +) +from tests.benchmark.conftest import ( + _WAVEFORM_BENCHMARK_MODE_IDS, + _WAVEFORM_BENCHMARK_MODES, +) + + +@pytest.mark.benchmark(group="analog_readers") +def test___analog_single_channel_reader___read_one_sample( + benchmark: BenchmarkFixture, ai_benchmark_task: Task +) -> None: + reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) + + benchmark(reader.read_one_sample) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_single_channel_reader___read_many_sample( + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_samples: int +) -> None: + reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) + data = numpy.full(num_samples, math.inf, dtype=numpy.float64) + + benchmark(reader.read_many_sample, data, num_samples) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.parametrize( + "waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS +) +@pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC") +def test___analog_single_channel_reader___read_waveform( + benchmark: BenchmarkFixture, + ai_benchmark_task: Task, + num_samples: int, + waveform_attribute_mode: WaveformAttributeMode, +) -> None: + ai_benchmark_task.in_stream.waveform_attribute_mode = waveform_attribute_mode + reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream) + waveform = AnalogWaveform(num_samples) + + benchmark(reader.read_waveform, waveform, num_samples) + + +@pytest.mark.benchmark(group="analog_readers") 
+@pytest.mark.parametrize("num_channels", [1, 2, 8]) +def test___analog_multi_channel_reader___read_one_sample( + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int +) -> None: + reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) + data = numpy.full(num_channels, math.inf, dtype=numpy.float64) + + benchmark(reader.read_one_sample, data) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_multi_channel_reader___read_many_sample( + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int +) -> None: + reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) + data = numpy.full((num_channels, num_samples), math.inf, dtype=numpy.float64) + + benchmark(reader.read_many_sample, data, num_samples) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.parametrize( + "waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS +) +@pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") +def test___analog_multi_channel_reader___read_waveform( + benchmark: BenchmarkFixture, + ai_benchmark_task: Task, + num_channels: int, + num_samples: int, + waveform_attribute_mode: WaveformAttributeMode, +) -> None: + ai_benchmark_task.in_stream.waveform_attribute_mode = waveform_attribute_mode + reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream) + waveforms = [AnalogWaveform(num_samples) for _ in range(num_channels)] + + benchmark(reader.read_waveforms, waveforms, num_samples) diff --git a/tests/benchmark/test_analog_stream_writers.py b/tests/benchmark/test_analog_stream_writers.py new file mode 100644 index 00000000..feb3b994 --- /dev/null +++ b/tests/benchmark/test_analog_stream_writers.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import numpy +import pytest +from nitypes.waveform import AnalogWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +import nidaqmx +from nidaqmx.stream_writers._analog_multi_channel_writer import AnalogMultiChannelWriter +from nidaqmx.stream_writers._analog_single_channel_writer import ( + AnalogSingleChannelWriter, +) + + +@pytest.mark.benchmark(group="analog_writers") +def test___analog_single_channel_writer___write_one_sample( + benchmark: BenchmarkFixture, + ao_benchmark_task: nidaqmx.Task, +) -> None: + writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False) + + benchmark(writer.write_one_sample, 1.0) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_single_channel_writer___write_many_sample( + benchmark: BenchmarkFixture, + ao_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False) + data = numpy.linspace(0.0, 1.0, num=num_samples, dtype=numpy.float64) + + benchmark(writer.write_many_sample, data) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") +def test___analog_single_channel_writer___write_waveform( + benchmark: BenchmarkFixture, + ao_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, 
auto_start=False) + waveform = AnalogWaveform(num_samples) + + benchmark(writer.write_waveform, waveform) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +def test___analog_multi_channel_writer___write_one_sample( + benchmark: BenchmarkFixture, + ao_benchmark_task: nidaqmx.Task, + num_channels: int, +) -> None: + writer = AnalogMultiChannelWriter(ao_benchmark_task.out_stream, auto_start=False) + data = numpy.asarray([1.0] * num_channels, dtype=numpy.float64) + + benchmark(writer.write_one_sample, data) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___analog_multi_channel_writer___write_many_sample( + benchmark: BenchmarkFixture, + ao_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, +) -> None: + writer = AnalogMultiChannelWriter(ao_benchmark_task.out_stream, auto_start=False) + data = numpy.full((num_channels, num_samples), 1.0, dtype=numpy.float64) + + benchmark(writer.write_many_sample, data) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") +def test___analog_multi_channel_writer___write_waveform( + benchmark: BenchmarkFixture, + ao_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, +) -> None: + writer = AnalogMultiChannelWriter(ao_benchmark_task.out_stream, auto_start=False) + waveforms = [AnalogWaveform(num_samples) for _ in range(num_channels)] + + benchmark(writer.write_waveforms, waveforms) diff --git a/tests/benchmark/test_digital_stream_readers.py b/tests/benchmark/test_digital_stream_readers.py new file mode 100644 index 00000000..c6262393 --- /dev/null +++ b/tests/benchmark/test_digital_stream_readers.py @@ -0,0 +1,153 @@ +from __future__ import annotations + +import numpy +import pytest +from nitypes.waveform import DigitalWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +import nidaqmx +from nidaqmx.stream_readers._digital_multi_channel_reader import ( + DigitalMultiChannelReader, +) +from nidaqmx.stream_readers._digital_single_channel_reader import ( + DigitalSingleChannelReader, +) + + +@pytest.mark.benchmark(group="digital_readers") +def test___digital_single_channel_reader___read_one_sample_one_line( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: nidaqmx.Task, +) -> None: + reader = DigitalSingleChannelReader(di_lines_benchmark_task.in_stream) + + benchmark(reader.read_one_sample_one_line) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___digital_single_channel_reader___read_one_sample_multi_line( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: nidaqmx.Task, + num_lines: int, +) -> None: + reader = DigitalSingleChannelReader(di_lines_benchmark_task.in_stream) + data = numpy.full(num_lines, False, dtype=numpy.bool_) + + benchmark(reader.read_one_sample_multi_line, data) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___digital_single_channel_reader___read_many_sample_port_uint32( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + reader = DigitalSingleChannelReader(di_port32_benchmark_task.in_stream) + data = numpy.full(num_samples, 
numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) + + benchmark(reader.read_many_sample_port_uint32, data, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_single_channel_reader___read_waveform_lines( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: nidaqmx.Task, + num_samples: int, + num_lines: int, +) -> None: + reader = DigitalSingleChannelReader(di_lines_benchmark_task.in_stream) + waveform = DigitalWaveform(num_samples, num_lines) + + benchmark(reader.read_waveform, waveform, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_single_channel_reader___read_waveform_port( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + reader = DigitalSingleChannelReader(di_port32_benchmark_task.in_stream) + waveform = DigitalWaveform(num_samples, signal_count=32) + + benchmark(reader.read_waveform, waveform, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +def test___digital_multi_channel_reader___read_one_sample_one_line( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: nidaqmx.Task, + num_channels: int, +) -> None: + reader = DigitalMultiChannelReader(di_lines_benchmark_task.in_stream) + data = numpy.full(num_channels, False, dtype=numpy.bool_) + + benchmark(reader.read_one_sample_one_line, data) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___digital_multi_channel_reader___read_one_sample_multi_line( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: nidaqmx.Task, + num_channels: int, + num_lines: int, +) -> None: + reader = DigitalMultiChannelReader(di_lines_benchmark_task.in_stream) + data = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_) + + benchmark(reader.read_one_sample_multi_line, data) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___digital_multi_channel_reader___read_many_sample_port_uint32( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + reader = DigitalMultiChannelReader(di_port32_benchmark_task.in_stream) + data = numpy.full((1, num_samples), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32) + + benchmark(reader.read_many_sample_port_uint32, data, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_reader___read_waveform_lines( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: nidaqmx.Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + reader = DigitalMultiChannelReader(di_lines_benchmark_task.in_stream) + waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] + + benchmark(reader.read_waveforms, waveforms, num_samples) + + 
+@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___digital_multi_channel_reader___read_waveform_port( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + reader = DigitalMultiChannelReader(di_port32_benchmark_task.in_stream) + waveforms = [DigitalWaveform(num_samples, signal_count=32)] + + benchmark(reader.read_waveforms, waveforms, num_samples) diff --git a/tests/benchmark/test_digital_stream_writers.py b/tests/benchmark/test_digital_stream_writers.py new file mode 100644 index 00000000..14081326 --- /dev/null +++ b/tests/benchmark/test_digital_stream_writers.py @@ -0,0 +1,153 @@ +from __future__ import annotations + +import numpy +import pytest +from nitypes.waveform import DigitalWaveform +from pytest_benchmark.fixture import BenchmarkFixture + +import nidaqmx +from nidaqmx.stream_writers._digital_multi_channel_writer import ( + DigitalMultiChannelWriter, +) +from nidaqmx.stream_writers._digital_single_channel_writer import ( + DigitalSingleChannelWriter, +) + + +@pytest.mark.benchmark(group="digital_writers") +def test___digital_single_channel_writer___write_one_sample_one_line( + benchmark: BenchmarkFixture, + do_lines_benchmark_task: nidaqmx.Task, +) -> None: + writer = DigitalSingleChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False) + + benchmark(writer.write_one_sample_one_line, True) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___digital_single_channel_writer___write_one_sample_multi_line( + benchmark: BenchmarkFixture, + do_lines_benchmark_task: nidaqmx.Task, + num_lines: int, +) -> None: + writer = DigitalSingleChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False) + data = numpy.full(num_lines, True, dtype=numpy.bool_) + + benchmark(writer.write_one_sample_multi_line, data) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___digital_single_channel_writer___write_many_sample_port_uint32( + benchmark: BenchmarkFixture, + do_port32_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + writer = DigitalSingleChannelWriter(do_port32_benchmark_task.out_stream, auto_start=False) + data = numpy.full(num_samples, numpy.uint32(1), dtype=numpy.uint32) + + benchmark(writer.write_many_sample_port_uint32, data) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_single_channel_writer___write_waveform_lines( + benchmark: BenchmarkFixture, + do_lines_benchmark_task: nidaqmx.Task, + num_samples: int, + num_lines: int, +) -> None: + writer = DigitalSingleChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False) + waveform = DigitalWaveform(num_samples, num_lines) + + benchmark(writer.write_waveform, waveform) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___digital_single_channel_writer___write_waveform_port( + benchmark: BenchmarkFixture, + do_port32_benchmark_task: nidaqmx.Task, + num_samples: int, +) -> None: + writer = 
DigitalSingleChannelWriter(do_port32_benchmark_task.out_stream, auto_start=False)
+    waveform = DigitalWaveform(num_samples, signal_count=32)
+
+    benchmark(writer.write_waveform, waveform)
+
+
+@pytest.mark.benchmark(group="digital_writers")
+@pytest.mark.parametrize("num_channels", [1, 2])
+def test___digital_multi_channel_writer___write_one_sample_one_line(
+    benchmark: BenchmarkFixture,
+    do_lines_benchmark_task: nidaqmx.Task,
+    num_channels: int,
+) -> None:
+    writer = DigitalMultiChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False)
+    data = numpy.full(num_channels, False, dtype=numpy.bool_)
+
+    benchmark(writer.write_one_sample_one_line, data)
+
+
+@pytest.mark.benchmark(group="digital_writers")
+@pytest.mark.parametrize("num_channels", [1, 2])
+@pytest.mark.parametrize("num_lines", [1, 2, 8])
+def test___digital_multi_channel_writer___write_one_sample_multi_line(
+    benchmark: BenchmarkFixture,
+    do_lines_benchmark_task: nidaqmx.Task,
+    num_channels: int,
+    num_lines: int,
+) -> None:
+    writer = DigitalMultiChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False)
+    data = numpy.full((num_channels, num_lines), False, dtype=numpy.bool_)
+
+    benchmark(writer.write_one_sample_multi_line, data)
+
+
+@pytest.mark.benchmark(group="digital_writers")
+@pytest.mark.parametrize("num_samples", [1, 100])
+def test___digital_multi_channel_writer___write_many_sample_port_uint32(
+    benchmark: BenchmarkFixture,
+    do_port32_benchmark_task: nidaqmx.Task,
+    num_samples: int,
+) -> None:
+    writer = DigitalMultiChannelWriter(do_port32_benchmark_task.out_stream, auto_start=False)
+    data = numpy.full((1, num_samples), numpy.iinfo(numpy.uint32).min, dtype=numpy.uint32)
+
+    benchmark(writer.write_many_sample_port_uint32, data, num_samples)
+
+
+@pytest.mark.benchmark(group="digital_writers")
+@pytest.mark.parametrize("num_channels", [1, 2])
+@pytest.mark.parametrize("num_samples", [1, 100])
+@pytest.mark.parametrize("num_lines", [1, 2, 8])
+@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
+def test___digital_multi_channel_writer___write_waveform_lines(
+    benchmark: BenchmarkFixture,
+    do_lines_benchmark_task: nidaqmx.Task,
+    num_channels: int,
+    num_samples: int,
+    num_lines: int,
+) -> None:
+    writer = DigitalMultiChannelWriter(do_lines_benchmark_task.out_stream, auto_start=False)
+    waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)]
+
+    benchmark(writer.write_waveforms, waveforms, num_samples)
+
+
+@pytest.mark.benchmark(group="digital_writers")
+@pytest.mark.parametrize("num_samples", [1, 100])
+@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC")
+def test___digital_multi_channel_writer___write_waveform_port(
+    benchmark: BenchmarkFixture,
+    do_port32_benchmark_task: nidaqmx.Task,
+    num_samples: int,
+) -> None:
+    writer = DigitalMultiChannelWriter(do_port32_benchmark_task.out_stream, auto_start=False)
+    waveforms = [DigitalWaveform(num_samples, signal_count=32)]
+
+    benchmark(writer.write_waveforms, waveforms, num_samples)
diff --git a/tests/benchmark/test_task.py b/tests/benchmark/test_task.py
new file mode 100644
index 00000000..90ad446e
--- /dev/null
+++ b/tests/benchmark/test_task.py
@@ -0,0 +1,200 @@
+from __future__ import annotations
+
+from typing import Any
+
+import numpy
+import pytest
+from nitypes.waveform import AnalogWaveform, DigitalWaveform
+from pytest_benchmark.fixture import BenchmarkFixture
+
+from nidaqmx import Task
+from nidaqmx.constants import WaveformAttributeMode
+from
tests.benchmark.conftest import ( + _WAVEFORM_BENCHMARK_MODE_IDS, + _WAVEFORM_BENCHMARK_MODES, +) + + +def _create_analog_data(num_channels, num_samples): + if num_channels == 1: + if num_samples == 1: + return 1.0 + return numpy.full((num_samples), 1.0, numpy.float64) + else: + return numpy.full((num_channels, num_samples), 1.0, numpy.float64) + + +def _create_digital_data(num_channels, num_samples, num_lines): + if num_lines == 1: + dtype: Any = numpy.bool_ + value: Any = True + else: + dtype = numpy.uint32 + value = 1 + + if num_channels == 1: + if num_samples == 1: + return value + return numpy.full((num_samples), value, dtype) + else: + return numpy.full((num_channels, num_samples), value, dtype) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___task___read_analog( + benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int +) -> None: + benchmark(ai_benchmark_task.read, num_samples) + + +@pytest.mark.benchmark(group="analog_readers") +@pytest.mark.parametrize("num_channels", [1, 2, 8]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.parametrize( + "waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS +) +@pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC") +def test___task___read_analog_waveform( + benchmark: BenchmarkFixture, + ai_benchmark_task: Task, + num_channels: int, + num_samples: int, + waveform_attribute_mode: WaveformAttributeMode, +) -> None: + ai_benchmark_task.in_stream.waveform_attribute_mode = waveform_attribute_mode + benchmark(ai_benchmark_task.read_waveform, num_samples) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +def test___task___write_analog( + benchmark: BenchmarkFixture, + ao_benchmark_task: Task, + num_channels: int, + num_samples: int, +) -> None: + data = _create_analog_data(num_channels, num_samples) + ao_benchmark_task.write(data, auto_start=False) + benchmark(ao_benchmark_task.write, data, auto_start=False) + + +@pytest.mark.benchmark(group="analog_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 1000]) +@pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC") +def test___task___write_analog_waveform( + benchmark: BenchmarkFixture, + ao_benchmark_task: Task, + num_channels: int, + num_samples: int, +) -> None: + waveforms = [AnalogWaveform(num_samples) for _ in range(num_channels)] + + benchmark(ao_benchmark_task.write_waveform, waveforms, auto_start=False) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___task___read_digital_lines( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + benchmark(di_lines_benchmark_task.read, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___task___read_digital_port( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: Task, + num_samples: int, +) -> None: + benchmark(di_port32_benchmark_task.read, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") 
+@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___task___read_digital_lines_waveform( + benchmark: BenchmarkFixture, + di_lines_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + benchmark(di_lines_benchmark_task.read_waveform, num_samples) + + +@pytest.mark.benchmark(group="digital_readers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.grpc_skip(reason="read_digital_waveform not implemented in GRPC") +def test___task___read_digital_port_waveform( + benchmark: BenchmarkFixture, + di_port32_benchmark_task: Task, + num_samples: int, +) -> None: + benchmark(di_port32_benchmark_task.read_waveform, num_samples) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +def test___task___write_digital_lines( + benchmark: BenchmarkFixture, + do_lines_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + data = _create_digital_data(num_channels, num_samples, num_lines) + benchmark(do_lines_benchmark_task.write, data, auto_start=False) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) +def test___task___write_digital_port( + benchmark: BenchmarkFixture, + do_port32_benchmark_task: Task, + num_samples: int, +) -> None: + data = _create_digital_data(1, num_samples, 32) + benchmark(do_port32_benchmark_task.write, data, auto_start=False) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_channels", [1, 2]) +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.parametrize("num_lines", [1, 2, 8]) +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___task___write_digital_lines_waveform( + benchmark: BenchmarkFixture, + do_lines_benchmark_task: Task, + num_channels: int, + num_samples: int, + num_lines: int, +) -> None: + waveforms = [DigitalWaveform(num_samples, num_lines) for _ in range(num_channels)] + benchmark(do_lines_benchmark_task.write_waveform, waveforms, auto_start=False) + + +@pytest.mark.benchmark(group="digital_writers") +@pytest.mark.parametrize("num_samples", [1, 100]) +@pytest.mark.grpc_skip(reason="write_digital_waveform not implemented in GRPC") +def test___task___write_digital_port_waveform( + benchmark: BenchmarkFixture, + do_port32_benchmark_task: Task, + num_samples: int, +) -> None: + waveforms = [DigitalWaveform(num_samples, signal_count=32)] + benchmark(do_port32_benchmark_task.write_waveform, waveforms, auto_start=False) diff --git a/tox.ini b/tox.ini index d055ebd2..1304eb5e 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ [tox] isolated_build = true -envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, report, docs +envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, py39-base-benchmark, report, docs [testenv] skip_install = true @@ -13,7 +13,7 @@ allowlist_externals = poetry setenv = base: INSTALL_OPTS=--only main,test grpc: INSTALL_OPTS=--only main,test --extras grpc - base: PYTEST_OPTS=-k "not grpc" + base: PYTEST_OPTS=-k "library" grpc: PYTEST_OPTS= nicaiu: 
NIDAQMX_C_LIBRARY=nicaiu nicai_utf8: NIDAQMX_C_LIBRARY=nicai_utf8 @@ -24,7 +24,8 @@ commands = poetry run python --version poetry install -v {env:INSTALL_OPTS} poetry run python -c "from nidaqmx._lib import lib_importer; print(f'Library: {lib_importer.windll._library._name}\nLibrary encoding: {lib_importer.encoding}')" - poetry run pytest --quiet --cov=generated/nidaqmx --cov-append --cov-report= --junitxml=test_results/system-{envname}.xml {env:PYTEST_OPTS} {posargs} + !benchmark: poetry run pytest --quiet --cov=generated/nidaqmx --cov-append --cov-report= --junitxml=test_results/system-{envname}.xml {env:PYTEST_OPTS} {posargs} + benchmark: poetry run pytest tests/benchmark/ --quiet --junitxml=test_results/benchmark-{envname}.xml {env:PYTEST_OPTS} {posargs} [testenv:clean] commands = poetry run coverage erase