8 changes: 6 additions & 2 deletions CONTRIBUTING.md
@@ -85,14 +85,18 @@ Benchmark tests are not run by default when you run pytest. To run the benchmark
# see https://pytest-benchmark.readthedocs.io/en/latest/comparing.html
# Run 1: --benchmark-save=some-name
# Run N: --benchmark-compare=0001
$ poetry run pytest -v tests/benchmark
$ poetry run pytest -v tests/benchmark --device Dev1
```

Or you can use tox (which skips the gRPC variants):
```
poetry run tox -e benchmark
poetry run -- tox -e py39-base-benchmark -- --device Dev1
```

The benchmarks are designed to run on a 6363 device. If you don't specify a device
with `--device`, the benchmarks automatically use any real or simulated 6363 that
can be found.
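
To check which devices this fallback can use, a short sketch like the following (not
part of the repository; it simply filters on the product type string) lists the 6363
devices visible to the driver:

```
import nidaqmx.system

system = nidaqmx.system.System.local()
for device in system.devices:
    # The benchmark fixture falls back to a PCIe-6363 (real or simulated);
    # this just lists 6363-family devices for a quick sanity check.
    if "6363" in device.product_type:
        kind = "simulated" if device.is_simulated else "real"
        print(f"{device.name}: {device.product_type} ({kind})")
```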

# Building Documentation

To build the documentation install the optional docs packages and run sphinx. For example:
49 changes: 34 additions & 15 deletions tests/benchmark/conftest.py
@@ -11,12 +11,22 @@
LineGrouping,
ReadRelativeTo,
TaskMode,
WaveformAttributeMode,
)
from nidaqmx.system import Device, System
from tests.conftest import DeviceType, _device_by_product_type


def _configure_timing(task, num_channels, num_samples):
_WAVEFORM_BENCHMARK_MODES = [
WaveformAttributeMode.NONE,
WaveformAttributeMode.TIMING,
WaveformAttributeMode.TIMING | WaveformAttributeMode.EXTENDED_PROPERTIES,
]

_WAVEFORM_BENCHMARK_MODE_IDS = ["NONE", "TIMING", "ALL"]


def _configure_timing(task: Task, num_channels: int, num_samples: int) -> None:
task.timing.cfg_samp_clk_timing(
rate=25000.0,
active_edge=Edge.RISING,
@@ -25,28 +25,37 @@ def _configure_timing(task, num_channels, num_samples):
)


def _start_input_task(task):
def _start_input_task(task: Task) -> None:
task.start()
task.wait_until_done(timeout=10.0)
task.in_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE


def _commit_output_task(task, num_channels, num_samples):
def _commit_output_task(task: Task, num_channels: int, num_samples: int) -> None:
task.out_stream.output_buf_size = num_channels * num_samples * 2
task.control(TaskMode.TASK_COMMIT)
task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE


def pytest_addoption(parser: pytest.Parser) -> None:
"""Add command line options to pytest."""
parser.addoption("--device", action="store", default=None, help="Device name for benchmarks")


@pytest.fixture
def any_6363_device(system: System) -> Device:
"""Gets a 6363 device, either real or simulated."""
def benchmark_device(system: System, request: pytest.FixtureRequest) -> Device:
"""Get device for benchmarking."""
device: str | None = request.config.getoption("--device")
if device is not None:
return system.devices[device]

return _device_by_product_type("PCIe-6363", DeviceType.ANY, system)


@pytest.fixture
def ai_benchmark_task(
task: Task,
any_6363_device: Device,
benchmark_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure an AI task for benchmarking."""
@@ -55,7 +74,7 @@ def ai_benchmark_task(

for chan in range(num_channels):
task.ai_channels.add_ai_voltage_chan(
any_6363_device.ai_physical_chans[chan].name,
benchmark_device.ai_physical_chans[chan].name,
min_val=-5.0,
max_val=5.0,
)
@@ -92,7 +111,7 @@ def ao_benchmark_task(
@pytest.fixture
def di_lines_benchmark_task(
task: Task,
any_6363_device: Device,
benchmark_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DI task for benchmarking."""
@@ -103,7 +122,7 @@ def di_lines_benchmark_task(
for chan in range(num_channels):
line_names = [
chan.name
for chan in any_6363_device.di_lines[chan * num_lines : (chan + 1) * num_lines]
for chan in benchmark_device.di_lines[chan * num_lines : (chan + 1) * num_lines]
]
physical_channel_string = ",".join(line_names)
task.di_channels.add_di_chan(
@@ -119,15 +138,15 @@ def di_lines_benchmark_task(
@pytest.fixture
def di_port32_benchmark_task(
task: Task,
any_6363_device: Device,
benchmark_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DI task for benchmarking."""
num_samples = request.node.callspec.params.get("num_samples", 1)

# port 0 is the only port that supports buffered operations
task.di_channels.add_di_chan(
any_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
benchmark_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
)

_configure_timing(task, 1, num_samples)
@@ -139,7 +158,7 @@ def di_port32_benchmark_task(
@pytest.fixture
def do_lines_benchmark_task(
task: Task,
any_6363_device: Device,
benchmark_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DO task for benchmarking."""
@@ -150,7 +169,7 @@ def do_lines_benchmark_task(
for chan in range(num_channels):
line_names = [
chan.name
for chan in any_6363_device.do_lines[chan * num_lines : (chan + 1) * num_lines]
for chan in benchmark_device.do_lines[chan * num_lines : (chan + 1) * num_lines]
]
physical_channel_string = ",".join(line_names)
task.do_channels.add_do_chan(
@@ -166,15 +185,15 @@ def do_lines_benchmark_task(
@pytest.fixture
def do_port32_benchmark_task(
task: Task,
any_6363_device: Device,
benchmark_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DO task for benchmarking."""
num_samples = request.node.callspec.params.get("num_samples", 1)

# port 0 is the only port that supports buffered operations
task.do_channels.add_do_chan(
any_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
benchmark_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
)

_configure_timing(task, 1, num_samples)
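
For reference, here is a minimal sketch (not part of this diff) of how a benchmark test can consume the fixtures above. The test name and parameter values are illustrative, plain `Task.read` stands in for the stream-reader calls used by the real benchmarks, and it assumes `ai_benchmark_task` hands back a started task, as the `_start_input_task` helper suggests:

```
import pytest
from pytest_benchmark.fixture import BenchmarkFixture

from nidaqmx import Task


@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_channels", [1, 2])
@pytest.mark.parametrize("num_samples", [1000])
def test___example___task_read(
    benchmark: BenchmarkFixture,
    ai_benchmark_task: Task,
    num_channels: int,
    num_samples: int,
) -> None:
    """Illustrative benchmark that times a plain Task.read on the AI task."""
    # ai_benchmark_task picks up num_channels/num_samples from the parametrization
    # via request.node.callspec.params, so the call below measures only the read.
    benchmark(ai_benchmark_task.read, number_of_samples_per_channel=num_samples)
```

Passing `--device` on the pytest command line (see CONTRIBUTING.md above) routes such a test to a specific device through the `benchmark_device` fixture.
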
12 changes: 10 additions & 2 deletions tests/benchmark/test_analog_stream_readers.py
@@ -13,6 +13,10 @@
from nidaqmx.stream_readers._analog_single_channel_reader import (
AnalogSingleChannelReader,
)
from tests.benchmark.conftest import (
_WAVEFORM_BENCHMARK_MODE_IDS,
_WAVEFORM_BENCHMARK_MODES,
)


@pytest.mark.benchmark(group="analog_readers")
@@ -37,7 +41,9 @@ def test___analog_single_channel_reader___read_many_sample(

@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_samples", [1, 1000])
@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode))
@pytest.mark.parametrize(
"waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS
)
@pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC")
def test___analog_single_channel_reader___read_waveform(
benchmark: BenchmarkFixture,
@@ -78,7 +84,9 @@ def test___analog_multi_channel_reader___read_many_sample(
@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_channels", [1, 2, 8])
@pytest.mark.parametrize("num_samples", [1, 1000])
@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode))
@pytest.mark.parametrize(
"waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS
)
@pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC")
def test___analog_multi_channel_reader___read_waveform(
benchmark: BenchmarkFixture,
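
A note on the explicit `ids` imported above: the third entry in `_WAVEFORM_BENCHMARK_MODES` combines two `Flag` members, and the hand-written `"ALL"` id presumably keeps the generated test ids short and stable; the snippet below only shows the combined value (its exact printed form may vary by Python version):

```
from nidaqmx.constants import WaveformAttributeMode

# The combined mode that the benchmarks label "ALL".
combined = WaveformAttributeMode.TIMING | WaveformAttributeMode.EXTENDED_PROPERTIES
print(combined)
```
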
8 changes: 7 additions & 1 deletion tests/benchmark/test_task.py
@@ -9,6 +9,10 @@

from nidaqmx import Task
from nidaqmx.constants import WaveformAttributeMode
from tests.benchmark.conftest import (
_WAVEFORM_BENCHMARK_MODE_IDS,
_WAVEFORM_BENCHMARK_MODES,
)


def _create_analog_data(num_channels, num_samples):
@@ -48,7 +52,9 @@ def test___task___read_analog(
@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_channels", [1, 2, 8])
@pytest.mark.parametrize("num_samples", [1, 1000])
@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode))
@pytest.mark.parametrize(
"waveform_attribute_mode", _WAVEFORM_BENCHMARK_MODES, ids=_WAVEFORM_BENCHMARK_MODE_IDS
)
@pytest.mark.grpc_skip(reason="read_analog_waveforms not implemented in GRPC")
def test___task___read_analog_waveform(
benchmark: BenchmarkFixture,
16 changes: 4 additions & 12 deletions tox.ini
@@ -5,15 +5,15 @@

[tox]
isolated_build = true
envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, report, docs, benchmark
envlist = clean, py{39,310,311,312,313}-base, py{39,310,311,312,313}-grpc, py39-base-nicaiu, py39-base-nicai_utf8, py39-base-benchmark, report, docs

[testenv]
skip_install = true
allowlist_externals = poetry
setenv =
base: INSTALL_OPTS=--only main,test
grpc: INSTALL_OPTS=--only main,test --extras grpc
base: PYTEST_OPTS=-k "not grpc"
base: PYTEST_OPTS=-k "library"
grpc: PYTEST_OPTS=
nicaiu: NIDAQMX_C_LIBRARY=nicaiu
nicai_utf8: NIDAQMX_C_LIBRARY=nicai_utf8
@@ -24,7 +24,8 @@ commands =
poetry run python --version
poetry install -v {env:INSTALL_OPTS}
poetry run python -c "from nidaqmx._lib import lib_importer; print(f'Library: {lib_importer.windll._library._name}\nLibrary encoding: {lib_importer.encoding}')"
poetry run pytest --quiet --cov=generated/nidaqmx --cov-append --cov-report= --junitxml=test_results/system-{envname}.xml {env:PYTEST_OPTS} {posargs}
!benchmark: poetry run pytest --quiet --cov=generated/nidaqmx --cov-append --cov-report= --junitxml=test_results/system-{envname}.xml {env:PYTEST_OPTS} {posargs}
benchmark: poetry run pytest tests/benchmark/ --quiet --junitxml=test_results/benchmark-{envname}.xml {env:PYTEST_OPTS} {posargs}

[testenv:clean]
commands = poetry run coverage erase
@@ -42,12 +43,3 @@ commands =
poetry install -v --only main,docs
# Use -W to treat warnings as errors.
poetry run sphinx-build -b html -W docs docs/_build

[testenv:benchmark]
base_python = python3.11
skip_install = true
allowlist_externals = poetry
commands =
poetry run python --version
poetry install -v --only main,test
poetry run pytest tests/benchmark/ --quiet -k "library" --benchmark-only {posargs}