Skip to content
Merged
5 changes: 5 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,11 @@ Benchmark tests are not run by default when you run pytest. To run the benchmark
$ poetry run pytest -v tests/benchmark
```

Or you can use tox (which skips the gRPC variants):
```
$ poetry run tox -e benchmark
```

# Building Documentation

To build the documentation install the optional docs packages and run sphinx. For example:
Expand Down
73 changes: 20 additions & 53 deletions tests/benchmark/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@
ReadRelativeTo,
TaskMode,
)
from nidaqmx.system import Device
from nidaqmx.system import Device, System
from tests.conftest import DeviceType, _device_by_product_type


def _configure_timing(task, num_channels, num_samples):
Expand All @@ -36,10 +37,16 @@ def _commit_output_task(task, num_channels, num_samples):
task.out_stream.relative_to = ReadRelativeTo.FIRST_SAMPLE


@pytest.fixture
def any_6363_device(system: System) -> Device:
    """Return a PCIe-6363 device from the system, whether real or simulated."""
    product_type = "PCIe-6363"
    return _device_by_product_type(product_type, DeviceType.ANY, system)


@pytest.fixture
def ai_benchmark_task(
task: Task,
sim_6363_device: Device,
any_6363_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure an AI task for benchmarking."""
Expand All @@ -48,7 +55,7 @@ def ai_benchmark_task(

for chan in range(num_channels):
task.ai_channels.add_ai_voltage_chan(
sim_6363_device.ai_physical_chans[chan].name,
any_6363_device.ai_physical_chans[chan].name,
min_val=-5.0,
max_val=5.0,
)
Expand Down Expand Up @@ -83,29 +90,9 @@ def ao_benchmark_task(


@pytest.fixture
def di_single_line_benchmark_task(
def di_lines_benchmark_task(
task: Task,
sim_6363_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a single-sample DI task for benchmarking."""
num_samples = request.node.callspec.params.get("num_samples", 1)
num_channels = request.node.callspec.params.get("num_channels", 1)

line_names = [chan.name for chan in sim_6363_device.di_lines[:num_channels]]
physical_channel_string = ",".join(line_names)
task.di_channels.add_di_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE)

_configure_timing(task, num_channels, num_samples)
_start_input_task(task)

return task


@pytest.fixture
def di_multi_line_benchmark_task(
task: Task,
sim_6363_device: Device,
any_6363_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DI task for benchmarking."""
Expand All @@ -116,7 +103,7 @@ def di_multi_line_benchmark_task(
for chan in range(num_channels):
line_names = [
chan.name
for chan in sim_6363_device.di_lines[chan * num_lines : (chan + 1) * num_lines]
for chan in any_6363_device.di_lines[chan * num_lines : (chan + 1) * num_lines]
]
physical_channel_string = ",".join(line_names)
task.di_channels.add_di_chan(
Expand All @@ -132,15 +119,15 @@ def di_multi_line_benchmark_task(
@pytest.fixture
def di_port32_benchmark_task(
task: Task,
sim_6363_device: Device,
any_6363_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DI task for benchmarking."""
num_samples = request.node.callspec.params.get("num_samples", 1)

# port 0 is the only port that supports buffered operations
task.di_channels.add_di_chan(
sim_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
any_6363_device.di_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
)

_configure_timing(task, 1, num_samples)
Expand All @@ -150,29 +137,9 @@ def di_port32_benchmark_task(


@pytest.fixture
def do_single_line_benchmark_task(
task: Task,
sim_6363_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a single-sample DO task for benchmarking."""
num_channels = request.node.callspec.params.get("num_channels", 1)
num_samples = request.node.callspec.params.get("num_samples", 1)

line_names = [chan.name for chan in sim_6363_device.do_lines[:num_channels]]
physical_channel_string = ",".join(line_names)
task.do_channels.add_do_chan(physical_channel_string, line_grouping=LineGrouping.CHAN_PER_LINE)

_configure_timing(task, num_channels, num_samples)
_commit_output_task(task, num_channels, num_samples)

return task


@pytest.fixture
def do_multi_line_benchmark_task(
def do_lines_benchmark_task(
task: Task,
sim_6363_device: Device,
any_6363_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DO task for benchmarking."""
Expand All @@ -183,7 +150,7 @@ def do_multi_line_benchmark_task(
for chan in range(num_channels):
line_names = [
chan.name
for chan in sim_6363_device.do_lines[chan * num_lines : (chan + 1) * num_lines]
for chan in any_6363_device.do_lines[chan * num_lines : (chan + 1) * num_lines]
]
physical_channel_string = ",".join(line_names)
task.do_channels.add_do_chan(
Expand All @@ -199,15 +166,15 @@ def do_multi_line_benchmark_task(
@pytest.fixture
def do_port32_benchmark_task(
task: Task,
sim_6363_device: Device,
any_6363_device: Device,
request: pytest.FixtureRequest,
) -> Task:
"""Configure a hardware-timed buffered DO task for benchmarking."""
num_samples = request.node.callspec.params.get("num_samples", 1)

# port 0 is the only port that supports buffered operations
task.do_channels.add_do_chan(
sim_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
any_6363_device.do_ports[0].name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES
)

_configure_timing(task, 1, num_samples)
Expand Down
12 changes: 3 additions & 9 deletions tests/benchmark/test_analog_stream_readers.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,21 +16,18 @@


@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_channels", [1])
@pytest.mark.parametrize("num_samples", [1])
def test___analog_single_channel_reader___read_one_sample(
benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int
benchmark: BenchmarkFixture, ai_benchmark_task: Task
) -> None:
reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream)

benchmark(reader.read_one_sample)


@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_channels", [1])
@pytest.mark.parametrize("num_samples", [1, 1000])
def test___analog_single_channel_reader___read_many_sample(
benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int
benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_samples: int
) -> None:
reader = AnalogSingleChannelReader(ai_benchmark_task.in_stream)
data = numpy.full(num_samples, math.inf, dtype=numpy.float64)
Expand All @@ -39,14 +36,12 @@ def test___analog_single_channel_reader___read_many_sample(


@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_channels", [1])
@pytest.mark.parametrize("num_samples", [1, 1000])
@pytest.mark.parametrize("waveform_attribute_mode", list(WaveformAttributeMode))
@pytest.mark.grpc_skip(reason="read_analog_waveform not implemented in GRPC")
def test___analog_single_channel_reader___read_waveform(
benchmark: BenchmarkFixture,
ai_benchmark_task: Task,
num_channels: int,
num_samples: int,
waveform_attribute_mode: WaveformAttributeMode,
) -> None:
Expand All @@ -59,9 +54,8 @@ def test___analog_single_channel_reader___read_waveform(

@pytest.mark.benchmark(group="analog_readers")
@pytest.mark.parametrize("num_channels", [1, 2, 8])
@pytest.mark.parametrize("num_samples", [1])
def test___analog_multi_channel_reader___read_one_sample(
benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int, num_samples: int
benchmark: BenchmarkFixture, ai_benchmark_task: Task, num_channels: int
) -> None:
reader = AnalogMultiChannelReader(ai_benchmark_task.in_stream)
data = numpy.full(num_channels, math.inf, dtype=numpy.float64)
Expand Down
6 changes: 0 additions & 6 deletions tests/benchmark/test_analog_stream_writers.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,24 +13,20 @@


@pytest.mark.benchmark(group="analog_writers")
@pytest.mark.parametrize("num_channels", [1])
def test___analog_single_channel_writer___write_one_sample(
benchmark: BenchmarkFixture,
ao_benchmark_task: nidaqmx.Task,
num_channels: int,
) -> None:
writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False)

benchmark(writer.write_one_sample, 1.0)


@pytest.mark.benchmark(group="analog_writers")
@pytest.mark.parametrize("num_channels", [1])
@pytest.mark.parametrize("num_samples", [1, 1000])
def test___analog_single_channel_writer___write_many_sample(
benchmark: BenchmarkFixture,
ao_benchmark_task: nidaqmx.Task,
num_channels: int,
num_samples: int,
) -> None:
writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False)
Expand All @@ -40,13 +36,11 @@ def test___analog_single_channel_writer___write_many_sample(


@pytest.mark.benchmark(group="analog_writers")
@pytest.mark.parametrize("num_channels", [1])
@pytest.mark.parametrize("num_samples", [1, 1000])
@pytest.mark.grpc_skip(reason="write_analog_waveform not implemented in GRPC")
def test___analog_single_channel_writer___write_waveform(
benchmark: BenchmarkFixture,
ao_benchmark_task: nidaqmx.Task,
num_channels: int,
num_samples: int,
) -> None:
writer = AnalogSingleChannelWriter(ao_benchmark_task.out_stream, auto_start=False)
Expand Down
Loading
Loading