Skip to content

Commit f746052

Browse files
Clean up PWMs better (especially when used in a REPL or script), and make self_test run in the main thread so it cleans up properly (self_test is slower, but it's a good tradeoff)
1 parent 59c818d commit f746052

File tree

6 files changed

+275
-104
lines changed

6 files changed

+275
-104
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
- Fixed calibration detail pages so `Set active` / `Set inactive` waits for backend task completion before refetching, preventing stale "Set active" and missing "Active" status until manual refresh.
3939
- Made IR reference-noise gating in OD reading scale with the configured reading interval (baseline `std <= 0.01` at `5.0s`), including when the interval is changed at runtime.
4040
- Fixed Inventory model updates and active/inactive toggles to show success only after confirmed backend `2xx` responses, with explicit error feedback on failure.
41+
- Fixed a bug where changing the RPM while stirring was paused would restart stirring.
4142

4243

4344

core/pioreactor/actions/self_test.py

Lines changed: 100 additions & 101 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99
import sys
1010
from contextlib import nullcontext
1111
from json import dumps
12-
from threading import Thread
1312
from time import sleep
1413
from typing import Callable
1514
from typing import cast
@@ -177,45 +176,54 @@ def test_all_positive_correlations_between_pds_and_leds(
177176
st.block_until_rpm_is_close_to_target(abs_tolerance=150, timeout=10)
178177
# for led_channel in ALL_LED_CHANNELS: # we use to check all LED channels, but most users don't need to check all, also https://github.com/Pioreactor/pioreactor/issues/445
179178
for led_channel in [ir_led_channel]: # fast to just check IR
180-
varying_intensity_results: dict[PdChannel, list[float]] = {
181-
pd_channel: [] for pd_channel in pd_channels_available
182-
}
183-
for intensity in INTENSITIES:
184-
# turn on the LED to set intensity
185-
led_intensity(
186-
{led_channel: intensity},
187-
unit=unit,
188-
experiment=experiment,
189-
verbose=False,
190-
source_of_event="self_test",
191-
)
192-
193-
# record from ADC, we'll average them
194-
avg_reading = average_over_raw_pd_readings(
195-
adc_reader.take_reading(),
196-
adc_reader.take_reading(),
197-
adc_reader.take_reading(),
198-
)
199-
200-
# turn off led to cool it down
179+
try:
180+
varying_intensity_results: dict[PdChannel, list[float]] = {
181+
pd_channel: [] for pd_channel in pd_channels_available
182+
}
183+
for intensity in INTENSITIES:
184+
# turn on the LED to set intensity
185+
led_intensity(
186+
{led_channel: intensity},
187+
unit=unit,
188+
experiment=experiment,
189+
verbose=False,
190+
source_of_event="self_test",
191+
)
192+
193+
# record from ADC, we'll average them
194+
avg_reading = average_over_raw_pd_readings(
195+
adc_reader.take_reading(),
196+
adc_reader.take_reading(),
197+
adc_reader.take_reading(),
198+
)
199+
200+
# turn off led to cool it down
201+
led_intensity(
202+
{led_channel: 0},
203+
unit=unit,
204+
experiment=experiment,
205+
verbose=False,
206+
source_of_event="self_test",
207+
)
208+
sleep(intensity / 100) # let it cool down in proportion to the intensity
209+
210+
# Add to accumulating list
211+
for pd_channel in pd_channels_available:
212+
varying_intensity_results[pd_channel].append(avg_reading[pd_channel].reading)
213+
214+
if avg_reading[pd_channel].reading >= 2.0:
215+
# we are probably going to saturate the PD - clearly we are detecting something though!
216+
logger.debug(
217+
f"Saw {avg_reading[pd_channel].reading:.2f} for pair pd_channel={pd_channel}, led_channel={led_channel}@intensity={intensity}. Saturation possible. No solution implemented yet! See issue #445"
218+
)
219+
finally:
201220
led_intensity(
202221
{led_channel: 0},
203222
unit=unit,
204223
experiment=experiment,
205224
verbose=False,
206225
source_of_event="self_test",
207226
)
208-
sleep(intensity / 100) # let it cool down in proportion to the intensity
209-
210-
# Add to accumulating list
211-
for pd_channel in pd_channels_available:
212-
varying_intensity_results[pd_channel].append(avg_reading[pd_channel].reading)
213-
214-
if avg_reading[pd_channel].reading >= 2.0:
215-
# we are probably going to saturate the PD - clearly we are detecting something though!
216-
logger.debug(
217-
f"Saw {avg_reading[pd_channel].reading:.2f} for pair pd_channel={pd_channel}, led_channel={led_channel}@intensity={intensity}. Saturation possible. No solution implemented yet! See issue #445"
218-
)
219227

220228
# compute the linear correlation between the intensities and observed PD measurements
221229
for pd_channel in pd_channels_available:
@@ -456,52 +464,52 @@ def test_positive_correlation_between_rpm_and_stirring(
456464
assert measured_correlation > 0.9, f"RPM correlation not high enough: {(dcs, measured_rpms)}"
457465

458466

459-
class BatchTestRunner:
460-
def __init__(self, tests_to_run: list[Callable], *test_func_args) -> None:
461-
self.count_tested = 0
462-
self.count_passed = 0
463-
self.failed_tests: list[str] = []
464-
self.tests_to_run = tests_to_run
465-
self._thread = Thread(target=self._run, args=test_func_args) # don't make me daemon: 295
466-
467-
def start(self):
468-
self._thread.start()
469-
return self
470-
471-
def collect(self) -> SummableDict:
472-
self._thread.join()
473-
return SummableDict(
474-
{
475-
"count_tested": self.count_tested,
476-
"count_passed": self.count_passed,
477-
"failed_tests": self.failed_tests,
478-
}
479-
)
480-
481-
def _run(self, managed_state, logger: CustomLogger, unit: str, testing_experiment: str) -> None:
482-
for test in self.tests_to_run:
483-
test_name = test.__name__
484-
485-
success = True
486-
logger.debug(f"Starting test {test_name}...")
487-
try:
488-
test(managed_state, logger, unit, testing_experiment)
489-
except Exception as e:
490-
success = False
491-
logger.debug(e, exc_info=True)
492-
logger.warning(f"{test_name.replace('_', ' ')}: {e}")
493-
494-
logger.debug(f"{test_name}: {'✅' if success else '❌'}")
495-
496-
self.count_tested += 1
497-
self.count_passed += int(success)
498-
if not success:
499-
self.failed_tests.append(test_name)
500-
501-
managed_state.publish_setting(test_name, int(success))
502-
503-
with local_persistent_storage("self_test_results") as c:
504-
c[test_name] = int(success)
467+
def run_tests(
468+
tests_to_run: list[Callable],
469+
managed_state,
470+
logger: CustomLogger,
471+
unit: str,
472+
testing_experiment: str,
473+
) -> SummableDict:
474+
count_tested = 0
475+
count_passed = 0
476+
failed_tests: list[str] = []
477+
478+
for test in tests_to_run:
479+
if managed_state.exit_event.is_set():
480+
logger.info("Self-test interrupted, stopping remaining tests.")
481+
break
482+
483+
test_name = test.__name__
484+
485+
success = True
486+
logger.debug(f"Starting test {test_name}...")
487+
try:
488+
test(managed_state, logger, unit, testing_experiment)
489+
except Exception as e:
490+
success = False
491+
logger.debug(e, exc_info=True)
492+
logger.warning(f"{test_name.replace('_', ' ')}: {e}")
493+
494+
logger.debug(f"{test_name}: {'✅' if success else '❌'}")
495+
496+
count_tested += 1
497+
count_passed += int(success)
498+
if not success:
499+
failed_tests.append(test_name)
500+
501+
managed_state.publish_setting(test_name, int(success))
502+
503+
with local_persistent_storage("self_test_results") as c:
504+
c[test_name] = int(success)
505+
506+
return SummableDict(
507+
{
508+
"count_tested": count_tested,
509+
"count_passed": count_passed,
510+
"failed_tests": failed_tests,
511+
}
512+
)
505513

506514

507515
def get_failed_test_names() -> Iterator[str]:
@@ -526,21 +534,6 @@ def click_self_test(k: Optional[str], retry_failed: bool) -> int:
526534
testing_experiment = UNIVERSAL_EXPERIMENT
527535
logger = create_logger("self_test", unit=unit, experiment=testing_experiment)
528536

529-
A_TESTS = (
530-
test_pioreactor_HAT_present,
531-
test_detect_heating_pcb,
532-
test_positive_correlation_between_temperature_and_heating,
533-
test_aux_power_is_not_too_high,
534-
)
535-
B_TESTS = (
536-
test_all_positive_correlations_between_pds_and_leds,
537-
test_ambient_light_interference,
538-
test_REF_is_lower_than_0_dot_256_volts,
539-
test_REF_is_in_correct_position,
540-
test_PD_is_near_0_volts_for_blank,
541-
test_positive_correlation_between_rpm_and_stirring,
542-
)
543-
544537
with managed_lifecycle(unit, testing_experiment, "self_test") as managed_state:
545538
if any(
546539
is_pio_job_running(
@@ -562,20 +555,25 @@ def click_self_test(k: Optional[str], retry_failed: bool) -> int:
562555
if k:
563556
tests_to_run = (name for name in tests_to_run if k in name)
564557

565-
functions_to_test = {vars(sys.modules[__name__])[name] for name in tuple(tests_to_run)}
558+
test_names = tuple(tests_to_run)
559+
functions_to_test = [vars(sys.modules[__name__])[name] for name in test_names]
566560

567561
logger.info(f"Starting self-test. Running {len(functions_to_test)} tests.")
568562

569563
# and clear the mqtt cache first
570564
for f in functions_to_test:
571565
managed_state.publish_setting(f.__name__, None)
572566

573-
# some tests can be run in parallel.
574-
test_args = (managed_state, logger, unit, testing_experiment)
575-
RunnerA = BatchTestRunner([f for f in A_TESTS if f in functions_to_test], *test_args).start()
576-
RunnerB = BatchTestRunner([f for f in B_TESTS if f in functions_to_test], *test_args).start()
567+
try:
568+
results = run_tests(functions_to_test, managed_state, logger, unit, testing_experiment)
569+
except KeyboardInterrupt:
570+
logger.info("Self-test interrupted, waiting for cleanup.")
571+
raise click.Abort()
572+
573+
if managed_state.exit_event.is_set():
574+
logger.info("Self-test interrupted.")
575+
raise click.Abort()
577576

578-
results = RunnerA.collect() + RunnerB.collect()
579577
count_tested, count_passed = results["count_tested"], results["count_passed"]
580578
count_failures = int(count_tested - count_passed)
581579

@@ -587,7 +585,8 @@ def click_self_test(k: Optional[str], retry_failed: bool) -> int:
587585
logger.info("All tests passed ✅")
588586
elif count_failures > 0:
589587
logger.info(f"{count_failures} failed test{'s' if count_failures > 1 else ''} ❌")
590-
failed_tests = "\n ".join(f"{test_name}" for test_name in results["failed_tests"])
588+
failed_test_names = cast(list[str], results["failed_tests"])
589+
failed_tests = "\n ".join(f"{test_name}" for test_name in failed_test_names)
591590
logger.debug(f"Failed tests:\n {failed_tests}")
592591

593592
return int(count_failures > 0)

core/pioreactor/background_jobs/stirring.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -538,6 +538,11 @@ def on_sleeping_to_ready(self) -> None:
538538
def set_duty_cycle(self, value: float) -> None:
539539
with self.duty_cycle_lock:
540540
self.duty_cycle = clamp(0.0, round(value, 5), 100.0)
541+
542+
# exit if not ready
543+
if self.state is not st.READY:
544+
return
545+
541546
self.pwm.change_duty_cycle(self.duty_cycle)
542547

543548
def set_target_rpm(self, value: float) -> None:
@@ -546,19 +551,23 @@ def set_target_rpm(self, value: float) -> None:
546551
raise ValueError("Can't set target RPM when no RPM measurement is being made")
547552

548553
self.target_rpm = clamp(0.0, float(value), 5_000.0)
554+
self.pid.set_setpoint(self.target_rpm)
549555

550556
if self.target_rpm == 0:
551557
self._estimate_duty_cycle = 0
552558
else:
553559
self._estimate_duty_cycle = self.rpm_to_dc_lookup(self.target_rpm)
554560

561+
# exit if not ready
562+
if self.state is not st.READY:
563+
return
564+
555565
if self.duty_cycle == 0:
556566
# we are currently _not_ moving, need to kick for a moment.
557567
self.set_duty_cycle(100) # get momentum to start
558568
sleep(0.35)
559569

560570
self.set_duty_cycle(self._estimate_duty_cycle)
561-
self.pid.set_setpoint(self.target_rpm)
562571

563572
def sleep_if_ready(self, seconds):
564573
if self.state is st.READY:

0 commit comments

Comments
 (0)