diff --git a/robot-server/robot_server/protocols/protocol_store.py b/robot-server/robot_server/protocols/protocol_store.py
index 0488a958a12..13676a798eb 100644
--- a/robot-server/robot_server/protocols/protocol_store.py
+++ b/robot-server/robot_server/protocols/protocol_store.py
@@ -24,6 +24,7 @@
     analysis_primitive_type_rtp_table,
     analysis_csv_rtp_table,
     data_files_table,
+    run_csv_rtp_table,
     ProtocolKindSQLEnum,
 )
 from robot_server.protocols.protocol_models import ProtocolKind
@@ -310,20 +311,38 @@ def get_usage_info(self) -> List[ProtocolUsageInfo]:
     # TODO (spp, 2024-07-22): get files referenced in runs as well
     async def get_referenced_data_files(self, protocol_id: str) -> List[DataFile]:
         """Get a list of data files referenced in specified protocol's analyses and runs."""
-        # Get analyses of protocol_id
+        # Get analyses and runs of protocol_id
         select_referencing_analysis_ids = sqlalchemy.select(analysis_table.c.id).where(
             analysis_table.c.protocol_id == protocol_id
         )
+        select_referencing_run_ids = sqlalchemy.select(run_table.c.id).where(
+            run_table.c.protocol_id == protocol_id
+        )
         # Get all entries in csv table that match the analyses
-        csv_file_ids = sqlalchemy.select(analysis_csv_rtp_table.c.file_id).where(
+        analysis_csv_file_ids = sqlalchemy.select(
+            analysis_csv_rtp_table.c.file_id
+        ).where(
             analysis_csv_rtp_table.c.analysis_id.in_(select_referencing_analysis_ids)
         )
+        run_csv_file_ids = sqlalchemy.select(run_csv_rtp_table.c.file_id).where(
+            run_csv_rtp_table.c.run_id.in_(select_referencing_run_ids)
+        )
         # Get list of data file IDs from the entries
-        select_data_file_rows_statement = data_files_table.select().where(
-            data_files_table.c.id.in_(csv_file_ids)
+        select_analysis_data_file_rows_statement = data_files_table.select().where(
+            data_files_table.c.id.in_(analysis_csv_file_ids)
+        )
+        select_run_data_file_rows_statement = data_files_table.select().where(
+            data_files_table.c.id.in_(run_csv_file_ids)
         )
         with self._sql_engine.begin() as transaction:
-            data_files_rows = transaction.execute(select_data_file_rows_statement).all()
+            analysis_data_files_rows = transaction.execute(
+                select_analysis_data_file_rows_statement
+            ).all()
+            run_data_files_rows = transaction.execute(
+                select_run_data_file_rows_statement
+            ).all()
+
+        combined_data_file_rows = set(analysis_data_files_rows + run_data_files_rows)

         return [
             DataFile(
@@ -331,7 +350,7 @@ async def get_referenced_data_files(self, protocol_id: str) -> List[DataFile]:
                 name=sql_row.name,
                 createdAt=sql_row.created_at,
             )
-            for sql_row in data_files_rows
+            for sql_row in combined_data_file_rows
         ]

     def get_referencing_run_ids(self, protocol_id: str) -> List[str]:
diff --git a/robot-server/robot_server/runs/router/base_router.py b/robot-server/robot_server/runs/router/base_router.py
index 14c5b822fda..1ed03b44cd7 100644
--- a/robot-server/robot_server/runs/router/base_router.py
+++ b/robot-server/robot_server/runs/router/base_router.py
@@ -176,6 +176,9 @@ async def create_run(
     rtp_values = (
         request_body.data.runTimeParameterValues if request_body is not None else None
     )
+    rtp_files = (
+        request_body.data.runTimeParameterFiles if request_body is not None else None
+    )
     protocol_resource = None

     deck_configuration = await deck_configuration_store.get_deck_configuration()
@@ -206,6 +209,7 @@ async def create_run(
             labware_offsets=offsets,
             deck_configuration=deck_configuration,
             run_time_param_values=rtp_values,
+            run_time_param_files=rtp_files,
             protocol=protocol_resource,
             notify_publishers=notify_publishers,
         )
diff --git a/robot-server/robot_server/runs/run_data_manager.py b/robot-server/robot_server/runs/run_data_manager.py
index de5eea82e45..c5cacbb7571 100644
--- a/robot-server/robot_server/runs/run_data_manager.py
+++ b/robot-server/robot_server/runs/run_data_manager.py
@@ -12,7 +12,10 @@
     CommandPointer,
     Command,
 )
-from opentrons.protocol_engine.types import PrimitiveRunTimeParamValuesType
+from opentrons.protocol_engine.types import (
+    CSVRunTimeParamFilesType,
+    PrimitiveRunTimeParamValuesType,
+)

 from robot_server.protocols.protocol_store import ProtocolResource
 from robot_server.service.task_runner import TaskRunner
@@ -156,6 +159,7 @@ async def create(
         labware_offsets: List[LabwareOffsetCreate],
         deck_configuration: DeckConfigurationType,
         run_time_param_values: Optional[PrimitiveRunTimeParamValuesType],
+        run_time_param_files: Optional[CSVRunTimeParamFilesType],
         notify_publishers: Callable[[], None],
         protocol: Optional[ProtocolResource],
     ) -> Union[Run, BadRun]:
@@ -168,6 +172,7 @@
             deck_configuration: A mapping of fixtures to cutout fixtures the deck will be loaded with.
             notify_publishers: Utilized by the engine to notify publishers of state changes.
             run_time_param_values: Any runtime parameter values to set.
+            run_time_param_files: Any runtime parameter files to set.
             protocol: The protocol to load the runner with, if any.

         Returns:
@@ -192,6 +197,7 @@
             deck_configuration=deck_configuration,
             protocol=protocol,
             run_time_param_values=run_time_param_values,
+            run_time_param_files=run_time_param_files,
             notify_publishers=notify_publishers,
         )
         run_resource = self._run_store.insert(
@@ -210,7 +216,7 @@
             run_resource=run_resource,
             state_summary=state_summary,
             current=True,
-            run_time_parameters=[],
+            run_time_parameters=self._run_orchestrator_store.get_run_time_parameters(),
         )

     def get(self, run_id: str) -> Union[Run, BadRun]:
diff --git a/robot-server/robot_server/runs/run_models.py b/robot-server/robot_server/runs/run_models.py
index 45ad22e3167..db068870915 100644
--- a/robot-server/robot_server/runs/run_models.py
+++ b/robot-server/robot_server/runs/run_models.py
@@ -19,6 +19,7 @@
     CommandNote,
 )
 from opentrons.protocol_engine.types import (
+    CSVRunTimeParamFilesType,
     RunTimeParameter,
     PrimitiveRunTimeParamValuesType,
 )
@@ -252,6 +253,10 @@ class RunCreate(BaseModel):
         None,
         description="Key-value pairs of run-time parameters defined in a protocol.",
     )
+    runTimeParameterFiles: Optional[CSVRunTimeParamFilesType] = Field(
+        None,
+        description="Key-fileId pairs of CSV run-time parameters defined in a protocol.",
+    )


 class RunUpdate(BaseModel):
diff --git a/robot-server/robot_server/runs/run_orchestrator_store.py b/robot-server/robot_server/runs/run_orchestrator_store.py
index 11448a81d0c..953c9758cb1 100644
--- a/robot-server/robot_server/runs/run_orchestrator_store.py
+++ b/robot-server/robot_server/runs/run_orchestrator_store.py
@@ -4,7 +4,11 @@
 from typing import List, Optional, Callable

 from opentrons.protocol_engine.errors.exceptions import EStopActivatedError
-from opentrons.protocol_engine.types import PostRunHardwareState, RunTimeParameter
+from opentrons.protocol_engine.types import (
+    CSVRunTimeParamFilesType,
+    PostRunHardwareState,
+    RunTimeParameter,
+)
 from opentrons_shared_data.labware.labware_definition import LabwareDefinition
 from opentrons_shared_data.robot.types import RobotType

@@ -188,6 +192,7 @@ async def create(
         notify_publishers: Callable[[], None],
         protocol: Optional[ProtocolResource],
         run_time_param_values: Optional[PrimitiveRunTimeParamValuesType] = None,
+        run_time_param_files: Optional[CSVRunTimeParamFilesType] = None,
     ) -> StateSummary:
         """Create and store a ProtocolRunner and ProtocolEngine for a given Run.

@@ -198,6 +203,7 @@
             notify_publishers: Utilized by the engine to notify publishers of state changes.
             protocol: The protocol to load the runner with, if any.
             run_time_param_values: Any runtime parameter values to set.
+            run_time_param_files: Any runtime parameter files to set.

         Returns:
             The initial equipment and status summary of the engine.
@@ -243,8 +249,7 @@
             await self.run_orchestrator.load(
                 protocol.source,
                 run_time_param_values=run_time_param_values,
-                # TODO (spp, 2024-07-16): update this once runs accept csv params
-                run_time_param_files={},
+                run_time_param_files=run_time_param_files,
                 parse_mode=ParseMode.ALLOW_LEGACY_METADATA_AND_REQUIREMENTS,
             )
         else:
diff --git a/robot-server/robot_server/runs/run_store.py b/robot-server/robot_server/runs/run_store.py
index 6cf86d14af1..bbd50b1f713 100644
--- a/robot-server/robot_server/runs/run_store.py
+++ b/robot-server/robot_server/runs/run_store.py
@@ -25,6 +25,7 @@
     run_table,
     run_command_table,
     action_table,
+    run_csv_rtp_table,
 )
 from robot_server.persistence.pydantic import (
     json_to_pydantic,
@@ -85,6 +86,15 @@ class BadStateSummary:
     dataError: EnumeratedError


+@dataclass
+class CSVParameterRunResource:
+    """A CSV runtime parameter from a completed run, storable in a SQL database."""
+
+    run_id: str
+    parameter_variable_name: str
+    file_id: Optional[str]
+
+
 class CommandNotFoundError(ValueError):
     """Error raised when a given command ID is not found in the store."""

@@ -198,6 +208,39 @@ def insert_action(self, run_id: str, action: RunAction) -> None:

         self._clear_caches()

+    def get_all_csv_rtp(self) -> List[CSVParameterRunResource]:
+        """Get all CSV run-time parameters from the run_csv_rtp_table."""
+        select_all_csv_rtp = sqlalchemy.select(run_csv_rtp_table).order_by(
+            sqlite_rowid.asc()
+        )
+
+        with self._sql_engine.begin() as transaction:
+            csv_rtps = transaction.execute(select_all_csv_rtp).all()
+
+        return [_convert_row_to_csv_rtp(row) for row in csv_rtps]
+
+    def insert_csv_rtp(
+        self, run_id: str, run_time_parameters: List[RunTimeParameter]
+    ) -> None:
+        """Save a run's CSV run-time parameters to the run_csv_rtp_table."""
+        insert_csv_rtp = sqlalchemy.insert(run_csv_rtp_table)
+
+        with self._sql_engine.begin() as transaction:
+            if not self._run_exists(run_id, transaction):
+                raise RunNotFoundError(run_id=run_id)
+            for run_time_param in run_time_parameters:
+                if run_time_param.type == "csv_file":
+                    transaction.execute(
+                        insert_csv_rtp,
+                        {
+                            "run_id": run_id,
+                            "parameter_variable_name": run_time_param.variableName,
+                            "file_id": run_time_param.file.id
+                            if run_time_param.file
+                            else None,
+                        },
+                    )
+
     def insert(
         self,
         run_id: str,
@@ -531,6 +574,22 @@ def _clear_caches(self) -> None:
 _run_columns = [run_table.c.id, run_table.c.protocol_id, run_table.c.created_at]


+def _convert_row_to_csv_rtp(
+    row: sqlalchemy.engine.Row,
+) -> CSVParameterRunResource:
+    run_id = row.run_id
+    parameter_variable_name = row.parameter_variable_name
+    file_id = row.file_id
+
+    assert isinstance(run_id, str)
+    assert isinstance(parameter_variable_name, str)
+    assert isinstance(file_id, str) or file_id is None
+
+    return CSVParameterRunResource(
+        run_id=run_id, parameter_variable_name=parameter_variable_name, file_id=file_id
+    )
+
+
 def _convert_row_to_run(
     row: sqlalchemy.engine.Row,
     action_rows: List[sqlalchemy.engine.Row],
diff --git a/robot-server/tests/integration/http_api/runs/test_run_with_run_time_parameters.tavern.yaml b/robot-server/tests/integration/http_api/runs/test_run_with_run_time_parameters.tavern.yaml
index 9d91abea32f..a616a50cc66 100644
--- a/robot-server/tests/integration/http_api/runs/test_run_with_run_time_parameters.tavern.yaml
+++ b/robot-server/tests/integration/http_api/runs/test_run_with_run_time_parameters.tavern.yaml
@@ -34,6 +34,7 @@ stages:
       save:
         json:
           run_id: data.id
+          run_time_parameters_data: data.runTimeParameters
       json:
         data:
           id: !anystr
@@ -48,7 +49,49 @@ stages:
           modules: []
           labware: []
           labwareOffsets: []
-          runTimeParameters: []
+          runTimeParameters:
+            - displayName: Sample count
+              variableName: sample_count
+              type: int
+              default: 6.0
+              min: 1.0
+              max: 12.0
+              value: 4.0
+              description: How many samples to process.
+            - displayName: Pipette volume
+              variableName: volume
+              type: float
+              default: 20.1
+              choices:
+                - displayName: Low Volume
+                  value: 10.23
+                - displayName: Medium Volume
+                  value: 20.1
+                - displayName: High Volume
+                  value: 50.5
+              value: 10.23
+              description: How many microliters to pipette of each sample.
+            - displayName: Dry Run
+              variableName: dry_run
+              type: bool
+              default: false
+              value: true
+              description: Skip aspirate and dispense steps.
+            - displayName: Pipette Name
+              variableName: pipette
+              type: str
+              choices:
+                - displayName: Single channel 50µL
+                  value: flex_1channel_50
+                - displayName: Eight Channel 50µL
+                  value: flex_8channel_50
+              default: flex_1channel_50
+              value: flex_8channel_50
+              description: What pipette to use during the protocol.
+            - displayName: Liquid handling CSV file
+              variableName: liq_handling_csv_file
+              description: A CSV file that contains wells to use for pipetting
+              type: csv_file
           liquids: []
           protocolId: '{protocol_id}'
@@ -96,48 +139,7 @@
           createdAt: !re_fullmatch "\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+(Z|([+-]\\d{2}:\\d{2}))"
           status: succeeded
           current: True
-          runTimeParameters:
-            - displayName: Sample count
-              variableName: sample_count
-              type: int
-              default: 6.0
-              min: 1.0
-              max: 12.0
-              value: 4.0
-              description: How many samples to process.
-            - displayName: Pipette volume
-              variableName: volume
-              type: float
-              default: 20.1
-              choices:
-                - displayName: Low Volume
-                  value: 10.23
-                - displayName: Medium Volume
-                  value: 20.1
-                - displayName: High Volume
-                  value: 50.5
-              value: 10.23
-              description: How many microliters to pipette of each sample.
-            - displayName: Dry Run
-              variableName: dry_run
-              type: bool
-              default: false
-              value: true
-              description: Skip aspirate and dispense steps.
-            - displayName: Pipette Name
-              variableName: pipette
-              type: str
-              choices:
-                - displayName: Single channel 50µL
-                  value: flex_1channel_50
-                - displayName: Eight Channel 50µL
-                  value: flex_8channel_50
-              default: flex_1channel_50
-              value: flex_8channel_50
-              description: What pipette to use during the protocol.
-            - displayName: Liquid handling CSV file
-              variableName: liq_handling_csv_file
-              description: A CSV file that contains wells to use for pipetting
+          runTimeParameters: !force_original_structure '{run_time_parameters_data}'
           protocolId: '{protocol_id}'

   - name: Mark the run as not-current
           createdAt: !re_fullmatch "\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+(Z|([+-]\\d{2}:\\d{2}))"
           status: succeeded
           current: False
-          runTimeParameters:
-            - displayName: Sample count
-              variableName: sample_count
-              type: int
-              default: 6.0
-              min: 1.0
-              max: 12.0
-              value: 4.0
-              description: How many samples to process.
-            - displayName: Pipette volume
-              variableName: volume
-              type: float
-              default: 20.1
-              choices:
-                - displayName: Low Volume
-                  value: 10.23
-                - displayName: Medium Volume
-                  value: 20.1
-                - displayName: High Volume
-                  value: 50.5
-              value: 10.23
-              description: How many microliters to pipette of each sample.
-            - displayName: Dry Run
-              variableName: dry_run
-              type: bool
-              default: false
-              value: true
-              description: Skip aspirate and dispense steps.
-            - displayName: Pipette Name
-              variableName: pipette
-              type: str
-              choices:
-                - displayName: Single channel 50µL
-                  value: flex_1channel_50
-                - displayName: Eight Channel 50µL
-                  value: flex_8channel_50
-              default: flex_1channel_50
-              value: flex_8channel_50
-              description: What pipette to use during the protocol.
-            - displayName: Liquid handling CSV file
-              variableName: liq_handling_csv_file
-              description: A CSV file that contains wells to use for pipetting
+          runTimeParameters: !force_original_structure '{run_time_parameters_data}'
           protocolId: '{protocol_id}'
diff --git a/robot-server/tests/protocols/test_protocol_store.py b/robot-server/tests/protocols/test_protocol_store.py
index 952bcb9c0fd..ff6d4ce7b49 100644
--- a/robot-server/tests/protocols/test_protocol_store.py
+++ b/robot-server/tests/protocols/test_protocol_store.py
@@ -1,4 +1,5 @@
 """Tests for the ProtocolStore interface."""
+from opentrons.protocol_engine.types import CSVParameter, FileInfo
 import pytest
 from decoy import Decoy
 from datetime import datetime, timezone
@@ -530,6 +531,7 @@ async def test_get_referenced_data_files(
     subject: ProtocolStore,
     data_files_store: DataFilesStore,
     completed_analysis_store: CompletedAnalysisStore,
+    run_store: RunStore,
 ) -> None:
     """It should fetch a list of data files referenced in protocol's analyses and runs."""
     protocol_resource_1 = ProtocolResource(
@@ -579,6 +581,7 @@
             liquids=[],
         ),
     )
+    subject.insert(protocol_resource_1)

     await data_files_store.insert(
         DataFileInfo(
@@ -596,6 +599,32 @@
             created_at=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
         )
     )
+    await data_files_store.insert(
+        DataFileInfo(
+            id="data-file-id-3",
+            name="file-name",
+            file_hash="abc123",
+            created_at=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
+        )
+    )
+
+    run_store.insert(
+        run_id="run-id-1",
+        protocol_id="protocol-id",
+        created_at=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
+    )
+
+    run_store.insert_csv_rtp(
+        run_id="run-id-1",
+        run_time_parameters=[
+            CSVParameter(
+                variableName="csvFile",
+                displayName="csv param",
+                file=FileInfo(id="data-file-id-3", name="file-name"),
+            )
+        ],
+    )
+
     await completed_analysis_store.make_room_and_add(
         completed_analysis_resource=analysis_resource1,
         primitive_rtp_resources=[],
@@ -618,15 +647,24 @@
         csv_rtp_resources=[],
     )
     result = await subject.get_referenced_data_files("protocol-id")
-    assert result == [
-        DataFile(
-            id="data-file-id",
-            name="file-name",
-            createdAt=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
-        ),
-        DataFile(
-            id="data-file-id-2",
-            name="file-name",
-            createdAt=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
-        ),
-    ]
+
+    for data_file in result:
+        assert data_file in [
+            DataFile(
+                id="data-file-id",
+                name="file-name",
+                createdAt=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
+            ),
+            DataFile(
+                id="data-file-id-2",
+                name="file-name",
+                createdAt=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
+            ),
+            DataFile(
+                id="data-file-id-3",
+                name="file-name",
+                createdAt=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
+            ),
+        ]
+
+    assert len(result) == 3
diff --git a/robot-server/tests/runs/router/test_base_router.py b/robot-server/tests/runs/router/test_base_router.py
index 979d3a92371..fd1cdd8b58a 100644
--- a/robot-server/tests/runs/router/test_base_router.py
+++ b/robot-server/tests/runs/router/test_base_router.py
@@ -97,6 +97,7 @@ async def test_create_run(
             deck_configuration=[],
             protocol=None,
             run_time_param_values=None,
+            run_time_param_files=None,
             notify_publishers=mock_notify_publishers,
         )
     ).then_return(expected_response)
@@ -177,6 +178,7 @@ async def test_create_protocol_run(
             deck_configuration=[],
             protocol=protocol_resource,
             run_time_param_values={"foo": "bar"},
+            run_time_param_files={"my_file": "file-id"},
             notify_publishers=mock_notify_publishers,
         )
     ).then_return(expected_response)
@@ -184,7 +186,9 @@
     result = await create_run(
         request_body=RequestModel(
             data=RunCreate(
-                protocolId="protocol-id", runTimeParameterValues={"foo": "bar"}
+                protocolId="protocol-id",
+                runTimeParameterValues={"foo": "bar"},
+                runTimeParameterFiles={"my_file": "file-id"},
             )
         ),
         protocol_store=mock_protocol_store,
@@ -245,6 +249,7 @@ async def test_create_run_conflict(
             deck_configuration=[],
             protocol=None,
             run_time_param_values=None,
+            run_time_param_files=None,
             notify_publishers=mock_notify_publishers,
         )
     ).then_raise(RunConflictError("oh no"))
diff --git a/robot-server/tests/runs/test_run_data_manager.py b/robot-server/tests/runs/test_run_data_manager.py
index ba0b457f9f6..a369f7f47b0 100644
--- a/robot-server/tests/runs/test_run_data_manager.py
+++ b/robot-server/tests/runs/test_run_data_manager.py
@@ -19,6 +19,7 @@
 )
 from opentrons.protocol_engine import Liquid
 from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryPolicy
+from opentrons.protocol_engine.types import BooleanParameter, CSVParameter
 from opentrons.protocol_runner import RunResult
 from opentrons.types import DeckSlotName

@@ -167,9 +168,13 @@ async def test_create(
             protocol=None,
             deck_configuration=[],
             run_time_param_values=None,
+            run_time_param_files=None,
             notify_publishers=mock_notify_publishers,
         )
     ).then_return(engine_state_summary)
+
+    decoy.when(mock_run_orchestrator_store.get_run_time_parameters()).then_return([])
+
     decoy.when(
         mock_run_store.insert(
             run_id=run_id,
@@ -185,6 +190,7 @@
         protocol=None,
         deck_configuration=[],
         run_time_param_values=None,
+        run_time_param_files=None,
         notify_publishers=mock_notify_publishers,
     )

@@ -238,6 +244,7 @@ async def test_create_with_options(
             protocol=protocol,
             deck_configuration=[],
             run_time_param_values={"foo": "bar"},
+            run_time_param_files={"my_file": "file-id"},
             notify_publishers=mock_notify_publishers,
         )
     ).then_return(engine_state_summary)
@@ -250,6 +257,16 @@
         )
     ).then_return(run_resource)

+    bool_parameter = BooleanParameter(
+        displayName="foo", variableName="bar", default=True, value=False
+    )
+
+    file_parameter = CSVParameter(displayName="my_file", variableName="file-id")
+
+    decoy.when(mock_run_orchestrator_store.get_run_time_parameters()).then_return(
+        [bool_parameter, file_parameter]
+    )
+
     result = await subject.create(
         run_id=run_id,
         created_at=created_at,
@@ -257,6 +274,7 @@
         protocol=protocol,
         deck_configuration=[],
         run_time_param_values={"foo": "bar"},
+        run_time_param_files={"my_file": "file-id"},
         notify_publishers=mock_notify_publishers,
     )

@@ -274,6 +292,7 @@
         pipettes=engine_state_summary.pipettes,
         modules=engine_state_summary.modules,
         liquids=engine_state_summary.liquids,
+        runTimeParameters=[bool_parameter, file_parameter],
     )


@@ -294,6 +313,7 @@ async def test_create_engine_error(
             protocol=None,
             deck_configuration=[],
             run_time_param_values=None,
+            run_time_param_files=None,
             notify_publishers=mock_notify_publishers,
         )
     ).then_raise(RunConflictError("oh no"))
@@ -306,6 +326,7 @@
             protocol=None,
             deck_configuration=[],
             run_time_param_values=None,
+            run_time_param_files=None,
             notify_publishers=mock_notify_publishers,
         )

@@ -753,6 +774,7 @@ async def test_create_archives_existing(
             protocol=None,
             deck_configuration=[],
             run_time_param_values=None,
+            run_time_param_files=None,
             notify_publishers=mock_notify_publishers,
         )
     ).then_return(engine_state_summary)
@@ -772,6 +794,7 @@
         protocol=None,
         deck_configuration=[],
         run_time_param_values=None,
+        run_time_param_files=None,
         notify_publishers=mock_notify_publishers,
     )

diff --git a/robot-server/tests/runs/test_run_store.py b/robot-server/tests/runs/test_run_store.py
index 7e4155ef1b5..f4b2b8e154f 100644
--- a/robot-server/tests/runs/test_run_store.py
+++ b/robot-server/tests/runs/test_run_store.py
@@ -1,9 +1,11 @@
 """Tests for robot_server.runs.run_store."""
 from datetime import datetime, timezone
+from pathlib import Path
 from typing import List, Optional, Type

 import pytest
 from decoy import Decoy
+from robot_server.data_files.data_files_store import DataFileInfo, DataFilesStore
 from sqlalchemy.engine import Engine
 from unittest import mock

@@ -12,6 +14,7 @@
 from robot_server.protocols.protocol_store import ProtocolNotFoundError

 from robot_server.runs.run_store import (
+    CSVParameterRunResource,
     RunStore,
     RunResource,
     CommandNotFoundError,
@@ -157,6 +160,7 @@ def run_time_parameters() -> List[pe_types.RunTimeParameter]:
             displayName="Display Name 4",
             variableName="variable_name_4",
             description="a csv parameter without file id",
+            file=pe_types.FileInfo(id="file-id", name="csvFile"),
         ),
     ]

@@ -202,7 +206,20 @@ def invalid_state_summary() -> StateSummary:
     )


-def test_update_run_state(
+@pytest.fixture
+def data_files_store(sql_engine: Engine, tmp_path: Path) -> DataFilesStore:
+    """Return a `DataFilesStore` linked to the same database as the subject under test.
+
+    `DataFilesStore` is tested elsewhere.
+    We only need it here to prepare the database for the run store tests.
+    The CSV parameters table always needs a data file to link to.
+ """ + data_files_dir = tmp_path / "data_files" + data_files_dir.mkdir() + return DataFilesStore(sql_engine=sql_engine, data_files_directory=data_files_dir) + + +async def test_update_run_state( subject: RunStore, state_summary: StateSummary, protocol_commands: List[pe_commands.Command], @@ -252,6 +269,39 @@ def test_update_run_state( ) +async def test_insert_and_get_csv_rtp( + subject: RunStore, + data_files_store: DataFilesStore, + run_time_parameters: List[pe_types.RunTimeParameter], +) -> None: + """It should be able to insert and get csv rtp from the db.""" + await data_files_store.insert( + DataFileInfo( + id="file-id", + name="my_csv_file.csv", + file_hash="file-hash", + created_at=datetime(year=2024, month=1, day=1, tzinfo=timezone.utc), + ) + ) + + subject.insert( + run_id="run-id", + protocol_id=None, + created_at=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc), + ) + + subject.insert_csv_rtp(run_id="run-id", run_time_parameters=run_time_parameters) + csv_rtp_result = subject.get_all_csv_rtp() + + assert csv_rtp_result == [ + CSVParameterRunResource( + run_id="run-id", + parameter_variable_name="variable_name_4", + file_id="file-id", + ) + ] + + def test_update_state_run_not_found( subject: RunStore, state_summary: StateSummary,