Skip to content

Commit

Permalink
Rework workflow handling, move sample data into package
Browse files Browse the repository at this point in the history
  • Loading branch information
multimeric committed Sep 15, 2023
1 parent d9695f0 commit 1b0b0f3
Show file tree
Hide file tree
Showing 24 changed files with 259 additions and 167 deletions.
40 changes: 21 additions & 19 deletions core/lls_core/cmds/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from lls_core.models.deconvolution import DeconvolutionParams
from lls_core.models.output import OutputParams
from lls_core import DeconvolutionChoice, DeskewDirection
import typer
from typer import Typer, Argument, Option

from lls_core.models.output import SaveFileType
from toolz.dicttoolz import merge
Expand All @@ -24,45 +24,48 @@ class CliDeskewDirection(StrEnum):
X = auto()
Y = auto()

app = Typer(add_completion=False)

@app.command()
def main(
image: Path = typer.Argument(help="Path to the image file to read, in a format readable by AICSImageIO, for example .tiff or .czi"),
skew: CliDeskewDirection = typer.Option(
image: Path = Argument(help="Path to the image file to read, in a format readable by AICSImageIO, for example .tiff or .czi"),
skew: CliDeskewDirection = Option(
default=DeskewParams.get_default("skew").name,
help=DeskewParams.get_description("skew")
),# DeskewParams.make_typer_field("skew"),
angle: float = DeskewParams.make_typer_field("angle") ,
pixel_sizes: Tuple[float, float, float] = typer.Option(
pixel_sizes: Tuple[float, float, float] = Option(
(
LatticeData.get_default("physical_pixel_sizes").X,
LatticeData.get_default("physical_pixel_sizes").Y,
LatticeData.get_default("physical_pixel_sizes").Z,
), help=DeskewParams.get_description("physical_pixel_sizes") + ". This takes three arguments, corresponding to the X Y and Z pixel dimensions respectively"
),

rois: List[Path] = typer.Option([], help="A list of paths pointing to regions of interest to crop to, in ImageJ format."),
rois: List[Path] = Option([], help="A list of paths pointing to regions of interest to crop to, in ImageJ format."),
# Ideally this and other range values would be defined as Tuples, but these seem to be broken: https://github.com/tiangolo/typer/discussions/667
z_start: Optional[int] = typer.Option(None, help="The index of the first Z slice to use. All prior Z slices will be discarded."),
z_end: Optional[int] = typer.Option(None, help="The index of the last Z slice to use. The selected index and all subsequent Z slices will be discarded."),
z_start: Optional[int] = Option(None, help="The index of the first Z slice to use. All prior Z slices will be discarded."),
z_end: Optional[int] = Option(None, help="The index of the last Z slice to use. The selected index and all subsequent Z slices will be discarded."),

enable_deconvolution: Annotated[bool, typer.Option("--deconvolution/--disable-deconvolution")] = False,
enable_deconvolution: Annotated[bool, Option("--deconvolution/--disable-deconvolution")] = False,
decon_processing: DeconvolutionChoice = DeconvolutionParams.make_typer_field("decon_processing"),
psf: Annotated[List[Path], typer.Option(help="A list of paths pointing to point spread functions to use for deconvolution. Each file should in a standard image format (.czi, .tiff etc), containing a 3D image array.")] = [],
psf: Annotated[List[Path], Option(help="A list of paths pointing to point spread functions to use for deconvolution. Each file should in a standard image format (.czi, .tiff etc), containing a 3D image array.")] = [],
psf_num_iter: int = DeconvolutionParams.make_typer_field("psf_num_iter"),
background: str = DeconvolutionParams.make_typer_field("background"),

time_start: Optional[int] = typer.Option(None, help="Index of the first time slice to use (inclusive)"),
time_end: Optional[int] = typer.Option(None, help="Index of the first time slice to use (exclusive)"),
time_start: Optional[int] = Option(None, help="Index of the first time slice to use (inclusive)"),
time_end: Optional[int] = Option(None, help="Index of the first time slice to use (exclusive)"),

channel_start: Optional[int] = typer.Option(None, help="Index of the first channel slice to use (inclusive)"),
channel_end: Optional[int] = typer.Option(None, help="Index of the first channel slice to use (exclusive)"),
channel_start: Optional[int] = Option(None, help="Index of the first channel slice to use (inclusive)"),
channel_end: Optional[int] = Option(None, help="Index of the first channel slice to use (exclusive)"),

save_dir: Path = OutputParams.make_typer_field("save_dir"),
save_name: Optional[str] = OutputParams.make_typer_field("save_name"),
save_type: SaveFileType = OutputParams.make_typer_field("save_type"),

workflow: Optional[Path] = typer.Option(None, help="Path to a Napari Workflow file, in JSON format. If provided, the configured desekewing processing will be added to the chosen workflow."),
json_config: Optional[Path] = typer.Option(None),
yaml_config: Optional[Path] = typer.Option(None)
workflow: Optional[Path] = Option(None, help="Path to a Napari Workflow file, in JSON format. If provided, the configured desekewing processing will be added to the chosen workflow."),
json_config: Optional[Path] = Option(None),
yaml_config: Optional[Path] = Option(None)
):
cli_args = dict(
image=image,
Expand All @@ -83,7 +86,7 @@ def main(
psf_num_iter = psf_num_iter,
background = background
),
workflow=None,
workflow=workflow,

time_range=(time_start, time_end),
channel_range=(channel_start, channel_end),
Expand All @@ -106,6 +109,5 @@ def main(

return LatticeData.parse_obj(merge(yaml_args, json_args, cli_args))


if __name__ == '__main__':
typer.run(main)
app()
110 changes: 88 additions & 22 deletions core/lls_core/models/lattice_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import tifffile

from typing import Any, Iterable, List, Literal, Optional, TYPE_CHECKING, Tuple
from typing_extensions import TypedDict, NotRequired
from typing_extensions import TypedDict, NotRequired, Generic, TypeVar

from aicsimageio.types import PhysicalPixelSizes
import pyclesperanto_prototype as cle
Expand All @@ -32,22 +32,35 @@
if TYPE_CHECKING:
import pyclesperanto_prototype as cle
from lls_core.models.deskew import DefinedPixelSizes
from numpy.typing import NDArray

import logging

logger = logging.getLogger(__name__)

class ProcessedVolume(BaseModel, arbitrary_types_allowed=True):
"""
A slice of the image processing result
"""
T = TypeVar("T")
S = TypeVar("S")

class SlicedData(BaseModel, Generic[T]):
    """
    A single slice of an image stack, together with the time/channel
    coordinates it was taken from.
    """
    # The wrapped payload for this slice (an array, a workflow, etc)
    data: T
    # Index of this time point within the selected time range
    time_index: NonNegativeInt
    # The actual time point in the source image
    time: NonNegativeInt
    # Index of this channel within the selected channel range
    channel_index: NonNegativeInt
    # The actual channel in the source image
    channel: NonNegativeInt
    # Index of the region of interest, if any — None when not cropping
    roi_index: Optional[NonNegativeInt] = None

    # NOTE: the return annotation is quoted because SlicedData is not yet
    # bound while the class body is executing
    def copy_with_data(self, data: S) -> "SlicedData[S]":
        """
        Return a copy of this slice whose `data` is replaced by the given
        value, preserving the time/channel/ROI coordinates.
        """
        # `cast` lives in the standard `typing` module; typing_extensions
        # does not reliably re-export it
        from typing import cast
        return cast(
            SlicedData[S],
            self.copy(update={
                "data": data
            })
        )
ProcessedVolume = SlicedData[ArrayLike]

class ProcessedSlices(BaseModel):
#: Iterable of result slices.
#: Note that this is a finite iterator that can only be iterated once
Expand Down Expand Up @@ -292,7 +305,7 @@ def slice_data(self, time: int, channel: int) -> DataArray:

raise Exception("Lattice data must be 3-5 dimensions")

def iter_slices(self) -> Iterable[Tuple[int, int, int, int, ArrayLike]]:
def iter_slices(self) -> Iterable[SlicedData[ArrayLike]]:
"""
Yields array slices for each time and channel of interest.
Expand All @@ -301,7 +314,48 @@ def iter_slices(self) -> Iterable[Tuple[int, int, int, int, ArrayLike]]:
"""
for time_idx, time in enumerate(self.time_range):
for ch_idx, ch in enumerate(self.channel_range):
yield time_idx, time, ch_idx, ch, self.slice_data(time=time, channel=ch)
yield SlicedData(
data=self.slice_data(time=time, channel=ch),
time_index=time_idx,
time= time,
channel_index=ch_idx,
channel=ch,
)

def iter_sublattices(self, update_with: dict = {}) -> Iterable[SlicedData[LatticeData]]:
"""
Yields copies of the current LatticeData, one for each slice.
These copies can then be processed separately.
Args:
update_with: dictionary of arguments to update the generated lattices with
"""
for subarray in self.iter_slices():
yield subarray.copy_with_data(
self.copy(update={ "image": subarray,
**update_with
})
)

def generate_workflows(
self,
) -> Iterable[SlicedData[Workflow]]:
"""
Yields copies of the input workflow, modified with the addition of deskewing and optionally,
cropping and deconvolution
"""
if self.workflow is None:
return

from copy import copy
# We make a copy of the lattice for each slice, each of which has no associated workflow
for lattice_slice in self.iter_sublattices(update_with={"workflow": None}):
user_workflow = copy(self.workflow)
user_workflow.set(
"deskew_image",
LatticeData.process,
lattice_slice.data
)
yield lattice_slice.copy_with_data(user_workflow)

def check_incomplete_acquisition(self, volume: ArrayLike, time_point: int, channel: int):
"""
Expand Down Expand Up @@ -373,14 +427,15 @@ def _process_non_crop(self) -> Iterable[ProcessedVolume]:
"""
Yields processed image slices without cropping
"""
for time_idx, time, ch_idx, ch, data in self.iter_slices():
if isinstance(data, DaskArray):
data = data.compute()
for slice in self.iter_slices():
data: ArrayLike = slice.data
if isinstance(slice.data, DaskArray):
data = slice.data.compute()
if self.deconvolution is not None:
if self.deconvolution.decon_processing == DeconvolutionChoice.cuda_gpu:
data= pycuda_decon(
image=data,
psf=self.deconvolution.psf[ch],
psf=self.deconvolution.psf[slice.channel],
background=self.deconvolution.background,
dzdata=self.dz,
dxdata=self.dx,
Expand All @@ -391,35 +446,46 @@ def _process_non_crop(self) -> Iterable[ProcessedVolume]:
else:
data= skimage_decon(
vol_zyx=data,
psf=self.deconvolution.psf[ch],
psf=self.deconvolution.psf[slice.channel],
num_iter=self.deconvolution.psf_num_iter,
clip=False,
filter_epsilon=0,
boundary='nearest'
)

yield ProcessedVolume(
data = cle.pull_zyx(self.deskew_func(
yield slice.copy_with_data(
cle.pull_zyx(self.deskew_func(
input_image=data,
angle_in_degrees=self.angle,
linear_interpolation=True,
voxel_size_x=self.dx,
voxel_size_y=self.dy,
voxel_size_z=self.dz
)),
channel=ch,
channel_index=ch_idx,
time=time,
time_index=time_idx
))
)

def process(self) -> ProcessedSlices:
"""
Execute the processing and return the result.
This is the main public API for processing
"""
ProcessedSlices.update_forward_refs()
if self.cropping_enabled:

if self.workflow is not None:
outputs = []
for workflow in self.generate_workflows():
for leaf in workflow.data.leafs():
outputs.append(
workflow.copy_with_data(
workflow.data.get(leaf)
)
)

return ProcessedSlices(
slices = outputs,
lattice_data=self
)

elif self.cropping_enabled:
return ProcessedSlices(
lattice_data=self,
slices=self._process_crop()
Expand Down
Binary file added core/lls_core/sample/LLS7_t1_ch1.czi
Binary file not shown.
Binary file added core/lls_core/sample/LLS7_t1_ch3.czi
Binary file not shown.
Binary file added core/lls_core/sample/LLS7_t2_ch1.czi
Binary file not shown.
Binary file added core/lls_core/sample/LLS7_t2_ch3.czi
Binary file not shown.
File renamed without changes.
File renamed without changes.
4 changes: 4 additions & 0 deletions core/lls_core/sample/README.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
LLS7_t1_ch1: 3D array
LLS7_t1_ch3: 4D array with channel dimension value of 3
LLS7_t2_ch1: 4D array with time dimension value of 2
LLS7_t2_ch3: 4D array with time dimension value of 2 and channel dimension value of 3
9 changes: 9 additions & 0 deletions core/lls_core/sample/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
from pkg_resources import resource_filename

# Absolute paths to the sample images bundled with this package.
# See README.txt in this directory for a description of each file.
LLS7_T1_CH1 = resource_filename(__name__, "LLS7_t1_ch1.czi")
LLS7_T1_CH3 = resource_filename(__name__, "LLS7_t1_ch3.czi")
LLS7_T2_CH1 = resource_filename(__name__, "LLS7_t2_ch1.czi")
# Fixed: previously pointed at "LLS7_t2_ch4.czi", a file that is not part
# of the package — the bundled file is LLS7_t2_ch3.czi
LLS7_T2_CH3 = resource_filename(__name__, "LLS7_t2_ch3.czi")
MULTICH_MULTITIME = resource_filename(__name__, "multich_multi_time.tif")
RBC_LATTICE_TIF = resource_filename(__name__, "RBC_lattice.tif")
RBC_TINY_CZI = resource_filename(__name__, "RBC_tiny.czi")
Loading

0 comments on commit 1b0b0f3

Please sign in to comment.