diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 158d251ad..52325b3dd 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -29,11 +29,12 @@ jobs: environment-file: environment-minimal.yml activate-environment: caiman conda-solver: libmamba + miniforge-version: latest - name: Install OS Dependencies shell: bash -l {0} run: | - sudo apt-get install libglapi-mesa libegl-mesa0 libegl1 libopengl0 libgl1-mesa-glx + sudo apt-get update && sudo apt-get install libglapi-mesa libegl-mesa0 libegl1 libopengl0 - name: Install Dependencies shell: bash -l {0} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 19e651254..9f179d0e8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,7 +1,7 @@ -# CaImAn contributors guide :hammer: -CaImAn is an open source project where *everyone* is welcome and encouraged to contribute. We have external contributors from all over the world, and we are always looking for more help. This guide explains how to contribute: if you have questions about the process, please feel free to reach out at [GitHub Discussions](https://github.com/flatironinstitute/CaImAn/discussions). Everyone needs help contributing and finds git/GitHub confusing, so please don't be shy about asking. +# CaImAn guide to contributing +CaImAn is an open source project with contributors from many institutions around the world. Help is available on [Gitter](https://app.gitter.im/#/room/#agiovann_Constrained_NMF:gitter.im) or [GitHub Discussions](https://github.com/flatironinstitute/CaImAn/discussions). -There are many different ways you can contribute to Caiman. The first and easiest way is to bring problems to our attention: if you find a bug, or think there is a feature that is lacking in Caiman, please [open an issue at Github](https://github.com/flatironinstitute/CaImAn/issues). You can also contribute just by *participating* in the different forums. +First, if you run into issues or want to suggest features, you can [open an issue on GitHub](https://github.com/flatironinstitute/CaImAn/issues). Second, let's say you want to improve something yourself: @@ -9,9 +9,9 @@ Second, let's say you want to improve something yourself: - The demo notebooks - The code base -We welcome *all* such contributions. To make them, you need to make changes on your local version of Caiman and then push make a *Pull Request* (PR) to our GitHub repository. We will walk through this process in the rest of the document. +To contribute, start by making changes on your local version of Caiman, then push them and make a *Pull Request* (PR) to our GitHub repository. We will walk through this process in the rest of the document. -Before you go through the work required to improve something, we recommend that you let us know your plans on GitHub either in discussions or issues. This way, we can avoid duplicated effort (if someone is already working on it), or wasted time (it could turn out the changes might not be feasible right now because it conflicts with some other major feature we are working on). If needed, can usually set up a video chat to talk about a feature proposal if that works for you. +Before you go through the work required to improve something, we recommend that you reach out on GitHub by filing a feature request in an issue. This way, we can avoid duplicated effort (if someone is already working on it) or wasted time (we may not agree on direction). ## Background: how do pull requests work?
In this section we'll give general background on how making a contribution/PR works. If you know this stuff and just want to get started quickly, feel free to skip to the next section. @@ -28,7 +28,7 @@ The workflow for contributing to Caiman is roughly illustrated by the numbers in 3) Make a PR: this is when you request that your changes become merged into `dev` at Caiman. This merge won't be immediate: you will get feedback on your code, and probably be asked to make some changes. 4) Periodically, as features accumulate in the `dev` branch (every month or so), the `dev` branch will be merged with `main`. This will become a new version of Caiman that people install when they run `mamba install caiman`. -Below we have instructions on how to do all of the above steps. While all of this may seem like a lot, some of the steps are extremely simple. Also, once you have done it once, you will have the recipe and it will be pretty easy. Finally, it is a very rewarding experience to contribute to an open source project -- we hope you'll take the plunge! +Below we have instructions on how to do all of the above steps. ## First, create a dedicated development environment If you have downloaded Caiman for standard use, you probably installed it using `conda` or `mamba` as described on the README page. As a contributor, you will want to set up a dedicated development environment. This means you will be setting up a version of Caiman you will edit and tweak, uncoupled from your main installation for everyday use. To set up a development environment so you can follow the workflow outlined above, do the following: @@ -57,7 +57,7 @@ Go to the [Caiman repo](https://github.com/flatironinstitute/CaImAn) and hit the This installs Caiman directly from the downloaded source code. The `-e` stands for 'editable': when you edit the files, the changes should immediately be reflected in the code you run. -Note this section is partly based on the excellent [docs from Matplotlib](https://matplotlib.org/devdocs/devel/development_setup.html#installing-for-devs). +Note this section is partly based on the [docs from Matplotlib](https://matplotlib.org/devdocs/devel/development_setup.html#installing-for-devs). ## Second, work on a feature @@ -86,21 +86,15 @@ Note that all PRs are reviewed by other programmers. This is an important part o You may be asked to make some changes (or to *think* about making some changes). You will sometimes need to do some more work on your branch and make more changes after making an initial PR. In this case, the workflow is simple: you will work within your local `my_feature` branch as before, and run the `push` command again. Conveniently, this will automatically push the changes to the same work-in-progress PR at Caiman. Eventually, the feature will be merged into `dev` and your work is done! ## Fourth, wait for the work to show up in main :clock8: -Once your work is done, the `dev` branch will eventually be merged into `main` by the developers who maintain Caiman (label 4 in the figure). This is done every month or two, and is the stage when your work will actually be available to the people who download Caiman. It's at this point your name will appear when you click on the [list of Contributors](https://github.com/flatironinstitute/CaImAn/graphs/contributors) at GitHub. Please give yourself a pat on the back -- we really appreciate the folks who go through all this work to help make the package better!
+Once your work is done, the `dev` branch will eventually be merged into `main` by the developers who maintain Caiman (label 4 in the figure). This is done every month or two, and is the stage when your work will actually be available to the people who download Caiman. It's at this point your name will appear when you click on the [list of Contributors](https://github.com/flatironinstitute/CaImAn/graphs/contributors) at GitHub. # What next? Once you have gone through the above steps, you can delete your local feature branch. Before working on a new feature, you will want to make sure that your fork stays up to date with Caiman. You can do this with the user interface at GitHub (there is a button to sync up your repo with the original repository on a particular branch). -Nobody remembers all the git commands, don't worry if you constantly are looking things up: that's what *everyone* does. If you want to learn more, check out the following resources: +If you want to learn more, check out the following resources: * [Getting started with git/github](https://github.com/EricThomson/git_learn) * [GitHub on Contributing to a Project](https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project) * [GitHub skillbuilding](https://skills.github.com/) * [Scipy git resources](https://docs.scipy.org/doc/scipy/dev/gitwash/gitwash.html#using-git) -Again, if you want to contribute and find any of the above bits confusing, please reach out! - - - - - diff --git a/Jenkinsfile b/Jenkinsfile index 92e5c8506..4c4fff6e5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -34,7 +34,7 @@ pipeline { export THEANO_FLAGS="base_compiledir=$TEMPDIR/theano_tmp" cd $TEMPDIR caimanmanager.py install - nosetests --traverse-namespace caiman + pytest --pyargs caiman caimanmanager.py demotest ''' } @@ -58,7 +58,7 @@ pipeline { export CAIMAN_DATA=$TEMPDIR/caiman_data cd $TEMPDIR caimanmanager.py install - nosetests --traverse-namespace caiman + pytest --pyargs caiman ''' } } diff --git a/README.md b/README.md index 72b8cf101..b7141dc8e 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,54 @@ -CaImAn +Caiman ====== A Python toolbox for large-scale **Ca**lcium **Im**aging **An**alysis. -CaImAn implements a set of essential methods required to analyze calcium and voltage imaging data. It provides fast and scalable algorithms for motion correction, source extraction, spike deconvolution, and registering neurons across multiple sessions. It is suitable for both two-photon and one-photon fluorescence microscopy data, and can be run in both offline and online modes. Documentation is [here](https://caiman.readthedocs.io/en/latest/). +Caiman implements a set of essential methods required to analyze calcium and voltage imaging data. It provides fast and scalable algorithms for motion correction, source extraction, spike deconvolution, and registering neurons across multiple sessions. It is suitable for both two-photon and one-photon fluorescence microscopy data, and can be run in both offline and online modes. Documentation is [here](https://caiman.readthedocs.io/en/latest/). -Caiman Central --------------- -- [Caiman Central](https://github.com/flatironinstitute/caiman_central) is the hub for sharing information about CaImAn. Information on quarterly community meetings, workshops, other events, and any other communications between the developers and the user community can be found there. +# Installation +There are two primary ways to install Caiman. 
-# Quick start -Follow these three steps to get started quickly, from installation to working through a demo notebook. If you do not already have conda installed, [you can find it here](https://docs.conda.io/en/latest/miniconda.html). There is a video walkthrough of the following steps [here](https://youtu.be/b63zAmKihIY?si=m7WleTwdU0rJup_2). +## Route A +The easiest route is to install the miniforge distribution of conda, and use it to install the rest as prebuilt packages. Most users should take this path. -### Step 1: Install caiman +## Route B +The alternative route is to make sure you have a working compiler, create a Python virtualenv, grab the caiman sources, and use pip to populate the virtualenv and build Caiman. This route is less well tested and not presently documented; it is a standard pip-based install. + +# Quick start (Route A) +Follow these three steps to get started quickly, from installation to working through a demo notebook. If you do not already have conda installed, [you can find it here](https://github.com/conda-forge/miniforge). The miniforge distribution of conda is preferred; it will require fewer steps and likely encounter fewer issues. If you are using a different distro of conda, you will likely need to add `-c conda-forge` to the commands you use to make your environment. + +Windows users will temporarily need to use an alternative install path. + +### Step 1: Install Caiman The following is all done in your anaconda prompt, starting in your base environment: - conda install -n base -c conda-forge mamba # install mamba in base environment - mamba create -n caiman -c conda-forge caiman # install caiman - conda activate caiman # activate virtual environment + mamba create -n caiman caiman # build a caiman environment + conda activate caiman # activate the environment + +### Step 1: Install Caiman (alternative for Windows users) +Windows users will need to follow an alternative set of steps because tensorflow does not have good packaging for Windows with conda (packaging changes are underway to solve this but are not available as of this writing). + +First, you will need to install Visual Studio 2019 or possibly a later version, with the C++ compiler and commandline utilities. +Then you will clone this repo to your Windows system, and enter the checkout directory. + +Next, you will build and activate a mostly-empty conda environment: + + mamba create -n caiman python=3.11 pip vs2019_win-64 + conda activate caiman + +Finally, you will use pip to install Caiman's prerequisites and Caiman itself: + + pip install . + +This step may fail if the compiler is not correctly installed and is the most fragile part of this install route; reach out if you encounter issues. + +After this, assuming you succeed, leave the source directory. Later steps will not function correctly when run in the source/checkout directory. ### Step 2: Download code samples and data sets -Create a working directory called `caiman_data` that includes code samples and related data. Run the following command from the same virtual environment that you created in Step 1: +Create a working directory called `caiman_data` that includes code samples and related data.
Run the following command from the same conda environment that you created in Step 1: caimanmanager install @@ -32,14 +56,14 @@ Create a working directory called `caiman_data` that includes code samples and r Go into the working directory you created in Step 2, and open a Jupyter notebook: cd /caiman_data/ - jupyter notebook + jupyter lab Jupyter will open. Navigate to demos/notebooks/ and click on `demo_pipeline.ipynb` to get started with a demo. -> Note that what counts as `` in the first line depends on your OS/computer. Be sure to fill in your actual home directory. On Linux/Mac it is `~` while on Windows it will be something like `C:\Users\your_user_name\` +> `` in the first line is your home directory; its location depends on your OS. On Linux/Mac it is `~`, while on Windows it will be something like `C:\Users\your_user_name\` ## For installation help -Caiman should install easily on Linux, Mac, and Windows. If you run into problems, we have a dedicated [installation page](./docs/source/Installation.rst): the details there should help you troubleshoot. If you don't find what you need there, *please* [create an issue](https://github.com/flatironinstitute/CaImAn/issues) at GitHub, and we will help you get it sorted out. +Caiman should install easily on Linux, Mac, and Windows. If you run into problems, we have a dedicated [installation page](./docs/source/Installation.rst). If you don't find what you need there, [create an issue](https://github.com/flatironinstitute/Caiman/issues) on GitHub. # Demo notebooks Caiman provides demo notebooks to showcase each of our main features, from motion correction to online CNMF. We recommend starting with the CNMF notebook (`demo_pipeline.ipynb`), which contains more explanation and details than the other notebooks: it covers many concepts that will be used without explanation in the other notebooks. The CNMFE notebook (`demo_pipeline_cnmfE.ipynb`) is also more detailed. Once you've gotten things set up and worked through those "anchor" notebooks, the best way to get started is to work through the demo notebook that most closely matches your use case; you should be able to adapt it for your particular needs. A condensed sketch of the pipeline these notebooks cover appears just before the contributing section below. @@ -67,9 +91,9 @@ Caiman also provides commandline demos, similar to the notebooks, demonstrating # How to get help - [Online documentation](https://caiman.readthedocs.io/en/latest/) contains a lot of general information about Caiman, the parameters, how to interpret its outputs, and more. -- [GitHub Discussions](https://github.com/flatironinstitute/CaImAn/discussions) is our preferred venue for users to ask for help. +- [GitHub Discussions](https://github.com/flatironinstitute/Caiman/discussions) is our preferred venue for users to ask for help. - The [Gitter forum](https://app.gitter.im/#/room/#agiovann_Constrained_NMF:gitter.im) is our old forum: we sometimes will ask people to join us there when something can best be solved in real time (e.g., installation problems). -- If you have found a bug, we recommend searching the [issues at github](https://github.com/flatironinstitute/CaImAn/issues) and opening a new issue if you can't find the solution there. +- If you have found a bug, we recommend searching the [issues on GitHub](https://github.com/flatironinstitute/Caiman/issues) and opening a new issue if you can't find the solution there. - If there is a feature you would like to see implemented, feel free to come chat at the above forums or open an issue at Github.
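For orientation, here is a condensed sketch of the batch pipeline that `demo_pipeline.ipynb` walks through. It is illustrative only: `movie.tif` is a hypothetical placeholder for your own data and the parameter choices are not tuned recommendations; the notebook itself is the authoritative reference.

    import caiman as cm
    from caiman.source_extraction.cnmf import cnmf, params

    fnames = ['movie.tif']    # hypothetical placeholder; substitute your own movie

    # illustrative parameter choices; the notebooks discuss how to pick them
    opts = params.CNMFParams(params_dict={'fnames': fnames, 'pw_rigid': True})

    # start a local cluster for parallel processing
    _, dview, n_processes = cm.cluster.setup_cluster(backend='multiprocessing')

    # motion-correct, memory-map, and run CNMF source extraction in one call
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit_file(motion_correct=True)

    cnm.save('results.hdf5')    # spatial/temporal components live in cnm.estimates
    cm.stop_server(dview=dview)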
# How to contribute @@ -119,13 +143,12 @@ If possible, we'd also ask that you cite the papers where the original algorithm # Main developers * (emeritus) Eftychios A. Pnevmatikakis, **Flatiron Institute, Simons Foundation** * (emeritus) Andrea Giovannucci, **University of North Carolina, Chapel Hill** -* Johannes Friedrich, **Allen Institute, Seattle Washington** -* Changjia Cai, **University of North Carolina, Chapel Hill** +* (emeritus) Johannes Friedrich, **Allen Institute, Seattle Washington** +* (emeritus) Changjia Cai, **University of North Carolina, Chapel Hill** +* Kushal Kolar, **Flatiron Institute, Simons Foundation** * Pat Gunn, **Flatiron Institute, Simons Foundation** -* Eric Thomson, **Flatiron Institute, Simons Foundation** - -A complete list of contributors can be found [here](https://github.com/flatironinstitute/Caiman/graphs/contributors). Currently Pat Gunn, Johannes Friedrich, and Eric Thomson are the most active contributors. +A complete list of contributors can be found [here](https://github.com/flatironinstitute/Caiman/graphs/contributors). # Acknowledgements Special thanks to the following people for letting us use their datasets in demo files: @@ -136,6 +159,12 @@ Special thanks to the following people for letting us use their datasets in demo * Manolis Froudarakis, Jake Reimers, Andreas Tolias, Baylor College of Medicine * Clay Lacefield, Randy Bruno, Columbia University * Daniel Aharoni, Peyman Golshani, UCLA +* Darcy Peterka, Columbia + +Also a special thanks to: +* Eric Thomson, for various strong contributions to code and demos, both before and during his employment at the Flatiron Institute +* Changjia Cai, for VolPy +* Ethan Blackwood, for several contributions in various areas # License This program is free software; you can redistribute it and/or diff --git a/VERSION b/VERSION index 0a5af26df..3d0e62313 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.11.3 +1.11.4 diff --git a/caiman/base/movies.py b/caiman/base/movies.py index a4652ff8f..f0a9a5796 100644 --- a/caiman/base/movies.py +++ b/caiman/base/movies.py @@ -2025,6 +2025,17 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, logger.error(f'The file does not contain a variable named {var_name_hdf5}') raise Exception('Variable not found. Use one of the above') T, dims = siz[0], siz[1:] + elif extension in ('.npy', ): + with open(file_name, 'rb') as f: + version = np.lib.format.read_magic(f) + if version == (1, 0): + shape, _, _ = np.lib.format.read_array_header_1_0(f) + elif version == (2, 0): + shape, _, _ = np.lib.format.read_array_header_2_0(f) + else: + raise ValueError(f"Unsupported .npy file version: {version}.
Update caiman.base.movies.get_file_size() to handle it.") + T = shape[0] + dims = shape[1:] elif extension in ('.sbx'): shape = caiman.utils.sbx_utils.sbx_shape(file_name[:-4]) T = shape[-1] diff --git a/caiman/base/rois.py b/caiman/base/rois.py index fddd1471f..d1c11c374 100644 --- a/caiman/base/rois.py +++ b/caiman/base/rois.py @@ -20,7 +20,7 @@ from typing import Any, Optional import zipfile -from caiman.motion_correction import tile_and_correct +from caiman.motion_correction import tile_and_correct, get_patch_centers, interpolate_shifts try: cv2.setNumThreads(0) @@ -318,7 +318,8 @@ def register_ROIs(A1, print_assignment=False, plot_results=False, Cn=None, - cmap='viridis'): + cmap='viridis', + align_options: Optional[dict] = None): """ Register ROIs across different sessions using an intersection over union metric and the Hungarian algorithm for optimal matching @@ -372,6 +373,9 @@ def register_ROIs(A1, cmap: string colormap for background image + + align_options: Optional[dict] + mcorr options to override defaults when align_flag is True and use_opt_flow is False Returns: matched_ROIs1: list @@ -414,25 +418,50 @@ def register_ROIs(A1, if use_opt_flow: template1_norm = np.uint8(template1 * (template1 > 0) * 255) template2_norm = np.uint8(template2 * (template2 > 0) * 255) - flow = cv2.calcOpticalFlowFarneback(np.uint8(template1_norm * 255), np.uint8(template2_norm * 255), None, + flow = cv2.calcOpticalFlowFarneback(template1_norm, template2_norm, None, 0.5, 3, 128, 3, 7, 1.5, 0) x_remap = (flow[:, :, 0] + x_grid).astype(np.float32) y_remap = (flow[:, :, 1] + y_grid).astype(np.float32) else: - template2, shifts, _, xy_grid = tile_and_correct(template2, - template1 - template1.min(), - [int(dims[0] / 4), int(dims[1] / 4)], [16, 16], [10, 10], - add_to_movie=template2.min(), - shifts_opencv=True) - - dims_grid = tuple(np.max(np.stack(xy_grid, axis=0), axis=0) - np.min(np.stack(xy_grid, axis=0), axis=0) + 1) - _sh_ = np.stack(shifts, axis=0) - shifts_x = np.reshape(_sh_[:, 1], dims_grid, order='C').astype(np.float32) - shifts_y = np.reshape(_sh_[:, 0], dims_grid, order='C').astype(np.float32) - - x_remap = (-np.resize(shifts_x, dims) + x_grid).astype(np.float32) - y_remap = (-np.resize(shifts_y, dims) + y_grid).astype(np.float32) + align_defaults = { + "strides": (int(dims[0] / 4), int(dims[1] / 4)), + "overlaps": (16, 16), + "max_shifts": (10, 10), + "shifts_opencv": True, + "upsample_factor_grid": 4, + "shifts_interpolate": True, + "max_deviation_rigid": 2 + # any other argument to tile_and_correct can also be used in align_options + } + + if align_options: + # override defaults with input options + align_defaults.update(align_options) + align_options = align_defaults + + template2, shifts, _, _ = tile_and_correct(template2, template1 - template1.min(), + add_to_movie=template2.min(), **align_options) + + if align_options["max_deviation_rigid"] == 0: + # repeat rigid shifts to size of the image + shifts_x_full = np.full(dims, -shifts[1]) + shifts_y_full = np.full(dims, -shifts[0]) + else: + # piecewise - interpolate from patches to get shifts per pixel + patch_centers = get_patch_centers(dims, overlaps=align_options["overlaps"], strides=align_options["strides"], + shifts_opencv=align_options["shifts_opencv"], + upsample_factor_grid=align_options["upsample_factor_grid"]) + patch_grid = tuple(len(centers) for centers in patch_centers) + _sh_ = np.stack(shifts, axis=0) + shifts_x = np.reshape(_sh_[:, 1], patch_grid, order='C').astype(np.float32) + shifts_y = np.reshape(_sh_[:, 0], 
patch_grid, order='C').astype(np.float32) + + shifts_x_full = interpolate_shifts(-shifts_x, patch_centers, tuple(range(d) for d in dims)) + shifts_y_full = interpolate_shifts(-shifts_y, patch_centers, tuple(range(d) for d in dims)) + + x_remap = (shifts_x_full + x_grid).astype(np.float32) + y_remap = (shifts_y_full + y_grid).astype(np.float32) A_2t = np.reshape(A2, dims + (-1,), order='F').transpose(2, 0, 1) A2 = np.stack([cv2.remap(img.astype(np.float32), x_remap, y_remap, cv2.INTER_NEAREST) for img in A_2t], axis=0) diff --git a/caiman/caimanmanager.py b/caiman/caimanmanager.py index 62580930c..968bbfca6 100755 --- a/caiman/caimanmanager.py +++ b/caiman/caimanmanager.py @@ -125,31 +125,31 @@ def do_check_install(targdir: str, inplace: bool = False) -> None: raise Exception("Install is dirty") -def do_run_nosetests(targdir: str) -> None: - out, err, ret = runcmd(["nosetests", "--verbose", "--traverse-namespace", "caiman"]) +def do_run_pytest(targdir: str) -> None: + out, err, ret = runcmd(["pytest", "--verbose", "--pyargs", "caiman"]) if ret != 0: - print(f"Nosetests failed with return code {ret}") + print(f"pytest failed with return code {ret}") sys.exit(ret) else: - print("Nosetests success!") + print("pytest success!") -def do_run_coverage_nosetests(targdir: str) -> None: - # Run nosetests, but invoke coverage so we get statistics on how much our tests actually exercise +def do_run_coverage_pytest(targdir: str) -> None: + # Run pytest, but invoke coverage so we get statistics on how much our tests actually exercise # the code. It would probably be a mistake to do CI testing around these figures (as we often add things to # the codebase before they're fully fleshed out), but we can at least make the command below easier to invoke # with this frontend. # # This command will not function from the conda package, because there would be no reason to use it in that case. # If we ever change our mind on this, it's a simple addition of the coverage package to the feedstock. 
- out, err, ret = runcmd(["nosetests", "--verbose", "--with-coverage", "--cover-package=caiman", "--cover-erase", "--traverse-namespace", "caiman"]) + out, err, ret = runcmd(["pytest", "--verbose", "--cov=caiman", "caiman"]) if ret != 0: - print("Nosetests failed with return code " + str(ret)) + print("pytestfailed with return code " + str(ret)) print("If it failed due to a message like the following, it is a known issue:") print("ValueError: cannot resize an array that references or is referenced by another array in this way.") print("We believe this to be harmless and caused by coverage having additional rules for code") sys.exit(ret) else: - print("Nosetests success!") + print("pytest success!") def do_run_demotests(targdir: str) -> None: @@ -255,9 +255,9 @@ def main(): elif cfg.command == 'check': do_check_install(cfg.userdir, cfg.inplace) elif cfg.command == 'test': - do_run_nosetests(cfg.userdir) + do_run_pytest(cfg.userdir) elif cfg.command == 'covtest': - do_run_coverage_nosetests(cfg.userdir) + do_run_coverage_pytest(cfg.userdir) elif cfg.command == 'demotest': if os.name == 'nt': do_nt_run_demotests(cfg.userdir) diff --git a/caiman/mmapping.py b/caiman/mmapping.py index 1f15f7988..a69558169 100644 --- a/caiman/mmapping.py +++ b/caiman/mmapping.py @@ -46,9 +46,12 @@ def load_memmap(filename: str, mode: str = 'r') -> tuple[Any, tuple, int]: """ logger = logging.getLogger("caiman") - if pathlib.Path(filename).suffix != '.mmap': + allowed_extensions = {'.mmap', '.npy'} + + extension = pathlib.Path(filename).suffix + if extension not in allowed_extensions: logger.error(f"Unknown extension for file {filename}") - raise ValueError(f'Unknown file extension for file {filename} (should be .mmap)') + raise ValueError(f'Unknown file extension for file {filename} (should be .mmap or .npy)') # Strip path components and use CAIMAN_DATA/example_movies # TODO: Eventually get the code to save these in a different dir #fn_without_path = os.path.split(filename)[-1] @@ -63,7 +66,22 @@ def load_memmap(filename: str, mode: str = 'r') -> tuple[Any, tuple, int]: #d1, d2, d3, T, order = int(fpart[-9]), int(fpart[-7]), int(fpart[-5]), int(fpart[-1]), fpart[-3] filename = caiman.paths.fn_relocated(filename) - Yr = np.memmap(filename, mode=mode, shape=prepare_shape((d1 * d2 * d3, T)), dtype=np.float32, order=order) + shape = prepare_shape((d1 * d2 * d3, T)) + if extension == '.mmap': + Yr = np.memmap(filename, mode=mode, shape=shape, dtype=np.float32, order=order) + elif extension == '.npy': + Yr = np.load(filename, mmap_mode=mode) + if Yr.shape != shape: + raise ValueError(f"Data in npy file was an unexpected shape: {Yr.shape}, expected: {shape}") + if Yr.dtype != np.float32: + raise ValueError(f"Data in npy file was an unexpected dtype: {Yr.dtype}, expected: np.float32") + if order == 'C' and not Yr.flags['C_CONTIGUOUS']: + raise ValueError("Data in npy file is not in C-contiguous order as expected.") + elif order == 'F' and not Yr.flags['F_CONTIGUOUS']: + raise ValueError("Data in npy file is not in Fortran-contiguous order as expected.") + + + if d3 == 1: return (Yr, (d1, d2), T) else: diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index cae11df49..9b3a4e7fb 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -46,12 +46,12 @@ from numpy.fft import ifftshift import os import scipy -from skimage.transform import resize as resize_sk -from skimage.transform import warp as warp_sk +import scipy.interpolate +from skimage.transform import resize as resize_sk, 
rescale as rescale_sk, warp as warp_sk import sys import tifffile from tqdm import tqdm -from typing import Optional +from typing import Optional, Literal, Union import caiman import caiman.base.movies @@ -69,16 +69,6 @@ except: def profile(a): return a -opencv = True - -try: - import pycuda.gpuarray as gpuarray - import pycuda.driver as cudadrv - import atexit - HAS_CUDA = True -except ImportError: - HAS_CUDA = False - class MotionCorrect(object): """ class implementing motion correction operations @@ -87,8 +77,8 @@ class implementing motion correction operations def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig=1, splits_rig=14, num_splits_to_process_rig=None, strides=(96, 96), overlaps=(32, 32), splits_els=14, num_splits_to_process_els=None, upsample_factor_grid=4, max_deviation_rigid=3, shifts_opencv=True, nonneg_movie=True, gSig_filt=None, - use_cuda=False, border_nan=True, pw_rigid=False, num_frames_split=80, var_name_hdf5='mov',is3D=False, - indices=(slice(None), slice(None))): + use_cuda=False, border_nan=True, pw_rigid=False, num_frames_split=80, var_name_hdf5='mov', is3D=False, + indices=(slice(None), slice(None)), shifts_interpolate=False): """ Constructor class for motion correction operations @@ -105,11 +95,11 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig max_shifts: tuple maximum allow rigid shift - niter_rig':int + niter_rig:int maximum number of iterations rigid motion correction, in general is 1. 0 will quickly initialize a template with the first frames - splits_rig': int + splits_rig: int for parallelization split the movies in num_splits chunks across time num_splits_to_process_rig: list, @@ -122,13 +112,10 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig overlaps: tuple overlap between patches (size of patch strides+overlaps) - pw_rigig: bool, default: False - flag for performing motion correction when calling motion_correct - splits_els':list for parallelization split the movies in num_splits chunks across time - num_splits_to_process_els: UNUSED + num_splits_to_process_els: UNUSED, deprecated Legacy parameter, does not do anything upsample_factor_grid:int, @@ -137,20 +124,26 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig max_deviation_rigid:int maximum deviation allowed for patch with respect to rigid shift - shifts_opencv: Bool + shifts_opencv: bool apply shifts fast way (but smoothing results) - nonneg_movie: boolean + nonneg_movie: bool make the SAVED movie and template mostly nonnegative by removing min_mov from movie - use_cuda : bool, optional - Use skcuda.fft (if available). Default: False + gSig_filt: list[int], default None + if specified, indicates the shape of a Gaussian kernel used to perform high-pass spatial filtering of the video frames before performing motion correction. Useful for data with high background to emphasize landmarks. + + use_cuda : bool (DEPRECATED) + cuda is no longer supported for motion correction; this kwarg will be removed in a future version of Caiman border_nan : bool or string, optional Specifies how to deal with borders. (True, False, 'copy', 'min') TODO: make this just the bool, and make another variable called border_strategy to hold the how + pw_rigid: bool, default: False + flag for performing elastic motion correction when calling motion_correct + num_frames_split: int, default: 80 Number of frames in each batch. 
Used when constructing the options through the params object @@ -158,11 +151,14 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig var_name_hdf5: str, default: 'mov' If loading from hdf5, name of the variable to load - is3D: bool, default: False + is3D: bool, default: False Flag for 3D motion correction - indices: tuple(slice), default: (slice(None), slice(None)) + indices: tuple(slice), default: (slice(None), slice(None)) Use that to apply motion correction only on a part of the FOV + + shifts_interpolate: bool, default: False + use patch locations to interpolate shifts rather than just upscaling to size of image (for pw_rigid only) Returns: self @@ -204,8 +200,9 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig self.var_name_hdf5 = var_name_hdf5 self.is3D = bool(is3D) self.indices = indices - if self.use_cuda and not HAS_CUDA: - logger.debug("pycuda is unavailable. Falling back to default FFT.") + self.shifts_interpolate = shifts_interpolate + if self.use_cuda: + logger.warning("cuda is no longer supported; this kwarg will be removed in a future version of Caiman") def motion_correct(self, template=None, save_movie=False): """general function for performing all types of motion correction. The @@ -260,7 +257,7 @@ def motion_correct(self, template=None, save_movie=False): self.mmap_file = self.fname_tot_els if self.pw_rigid else self.fname_tot_rig return self - def motion_correct_rigid(self, template=None, save_movie=False) -> None: + def motion_correct_rigid(self, template: Optional[np.ndarray] = None, save_movie=False) -> None: """ Perform rigid motion correction @@ -307,7 +304,8 @@ def motion_correct_rigid(self, template=None, save_movie=False) -> None: border_nan=self.border_nan, var_name_hdf5=self.var_name_hdf5, is3D=self.is3D, - indices=self.indices) + indices=self.indices, + shifts_interpolate=self.shifts_interpolate) if template is None: self.total_template_rig = _total_template_rig @@ -315,7 +313,7 @@ def motion_correct_rigid(self, template=None, save_movie=False) -> None: self.fname_tot_rig += [_fname_tot_rig] self.shifts_rig += _shifts_rig - def motion_correct_pwrigid(self, save_movie:bool=True, template:np.ndarray=None, show_template:bool=False) -> None: + def motion_correct_pwrigid(self, save_movie:bool=True, template: Optional[np.ndarray] = None, show_template:bool=False) -> None: """Perform pw-rigid motion correction Args: @@ -325,7 +323,7 @@ def motion_correct_pwrigid(self, save_movie:bool=True, template:np.ndarray=None, template: ndarray 2D (or 3D) if known, one can pass a template to register the frames to - show_template: boolean + show_template: bool whether to show the updated template at each iteration Important Fields: @@ -368,7 +366,7 @@ def motion_correct_pwrigid(self, save_movie:bool=True, template:np.ndarray=None, num_splits_to_process=None, num_iter=num_iter, template=self.total_template_els, shifts_opencv=self.shifts_opencv, save_movie=save_movie, nonneg_movie=self.nonneg_movie, gSig_filt=self.gSig_filt, use_cuda=self.use_cuda, border_nan=self.border_nan, var_name_hdf5=self.var_name_hdf5, is3D=self.is3D, - indices=self.indices) + indices=self.indices, shifts_interpolate=self.shifts_interpolate) if not self.is3D: if show_template: plt.imshow(new_template_els) @@ -388,8 +386,8 @@ def motion_correct_pwrigid(self, save_movie:bool=True, template:np.ndarray=None, self.z_shifts_els += _z_shifts_els self.coord_shifts_els += _coord_shifts_els - def apply_shifts_movie(self, fname, rigid_shifts:bool=None,
save_memmap:bool=False, - save_base_name:str='MC', order:str='F', remove_min:bool=True): + def apply_shifts_movie(self, fname, rigid_shifts: Optional[bool] = None, save_memmap:bool=False, + save_base_name:str='MC', order: Literal['C', 'F'] = 'F', remove_min:bool=True): """ Applies shifts found by registering one file to a different file. Useful for cases when shifts computed from a structural channel are applied to a @@ -448,82 +446,28 @@ def apply_shifts_movie(self, fname, rigid_shifts:bool=None, save_memmap:bool=Fal sh[0], sh[1]), 0, is_freq=False, border_nan=self.border_nan) for img, sh in zip( Y, self.shifts_rig)] else: + # take potential upsampling into account when recreating patch grid + dims = Y.shape[1:] + patch_centers = get_patch_centers(dims, overlaps=self.overlaps, strides=self.strides, + shifts_opencv=self.shifts_opencv, upsample_factor_grid=self.upsample_factor_grid) if self.is3D: - xyz_grid = [(it[0], it[1], it[2]) for it in sliding_window_3d( - Y[0], self.overlaps, self.strides)] - dims_grid = tuple(np.add(xyz_grid[-1], 1)) - shifts_x = np.stack([np.reshape(_sh_, dims_grid, order='C').astype( - np.float32) for _sh_ in self.x_shifts_els], axis=0) - shifts_y = np.stack([np.reshape(_sh_, dims_grid, order='C').astype( - np.float32) for _sh_ in self.y_shifts_els], axis=0) - shifts_z = np.stack([np.reshape(_sh_, dims_grid, order='C').astype( - np.float32) for _sh_ in self.z_shifts_els], axis=0) - dims = Y.shape[1:] - x_grid, y_grid, z_grid = np.meshgrid(np.arange(0., dims[1]).astype( - np.float32), np.arange(0., dims[0]).astype(np.float32), - np.arange(0., dims[2]).astype(np.float32)) - if self.border_nan is not False: - if self.border_nan is True: - m_reg = [warp_sk(img, np.stack((resize_sk(shiftX.astype(np.float32), dims) + y_grid, - resize_sk(shiftY.astype(np.float32), dims) + x_grid, - resize_sk(shiftZ.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='constant', cval=np.nan) - for img, shiftX, shiftY, shiftZ in zip(Y, shifts_x, shifts_y, shifts_z)] - elif self.border_nan == 'min': - m_reg = [warp_sk(img, np.stack((resize_sk(shiftX.astype(np.float32), dims) + y_grid, - resize_sk(shiftY.astype(np.float32), dims) + x_grid, - resize_sk(shiftZ.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='constant', cval=np.min(img)) - for img, shiftX, shiftY, shiftZ in zip(Y, shifts_x, shifts_y, shifts_z)] - elif self.border_nan == 'copy': - m_reg = [warp_sk(img, np.stack((resize_sk(shiftX.astype(np.float32), dims) + y_grid, - resize_sk(shiftY.astype(np.float32), dims) + x_grid, - resize_sk(shiftZ.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='edge') - for img, shiftX, shiftY, shiftZ in zip(Y, shifts_x, shifts_y, shifts_z)] - else: - m_reg = [warp_sk(img, np.stack((resize_sk(shiftX.astype(np.float32), dims) + y_grid, - resize_sk(shiftY.astype(np.float32), dims) + x_grid, - resize_sk(shiftZ.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='constant') - for img, shiftX, shiftY, shiftZ in zip(Y, shifts_x, shifts_y, shifts_z)] - # borderValue=add_to_movie) # borderValue=add_to_movie) + # x_shifts_els and y_shifts_els are switched intentionally + m_reg = [ + apply_pw_shifts_remap_3d(img, shifts_y=-x_shifts, shifts_x=-y_shifts, shifts_z=-z_shifts, + patch_centers=patch_centers, border_nan=self.border_nan, + shifts_interpolate=self.shifts_interpolate) + for img, x_shifts, y_shifts, z_shifts in zip(Y, self.x_shifts_els, self.y_shifts_els, self.z_shifts_els) + ] + else: - xy_grid = [(it[0], it[1]) for it in sliding_window(Y[0], self.overlaps, 
self.strides)] - dims_grid = tuple(np.max(np.stack(xy_grid, axis=1), axis=1) - np.min( - np.stack(xy_grid, axis=1), axis=1) + 1) - shifts_x = np.stack([np.reshape(_sh_, dims_grid, order='C').astype( - np.float32) for _sh_ in self.x_shifts_els], axis=0) - shifts_y = np.stack([np.reshape(_sh_, dims_grid, order='C').astype( - np.float32) for _sh_ in self.y_shifts_els], axis=0) - dims = Y.shape[1:] - x_grid, y_grid = np.meshgrid(np.arange(0., dims[1]).astype( - np.float32), np.arange(0., dims[0]).astype(np.float32)) - if self.border_nan is not False: - if self.border_nan is True: - m_reg = [cv2.remap(img, -cv2.resize(shiftY, dims[::-1]) + x_grid, - -cv2.resize(shiftX, dims[::-1]) + y_grid, - cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, - borderValue=np.nan) - for img, shiftX, shiftY in zip(Y, shifts_x, shifts_y)] - - elif self.border_nan == 'min': - m_reg = [cv2.remap(img, -cv2.resize(shiftY, dims[::-1]) + x_grid, - -cv2.resize(shiftX, dims[::-1]) + y_grid, - cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, - borderValue=np.min(img)) - for img, shiftX, shiftY in zip(Y, shifts_x, shifts_y)] - elif self.border_nan == 'copy': - m_reg = [cv2.remap(img, -cv2.resize(shiftY, dims[::-1]) + x_grid, - -cv2.resize(shiftX, dims[::-1]) + y_grid, - cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE) - for img, shiftX, shiftY in zip(Y, shifts_x, shifts_y)] - else: - m_reg = [cv2.remap(img, -cv2.resize(shiftY, dims[::-1]) + x_grid, - -cv2.resize(shiftX, dims[::-1]) + y_grid, - cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, - borderValue=0.0) - for img, shiftX, shiftY in zip(Y, shifts_x, shifts_y)] + # x_shifts_els and y_shifts_els are switched intentionally + m_reg = [ + apply_pw_shifts_remap_2d(img, shifts_y=-x_shifts, shifts_x=-y_shifts, patch_centers=patch_centers, + border_nan=self.border_nan, shifts_interpolate=self.shifts_interpolate) + for img, x_shifts, y_shifts in zip(Y, self.x_shifts_els, self.y_shifts_els) + ] + + del Y m_reg = np.stack(m_reg, axis=0) if save_memmap: dims = m_reg.shape @@ -537,12 +481,12 @@ def apply_shifts_movie(self, fname, rigid_shifts:bool=None, save_memmap:bool=Fal else: return caiman.movie(m_reg) -def apply_shift_iteration(img, shift, border_nan:bool=False, border_type=cv2.BORDER_REFLECT): +def apply_shift_iteration(img, shift, border_nan=False, border_type=cv2.BORDER_REFLECT): # todo todocument sh_x_n, sh_y_n = shift w_i, h_i = img.shape - M = np.float32([[1, 0, sh_y_n], [0, 1, sh_x_n]]) + M = np.array([[1, 0, sh_y_n], [0, 1, sh_x_n]], dtype=np.float32) min_, max_ = np.nanmin(img), np.nanmax(img) img = np.clip(cv2.warpAffine(img, M, (h_i, w_i), flags=cv2.INTER_CUBIC, borderMode=border_type), min_, max_) @@ -575,10 +519,12 @@ def apply_shift_iteration(img, shift, border_nan:bool=False, border_type=cv2.BOR img[:, :max_w] = img[:, max_w, np.newaxis] if min_w < 0: img[:, min_w:] = img[:, min_w-1, np.newaxis] + else: + logging.warning(f'Unknown value of border_nan ({border_nan}); treating as False') return img -def apply_shift_online(movie_iterable, xy_shifts, save_base_name=None, order='F'): +def apply_shift_online(movie_iterable, xy_shifts, save_base_name=None, order: Literal['C', 'F'] = 'F'): """ Applies rigid shifts to a loaded movie. 
Useful when processing a dataset with CaImAn online and you want to obtain the registered movie after @@ -784,7 +730,7 @@ def motion_correct_online_multifile(list_files, add_to_movie, order='C', **kwarg return all_names, all_shifts, all_xcorrs, all_templates -def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shift_h=25, save_base_name=None, order='C', +def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shift_h=25, save_base_name=None, order: Literal['F', 'C']='C', init_frames_template=100, show_movie=False, bilateral_blur=False, template=None, min_count=1000, border_to_0=0, n_iter=1, remove_blanks=False, show_template=False, return_mov=False, use_median_as_template=False): @@ -810,7 +756,7 @@ def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shif init_mov = movie_iterable[slice(0, init_frames_template, 1)] dims = (len(movie_iterable),) + movie_iterable[0].shape # TODO: Refactor so length is either tracked separately or is last part of tuple - logger.debug("dimensions:" + str(dims)) + logger.debug(f"dimensions: {dims}") if use_median_as_template: template = bin_median(movie_iterable) @@ -1043,9 +989,6 @@ def bin_median(mat, window=10, exclude_nans=True): Returns: img: median image - - Raises: - Exception 'Path to template does not exist:'+template """ T, d1, d2 = np.shape(mat) @@ -1075,9 +1018,6 @@ def bin_median_3d(mat, window=10, exclude_nans=True): Returns: img: median image - - Raises: - Exception 'Path to template does not exist:'+template """ T, d1, d2, d3 = np.shape(mat) @@ -1162,9 +1102,6 @@ def motion_correct_parallel(file_names, fr=10, template=None, margins_out=0, Returns: base file names of the motion corrected files:list[str] - - Raises: - Exception """ logger = logging.getLogger("caiman") args_in = [] @@ -1276,15 +1213,13 @@ def _upsampled_dft(data, upsampled_region_size, upsampled_region_size = [upsampled_region_size, ] * data.ndim else: if len(upsampled_region_size) != data.ndim: - raise ValueError("shape of upsampled region sizes must be equal " - "to input data's number of dimensions.") + raise ValueError("shape of upsampled region sizes must be equal to input data's number of dimensions.") if axis_offsets is None: axis_offsets = [0, ] * data.ndim else: if len(axis_offsets) != data.ndim: - raise ValueError("number of axis offsets must be equal to input " - "data's number of dimensions.") + raise ValueError("number of axis offsets must be equal to input data's number of dimensions.") col_kernel = np.exp( (-1j * 2 * np.pi / (data.shape[1] * upsample_factor)) * @@ -1345,32 +1280,6 @@ def _compute_error(cross_correlation_max, src_amp, target_amp): (src_amp * target_amp) return np.sqrt(np.abs(error)) -def init_cuda_process(): - """ - Initialize a PyCUDA context at global scope so that it can be accessed - from processes when using multithreading - """ - global cudactx - - cudadrv.init() - dev = cudadrv.Device(0) - cudactx = dev.make_context() # type: ignore - atexit.register(cudactx.pop) # type: ignore - - -def close_cuda_process(n): - """ - Cleanup cuda process - """ - - global cudactx - - import skcuda.misc as cudamisc - try: - cudamisc.done_context(cudactx) # type: ignore - except: - pass - def register_translation_3d(src_image, target_image, upsample_factor = 1, space = "real", shifts_lb = None, shifts_ub = None, max_shifts = [10,10,10]): @@ -1423,13 +1332,11 @@ def register_translation_3d(src_image, target_image, upsample_factor = 1, # images must be the same shape if src_image.shape != 
target_image.shape: - raise ValueError("Error: images must really be same size for " - "register_translation_3d") + raise ValueError("Error: images must really be same size for register_translation_3d") # only 3D data makes sense right now if src_image.ndim != 3 and upsample_factor > 1: - raise NotImplementedError("Error: register_translation_3d only supports " - "subpixel registration for 3D images") + raise NotImplementedError("Error: register_translation_3d only supports subpixel registration for 3D images") # assume complex data is already in Fourier space if space.lower() == 'fourier': @@ -1444,13 +1351,11 @@ def register_translation_3d(src_image, target_image, upsample_factor = 1, src_freq = np.fft.fftn(src_image_cpx) target_freq = np.fft.fftn(target_image_cpx) else: - raise ValueError("Error: register_translation_3d only knows the \"real\" " - "and \"fourier\" values for the ``space`` argument.") + raise ValueError('Error: register_translation_3d only knows the "real" and "fourier" values for the ``space`` argument.') shape = src_freq.shape image_product = src_freq * target_freq.conj() cross_correlation = np.fft.ifftn(image_product) -# cross_correlation = ifftn(image_product) # TODO CHECK why this line is different new_cross_corr = np.abs(cross_correlation) CCmax = cross_correlation.max() @@ -1458,7 +1363,7 @@ def register_translation_3d(src_image, target_image, upsample_factor = 1, del cross_correlation if (shifts_lb is not None) or (shifts_ub is not None): - + # TODO this will fail if only one is not none - should this be an "and"? if (shifts_lb[0] < 0) and (shifts_ub[0] >= 0): new_cross_corr[shifts_ub[0]:shifts_lb[0], :, :] = 0 else: @@ -1585,8 +1490,8 @@ def register_translation(src_image, target_image, upsample_factor=1, will be FFT'd to compute the correlation, while "fourier" data will bypass FFT of input data. Case insensitive. - use_cuda : bool, optional - Use skcuda.fft (if available). Default: False + use_cuda : bool (DEPRECATED) + cuda is no longer supported for motion correction; this kwarg will be removed in a future version of Caiman Returns: shifts : ndarray @@ -1618,22 +1523,11 @@ def register_translation(src_image, target_image, upsample_factor=1, """ # images must be the same shape if src_image.shape != target_image.shape: - raise ValueError("Error: images must really be same size for " - "register_translation") + raise ValueError("Error: images must really be same size for register_translation") # only 2D data makes sense right now if src_image.ndim != 2 and upsample_factor > 1: - raise NotImplementedError("Error: register_translation only supports " - "subpixel registration for 2D images") - - if HAS_CUDA and use_cuda: - from skcuda.fft import Plan - from skcuda.fft import fft as cudafft - from skcuda.fft import ifft as cudaifft - try: - cudactx # type: ignore - except NameError: - init_cuda_process() + raise NotImplementedError("Error: register_translation only supports subpixel registration for 2D images") # assume complex data is already in Fourier space if space.lower() == 'fourier': @@ -1641,82 +1535,29 @@ def register_translation(src_image, target_image, upsample_factor=1, target_freq = target_image # real data needs to be fft'd. 
elif space.lower() == 'real': - if HAS_CUDA and use_cuda: - # src_image_cpx = np.array(src_image, dtype=np.complex128, copy=False) - # target_image_cpx = np.array(target_image, dtype=np.complex128, copy=False) - - image_gpu = gpuarray.to_gpu(np.stack((src_image, target_image)).astype(np.complex128)) - freq_gpu = gpuarray.empty((2, src_image.shape[0], src_image.shape[1]), dtype=np.complex128) - # src_image_gpu = gpuarray.to_gpu(src_image_cpx) - # src_freq_gpu = gpuarray.empty(src_image_cpx.shape, np.complex128) - - # target_image_gpu = gpuarray.to_gpu(target_image_cpx) - # target_freq_gpu = gpuarray.empty(target_image_cpx.shape, np.complex128) - - plan = Plan(src_image.shape, np.complex128, np.complex128, batch=2) - # cudafft(src_image_gpu, src_freq_gpu, plan, scale=True) - # cudafft(target_image_gpu, target_freq_gpu, plan, scale=True) - cudafft(image_gpu, freq_gpu, plan, scale=True) - # src_freq = src_freq_gpu.get() - # target_freq = target_freq_gpu.get() - freq = freq_gpu.get() - src_freq = freq[0, :, :] - target_freq = freq[1, :, :] - - # del(src_image_gpu) - # del(src_freq_gpu) - # del(target_image_gpu) - # del(target_freq_gpu) - del(image_gpu) - del(freq_gpu) - elif opencv: - src_freq_1 = cv2.dft( - src_image, flags=cv2.DFT_COMPLEX_OUTPUT + cv2.DFT_SCALE) - src_freq = src_freq_1[:, :, 0] + 1j * src_freq_1[:, :, 1] - src_freq = np.array(src_freq, dtype=np.complex128, copy=False) - target_freq_1 = cv2.dft( - target_image, flags=cv2.DFT_COMPLEX_OUTPUT + cv2.DFT_SCALE) - target_freq = target_freq_1[:, :, 0] + 1j * target_freq_1[:, :, 1] - target_freq = np.array( - target_freq, dtype=np.complex128, copy=False) - else: - src_image_cpx = np.array( - src_image, dtype=np.complex128, copy=False) - target_image_cpx = np.array( - target_image, dtype=np.complex128, copy=False) - src_freq = np.fft.fftn(src_image_cpx) - target_freq = np.fft.fftn(target_image_cpx) - + src_freq_1 = cv2.dft( + src_image, flags=cv2.DFT_COMPLEX_OUTPUT + cv2.DFT_SCALE) + src_freq = src_freq_1[:, :, 0] + 1j * src_freq_1[:, :, 1] + src_freq = np.array(src_freq, dtype=np.complex128, copy=False) + target_freq_1 = cv2.dft( + target_image, flags=cv2.DFT_COMPLEX_OUTPUT + cv2.DFT_SCALE) + target_freq = target_freq_1[:, :, 0] + 1j * target_freq_1[:, :, 1] + target_freq = np.array( + target_freq, dtype=np.complex128, copy=False) else: - raise ValueError("Error: register_translation only knows the \"real\" " - "and \"fourier\" values for the ``space`` argument.") + raise ValueError('Error: register_translation only knows the "real" and "fourier" values for the ``space`` argument.') # Whole-pixel shift - Compute cross-correlation by an IFFT shape = src_freq.shape image_product = src_freq * target_freq.conj() - if HAS_CUDA and use_cuda: - image_product_gpu = gpuarray.to_gpu(image_product) - cross_correlation_gpu = gpuarray.empty( - image_product.shape, np.complex128) - iplan = Plan(image_product.shape, np.complex128, np.complex128) - cudaifft(image_product_gpu, cross_correlation_gpu, iplan, scale=True) - cross_correlation = cross_correlation_gpu.get() - elif opencv: - - image_product_cv = np.dstack( - [np.real(image_product), np.imag(image_product)]) - cross_correlation = cv2.dft( - image_product_cv, flags=cv2.DFT_INVERSE + cv2.DFT_SCALE) - cross_correlation = cross_correlation[:, - :, 0] + 1j * cross_correlation[:, :, 1] - else: - cross_correlation = cv2.idft(image_product) + image_product_cv = np.dstack([np.real(image_product), np.imag(image_product)]) + cross_correlation = cv2.dft(image_product_cv, flags=cv2.DFT_INVERSE + 
cv2.DFT_SCALE) + cross_correlation = cross_correlation[:, :, 0] + 1j * cross_correlation[:, :, 1] # Locate maximum new_cross_corr = np.abs(cross_correlation) if (shifts_lb is not None) or (shifts_ub is not None): - if (shifts_lb[0] < 0) and (shifts_ub[0] >= 0): new_cross_corr[shifts_ub[0]:shifts_lb[0], :] = 0 else: @@ -1729,9 +1570,7 @@ def register_translation(src_image, target_image, upsample_factor=1, new_cross_corr[:, :shifts_lb[1]] = 0 new_cross_corr[:, shifts_ub[1]:] = 0 else: - new_cross_corr[max_shifts[0]:-max_shifts[0], :] = 0 - new_cross_corr[:, max_shifts[1]:-max_shifts[1]] = 0 maxima = np.unravel_index(np.argmax(new_cross_corr), @@ -1743,7 +1582,6 @@ def register_translation(src_image, target_image, upsample_factor=1, shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints] if upsample_factor == 1: - src_amp = np.sum(np.abs(src_freq) ** 2) / src_freq.size target_amp = np.sum(np.abs(target_freq) ** 2) / target_freq.size CCmax = cross_correlation.max() @@ -1911,71 +1749,150 @@ def apply_shifts_dft(src_freq, shifts, diffphase, is_freq=True, border_nan=True) new_img[:, :, :max_d] = new_img[:, :, max_d, np.newaxis] if min_d < 0: new_img[:, :, min_d:] = new_img[:, :, min_d-1, np.newaxis] + else: + logging.warning(f'Unknown value of border_nan ({border_nan}); treating as False') return new_img -def sliding_window(image, overlaps, strides): +def get_patch_edges(dims: tuple[int, ...], overlaps: tuple[int, ...], strides: tuple[int, ...], + ) -> tuple[list[int], ...]: + """For each dimension, return a vector of pixels along that dimension where patches start""" + windowSizes = np.add(overlaps, strides) + return tuple( + list(range(0, dim - windowSize, stride)) + [dim - windowSize] + for dim, windowSize, stride in zip(dims, windowSizes, strides) + ) + + +def get_patch_centers(dims: tuple[int, ...], overlaps: tuple[int, ...], strides: tuple[int, ...], + shifts_opencv=False, upsample_factor_grid=1) -> tuple[list[float], ...]: + """ + For each dimension, return a vector of patch center locations for pw_rigid correction. + shifts_opencv simply overrides upsample_factor_grid (forcing it to 1); this makes it easy to + get the correct values by passing in the motion correction parameters directly. + By default, no extra upsampling is done. + """ + if not shifts_opencv and upsample_factor_grid != 1: + # account for upsampling step + strides = tuple(np.round(np.divide(strides, upsample_factor_grid)).astype(int)) + + patch_edges = get_patch_edges(dims, overlaps, strides) + windowSizes = np.add(overlaps, strides) + return tuple([edge + (sz - 1) / 2 for edge in edges] for edges, sz in zip(patch_edges, windowSizes)) + + +def sliding_window_dims(dims: tuple[int, ...], overlaps: tuple[int, ...], strides: tuple[int, ...]): + """ computes dimensions for a sliding window with given image dims, overlaps, and strides + + Args: + dims: tuple + the dimensions of the image + + overlaps: tuple + overlap of patches in each dimension, except that the last patch will be all the way + at the bottom/right regardless of overlap + + strides: tuple + stride in each dimension, except that the last patch will be all the way + at the bottom/right regardless of stride + + Returns: + iterator containing 3 items: + inds: (x, y, ...) coordinates in the patch grid + corner: (x, y, ...) location of corner of the patch in the original matrix + size: (x, y, ...)
size of patch in pixels + """ + windowSizes = np.add(overlaps, strides) + edge_ranges = get_patch_edges(dims, overlaps, strides) + + for patch in itertools.product(*[enumerate(r) for r in edge_ranges]): + inds, corners = zip(*patch) + yield (tuple(inds), tuple(corners), windowSizes) + + +def sliding_window(image: np.ndarray, overlaps: tuple[int, int], strides: tuple[int, int]): """ efficiently and lazily slides a window across the image Args: - img:ndarray 2D - image that needs to be slices + img: ndarray 2D + image that needs to be sliced - windowSize: tuple - dimension of the patch + overlaps: tuple + overlap of patches in each dimension, except that the last patch will be all the way + at the bottom/right regardless of overlap strides: tuple - stride in each dimension + stride in each dimension, except that the last patch will be all the way + at the bottom/right regardless of stride Returns: iterator containing five items dim_1, dim_2 coordinates in the patch grid x, y: bottom border of the patch in the original matrix - patch: the patch - """ - windowSize = np.add(overlaps, strides) - range_1 = list(range( - 0, image.shape[0] - windowSize[0], strides[0])) + [image.shape[0] - windowSize[0]] - range_2 = list(range( - 0, image.shape[1] - windowSize[1], strides[1])) + [image.shape[1] - windowSize[1]] - for dim_1, x in enumerate(range_1): - for dim_2, y in enumerate(range_2): - # yield the current window - yield (dim_1, dim_2, x, y, image[x:x + windowSize[0], y:y + windowSize[1]]) - -def sliding_window_3d(image, overlaps, strides): + """ + if image.ndim != 2: + raise ValueError('Input to sliding_window must be 2D') + + for inds, corner, size in sliding_window_dims(image.shape, overlaps, strides): + patch = image[corner[0]:corner[0] + size[0], corner[1]:corner[1] + size[1]] + yield inds + corner + (patch,) + + +def sliding_window_3d(image: np.ndarray, overlaps: tuple[int, int, int], strides: tuple[int, int, int]): """ efficiently and lazily slides a window across the image Args: - img:ndarray 3D - image that needs to be slices + img: ndarray 3D + image that needs to be sliced - windowSize: tuple - dimension of the patch + overlaps: tuple + overlap of patches in each dimension, except that the last patch will be all the way + at the bottom/right regardless of overlap strides: tuple - stride in each dimension + stride in each dimension, except that the last patch will be all the way + at the bottom/right regardless of stride Returns: iterator containing seven items dim_1, dim_2, dim_3 coordinates in the patch grid x, y, z: bottom border of the patch in the original matrix - patch: the patch """ - windowSize = np.add(overlaps, strides) - range_1 = list(range( - 0, image.shape[0] - windowSize[0], strides[0])) + [image.shape[0] - windowSize[0]] - range_2 = list(range( - 0, image.shape[1] - windowSize[1], strides[1])) + [image.shape[1] - windowSize[1]] - range_3 = list(range( - 0, image.shape[2] - windowSize[2], strides[2])) + [image.shape[2] - windowSize[2]] - for dim_1, x in enumerate(range_1): - for dim_2, y in enumerate(range_2): - for dim_3, z in enumerate(range_3): - # yield the current window - yield (dim_1, dim_2, dim_3, x, y, z, image[x:x + windowSize[0], y:y + windowSize[1], z:z + windowSize[2]]) + if image.ndim != 3: + raise ValueError('Input to sliding_window_3d must be 3D') + + for inds, corner, size in sliding_window_dims(image.shape, overlaps, strides): + patch = image[corner[0]:corner[0] + size[0], + corner[1]:corner[1] + size[1], + corner[2]:corner[2] + size[2]] + yield inds + 
corner + (patch,) + + +def interpolate_shifts(shifts, coords_orig: tuple, coords_new: tuple) -> np.ndarray: + """ + Interpolate piecewise shifts onto new coordinates. Pixels outside the original coordinates will be filled with edge values. + + Args: + shifts: ndarray or other array-like + shifts to interpolate; must have the same number of elements as the outer product of coords_orig + + coords_orig: tuple of float vectors + patch center coordinates along each dimension (e.g. outputs of get_patch_centers) + + coords_new: tuple of float vectors + coordinates along each dimension at which to output interpolated shifts + + Returns: + ndarray of interpolated shifts, of shape tuple(len(coords) for coords in coords_new) + """ + # clip new coordinates to avoid extrapolation + coords_new_clipped = [np.clip(coord, min(coord_orig), max(coord_orig)) for coord, coord_orig in zip(coords_new, coords_orig)] + coords_new_stacked = np.stack(np.meshgrid(*coords_new_clipped, indexing='ij'), axis=-1) + shifts_grid = np.reshape(shifts, tuple(len(coord) for coord in coords_orig)) + return scipy.interpolate.interpn(coords_orig, shifts_grid, coords_new_stacked, method="cubic") + def iqr(a): return np.percentile(a, 75) - np.percentile(a, 25) @@ -2066,7 +1983,7 @@ def high_pass_filter_space(img_orig, gSig_filt=None, freq=None, order=None): def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=None, newstrides=None, upsample_factor_grid=4, upsample_factor_fft=10, show_movie=False, max_deviation_rigid=2, add_to_movie=0, shifts_opencv=False, gSig_filt=None, - use_cuda=False, border_nan=True): + use_cuda=False, border_nan=True, shifts_interpolate=False): """ performs piecewise rigid motion correction iteration, by 1) dividing the FOV in patches 2) motion correcting each patch separately @@ -2089,33 +2006,41 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N max_shifts: tuple max shifts in x and y - newstrides:tuple - strides between patches along each dimension when upsampling the vector fields - newoverlaps:tuple amount of pixel overlapping between patches along each dimension when upsampling the vector fields + newstrides:tuple + strides between patches along each dimension when upsampling the vector fields + upsample_factor_grid: int if newshapes or newstrides are not specified this is inferred upsampling by a constant factor the vector field upsample_factor_fft: int resolution of fractional shifts - show_movie: boolean whether to visualize the original and corrected frame during motion correction + show_movie: bool + whether to visualize the original and corrected frame during motion correction max_deviation_rigid: int maximum deviation in shifts of each patch from the rigid shift (should not be large) - add_to_movie: if movie is too negative the correction might have some issues. In this case it is good to add values so that it is non negative most of the times + add_to_movie: int + if the movie is too negative the correction might have some issues; in this case it is good to add a value so that the movie is non-negative most of the time + + shifts_opencv: bool + FIXME: (UNDOCUMENTED) - filt_sig_size: tuple + gSig_filt: tuple standard deviation and size of gaussian filter to center filter data in case of one photon imaging data - use_cuda : bool, optional - Use skcuda.fft (if available). 
Default: False + use_cuda: bool (DEPRECATED) + cuda is no longer supported for motion correction; this kwarg will be removed in a future version of Caiman border_nan : bool or string, optional specifies how to deal with borders. (True, False, 'copy', 'min') + + shifts_interpolate: bool + use patch locations to interpolate shifts rather than just upscaling to size of image. Default: False Returns: (new_img, total_shifts, start_step, xy_grid) @@ -2129,7 +2054,6 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N template = template.astype(np.float64).copy() if gSig_filt is not None: - img_orig = img.copy() img = high_pass_filter_space(img_orig, gSig_filt) @@ -2141,7 +2065,6 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N img, template, upsample_factor=upsample_factor_fft, max_shifts=max_shifts, use_cuda=use_cuda) if max_deviation_rigid == 0: - if shifts_opencv: if gSig_filt is not None: img = img_orig @@ -2150,10 +2073,9 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N img, (-rigid_shts[0], -rigid_shts[1]), border_nan=border_nan) else: - if gSig_filt is not None: raise Exception( - 'The use of FFT and filtering options have not been tested. Set opencv=True') + 'The use of FFT and filtering options have not been tested. Set shifts_opencv=True or do not provide gSig_filt') new_img = apply_shifts_dft( sfr_freq, (-rigid_shts[0], -rigid_shts[1]), diffphase, border_nan=border_nan) @@ -2161,30 +2083,29 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N return new_img - add_to_movie, (-rigid_shts[0], -rigid_shts[1]), None, None else: # extract patches - logger.info("Extracting patches") - templates = [ - it[-1] for it in sliding_window(template, overlaps=overlaps, strides=strides)] - xy_grid = [(it[0], it[1]) for it in sliding_window( - template, overlaps=overlaps, strides=strides)] - num_tiles = np.prod(np.add(xy_grid[-1], 1)) - imgs = [it[-1] - for it in sliding_window(img, overlaps=overlaps, strides=strides)] + logger.debug("Extracting patches") + xy_grid: list[tuple[int, int]] = [] + templates: list[np.ndarray] = [] + + for (xind, yind, _, _, patch) in sliding_window(template, overlaps=overlaps, strides=strides): + xy_grid.append((xind, yind)) + templates.append(patch) + + imgs = [it[-1] for it in sliding_window(img, overlaps=overlaps, strides=strides)] dim_grid = tuple(np.add(xy_grid[-1], 1)) + num_tiles = len(xy_grid) if max_deviation_rigid is not None: - lb_shifts = np.ceil(np.subtract( rigid_shts, max_deviation_rigid)).astype(int) ub_shifts = np.floor( np.add(rigid_shts, max_deviation_rigid)).astype(int) - else: - lb_shifts = None ub_shifts = None # extract shifts for each patch - logger.info("extracting shifts for each patch") + logger.debug("Extracting shifts for each patch") shfts_et_all = [register_translation( a, b, c, shifts_lb=lb_shifts, shifts_ub=ub_shifts, max_shifts=max_shifts, use_cuda=use_cuda) for a, b, c in zip( imgs, templates, [upsample_factor_fft] * num_tiles)] @@ -2200,12 +2121,11 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N img = img_orig dims = img.shape - x_grid, y_grid = np.meshgrid(np.arange(0., dims[1]).astype( - np.float32), np.arange(0., dims[0]).astype(np.float32)) - m_reg = cv2.remap(img, cv2.resize(shift_img_y.astype(np.float32), dims[::-1]) + x_grid, - cv2.resize(shift_img_x.astype(np.float32), dims[::-1]) + y_grid, - cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE) - # 
borderValue=add_to_movie) + patch_centers = get_patch_centers(dims, strides=strides, overlaps=overlaps) + # shift_img_x and shift_img_y are switched intentionally + m_reg = apply_pw_shifts_remap_2d(img, shifts_y=shift_img_x, shifts_x=shift_img_y, + patch_centers=patch_centers, border_nan=border_nan, + shifts_interpolate=shifts_interpolate) total_shifts = [ (-x, -y) for x, y in zip(shift_img_x.reshape(num_tiles), shift_img_y.reshape(num_tiles))] return m_reg - add_to_movie, total_shifts, None, None @@ -2219,25 +2139,32 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N newshapes = np.add(newstrides, newoverlaps) - imgs = [it[-1] - for it in sliding_window(img, overlaps=newoverlaps, strides=newstrides)] + xy_grid: list[tuple[int, int]] = [] + start_step: list[tuple[int, int]] = [] + imgs: list[np.ndarray] = [] - xy_grid = [(it[0], it[1]) for it in sliding_window( - img, overlaps=newoverlaps, strides=newstrides)] - - start_step = [(it[2], it[3]) for it in sliding_window( - img, overlaps=newoverlaps, strides=newstrides)] + for (xind, yind, xstart, ystart, patch) in sliding_window(img, overlaps=newoverlaps, strides=newstrides): + xy_grid.append((xind, yind)) + start_step.append((xstart, ystart)) + imgs.append(patch) dim_new_grid = tuple(np.add(xy_grid[-1], 1)) + num_tiles = len(xy_grid) + + if shifts_interpolate: + patch_centers_orig = get_patch_centers(img.shape, strides=strides, overlaps=overlaps) + patch_centers_new = get_patch_centers(img.shape, strides=newstrides, overlaps=newoverlaps) + shift_img_x = interpolate_shifts(shift_img_x, patch_centers_orig, patch_centers_new) + shift_img_y = interpolate_shifts(shift_img_y, patch_centers_orig, patch_centers_new) + diffs_phase_grid_us = interpolate_shifts(diffs_phase_grid, patch_centers_orig, patch_centers_new) + else: + shift_img_x = cv2.resize( + shift_img_x, dim_new_grid[::-1], interpolation=cv2.INTER_CUBIC) + shift_img_y = cv2.resize( + shift_img_y, dim_new_grid[::-1], interpolation=cv2.INTER_CUBIC) + diffs_phase_grid_us = cv2.resize( + diffs_phase_grid, dim_new_grid[::-1], interpolation=cv2.INTER_CUBIC) - shift_img_x = cv2.resize( - shift_img_x, dim_new_grid[::-1], interpolation=cv2.INTER_CUBIC) - shift_img_y = cv2.resize( - shift_img_y, dim_new_grid[::-1], interpolation=cv2.INTER_CUBIC) - diffs_phase_grid_us = cv2.resize( - diffs_phase_grid, dim_new_grid[::-1], interpolation=cv2.INTER_CUBIC) - - num_tiles = np.prod(dim_new_grid) max_shear = np.percentile( [np.max(np.abs(np.diff(ssshh, axis=xxsss))) for ssshh, xxsss in itertools.product( @@ -2250,7 +2177,7 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N if gSig_filt is not None: raise Exception( - 'The use of FFT and filtering options have not been tested. Set opencv=True') + 'The use of FFT and filtering options have not been tested. 
Set shifts_opencv=True') imgs = [apply_shifts_dft(im, ( sh[0], sh[1]), dffphs, is_freq=False, border_nan=border_nan) for im, sh, dffphs in zip( @@ -2314,7 +2241,7 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, overlaps:tuple, max_shifts:tuple, newoverlaps:Optional[tuple]=None, newstrides:Optional[tuple]=None, upsample_factor_grid:int=4, upsample_factor_fft:int=10, show_movie:bool=False, max_deviation_rigid:int=2, add_to_movie:int=0, shifts_opencv:bool=True, gSig_filt=None, - use_cuda:bool=False, border_nan:bool=True): + use_cuda:bool=False, border_nan:bool=True, shifts_interpolate:bool=False): """ performs piecewise rigid motion correction iteration, by 1) dividing the FOV in patches 2) motion correcting each patch separately @@ -2322,7 +2249,7 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over 4) stitching back together the corrected subpatches Args: - img: ndaarray 3D + img: ndarray 3D image to correct template: ndarray @@ -2349,21 +2276,28 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over upsample_factor_fft: int resolution of fractional shifts - show_movie: boolean whether to visualize the original and corrected frame during motion correction + show_movie: bool + whether to visualize the original and corrected frame during motion correction max_deviation_rigid: int maximum deviation in shifts of each patch from the rigid shift (should not be large) add_to_movie: if the movie is too negative the correction might have some issues; in this case it is good to add a value so that the movie is non-negative most of the time - filt_sig_size: tuple + shifts_opencv: bool + FIXME: (UNDOCUMENTED) + + gSig_filt: tuple standard deviation and size of gaussian filter to center filter data in case of one photon imaging data - use_cuda : bool, optional - Use skcuda.fft (if available). Default: False + use_cuda : bool (DEPRECATED) + cuda is no longer supported for motion correction; this kwarg will be removed in a future version of Caiman border_nan : bool or string, optional specifies how to deal with borders. (True, False, 'copy', 'min') + + shifts_interpolate: bool + use patch locations to interpolate shifts rather than just upscaling to size of image. Default: False Returns: (new_img, total_shifts, start_step, xyz_grid) @@ -2372,11 +2306,11 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over """ + # TODO The clarity of this function is poor; it'd be good to refactor it towards being easier to read/understand img = img.astype(np.float64).copy() template = template.astype(np.float64).copy() if gSig_filt is not None: - img_orig = img.copy() img = high_pass_filter_space(img_orig, gSig_filt) @@ -2388,14 +2322,9 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over img, template, upsample_factor=upsample_factor_fft, max_shifts=max_shifts) if max_deviation_rigid == 0: # if rigid shifts only - -# if shifts_opencv: - # NOTE: opencv does not support 3D operations - skimage is used instead - # else: - if gSig_filt is not None: raise Exception( - 'The use of FFT and filtering options have not been tested. Set opencv=True') + 'The use of FFT and filtering options has not been tested. 
Do not use gSig_filt with rigid shifts only') new_img = apply_shifts_dft( # TODO: check sfr_freq, (-rigid_shts[0], -rigid_shts[1], -rigid_shts[2]), diffphase, border_nan=border_nan) @@ -2403,24 +2332,23 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over return new_img - add_to_movie, (-rigid_shts[0], -rigid_shts[1], -rigid_shts[2]), None, None else: # extract patches - templates = [ - it[-1] for it in sliding_window_3d(template, overlaps=overlaps, strides=strides)] - xyz_grid = [(it[0], it[1], it[2]) for it in sliding_window_3d( - template, overlaps=overlaps, strides=strides)] - num_tiles = np.prod(np.add(xyz_grid[-1], 1)) - imgs = [it[-1] - for it in sliding_window_3d(img, overlaps=overlaps, strides=strides)] + xyz_grid: list[tuple[int, int, int]] = [] + templates: list[np.ndarray] = [] + + for (xind, yind, zind, _, _, _, patch) in sliding_window_3d(template, overlaps=overlaps, strides=strides): + xyz_grid.append((xind, yind, zind)) + templates.append(patch) + + imgs = [it[-1] for it in sliding_window_3d(img, overlaps=overlaps, strides=strides)] dim_grid = tuple(np.add(xyz_grid[-1], 1)) + num_tiles = len(xyz_grid) if max_deviation_rigid is not None: - lb_shifts = np.ceil(np.subtract( rigid_shts, max_deviation_rigid)).astype(int) ub_shifts = np.floor( np.add(rigid_shts, max_deviation_rigid)).astype(int) - else: - lb_shifts = None ub_shifts = None @@ -2440,37 +2368,19 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over if shifts_opencv: if gSig_filt is not None: img = img_orig + + patch_centers = get_patch_centers(img.shape, strides=strides, overlaps=overlaps) + # shift_img_x and shift_img_y are switched intentionally + m_reg = apply_pw_shifts_remap_3d( + img, shifts_y=shift_img_x, shifts_x=shift_img_y, shifts_z=shift_img_z, + patch_centers=patch_centers, border_nan=border_nan, + shifts_interpolate=shifts_interpolate) - dims = img.shape - x_grid, y_grid, z_grid = np.meshgrid(np.arange(0., dims[1]).astype( - np.float32), np.arange(0., dims[0]).astype(np.float32), - np.arange(0., dims[2]).astype(np.float32)) - if border_nan is not False: - if border_nan is True: - m_reg = warp_sk(img, np.stack((resize_sk(shift_img_x.astype(np.float32), dims) + y_grid, - resize_sk(shift_img_y.astype(np.float32), dims) + x_grid, - resize_sk(shift_img_z.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='constant', cval=np.nan) - elif border_nan == 'min': - m_reg = warp_sk(img, np.stack((resize_sk(shift_img_x.astype(np.float32), dims) + y_grid, - resize_sk(shift_img_y.astype(np.float32), dims) + x_grid, - resize_sk(shift_img_z.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='constant', cval=np.min(img)) - elif border_nan == 'copy': - m_reg = warp_sk(img, np.stack((resize_sk(shift_img_x.astype(np.float32), dims) + y_grid, - resize_sk(shift_img_y.astype(np.float32), dims) + x_grid, - resize_sk(shift_img_z.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='edge') - else: - m_reg = warp_sk(img, np.stack((resize_sk(shift_img_x.astype(np.float32), dims) + y_grid, - resize_sk(shift_img_y.astype(np.float32), dims) + x_grid, - resize_sk(shift_img_z.astype(np.float32), dims) + z_grid), axis=0), - order=3, mode='constant') total_shifts = [ (-x, -y, -z) for x, y, z in zip(shift_img_x.reshape(num_tiles), shift_img_y.reshape(num_tiles), shift_img_z.reshape(num_tiles))] return m_reg - add_to_movie, total_shifts, None, None - # create automatically upsample parameters if not passed + # create automatically upsampled parameters 
if not passed if newoverlaps is None: newoverlaps = overlaps if newstrides is None: @@ -2479,29 +2389,37 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over newshapes = np.add(newstrides, newoverlaps) - imgs = [it[-1] - for it in sliding_window_3d(img, overlaps=newoverlaps, strides=newstrides)] - - xyz_grid = [(it[0], it[1], it[2]) for it in sliding_window_3d( - img, overlaps=newoverlaps, strides=newstrides)] + xyz_grid: list[tuple[int, int, int]] = [] + start_step: list[tuple[int, int, int]] = [] + imgs: list[np.ndarray] = [] - start_step = [(it[3], it[4], it[5]) for it in sliding_window_3d( - img, overlaps=newoverlaps, strides=newstrides)] + for (xind, yind, zind, xstart, ystart, zstart, patch) in sliding_window_3d( + img, overlaps=newoverlaps, strides=newstrides): + xyz_grid.append((xind, yind, zind)) + start_step.append((xstart, ystart, zstart)) + imgs.append(patch) dim_new_grid = tuple(np.add(xyz_grid[-1], 1)) - - shift_img_x = resize_sk( - shift_img_x, dim_new_grid[::-1], order=3) - shift_img_y = resize_sk( - shift_img_y, dim_new_grid[::-1], order=3) - shift_img_z = resize_sk( - shift_img_z, dim_new_grid[::-1], order=3) - diffs_phase_grid_us = resize_sk( - diffs_phase_grid, dim_new_grid[::-1], order=3) - - num_tiles = np.prod(dim_new_grid) - - # what dimension shear should be looked at? shearing for 3d point scanning happens in y and z but no for plane-scanning + num_tiles = len(xyz_grid) + + if shifts_interpolate: + patch_centers_orig = get_patch_centers(img.shape, strides=strides, overlaps=overlaps) + patch_centers_new = get_patch_centers(img.shape, strides=newstrides, overlaps=newoverlaps) + shift_img_x = interpolate_shifts(shift_img_x, patch_centers_orig, patch_centers_new) + shift_img_y = interpolate_shifts(shift_img_y, patch_centers_orig, patch_centers_new) + shift_img_z = interpolate_shifts(shift_img_z, patch_centers_orig, patch_centers_new) + diffs_phase_grid_us = interpolate_shifts(diffs_phase_grid, patch_centers_orig, patch_centers_new) + else: + shift_img_x = resize_sk( + shift_img_x, dim_new_grid[::-1], order=3) + shift_img_y = resize_sk( + shift_img_y, dim_new_grid[::-1], order=3) + shift_img_z = resize_sk( + shift_img_z, dim_new_grid[::-1], order=3) + diffs_phase_grid_us = resize_sk( + diffs_phase_grid, dim_new_grid[::-1], order=3) + + # what dimension shear should be looked at? shearing for 3d point scanning happens in y and z but not for plane-scanning max_shear = np.percentile( [np.max(np.abs(np.diff(ssshh, axis=xxsss))) for ssshh, xxsss in itertools.product( [shift_img_x, shift_img_y], [0, 1])], 75) @@ -2513,7 +2431,7 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over if gSig_filt is not None: raise Exception( - 'The use of FFT and filtering options have not been tested. Set opencv=True') + 'The use of FFT and filtering options have not been tested. Do not use gSig_filt in this mode without shifts_opencv') imgs = [apply_shifts_dft(im, ( sh[0], sh[1], sh[2]), dffphs, is_freq=False, border_nan=border_nan) for im, sh, dffphs in zip( @@ -2571,7 +2489,7 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over sfr_freq, (-rigid_shts[0], -rigid_shts[1], -rigid_shts[2]), diffphase, border_nan=border_nan) img_show = np.vstack([new_img, img]) - img_show = resize_sk(img_show, None, fx=1, fy=1, fz=1) + img_show = rescale_sk(img_show, 1) # TODO does this actually do anything?? cv2.imshow('frame', img_show / np.percentile(template, 99)) cv2.waitKey(int(1. 
/ 500 * 1000)) @@ -2582,6 +2500,117 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over except: pass return new_img - add_to_movie, total_shifts, start_step, xyz_grid + +def apply_pw_shifts_remap_2d(img: np.ndarray, shifts_y: np.ndarray, shifts_x: np.ndarray, + patch_centers: tuple[list[float], ...], border_nan: Union[bool, Literal['copy', 'min']], + shifts_interpolate=False) -> np.ndarray: + """ + Use OpenCV remap to apply 2D piecewise shifts + Inputs: + img: the 2D image to apply shifts to + shifts_y: array of y shifts for each patch (C order) (this is the actual Y i.e. the first dimension of the image) + shifts_x: array of x shifts for each patch (C order) + patch_centers: tuple of patch locations in each dimension. + border_nan: how to deal with borders when remapping + shifts_interpolate: if true, uses interpn to upsample shifts based on patch centers instead of resize + Outputs: + img_remapped: the remapped image + """ + # reshape shifts_y and shifts_x based on patch grid + patch_grid = tuple(len(centers) for centers in patch_centers) + shift_img_y = np.reshape(shifts_y, patch_grid) + shift_img_x = np.reshape(shifts_x, patch_grid) + + # get full image shape/coordinates + dims = img.shape + x_coords, y_coords = [np.arange(0., dims[dim]).astype(np.float32) for dim in (1, 0)] + x_grid, y_grid = np.meshgrid(x_coords, y_coords) + + # up-sample shifts + if shifts_interpolate: + shifts_y = interpolate_shifts(shift_img_y, patch_centers, (y_coords, x_coords)).astype(np.float32) + shifts_x = interpolate_shifts(shift_img_x, patch_centers, (y_coords, x_coords)).astype(np.float32) + else: + shifts_y = cv2.resize(shift_img_y.astype(np.float32), dims[::-1]) + shifts_x = cv2.resize(shift_img_x.astype(np.float32), dims[::-1]) + + # apply to image + if border_nan is False: + mode = cv2.BORDER_CONSTANT + value = 0.0 + elif border_nan is True: + mode = cv2.BORDER_CONSTANT + value = np.nan + elif border_nan == 'min': + mode = cv2.BORDER_CONSTANT + value = np.min(img) + elif border_nan == 'copy': + mode = cv2.BORDER_REPLICATE + value = 0.0 + else: + raise ValueError(f'Unknown value of border_nan ({border_nan})') + + return cv2.remap(img, shifts_x + x_grid, shifts_y + y_grid, cv2.INTER_CUBIC, + borderMode=mode, borderValue=value) + +def apply_pw_shifts_remap_3d(img: np.ndarray, shifts_y: np.ndarray, shifts_x: np.ndarray, shifts_z: np.ndarray, + patch_centers: tuple[list[float], ...], border_nan: Union[bool, Literal['copy', 'min']], + shifts_interpolate=False) -> np.ndarray: + """ + Use skimage warp to apply 3D piecewise shifts + Inputs: + img: the 3D image to apply shifts to + shifts_y: array of y shifts for each patch (C order) (this is the actual Y i.e. the first dimension of the image) + shifts_x: array of x shifts for each patch (C order) + shifts_z: array of z shifts for each patch (C order) + patch_centers: tuple of patch locations in each dimension. 
+ border_nan: how to deal with borders when remapping + shifts_interpolate: if true, uses interpn to upsample shifts based on patch centers instead of resize + Outputs: + img_remapped: the remapped image + """ + # reshape shifts based on patch grid + patch_grid = tuple(len(centers) for centers in patch_centers) + shift_img_y = np.reshape(shifts_y, patch_grid) + shift_img_x = np.reshape(shifts_x, patch_grid) + shift_img_z = np.reshape(shifts_z, patch_grid) + + # get full image shape/coordinates + dims = img.shape + x_coords, y_coords, z_coords = [np.arange(0., dims[dim]).astype(np.float32) for dim in (1, 0, 2)] + x_grid, y_grid, z_grid = np.meshgrid(x_coords, y_coords, z_coords) + + # up-sample shifts + if shifts_interpolate: + coords_new = (y_coords, x_coords, z_coords) + shifts_y = interpolate_shifts(shift_img_y, patch_centers, coords_new).astype(np.float32) + shifts_x = interpolate_shifts(shift_img_x, patch_centers, coords_new).astype(np.float32) + shifts_z = interpolate_shifts(shift_img_z, patch_centers, coords_new).astype(np.float32) + else: + shifts_y = resize_sk(shift_img_y.astype(np.float32), dims) + shifts_x = resize_sk(shift_img_x.astype(np.float32), dims) + shifts_z = resize_sk(shift_img_z.astype(np.float32), dims) + + shift_map = np.stack((shifts_y + y_grid, shifts_x + x_grid, shifts_z + z_grid), axis=0) + + # apply to image + if border_nan is False: + mode = 'constant' + value = 0.0 + elif border_nan is True: + mode = 'constant' + value = np.nan + elif border_nan == 'min': + mode = 'constant' + value = np.min(img) + elif border_nan == 'copy': + mode = 'edge' + value = 0.0 + else: + raise ValueError(f'Unknown value of border_nan ({border_nan})') + + return warp_sk(img, shift_map, order=3, mode=mode, cval=value) + def compute_flow_single_frame(frame, templ, pyr_scale=.5, levels=3, winsize=100, iterations=15, poly_n=5, poly_sigma=1.2 / 5, flags=0): @@ -2596,7 +2625,7 @@ def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_di gSig_filt=None): #todo: todocument logger = logging.getLogger("caiman") - if os.environ.get('ENABLE_TQDM') == 'True': + if os.environ.get('ENABLE_TQDM') == 'True': # TODO Find a better way to toggle this or remove the code; this is also not documented anywhere disable_tqdm = False else: disable_tqdm = True @@ -2724,7 +2753,8 @@ def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_di def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_splits_to_process=None, num_iter=1, template=None, shifts_opencv=False, save_movie_rigid=False, add_to_movie=None, nonneg_movie=False, gSig_filt=None, subidx=slice(None, None, 1), use_cuda=False, - border_nan=True, var_name_hdf5='mov', is3D=False, indices=(slice(None), slice(None))): + border_nan=True, var_name_hdf5='mov', is3D=False, indices=(slice(None), slice(None)), + shifts_interpolate=False): """ Function that perform memory efficient hyper parallelized rigid motion corrections while also saving a memory mappable file @@ -2750,17 +2780,17 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl template: ndarray if a good approximation of the template to register is available, it can be used - shifts_opencv: boolean + shifts_opencv: bool toggle the shifts applied with opencv, if yes faster but induces some smoothing - save_movie_rigid: boolean + save_movie_rigid: bool toggle save movie subidx: slice Indices to slice - use_cuda : bool, optional - Use skcuda.fft (if available). 
Default: False + use_cuda : bool (DEPRECATED) + cuda is no longer supported for motion correction; this kwarg will be removed in a future version of Caiman indices: tuple(slice), default: (slice(None), slice(None)) Use that to apply motion correction only on a part of the FOV @@ -2807,8 +2837,6 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl if is3D: # TODO - motion_correct_3d needs to be implemented in movies.py template = caiman.motion_correction.bin_median_3d(m) # motion_correct_3d has not been implemented yet - instead initialize to just median image -# template = caiman.motion_correction.bin_median_3d( -# m.motion_correct_3d(max_shifts[2], max_shifts[1], max_shifts[0], template=None)[0]) else: if not m.flags['WRITEABLE']: m = m.copy() @@ -2846,7 +2874,7 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl dview=dview, save_movie=save_movie, base_name=base_name, subidx = subidx, num_splits=num_splits_to_process, shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie, gSig_filt=gSig_filt, use_cuda=use_cuda, border_nan=border_nan, var_name_hdf5=var_name_hdf5, is3D=is3D, - indices=indices) + indices=indices, shifts_interpolate=shifts_interpolate) if is3D: new_templ = np.nanmedian(np.stack([r[-1] for r in res_rig]), 0) else: @@ -2869,7 +2897,7 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo splits=56, num_splits_to_process=None, num_iter=1, template=None, shifts_opencv=False, save_movie=False, nonneg_movie=False, gSig_filt=None, use_cuda=False, border_nan=True, var_name_hdf5='mov', is3D=False, - indices=(slice(None), slice(None))): + indices=(slice(None), slice(None)), shifts_interpolate=False): """ Function that perform memory efficient hyper parallelized rigid motion corrections while also saving a memory mappable file @@ -2907,14 +2935,14 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo template: ndarray if a good approximation of the template to register is available, it can be used - shifts_opencv: boolean + shifts_opencv: bool toggle the shifts applied with opencv, if yes faster but induces some smoothing - save_movie_rigid: boolean + save_movie_rigid: bool toggle save movie - use_cuda : bool, optional - Use skcuda.fft (if available). 
Default: False + use_cuda : bool (DEPRECATED) + cuda is no longer supported for motion correction; this kwarg will be removed in a future version of Caiman indices: tuple(slice), default: (slice(None), slice(None)) Use that to apply motion correction only on a part of the FOV @@ -2961,7 +2989,7 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo else: logger.debug(f'saving mmap of {fname}') - if isinstance(fname, tuple): + if isinstance(fname, tuple): # TODO Switch to using standard path functions base_name=os.path.splitext(os.path.split(fname[0])[-1])[0] + '_els_' else: base_name=os.path.splitext(os.path.split(fname)[-1])[0] + '_els_' @@ -2974,7 +3002,7 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo base_name=base_name, num_splits=num_splits_to_process, shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie, gSig_filt=gSig_filt, use_cuda=use_cuda, border_nan=border_nan, var_name_hdf5=var_name_hdf5, is3D=is3D, - indices=indices) + indices=indices, shifts_interpolate=shifts_interpolate) if is3D: new_templ = np.nanmedian(np.stack([r[-1] for r in res_el]), 0) else: @@ -3021,7 +3049,6 @@ def tile_and_correct_wrapper(params): # todo todocument logger = logging.getLogger("caiman") - try: cv2.setNumThreads(0) except: @@ -3030,7 +3057,7 @@ def tile_and_correct_wrapper(params): img_name, out_fname, idxs, shape_mov, template, strides, overlaps, max_shifts,\ add_to_movie, max_deviation_rigid, upsample_factor_grid, newoverlaps, newstrides, \ shifts_opencv, nonneg_movie, gSig_filt, is_fiji, use_cuda, border_nan, var_name_hdf5, \ - is3D, indices = params + is3D, indices, shifts_interpolate = params if isinstance(img_name, tuple): @@ -3056,7 +3083,8 @@ def tile_and_correct_wrapper(params): upsample_factor_fft=10, show_movie=False, max_deviation_rigid=max_deviation_rigid, shifts_opencv=shifts_opencv, gSig_filt=gSig_filt, - use_cuda=use_cuda, border_nan=border_nan) + use_cuda=use_cuda, border_nan=border_nan, + shifts_interpolate=shifts_interpolate) shift_info.append([total_shift, start_step, xyz_grid]) else: @@ -3067,7 +3095,8 @@ def tile_and_correct_wrapper(params): upsample_factor_fft=10, show_movie=False, max_deviation_rigid=max_deviation_rigid, shifts_opencv=shifts_opencv, gSig_filt=gSig_filt, - use_cuda=use_cuda, border_nan=border_nan) + use_cuda=use_cuda, border_nan=border_nan, + shifts_interpolate=shifts_interpolate) shift_info.append([total_shift, start_step, xy_grid]) if out_fname is not None: @@ -3088,7 +3117,7 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0 upsample_factor_grid=4, order='F', dview=None, save_movie=True, base_name=None, subidx = None, num_splits=None, shifts_opencv=False, nonneg_movie=False, gSig_filt=None, use_cuda=False, border_nan=True, var_name_hdf5='mov', is3D=False, - indices=(slice(None), slice(None))): + indices=(slice(None), slice(None)), shifts_interpolate=False): """ """ @@ -3123,7 +3152,7 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0 if num_splits is not None: idxs = np.array(idxs)[np.random.randint(0, len(idxs), num_splits)] save_movie = False - logger.warning('**** MOVIE NOT SAVED BECAUSE num_splits is not None ****') + logger.warning('**** Movie not saved because num_splits is not None ****') if save_movie: if base_name is None: @@ -3143,14 +3172,11 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0 pars.append([fname, fname_tot, idx, shape_mov, template, strides, overlaps, max_shifts, 
np.array( add_to_movie, dtype=np.float32), max_deviation_rigid, upsample_factor_grid, newoverlaps, newstrides, shifts_opencv, nonneg_movie, gSig_filt, is_fiji, - use_cuda, border_nan, var_name_hdf5, is3D, indices]) + use_cuda, border_nan, var_name_hdf5, is3D, indices, shifts_interpolate]) if dview is not None: logger.info('** Starting parallel motion correction **') - if HAS_CUDA and use_cuda: - res = dview.map(tile_and_correct_wrapper,pars) - dview.map(close_cuda_process, range(len(pars))) - elif 'multiprocessing' in str(type(dview)): + if 'multiprocessing' in str(type(dview)): logger.debug("entering multiprocessing tile_and_correct_wrapper") res = dview.map_async(tile_and_correct_wrapper, pars).get(4294967) else: @@ -3160,3 +3186,4 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0 res = list(map(tile_and_correct_wrapper, pars)) return fname_tot, res + diff --git a/caiman/source_extraction/cnmf/cnmf.py b/caiman/source_extraction/cnmf/cnmf.py index 05ce34a6d..c8f8d6bdc 100644 --- a/caiman/source_extraction/cnmf/cnmf.py +++ b/caiman/source_extraction/cnmf/cnmf.py @@ -109,8 +109,8 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p p: int order of the autoregressive process used to estimate deconvolution - Ain: ndarray - if know, it is the initial estimate of spatial filters + Ain: np.ndarray + if known, it is the initial estimate of spatial filters. Array must be of type `bool` in 'F' order of shape: [n_pixels, n_components] ssub: int downsampling factor in space diff --git a/caiman/source_extraction/cnmf/estimates.py b/caiman/source_extraction/cnmf/estimates.py index d32a1ce23..d92945a21 100644 --- a/caiman/source_extraction/cnmf/estimates.py +++ b/caiman/source_extraction/cnmf/estimates.py @@ -36,7 +36,8 @@ def __init__(self, A=None, b=None, C=None, f=None, R=None, dims=None): Args: A: scipy.sparse.csc_matrix (dimensions: # of pixels x # components) - set of spatial footprints. Each footprint is represented in a column of A, flattened with order = 'F' + set of spatial footprints. Each footprint is represented in a column of A, flattened with order = 'F'. + Must be a np.ndarray of type `bool` if used for manual seeded initialization. C: np.ndarray (dimensions: # of components x # of timesteps) set of temporal traces (each row of C corresponds to a trace) diff --git a/caiman/source_extraction/cnmf/initialization.py b/caiman/source_extraction/cnmf/initialization.py index 31ac48e0a..f1ff6755c 100644 --- a/caiman/source_extraction/cnmf/initialization.py +++ b/caiman/source_extraction/cnmf/initialization.py @@ -148,7 +148,7 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter min_corr=0.8, min_pnr=10, seed_method='auto', ring_size_factor=1.5, center_psf=False, ssub_B=2, init_iter=2, remove_baseline = True, SC_kernel='heat', SC_sigma=1, SC_thr=0, SC_normalize=True, SC_use_NN=False, - SC_nnn=20, lambda_gnmf=1): + SC_nnn=20, lambda_gnmf=1, snmf_l1_ratio:float=0.0): """ Initialize components. This function initializes the spatial footprints, temporal components, and background which are then further refined by the CNMF iterations. There are four @@ -256,6 +256,9 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter init_iter: int, optional number of iterations for 1-photon imaging initialization + snmf_l1_ratio: float + Used only by sparse NMF, passed to NMF call + Returns: Ain: np.ndarray (d1 * d2 [ * d3]) x K, spatial filter of each neuron. 
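[Editor's note] The new snmf_l1_ratio parameter is threaded through initialize_components into sparseNMF, where it is passed to scikit-learn's NMF as l1_ratio. In scikit-learn, alpha_W sets the overall regularization strength and l1_ratio blends that penalty between pure L2 (l1_ratio=0.0, the previously hard-coded value) and pure L1 (l1_ratio=1.0, which favors sparser factors). A minimal sketch of the call pattern, mirroring the sparseNMF changes in this patch (the toy data and variable names are illustrative, not part of the patch):

    import numpy as np
    from sklearn.decomposition import NMF

    rng = np.random.default_rng(0)
    yr = rng.random((300, 600)).astype(np.float32)  # T frames x d pixels, non-negative

    # l1_ratio=0.0 reproduces the old behavior (a pure Frobenius/L2 penalty);
    # larger values push the alpha_W penalty toward L1, encouraging sparser factors.
    mdl = NMF(n_components=4, init='nndsvd', max_iter=200, shuffle=False,
              alpha_W=0.5, l1_ratio=0.5)
    C = mdl.fit_transform(yr).T  # temporal traces, one per row (as in sparseNMF)
    A = mdl.components_.T        # spatial components, one per column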
@@ -272,11 +275,6 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter fin: np.ndarray nb x T matrix, initialization of temporal background - Raises: - Exception "Unsupported method" - - Exception 'You need to define arguments for local NMF' - """ logger = logging.getLogger("caiman") method = method_init @@ -339,7 +337,7 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter elif method == 'sparse_nmf': Ain, Cin, _, b_in, f_in = sparseNMF( Y_ds, nr=K, nb=nb, max_iter_snmf=max_iter_snmf, alpha=alpha_snmf, - sigma_smooth=sigma_smooth_snmf, remove_baseline=remove_baseline, perc_baseline=perc_baseline_snmf) + sigma_smooth=sigma_smooth_snmf, remove_baseline=remove_baseline, perc_baseline=perc_baseline_snmf, l1_ratio=snmf_l1_ratio) elif method == 'compressed_nmf': Ain, Cin, _, b_in, f_in = compressedNMF( @@ -485,7 +483,7 @@ def ICA_PCA(Y_ds, nr, sigma_smooth=(.5, .5, .5), truncate=2, fun='logcosh', return A_in, C_in, center, b_in, f_in def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), - remove_baseline=True, perc_baseline=20, nb=1, truncate=2): + remove_baseline=True, perc_baseline=20, nb=1, truncate=2, l1_ratio=0.0): """ Initialization using sparse NMF @@ -511,6 +509,9 @@ def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), nb: int Number of background components + l1_ratio: float + Parameter to NMF call + Returns: A: np.array 2d array of size (# of pixels) x nr with the spatial components. @@ -523,6 +524,7 @@ def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), 2d array of size nr x 2 [ or 3] with the components centroids """ logger = logging.getLogger("caiman") + m = scipy.ndimage.gaussian_filter(np.transpose( Y_ds, np.roll(np.arange(Y_ds.ndim), 1)), sigma=sigma_smooth, mode='nearest', truncate=truncate) @@ -539,7 +541,7 @@ def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), d = np.prod(dims) yr = np.reshape(m1, [T, d], order='F') - logger.debug(f"Running SparseNMF with alpha_W={alpha}") + logger.info(f"Running SparseNMF with alpha_W={alpha} and l1_ratio={l1_ratio}") mdl = NMF(n_components=nr, verbose=False, init='nndsvd', @@ -547,7 +549,7 @@ def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), max_iter=max_iter_snmf, shuffle=False, alpha_W=alpha, - l1_ratio=0.0) + l1_ratio=l1_ratio) C = mdl.fit_transform(yr).T A = mdl.components_.T A_in = A diff --git a/caiman/source_extraction/cnmf/map_reduce.py b/caiman/source_extraction/cnmf/map_reduce.py index efe789e4f..178f3e74e 100644 --- a/caiman/source_extraction/cnmf/map_reduce.py +++ b/caiman/source_extraction/cnmf/map_reduce.py @@ -454,7 +454,7 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, YrA_tot *= nA[:, None] nB = np.ravel(np.sqrt(B_tot.power(2).sum(0))) B_tot /= nB - B_tot = np.array(B_tot, dtype=np.float32) + B_tot = B_tot.toarray().astype(np.float32) # B_tot = scipy.sparse.coo_matrix(B_tot) F_tot *= nB[:, None] diff --git a/caiman/source_extraction/cnmf/online_cnmf.py b/caiman/source_extraction/cnmf/online_cnmf.py index c84ec3f5a..55b2828ba 100644 --- a/caiman/source_extraction/cnmf/online_cnmf.py +++ b/caiman/source_extraction/cnmf/online_cnmf.py @@ -270,7 +270,6 @@ def _prepare_object(self, Yr, T, new_dims=None, idx_components=None): if self.is1p: estim = self.estimates - d1, d2 = estim.dims estim.Yres_buf -= estim.b0 if ssub_B == 1: estim.Atb = estim.Ab.T.dot(estim.W.dot(estim.b0) - estim.b0) @@ -389,6 +388,7 @@ 
def get_indices_of_pixels_on_ring(self, pixel): else: Yres -= estim.upscale_matrix.dot(estim.W.dot( estim.downscale_matrix.dot(Yres))) + d1, d2 = self.estimates.dims Yres = Yres.reshape((d1, d2, -1), order='F') (self.estimates.first_moment, self.estimates.second_moment, diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 4ad83eaac..36c9996bb 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -588,6 +588,9 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), pw_rigid: bool, default: False flag for performing pw-rigid motion correction. + shifts_interpolate: bool, default: False + use patch locations to interpolate shifts rather than just upscaling to size of image (for pw_rigid only) + shifts_opencv: bool, default: True flag for applying shifts using cubic interpolation (otherwise FFT) @@ -715,7 +718,8 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), 'gSiz': gSiz, 'init_iter': init_iter, 'kernel': None, # user specified template for greedyROI - 'lambda_gnmf' :1, # regularization weight for graph NMF + 'lambda_gnmf': 1, # regularization weight for graph NMF + 'snmf_l1_ratio': 0.0, # L1 ratio, used by sparse nmf mode only 'maxIter': 5, # number of HALS iterations 'max_iter_snmf': 500, 'method_init': method_init, # can be greedy_roi, corr_pnr sparse_nmf, local_NMF @@ -869,6 +873,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), 'num_splits_to_process_rig': None, # DO NOT MODIFY 'overlaps': (32, 32), # overlap between patches in pw-rigid motion correction 'pw_rigid': False, # flag for performing pw-rigid motion correction + 'shifts_interpolate': False, # interpolate shifts based on patch locations instead of resizing 'shifts_opencv': True, # flag for applying shifts using cubic interpolation (otherwise FFT) 'splits_els': 14, # number of splits across time for pw-rigid registration 'splits_rig': 14, # number of splits across time for rigid registration diff --git a/caiman/tests/test_hdf5.py b/caiman/tests/test_hdf5.py new file mode 100644 index 000000000..ab228d463 --- /dev/null +++ b/caiman/tests/test_hdf5.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +import numpy as np +import numpy.testing as npt +import os +from caiman.utils import utils +from caiman.paths import get_tempdir + + +def _recursively_assert_array_equal(a, b): + """Get around array_equal not ignoring nans for nested objects""" + if isinstance(a, dict): + if not isinstance(b, dict): + raise AssertionError('Values have different types') + if len(a) != len(b): + raise AssertionError('Dicts have different sizes') + + for key in a: + if key not in b: + raise AssertionError(f'Dicts have different keys ({key} not found)') + _recursively_assert_array_equal(a[key], b[key]) + else: + npt.assert_array_equal(a, b) + + +def test_save_and_load_dict_to_hdf5(): + filename = os.path.join(get_tempdir(), 'test_hdf5.hdf5') + dict_to_save = { + 'int_scalar': 1, + 'int_vector': np.array([1, 2], dtype=int), + 'int_matrix': np.array([[1, 2], [3, 4]], dtype=int), + 'float32': np.array([[1., 2.], [3., 4.]], dtype='float32'), + 'float32_w_nans': np.array([[1., 2.], [3., np.nan]], dtype='float32'), + 'float64_w_nans': np.array([[1., 2.], [3., np.nan]], dtype='float64'), + 'dict': { + 'nested_float': np.array([1.0, 2.0]) + }, + 'string': 'foobar', + 'bool': True, + 'dxy': (1.0, 2.0) # specific key that should be saved as a tuple + } + # test no validation error on save + utils.save_dict_to_hdf5(dict_to_save, filename) + + # test that 
the same data gets loaded + loaded = utils.load_dict_from_hdf5(filename) + _recursively_assert_array_equal(dict_to_save, loaded) + + +if __name__ == '__main__': + test_save_and_load_dict_to_hdf5() \ No newline at end of file diff --git a/caiman/tests/test_memmapping.py b/caiman/tests/test_memmapping.py index 6a3e6df9f..99d1eb7f9 100644 --- a/caiman/tests/test_memmapping.py +++ b/caiman/tests/test_memmapping.py @@ -1,22 +1,12 @@ import pathlib import numpy as np -import nose +import pytest from caiman import mmapping from caiman.paths import caiman_datadir -TWO_D_FNAME = ( - pathlib.Path(caiman_datadir()) - / "testdata/memmap__d1_10_d2_11_d3_1_order_F_frames_12_.mmap" -) -THREE_D_FNAME = ( - pathlib.Path(caiman_datadir()) - / "testdata/memmap__d1_10_d2_11_d3_13_order_F_frames_12_.mmap" -) - - def test_load_raises_wrong_ext(): fname = "a.mmapp" try: @@ -37,38 +27,46 @@ def test_load_raises_multiple_ext(): assert False -def setup_2d_mmap(): - np.memmap( - TWO_D_FNAME, mode="w+", dtype=np.float32, shape=(12, 10, 11, 13), order="F" +@pytest.fixture(scope="function") +def three_d_mmap_fname(): + THREE_D_FNAME = ( + pathlib.Path(caiman_datadir()) + / "testdata/memmap__d1_10_d2_11_d3_13_order_F_frames_12_.mmap" ) - - -def teardown_2d_mmap(): - TWO_D_FNAME.unlink() - - -def setup_3d_mmap(): np.memmap( THREE_D_FNAME, mode="w+", dtype=np.float32, shape=(12, 10, 11, 13), order="F" ) + try: + yield THREE_D_FNAME + finally: + THREE_D_FNAME.unlink() -def teardown_3d_mmap(): - THREE_D_FNAME.unlink() +@pytest.fixture(scope="function") +def two_d_mmap_fname(): + TWO_D_FNAME = ( + pathlib.Path(caiman_datadir()) + / "testdata/memmap__d1_10_d2_11_d3_1_order_F_frames_12_.mmap" + ) + np.memmap( + TWO_D_FNAME, mode="w+", dtype=np.float32, shape=(12, 10, 11, 13), order="F" + ) + try: + yield TWO_D_FNAME + finally: + TWO_D_FNAME.unlink() -@nose.with_setup(setup_2d_mmap, teardown_2d_mmap) -def test_load_successful_2d(): - fname = pathlib.Path(caiman_datadir()) / "testdata" / TWO_D_FNAME +def test_load_successful_2d(two_d_mmap_fname): + fname = two_d_mmap_fname Yr, (d1, d2), T = mmapping.load_memmap(str(fname)) assert (d1, d2) == (10, 11) assert T == 12 assert isinstance(Yr, np.memmap) -@nose.with_setup(setup_3d_mmap, teardown_3d_mmap) -def test_load_successful_3d(): - fname = pathlib.Path(caiman_datadir()) / "testdata" / THREE_D_FNAME +def test_load_successful_3d(three_d_mmap_fname): + fname = three_d_mmap_fname Yr, (d1, d2, d3), T = mmapping.load_memmap(str(fname)) assert (d1, d2, d3) == (10, 11, 13) assert T == 12 diff --git a/caiman/tests/test_toydata.py b/caiman/tests/test_toydata.py index beb51a786..c796eaddd 100644 --- a/caiman/tests/test_toydata.py +++ b/caiman/tests/test_toydata.py @@ -2,17 +2,23 @@ import numpy.testing as npt import numpy as np +import os from scipy.ndimage.filters import gaussian_filter import caiman.source_extraction.cnmf.params from caiman.source_extraction import cnmf as cnmf from caiman.utils.visualization import get_contours +from caiman.paths import fn_relocated, generate_fname_tot +from caiman import save_memmap, load_memmap +TOYDATA_DIMS = { + 2: (20, 30), + 3: (12, 14, 16) + } -#%% def gen_data(D=3, noise=.5, T=300, framerate=30, firerate=2.): N = 4 # number of neurons - dims = [(20, 30), (12, 14, 16)][D - 2] # size of image + dims = TOYDATA_DIMS[D] # size of image sig = (2, 2, 2)[:D] # neurons size bkgrd = 10 # fluorescence baseline gamma = .9 # calcium decay time constant @@ -35,22 +41,83 @@ def gen_data(D=3, noise=.5, T=300, framerate=30, firerate=2.): return Yr, trueC, trueS, 
trueA, centers, dims -def pipeline(D): +def compare_contour_coords(coords1: np.ndarray, coords2: np.ndarray): + """ + Compare 2 matrices of contour coordinates that should be the same, but may be calculated in a different order/ + from different starting points. + + The first point of each contour component is repeated, and this may be a different point depending on orientation. + To get around this, compare differences instead (have to take absolute value b/c direction may be opposite). + Also sort coordinates b/c starting point is unimportant & depends on orientation + """ + diffs_sorted = [] + for coords in [coords1, coords2]: + abs_diffs = np.abs(np.diff(coords, axis=0)) + sort_order = np.lexsort(abs_diffs.T) + diffs_sorted.append(abs_diffs[sort_order, :]) + npt.assert_allclose(diffs_sorted[0], diffs_sorted[1]) + + +def check_get_contours_swap_dim(cnm: cnmf.CNMF): + """Check that get_contours works regardless of swap_dim""" + dims = cnm.estimates.dims + coor_normal = get_contours(cnm.estimates.A, cnm.estimates.dims, swap_dim=False) + coor_swapped = get_contours(cnm.estimates.A, cnm.estimates.dims[::-1], swap_dim=True) + for c_normal, c_swapped in zip(coor_normal, coor_swapped): + if len(dims) == 3: + for plane_coor_normal, plane_coor_swapped in zip(c_normal['coordinates'], c_swapped['coordinates']): + compare_contour_coords(plane_coor_normal, plane_coor_swapped[:, ::-1]) + else: + compare_contour_coords(c_normal['coordinates'], c_swapped['coordinates'][:, ::-1]) + npt.assert_allclose(c_normal['CoM'], c_swapped['CoM'][::-1]) + + +def get_params_dicts(D: int): + """Get combinations of parameters to test""" + dims = TOYDATA_DIMS[D] + return { + 'no-patch': { + 'data': {'dims': dims}, + 'init': {'K': 4, 'gSig': [2, 2, 2][:D]}, + 'preprocess': {'p': 1, 'n_pixels_per_process': np.prod(dims)}, + 'spatial': {'n_pixels_per_process': np.prod(dims), 'thr_method': 'nrg', 'extract_cc': False}, + 'temporal': {'p': 1, 'block_size_temp': np.prod(dims)}, + }, + 'patch': { + 'data': {'dims': dims}, + 'init': {'K': 4, 'gSig': [2, 2, 2][:D]}, + 'patch': {'rf': [d // 2 for d in dims], 'stride': 1} # use one big patch to get the same results as without patches + }, + 'patch-not-lowrank': { + 'data': {'dims': dims}, + 'init': {'K': 4, 'gSig': [2, 2, 2][:D]}, + 'patch': {'rf': [d // 2 for d in dims], 'stride': 1, 'low_rank_background': False} + } + } + + +def pipeline(D, params_dict, name): #%% GENERATE GROUND TRUTH DATA Yr, trueC, trueS, trueA, centers, dims = gen_data(D) N, T = trueC.shape + # INIT - params = caiman.source_extraction.cnmf.params.CNMFParams(dims=dims, - k=4, - gSig=[2, 2, 2][:D], - p=1, - n_pixels_per_process=np.prod(dims), - block_size_temp=np.prod(dims)) - params.spatial['thr_method'] = 'nrg' - params.spatial['extract_cc'] = False + params = caiman.source_extraction.cnmf.params.CNMFParams(params_dict=params_dict) cnm = cnmf.CNMF(2, params=params) + # FIT images = np.reshape(Yr.T, (T,) + dims, order='F') + if params.patch['rf'] is not None: + # have to save memmap + base_name = fn_relocated(f'test-{name}.mmap', force_temp=True) + mmap_name = save_memmap([images], base_name=base_name, order='C', is_3D=(D==3)) + # mmap_name = generate_fname_tot(base_name, dims, order='C') + f'_frames_{T}.mmap' + # images = np.memmap(mmap_name, dtype=np.float32, mode='w+', shape=Yr.shape, order='C') + # images[:] = Yr + images, _, _ = load_memmap(mmap_name) + images = np.reshape(images.T, (T,) + dims, order='F') + params.change_params({'data': {'fnames': [mmap_name]}}) + cnm = cnm.fit(images) # VERIFY HIGH 
CORRELATION WITH GROUND TRUTH @@ -64,39 +131,23 @@ def pipeline(D): cnm.estimates.A.toarray()[:, i])[0, 1] for i in range(N) ] npt.assert_allclose(corr, 1, .05) + return cnm - # Check that get_contours works regardless of swap_dim - coor_normal = get_contours(cnm.estimates.A, dims, swap_dim=False) - coor_swapped = get_contours(cnm.estimates.A, dims[::-1], swap_dim=True) - for c_normal, c_swapped in zip(coor_normal, coor_swapped): - if D == 3: - for plane_coor_normal, plane_coor_swapped in zip(c_normal['coordinates'], c_swapped['coordinates']): - compare_contour_coords(plane_coor_normal, plane_coor_swapped[:, ::-1]) - else: - compare_contour_coords(c_normal['coordinates'], c_swapped['coordinates'][:, ::-1]) - - npt.assert_allclose(c_normal['CoM'], c_swapped['CoM'][::-1]) - -def compare_contour_coords(coords1: np.ndarray, coords2: np.ndarray): - """ - Compare 2 matrices of contour coordinates that should be the same, but may be calculated in a different order/ - from different starting points. - - The first point of each contour component is repeated, and this may be a different point depending on orientation. - To get around this, compare differences instead (have to take absolute value b/c direction may be opposite). - Also sort coordinates b/c starting point is unimportant & depends on orientation - """ - diffs_sorted = [] - for coords in [coords1, coords2]: - abs_diffs = np.abs(np.diff(coords, axis=0)) - sort_order = np.lexsort(abs_diffs.T) - diffs_sorted.append(abs_diffs[sort_order, :]) - npt.assert_allclose(diffs_sorted[0], diffs_sorted[1]) +def pipeline_all_params(D): + cnm = None + for name, params_dict in get_params_dicts(D).items(): + try: + cnm = pipeline(D, params_dict, name) + except Exception as e: + print(f'Params set {name} failed') + raise + assert cnm is not None + # check get_contours on just the last set of results + check_get_contours_swap_dim(cnm) def test_2D(): - pipeline(2) - + pipeline_all_params(2) def test_3D(): - pipeline(3) + pipeline_all_params(3) diff --git a/caiman/utils/utils.py b/caiman/utils/utils.py index 54f028e3d..112f616bf 100644 --- a/caiman/utils/utils.py +++ b/caiman/utils/utils.py @@ -516,7 +516,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict except: item = np.array(item).astype('|S32') h5file[path + key] = item - if not np.array_equal(h5file[path + key][()], item): + if not np.array_equal(h5file[path + key][()], item, equal_nan=item.dtype.kind == 'f'): # just using True gives "ufunc 'isnan' not supported for the input types" raise ValueError(f'Error while saving ndarray {key} of dtype {item.dtype}') # save dictionaries elif isinstance(item, dict): diff --git a/demos/notebooks/demo_OnACID_mesoscope.ipynb b/demos/notebooks/demo_OnACID_mesoscope.ipynb index 015e54334..07b8ecfc3 100644 --- a/demos/notebooks/demo_OnACID_mesoscope.ipynb +++ b/demos/notebooks/demo_OnACID_mesoscope.ipynb @@ -36,7 +36,8 @@ "\n", "logfile = None # Replace with a path if you want to log to a file\n", "logger = logging.getLogger('caiman')\n", - "logger.setLevel(logging.INFO)\n", + "# Set to logging.INFO if you want much output, potentially much more output\n", + "logger.setLevel(logging.WARNING)\n", "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n", "if logfile is not None:\n", " handler = logging.FileHandler(logfile)\n", @@ -189,7 +190,7 @@ "Now inspect the components extracted by OnACID. 
Note that if single pass was used then several components would be non-zero only for the part of the time interval indicating that they were detected online by OnACID.\n", "\n", "Note that if you get data rate error you can start Jupyter notebooks using:\n", - "'jupyter notebook --NotebookApp.iopub_data_rate_limit=1.0e10'" + "'jupyter lab --ZMQChannelsWebsocketConnection.iopub_data_rate_limit=1.0e10'" ] }, { diff --git a/demos/notebooks/demo_Ring_CNN.ipynb b/demos/notebooks/demo_Ring_CNN.ipynb index 2b261b58d..ac764fa31 100644 --- a/demos/notebooks/demo_Ring_CNN.ipynb +++ b/demos/notebooks/demo_Ring_CNN.ipynb @@ -25,7 +25,8 @@ "\n", "logfile = None # Replace with a path if you want to log to a file\n", "logger = logging.getLogger('caiman')\n", - "logger.setLevel(logging.INFO)\n", + "# Set to logging.INFO if you want much output, potentially much more output\n", + "logger.setLevel(logging.WARNING)\n", "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n", "if logfile is not None:\n", " handler = logging.FileHandler(logfile)\n", @@ -278,7 +279,7 @@ "Now inspect the components extracted by OnACID. Note that if single pass was used then several components would be non-zero only for the part of the time interval indicating that they were detected online by OnACID.\n", "\n", "Note that if you get data rate error you can start Jupyter notebooks using:\n", - "'jupyter notebook --NotebookApp.iopub_data_rate_limit=1.0e10'" + "'jupyter lab --ZMQChannelsWebsocketConnection.iopub_data_rate_limit=1.0e10'" ] }, { diff --git a/demos/notebooks/demo_VST.ipynb b/demos/notebooks/demo_VST.ipynb index 40703c3a4..1df3febe0 100644 --- a/demos/notebooks/demo_VST.ipynb +++ b/demos/notebooks/demo_VST.ipynb @@ -26,7 +26,8 @@ "\n", "logfile = None # Replace with a path if you want to log to a file\n", "logger = logging.getLogger('caiman')\n", - "logger.setLevel(logging.INFO)\n", + "# Set to logging.INFO if you want much output, potentially much more output\n", + "logger.setLevel(logging.WARNING)\n", "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n", "if logfile is not None:\n", " handler = logging.FileHandler(logfile)\n", diff --git a/demos/notebooks/demo_caiman_cnmf_3D.ipynb b/demos/notebooks/demo_caiman_cnmf_3D.ipynb index 9503f1a2b..607758214 100644 --- a/demos/notebooks/demo_caiman_cnmf_3D.ipynb +++ b/demos/notebooks/demo_caiman_cnmf_3D.ipynb @@ -41,7 +41,8 @@ "\n", "logfile = None # Replace with a path if you want to log to a file\n", "logger = logging.getLogger('caiman')\n", - "logger.setLevel(logging.INFO)\n", + "# Set to logging.INFO if you want much output, potentially much more output\n", + "logger.setLevel(logging.WARNING)\n", "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n", "if logfile is not None:\n", " handler = logging.FileHandler(logfile)\n", diff --git a/demos/notebooks/demo_dendritic.ipynb b/demos/notebooks/demo_dendritic.ipynb index 1e18d72d2..a8ae0191f 100644 --- a/demos/notebooks/demo_dendritic.ipynb +++ b/demos/notebooks/demo_dendritic.ipynb @@ -70,7 +70,8 @@ "else:\n", " log_file = None\n", "logger = logging.getLogger('caiman')\n", - "logger.setLevel(logging.INFO)\n", + "# Set to logging.INFO if you want much output, potentially much more output\n", + "logger.setLevel(logging.WARNING)\n", "logfmt = logging.Formatter('%(relativeCreated)12d 
diff --git a/demos/notebooks/demo_dendritic.ipynb b/demos/notebooks/demo_dendritic.ipynb
index 1e18d72d2..a8ae0191f 100644
--- a/demos/notebooks/demo_dendritic.ipynb
+++ b/demos/notebooks/demo_dendritic.ipynb
@@ -70,7 +70,8 @@
     "else:\n",
     "    log_file = None\n",
     "logger = logging.getLogger('caiman')\n",
-    "logger.setLevel(logging.INFO)\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
+    "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if log_file is not None:\n",
     "    handler = logging.FileHandler(log_file)\n",
diff --git a/demos/notebooks/demo_motion_correction.ipynb b/demos/notebooks/demo_motion_correction.ipynb
index 9a65e159e..7c785bd57 100644
--- a/demos/notebooks/demo_motion_correction.ipynb
+++ b/demos/notebooks/demo_motion_correction.ipynb
@@ -42,7 +42,8 @@
     "\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
-    "logger.setLevel(logging.INFO)\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
+    "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
     "    handler = logging.FileHandler(logfile)\n",
diff --git a/demos/notebooks/demo_online_3D.ipynb b/demos/notebooks/demo_online_3D.ipynb
index 7f188dc9c..52aa0430d 100644
--- a/demos/notebooks/demo_online_3D.ipynb
+++ b/demos/notebooks/demo_online_3D.ipynb
@@ -38,6 +38,7 @@
     "\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
     "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
diff --git a/demos/notebooks/demo_online_cnmfE.ipynb b/demos/notebooks/demo_online_cnmfE.ipynb
index acb282346..cc92ecde9 100644
--- a/demos/notebooks/demo_online_cnmfE.ipynb
+++ b/demos/notebooks/demo_online_cnmfE.ipynb
@@ -38,6 +38,7 @@
     "\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
     "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
diff --git a/demos/notebooks/demo_pipeline.ipynb b/demos/notebooks/demo_pipeline.ipynb
index b97279f73..42d58d441 100644
--- a/demos/notebooks/demo_pipeline.ipynb
+++ b/demos/notebooks/demo_pipeline.ipynb
@@ -95,6 +95,7 @@
     "# set up logging\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
     "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
@@ -842,7 +843,7 @@
     "Note this is just an initial result; it will contain many false positives, which is to be expected. The main concern to watch for here is whether you have lots of false *negatives* (has the algorithm missed neurons?). False negatives are hard to fix later, so if you have an unacceptable number, be sure to go back and re-run CNMF with new parameters.\n",
     "\n",
     "> If you get a data rate error with any notebook plotting commands, you can start your notebook using \n",
-    "`jupyter notebook --NotebookApp.iopub_data_rate_limit=1.0e10`"
+    "`jupyter lab --ZMQChannelsWebsocketConnection.iopub_data_rate_limit=1.0e10`"
    ]
   },
   {
diff --git a/demos/notebooks/demo_pipeline_cnmfE.ipynb b/demos/notebooks/demo_pipeline_cnmfE.ipynb
index e2ebcd081..f4f3e4405 100644
--- a/demos/notebooks/demo_pipeline_cnmfE.ipynb
+++ b/demos/notebooks/demo_pipeline_cnmfE.ipynb
@@ -85,6 +85,7 @@
     "# set up logging\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
     "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
diff --git a/demos/notebooks/demo_pipeline_voltage_imaging.ipynb b/demos/notebooks/demo_pipeline_voltage_imaging.ipynb
index dbd701bd4..d2fcbea36 100644
--- a/demos/notebooks/demo_pipeline_voltage_imaging.ipynb
+++ b/demos/notebooks/demo_pipeline_voltage_imaging.ipynb
@@ -62,6 +62,7 @@
     "\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
     "logger.setLevel(logging.ERROR)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
diff --git a/demos/notebooks/demo_realtime_cnmfE.ipynb b/demos/notebooks/demo_realtime_cnmfE.ipynb
index f5bf8183e..e91c7517e 100644
--- a/demos/notebooks/demo_realtime_cnmfE.ipynb
+++ b/demos/notebooks/demo_realtime_cnmfE.ipynb
@@ -53,6 +53,7 @@
     "\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
     "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
diff --git a/demos/notebooks/demo_seeded_CNMF.ipynb b/demos/notebooks/demo_seeded_CNMF.ipynb
index 280e00c30..17ec16671 100644
--- a/demos/notebooks/demo_seeded_CNMF.ipynb
+++ b/demos/notebooks/demo_seeded_CNMF.ipynb
@@ -47,6 +47,7 @@
     "\n",
     "logfile = None # Replace with a path if you want to log to a file\n",
     "logger = logging.getLogger('caiman')\n",
+    "# Set to logging.INFO if you want more verbose output (potentially much more)\n",
     "logger.setLevel(logging.WARNING)\n",
     "logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')\n",
     "if logfile is not None:\n",
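All of the logging hunks above follow one pattern; assembled in one place, it looks like the sketch below. The `StreamHandler` fallback and the final `addHandler` call sit outside the lines shown in the hunks and are an assumption about the surrounding notebook code:

    import logging

    logfile = None  # Replace with a path if you want to log to a file
    logger = logging.getLogger('caiman')
    # Set to logging.INFO if you want more verbose output (potentially much more)
    logger.setLevel(logging.WARNING)
    logfmt = logging.Formatter('%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s')
    if logfile is not None:
        handler = logging.FileHandler(logfile)
    else:
        handler = logging.StreamHandler()  # assumption: the hunks end before this branch
    handler.setFormatter(logfmt)
    logger.addHandler(handler)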
diff --git a/docs/source/CaImAn_features_and_references.rst b/docs/source/CaImAn_features_and_references.rst
index 1e1d6e188..9f1e4e609 100644
--- a/docs/source/CaImAn_features_and_references.rst
+++ b/docs/source/CaImAn_features_and_references.rst
@@ -12,8 +12,6 @@ calcium (and voltage) imaging data:
    -  Can be run also in online mode (i.e. one frame at a time)
    -  Corrects for non-rigid artifacts due to raster scanning or non-uniform brain motion
-   -  FFTs can be computed on GPUs (experimental). Requires pycuda and
-      skcuda to be installed.

 |

 -  **Source extraction**
diff --git a/docs/source/Getting_Started.rst b/docs/source/Getting_Started.rst
index c9e92033d..5f3ff42b1 100644
--- a/docs/source/Getting_Started.rst
+++ b/docs/source/Getting_Started.rst
@@ -10,11 +10,10 @@ Demos

    .. code:: bash

-      jupyter notebook --NotebookApp.iopub_data_rate_limit=1.0e10
+      jupyter lab --ZMQChannelsWebsocketConnection.iopub_data_rate_limit=1.0e10

-   and select the notebook from within Jupyter’s browser. The argument
-   ``--NotebookApp.iopub_data_rate_limit=1.0e10`` will prevent any
-   memory issues while plotting on a notebook.
+   and select the notebook from within Jupyter’s browser. The argument provided
+   will prevent any output from being lost while using a notebook.

 -  demo files are also found in the demos/general subfolder. We suggest
    trying demo_pipeline.py first as it contains most of the tasks
diff --git a/docs/source/Installation.rst b/docs/source/Installation.rst
index 939d56120..8fe21f58a 100644
--- a/docs/source/Installation.rst
+++ b/docs/source/Installation.rst
@@ -18,7 +18,7 @@ Caiman setup consists of two main steps:
 We will discuss each of these steps for different operating systems below. In a separate section, we will discuss how to upgrade once you've already installed.

-If you do not already have conda installed, first install a 3.x version for your platform `here `_.
+If you do not already have conda installed, first install a 3.x version of miniforge (a conda distribution) for your platform `here `_.
 When installing, allow conda to modify your PATH variable. If you are using an M1-based Mac, please ignore the ARM builds of conda; install an x86 version instead (ignore any warnings you get while doing so; it will work fine).

@@ -45,17 +45,17 @@ should be good to go.
 Details for conda install

 This process is the same on every operating system, and is what most users will need who just want to use Caiman to
-get things running quickly. You will install mamba into your base environment, create a new environment with the
-caiman package from conda-forge, and then activate the new environment.
+get things running quickly. You will create a new environment with the caiman package from conda-forge, and then activate the new environment.

 .. code:: bash

-   conda install -n base -c conda-forge mamba
-   mamba create -n caiman -c conda-forge caiman
+   mamba create -n caiman caiman
    conda activate caiman

 Note if you are installing on Windows, run the above commands in the anaconda prompt rather than powershell or the dos shell:

+If you do not have a mamba command, you may have started with a different conda distribution than miniforge; this is fine, but you'll want to add the mamba package manually first.
+
 **Known issues**

 If you are on Windows, have used Caiman before using our github repo and now want to use the conda-forge package,
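After `mamba create -n caiman caiman` and `conda activate caiman`, a quick smoke test from Python confirms the environment works (the `__version__` attribute is an assumption about the package; an `ImportError` points at the environment rather than your code):

    import caiman

    # If this import succeeds, the conda install is functional.
    print(getattr(caiman, '__version__', 'unknown version'))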
@@ -89,11 +89,17 @@
 The Windows installation process differs quite a bit from installation
 on Linux or MacOSX. If you can work on Linux, it will be easier.
 Everything below should be from a Conda-prompt rather than from
 Powershell or any other shell.

-- Install Microsoft Build Tools for Visual Studio 2019, which you can download
-  from (https://visualstudio.microsoft.com/vs/older-downloads/). Check the
-  “Build Tools” box, and in the detailed view on the right check the “C/C++ CLI
-  Tools” component too. The specifics of this occasionally change as Microsoft
-  changes its products and website; you may need to go off-script.
+- Install Microsoft Build Tools for Visual Studio 2019.
+
+  - If you are on Windows 10 version 1709 or later or Windows 11, you can use
+    ``winget``. Run in a command prompt:
+
+    ``winget install --id=Microsoft.VisualStudio.2019.BuildTools -e``
+  - Otherwise, you can install it from
+    (https://visualstudio.microsoft.com/vs/older-downloads/). Check the
+    “Build Tools” box, and in the detailed view on the right check the “C/C++ CLI
+    Tools” component too. The specifics of this occasionally change as Microsoft
+    changes its products and website; you may need to go off-script.
 - Remove any associations you may have made between .py files and an existing
   python interpreter or editor
 - If you have less than 64GB of RAM, increase the maximum size of your pagefile to 64GB or more
@@ -203,7 +209,7 @@ Section 3A: Upgrade conda install
Updating the conda-forge package

-From within your caiman environment type ```conda update caiman -c conda-forge```. In most cases this should be enough.
+From within your caiman environment type ```conda update caiman```. In most cases this should be enough.

 If not, you may want to create a new environment from scratch.

@@ -325,24 +331,7 @@
 Support for Linux on ARM (e.g. AWS Graviton) is not available (but it may work with some effort
 if you compile Caiman yourself - we do not have binary packages and this is untested).
 If you care about this, please let us know.

-
-Section 4B: Installing additional packages
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Caiman installs through the conda-forge conda channel. Some packages are available on multiple conda channels, and in this
-case it is important that you use the conda-forge channel if possible. To do this, when installing new packages
-inside your environment, use the following command:
-
-::
-
-   mamba install -c conda-forge --override-channels NEW_PACKAGE_NAME
-
-You will notice that any packages installed this way will mention, in their listing,
-that they are from conda-forge, with none of them having a blank origin. If you don't do this,
-differences between how packages are built in different channels could lead to some packages failing to work
-(e.g., OpenCV).
-
-Section 4C: Setting up environment variables
+Section 4B: Setting up environment variables
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 This is only important for people who are doing the dev-mode install. If you
@@ -369,7 +358,7 @@
 following the instructions `here `__.

-Section 4D: Other topics
+Section 4C: Other topics
 ~~~~~~~~~~~~~~~~~~~~~~~~~

 See also:
diff --git a/docs/source/readme-gpu.md b/docs/source/readme-gpu.md
index 71d33ed80..6339360bc 100644
--- a/docs/source/readme-gpu.md
+++ b/docs/source/readme-gpu.md
@@ -1,6 +1,6 @@
 CaImAn and GPUs
 ===============
-Some parts of Caiman already use Tensorflow, and those parts can benefit from using a
+Some parts of Caiman use Tensorflow, and those parts can benefit from using a
 hardware-accelerated version of Tensorflow. These versions can be installed using Conda
 (presently only on the Linux and Windows platforms). Note that this is experimental and
 our ability to support this environment is limited. To do this:
@@ -14,39 +14,3 @@
 build of opencv (which may not have graphical bindings). If this happens, you may
 search for the most recent opencv with openblas and switch to it using existing conda tooling.
 After switching package versions, check that KERAS_BACKEND is set to tensorflow.
 If not, you can set it in your terminal by typing `KERAS_BACKEND=tensorflow`.
 On some platforms it defaults to theano, which will not be hardware-accelerated.
 This can affect the computational performance of the Caiman online algorithm.
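Setting `KERAS_BACKEND` in the terminal only covers that shell session; it can also be pinned from Python, provided this happens before Keras is first imported in the process (environment-variable backend selection is standard Keras behavior, not Caiman-specific):

    import os

    # Must run before the first `import keras` anywhere in the process.
    os.environ['KERAS_BACKEND'] = 'tensorflow'

    import keras
    print(keras.backend.backend())  # should report 'tensorflow'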
-
-Caiman and CUDA
----------------
-
-Caiman has experimental support for computing FFTs on your GPU,
-using the pycuda libraries instead of OpenCV or numpy. This can be used during motion correction.
-
-Installation
-------------
-We assume you have Caiman generally working first. If you do not,
-follow the general instructions first and verify you have a working
-environment.
-
-One (but less-tested with Caiman) route is to install pycuda from conda-forge; activate your
-caiman environment and then do `conda install -c conda-forge pycuda`. This may be all you need.
-
-It is possible to instead install CUDA, then use pip to install pycuda, but this is involved enough
-that it's better to try the above first.
-
-Use
----
-The CUDA codepaths will only be active if the needed libraries are installed on your system. Otherwise non-CUDA codepaths will be active (even if CUDA is requested in our code).
-
-The following functions have been extended with a
-use_cuda flag (defaults to false) that if set true will use CUDA for FFT
-computation (if the needed libraries are installed):
-
-* The MotionCorrect class - this is most likely the interface you want
-* MotionCorrect::motion_correct_rigid() - used internally by MotionCorrect class
-* register_translation()
-* tile_and_correct()
-* motion_correct_batch_rigid()
-* motion_correct_batch_pwrigid()
-* motion_correction_piecewise()
-
-If you have your environment set up correctly, you can add use_cuda=True to the arguments of MotionCorrect() in your code and it will use your GPU for motion correction.
diff --git a/environment-minimal.yml b/environment-minimal.yml
index e2f6f7f81..81fa6898c 100644
--- a/environment-minimal.yml
+++ b/environment-minimal.yml
@@ -1,6 +1,5 @@
 channels:
 - conda-forge
-- defaults
 dependencies:
 - python <=3.12
 - av
@@ -12,7 +11,7 @@ dependencies:
 - ipywidgets
 - matplotlib
 - moviepy
-- nose
+- pytest
 - numpy <2.0.0,>=1.26
 - numpydoc
 - opencv
diff --git a/environment.yml b/environment.yml
index 6394dbbb1..014c3532c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,6 +1,5 @@
 channels:
 - conda-forge
-- defaults
 dependencies:
 - python <=3.12
 - av
@@ -17,7 +16,8 @@ dependencies:
 - matplotlib
 - moviepy
 - mypy
-- nose
+- pytest
+- pytest-cov
 - numpy <2.0.0,>=1.26
 - numpydoc
 - opencv
diff --git a/pyproject.toml b/pyproject.toml
index d9f85e955..96941901d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,6 +11,43 @@ authors = [
 { name = "Jeremie Kalfon"}]
 readme = "README.md"
 dynamic = ["classifiers", "keywords", "license", "scripts", "version"]
+dependencies = [
+  "av",
+  "bokeh",
+  "coverage",
+  "cython",
+  "h5py",
+  "holoviews",
+  "ipykernel",
+  "ipython",
+  "ipyparallel",
+  "ipywidgets",
+  "keras",
+  "matplotlib",
+  "mypy",
+  "numpy",
+  "numpydoc",
+  "opencv-python",
+  "peakutils",
+  "pims",
+  "psutil",
+  "pynwb",
+  "scikit-image",
+  "scikit-learn",
+  "scipy",
+  "tensorflow",
+  "tifffile",
+  "tqdm",
+  "yapf",
+  "zarr"
+]
+
+[project.optional-dependencies]
+jupyter = [
+  "jupyter",
+  "pyqtgraph",
+  "tk"
+]

 [build-system]
 requires = ["cython", "numpy", "setuptools", "wheel"]
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index f3a39222c..000000000
--- a/requirements.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-av
-bokeh
-coverage
-cython
-h5py
-holoviews
-ipykernel
-ipython
-ipyparallel
-jupyter
-keras
-matplotlib
-mypy
-numpy
-numpydoc
-opencv-python
-peakutils
-pims
-psutil
-pynwb
-pyqtgraph
-scikit-image
-scikit-learn
-scipy
-tensorflow
-tifffile
-tk
-tqdm
-yapf
-zarr
diff --git a/requirements_gpu.txt b/requirements_gpu.txt
deleted file mode 100644
index 6ab23e301..000000000
--- a/requirements_gpu.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-av
-coverage
-cython
-h5py
-ipykernel
-ipython
-ipyparallel
-jupyter
-keras
-matplotlib
-mypy
-numpy
-numpydoc
-opencv-python
-peakutils
-pims
-psutil
-pynwb
-scikit-image
-scikit-learn
-scipy
-tensorflow
-tensorflow-addons
-tifffile
-tk
-tqdm
-yapf
-zarr
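With `nose` replaced by `pytest` in the environment files above, the suite is run with the `pytest` command; it can also be driven from Python as below (the `caiman/tests` path is an assumption about where the tests live in the repo):

    import sys

    import pytest

    # Equivalent to running `pytest -v caiman/tests` from the repository root.
    sys.exit(pytest.main(["-v", "caiman/tests"]))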