From 6dd9166346d3367aa31c79da3dc2e7700c5b831d Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 7 Feb 2025 10:28:03 -0600 Subject: [PATCH 01/59] Initial attempt to adopt array API --- ccdproc/core.py | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 26b27a20..d8118922 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -7,6 +7,7 @@ import numbers import warnings +import array_api_compat import numpy as np from astropy import nddata, stats from astropy import units as u @@ -347,6 +348,8 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): units as the data in the parameter ``ccd_data``. """ + # Get array namespace + xp = array_api_compat.array_namespace(ccd_data.data) if gain is not None and not isinstance(gain, Quantity): raise TypeError("gain must be a astropy.units.Quantity.") @@ -373,7 +376,7 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): if disregard_nan: data[mask] = 0 else: - data[mask] = np.nan + data[mask] = xp.nan logging.warning("Negative values in array will be replaced with nan") # calculate the deviation @@ -480,6 +483,9 @@ def subtract_overscan( if not isinstance(ccd, CCDData): raise TypeError("ccddata is not a CCDData object.") + # Set array namespace + xp = array_api_compat.array_namespace(ccd.data) + if (overscan is not None and fits_section is not None) or ( overscan is None and fits_section is None ): @@ -498,24 +504,24 @@ def subtract_overscan( overscan_axis = 0 if overscan.shape[1] > overscan.shape[0] else 1 if median: - oscan = np.median(overscan.data, axis=overscan_axis) + oscan = xp.median(overscan.data, axis=overscan_axis) else: - oscan = np.mean(overscan.data, axis=overscan_axis) + oscan = xp.mean(overscan.data, axis=overscan_axis) if model is not None: of = fitting.LinearLSQFitter() - yarr = np.arange(len(oscan)) + yarr = xp.arange(len(oscan)) oscan = of(model, yarr, oscan) oscan = oscan(yarr) if overscan_axis == 1: - oscan = np.reshape(oscan, (oscan.size, 1)) + oscan = xp.reshape(oscan, (oscan.size, 1)) else: - oscan = np.reshape(oscan, (1, oscan.size)) + oscan = xp.reshape(oscan, (1, oscan.size)) else: if overscan_axis == 1: - oscan = np.reshape(oscan, oscan.shape + (1,)) + oscan = xp.reshape(oscan, oscan.shape + (1,)) else: - oscan = np.reshape(oscan, (1,) + oscan.shape) + oscan = xp.reshape(oscan, (1,) + oscan.shape) subtracted = ccd.copy() @@ -968,6 +974,9 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): from astropy.nddata.ccddata import _generate_wcs_and_update_header from reproject import reproject_interp + # Set array namespace + xp = array_api_compat.array_namespace(ccd.data) + if not (ccd.wcs.is_celestial and target_wcs.is_celestial): raise ValueError("one or both WCS is not celestial.") @@ -990,7 +999,7 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): # The reprojection will contain nan for any pixels for which the source # was outside the original image. Those should be masked also. 
- output_mask = np.isnan(projected_image_raw) + output_mask = xp.isnan(projected_image_raw) if reprojected_mask is not None: output_mask = output_mask | reprojected_mask @@ -1216,14 +1225,21 @@ def rebin(ccd, newshape): rebin(arr1, (20,20)) """ # check to see that is in a nddata type - if isinstance(ccd, np.ndarray): + try: + xp = array_api_compat.array_namespace(ccd) + except TypeError: + # This will also raise aTypeError if ccd.data isn't an array + # but that is fine. + xp = array_api_compat.array_namespace(ccd.data) + + if isinstance(ccd, xp.ndarray): # check to see that the two arrays are going to be the same length if len(ccd.shape) != len(newshape): raise ValueError("newshape does not have the same dimensions as " "ccd.") slices = [slice(0, old, old / new) for old, new in zip(ccd.shape, newshape)] - coordinates = np.mgrid[slices] + coordinates = xp.mgrid[slices] indices = coordinates.astype("i") return ccd[tuple(indices)] From d3b620375acc1a6333f6b7baad6a4347499fc49f Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sun, 9 Feb 2025 12:30:24 -0600 Subject: [PATCH 02/59] A couple more changes --- ccdproc/core.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index d8118922..04bf65b5 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1228,9 +1228,12 @@ def rebin(ccd, newshape): try: xp = array_api_compat.array_namespace(ccd) except TypeError: - # This will also raise aTypeError if ccd.data isn't an array - # but that is fine. - xp = array_api_compat.array_namespace(ccd.data) + try: + # This will also raise a TypeError if ccd.data isn't an array + # but that is fine. + xp = array_api_compat.array_namespace(ccd.data) + except AttributeError as e: + raise TypeError("ccd is not an ndarray or a CCDData object.") from e if isinstance(ccd, xp.ndarray): @@ -1265,8 +1268,11 @@ def rebin(ccd, newshape): raise TypeError("ccd is not an ndarray or a CCDData object.") -def block_reduce(ccd, block_size, func=np.sum): +def block_reduce(ccd, block_size, func=None): """Thin wrapper around `astropy.nddata.block_reduce`.""" + if func is None: + xp = array_api_compat.array_namespace(ccd.data) + func = xp.sum data = nddata.block_reduce(ccd, block_size, func) if isinstance(ccd, CCDData): # unit and meta "should" be unaffected by the change of shape and can @@ -1277,7 +1283,10 @@ def block_reduce(ccd, block_size, func=np.sum): def block_average(ccd, block_size): """Like `block_reduce` but with predefined ``func=np.mean``.""" - data = nddata.block_reduce(ccd, block_size, np.mean) + + xp = array_api_compat.array_namespace(ccd.data) + + data = nddata.block_reduce(ccd, block_size, xp.mean) # Like in block_reduce: if isinstance(ccd, CCDData): data = CCDData(data, unit=ccd.unit, meta=ccd.meta.copy()) @@ -1988,13 +1997,16 @@ def ccdmask( # No data attribute or data has no shape attribute. 
raise ValueError('"ratio" should be a "CCDData".') from err + # Get array namespace + xp = array_api_compat.array_namespace(ratio.data) + def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): """Helper function to mask values outside of the specified sigma range.""" return (baseline < -lower_sigma * one_sigma_value) | ( baseline > upper_sigma * one_sigma_value ) - mask = ~np.isfinite(ratio.data) + mask = ~xp.isfinite(ratio.data) medsub = ratio.data - ndimage.median_filter(ratio.data, size=(nlmed, ncmed)) if byblocks: From f0bc6b8ca7c14d8b334b6b1f719c2084aa4cb8d1 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Mon, 10 Feb 2025 11:06:59 -0600 Subject: [PATCH 03/59] Add function for check if something is an array --- ccdproc/core.py | 113 ++++++++++++++++++++++++++++-------------------- 1 file changed, 65 insertions(+), 48 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 04bf65b5..01eb2c3c 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -69,6 +69,14 @@ } +def _is_array(arr): + try: + array_api_compat.array_namespace(arr) + except TypeError: + return False + return True + + @log_to_metadata def ccd_process( ccd, @@ -223,6 +231,9 @@ def ccd_process( # make a copy of the object nccd = ccd.copy() + # Set array namespace + xp = array_api_compat.array_namespace(nccd.data) + # apply the overscan correction if isinstance(oscan, CCDData): nccd = subtract_overscan( @@ -252,12 +263,13 @@ def ccd_process( raise ValueError("gain and readnoise must be specified to create error frame.") # apply the bad pixel mask - if isinstance(bad_pixel_mask, np.ndarray): - nccd.mask = bad_pixel_mask - elif bad_pixel_mask is None: + if bad_pixel_mask is None: + # Handle this simple case first.... pass + elif _is_array(bad_pixel_mask): + nccd.mask = xp.asarray(bad_pixel_mask, dtype=bool) else: - raise TypeError("bad_pixel_mask is not None or numpy.ndarray.") + raise TypeError("bad_pixel_mask is not None or an array.") # apply the gain correction if not (gain is None or isinstance(gain, Quantity)): @@ -489,7 +501,7 @@ def subtract_overscan( if (overscan is not None and fits_section is not None) or ( overscan is None and fits_section is None ): - raise TypeError("specify either overscan or fits_section, but not " "both.") + raise TypeError("specify either overscan or fits_section, but not both.") if (overscan is not None) and (not isinstance(overscan, CCDData)): raise TypeError("overscan is not a CCDData object.") @@ -1598,39 +1610,7 @@ def cosmicray_lacosmic( asy_background_kwargs = dict(inbkg=inbkg, invar=invar) - if isinstance(ccd, np.ndarray): - data = ccd - - crmask, cleanarr = detect_cosmics( - data + data_offset, - inmask=None, - sigclip=sigclip, - sigfrac=sigfrac, - objlim=objlim, - gain=gain.value, - readnoise=readnoise.value, - satlevel=satlevel, - niter=niter, - sepmed=sepmed, - cleantype=cleantype, - fsmode=fsmode, - psfmodel=psfmodel, - psffwhm=psffwhm, - psfsize=psfsize, - psfk=psfk, - psfbeta=psfbeta, - verbose=verbose, - **asy_background_kwargs, - ) - - cleanarr = cleanarr - data_offset - cleanarr = _astroscrappy_gain_apply_helper( - cleanarr, gain.value, gain_apply, old_astroscrappy_interface - ) - - return cleanarr, crmask - - elif isinstance(ccd, CCDData): + if isinstance(ccd, CCDData): # Start with a check for a special case: ccd is in electron, and # gain and readnoise have no units. In that case we issue a warning # instead of raising an error to avoid crashing user's pipelines. 
@@ -1697,6 +1677,37 @@
         nccd.mask = nccd.mask + crmask
         return nccd
 
+    elif _is_array(ccd):
+        data = ccd
+
+        crmask, cleanarr = detect_cosmics(
+            data + data_offset,
+            inmask=None,
+            sigclip=sigclip,
+            sigfrac=sigfrac,
+            objlim=objlim,
+            gain=gain.value,
+            readnoise=readnoise.value,
+            satlevel=satlevel,
+            niter=niter,
+            sepmed=sepmed,
+            cleantype=cleantype,
+            fsmode=fsmode,
+            psfmodel=psfmodel,
+            psffwhm=psffwhm,
+            psfsize=psfsize,
+            psfk=psfk,
+            psfbeta=psfbeta,
+            verbose=verbose,
+            **asy_background_kwargs,
+        )
+
+        cleanarr = cleanarr - data_offset
+        cleanarr = _astroscrappy_gain_apply_helper(
+            cleanarr, gain.value, gain_apply, old_astroscrappy_interface
+        )
+
+        return cleanarr, crmask
     else:
         raise TypeError("ccd is not a CCDData or ndarray object.")
 
@@ -1810,22 +1821,26 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0):
         mask of the object will be created if it did not previously exist or
         be updated with the detected cosmic rays.
     """
-    if isinstance(ccd, np.ndarray):
-        data = ccd
+    if _is_array(ccd):
+        xp = array_api_compat.array_namespace(ccd)
+
+        # Masked data is not part of the array API so remove mask if present.
+        # Only look at the data array, guessing that if there is a .mask then
+        # there is also a .data.
+        if hasattr(ccd, "mask"):
+            data = ccd.data
+        else:
+            data = ccd
+
+        data = xp.asarray(data)
 
         if error_image is None:
-            error_image = data.std()
-        else:
-            if not isinstance(error_image, (float, np.ndarray)):
+            error_image = xp.std(data)
+        elif not isinstance(error_image, float):
+            if not _is_array(error_image):
                 raise TypeError("error_image is not a float or ndarray.")
 
         # create the median image
         marr = ndimage.median_filter(data, size=(mbox, mbox))
 
-        # Only look at the data array
-        if isinstance(data, np.ma.MaskedArray):
-            data = data.data
-
         # Find the residual image
         rarr = (data - marr) / error_image
 
@@ -1839,7 +1854,9 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0):
         # replace bad pixels in the image
         ndata = data.copy()
         if rbox > 0:
-            data = np.ma.masked_array(data, (crarr == 1))
+            # Fun fact: scipy.ndimage ignores the mask, so may as well not
+            # bother with it.
+            # data = np.ma.masked_array(data, (crarr == 1))
             mdata = ndimage.median_filter(data, rbox)
             ndata[crarr == 1] = mdata[crarr == 1]
 
From 424ebdc0432c876e4f36fa278c2bcde4c116392c Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Wed, 12 Feb 2025 10:31:09 -0600
Subject: [PATCH 04/59] Remove numpy from core

---
 ccdproc/core.py | 85 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 76 insertions(+), 9 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index 01eb2c3c..fa24793f 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -8,7 +8,6 @@
 import warnings
 
 import array_api_compat
-import numpy as np
 from astropy import nddata, stats
 from astropy import units as u
 from astropy.modeling import fitting
@@ -70,6 +69,20 @@
 def _is_array(arr):
+    """
+    Check whether an object is an array by trying to find a namespace
+    for it.
+
+    Parameters
+    ----------
+    arr : object
+        Object to be tested.
+
+    Returns
+    -------
+    is_array : bool
+        ``True`` if arr is an array, ``False`` otherwise.
+    """
     try:
         array_api_compat.array_namespace(arr)
     except TypeError:
         return False
     return True
 
 
+def _percentile_fallback(array, percentiles):
+    """
+    Try calculating percentile using namespace, otherwise fall back to
+    an implementation that uses sort.
+    As of the 2023 version of the array API
+    there is no percentile function in the API but there is a sort function.
+
+    Parameters
+    ----------
+    array : array-like
+        Array from which to calculate the percentile.
+
+    percentiles : float or list-like
+        Percentile to calculate, on the same 0-100 scale that
+        ``numpy.percentile`` uses.
+
+    Returns
+    -------
+    percentile : float or list-like
+        Calculated percentile.
+    """
+    xp = array_api_compat.array_namespace(array)
+    try:
+        return xp.percentile(array, percentiles)
+    except AttributeError:
+        pass
+
+    # Fall back to using sort. The percentiles are on a 0-100 scale, so
+    # convert them to fractional positions in the sorted array.
+    sorted_array = xp.sort(array)
+
+    indexes = xp.astype((len(sorted_array) - 1) * xp.asarray(percentiles) / 100, int)
+    return sorted_array[indexes]
+
+
 @log_to_metadata
 def ccd_process(
     ccd,
@@ -2036,18 +2081,40 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma):
                 c1 = j * ncsig
                 c2 = min((j + 1) * ncsig, ncols)
                 block = medsub[l1:l2, c1:c2]
-                high = np.percentile(block.ravel(), 69.1)
-                low = np.percentile(block.ravel(), 30.9)
+                # The array API has no percentile function, so we use a small
+                # function that first tries percentile in case a particular
+                # array package has it but otherwise falls back to a sort.
+                # This is the case at least as of the 2023.12 API.
+                high = _percentile_fallback(
+                    xp.reshape(block, (xp.prod(block.shape),)), 69.1
+                )
+                low = _percentile_fallback(
+                    xp.reshape(block, (xp.prod(block.shape),)), 30.9
+                )
                 block_sigma = (high - low) / 2.0
                 block_mask = _sigma_mask(block, block_sigma, lsigma, hsigma)
-                mblock = np.ma.MaskedArray(block, mask=block_mask, copy=False)
+                # mblock = np.ma.MaskedArray(block, mask=block_mask, copy=False)
                 if findbadcolumns:
-                    csum = np.ma.sum(mblock, axis=0)
+                    # Not clear yet what the right solution to masking is in the array
+                    # API, so we'll use a boolean index to get the elements we want
+                    # and sum them....unfortunately, we'll need to do this in a loop
+                    # as far as I can tell.
+                    csum = []
+                    all_masked = []
+                    for k in range(block.shape[1]):
+                        subset = block[:, k]
+                        csum.append(xp.sum(subset[~block_mask[:, k]]))
+                        all_masked.append(xp.all(block_mask[:, k]))
+                    csum = xp.array(csum)
                     csum[csum <= 0] = 0
-                    csum_sigma = np.ma.MaskedArray(np.sqrt(c2 - c1 - csum))
+                    csum_sigma = xp.array(xp.sqrt(c2 - c1 - csum))
+                    # The prior code filled the csum array with the value 1, which
+                    # only affects those cases where all of the input values to
+                    # the csum were masked, so we fill those with 1.
+ csum[all_masked] = 1 + colmask = _sigma_mask(csum, csum_sigma, lsigma, hsigma) + block_mask[:, :] |= colmask[xp.newaxis, :] mask[l1:l2, c1:c2] = block_mask else: @@ -2065,7 +2132,7 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): if mask[line, col]: for i in range(2, ngood + 2): lend = line + i - if mask[lend, col] and not np.all(mask[line : lend + 1, col]): + if mask[lend, col] and not xp.all(mask[line : lend + 1, col]): mask[line:lend, col] = True return mask From 7f5050dfed0d5d42f77b561790e3dc67cb8b1c49 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 4 Mar 2025 10:10:54 -0600 Subject: [PATCH 05/59] WIP rewrite of a boolean index --- ccdproc/core.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index fa24793f..a6ee524e 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -430,14 +430,15 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): # remove values that might be negative or treat as nan data = gain_value * ccd_data.data mask = data < 0 + if disregard_nan: - data[mask] = 0 + data = data * ~mask else: - data[mask] = xp.nan + # data[mask] = xp.nan logging.warning("Negative values in array will be replaced with nan") # calculate the deviation - var = (data + readnoise_value**2) ** 0.5 + var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5 # ensure uncertainty and image data have same unit ccd = ccd_data.copy() From 6a6bfa29c508d0744d65941167a77ea63a3714b1 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 4 Mar 2025 10:12:34 -0600 Subject: [PATCH 06/59] WIP testing updates --- ccdproc/tests/pytest_fixtures.py | 4 +- ccdproc/tests/test_ccdproc.py | 110 +++++++++++++++++-------------- 2 files changed, 64 insertions(+), 50 deletions(-) diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index e900e0f8..0c27a94a 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -2,6 +2,8 @@ from shutil import rmtree +# import dask.array as da +import jax.numpy as jnp import numpy as np import pytest from astropy import units as u @@ -59,7 +61,7 @@ def ccd_data( data = rng.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {"my_key": 42, "your_key": "not 42"} - ccd = CCDData(data, unit=u.adu) + ccd = CCDData(jnp.array(data), unit=u.adu) ccd.header = fake_meta return ccd diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 55a1ccbe..54ee069a 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1,8 +1,9 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst +# import array_api_compat import astropy import astropy.units as u -import numpy as np +import jax.numpy as np import pytest import skimage from astropy.io import fits @@ -11,6 +12,7 @@ from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS +from numpy import testing as np_testing from ccdproc.core import ( Keyword, @@ -36,7 +38,15 @@ except ImportError: HAS_BLOCK_X_FUNCS = False -_NUMPY_COPY_IF_NEEDED = False if np.__version__.startswith("1.") else None +_NUMPY_COPY_IF_NEEDED = None # False if np.__version__.startswith("1.") else None + + +# import dask.array as da +# import numpy +# data = numpy.arange(100_000).reshape(200, 500) +# a = da.from_array(data, chunks=(100, 100)) + +# np = array_api_compat.array_namespace(a) # Test creating deviation @@ -65,12 +75,12 @@ def test_create_deviation(u_image, u_gain, 
u_readnoise, expect_success): ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise) assert ccd_var.uncertainty.array.shape == (10, 10) assert ccd_var.uncertainty.array.size == 100 - assert ccd_var.uncertainty.array.dtype == np.dtype(float) + assert np.isdtype(ccd_var.uncertainty.array.dtype, "real floating") if gain is not None: expected_var = np.sqrt(2 * ccd_data.data + 5**2) / 2 else: expected_var = np.sqrt(ccd_data.data + 5**2) - np.testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) assert ccd_var.unit == ccd_data.unit # Uncertainty should *not* have any units -- does it? with pytest.raises(AttributeError): @@ -87,7 +97,7 @@ def test_create_deviation_from_negative(): ccd_var = create_deviation( ccd_data, gain=None, readnoise=readnoise, disregard_nan=False ) - np.testing.assert_array_equal( + np_testing.assert_array_equal( ccd_data.data < 0, np.isnan(ccd_var.uncertainty.array) ) @@ -102,7 +112,7 @@ def test_create_deviation_from_negative_2(): mask = ccd_data.data < 0 ccd_data.data[mask] = 0 expected_var = np.sqrt(ccd_data.data + readnoise.value**2) - np.testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) def test_create_deviation_keywords_must_have_unit(): @@ -164,7 +174,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_overscan.data[science_region].mean(), sky + original_mean ) # Is the overscan region zero? @@ -181,14 +191,14 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_fits_section.data[science_region].mean(), sky + original_mean ) # Is the overscan region zero? assert (ccd_data_fits_section.data[oscan_region] == 0).all() # Do both ways of subtracting overscan give exactly the same result? 
- np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data_overscan[science_region], ccd_data_fits_section[science_region] ) @@ -201,7 +211,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_overscan_auto.data[science_region].mean(), sky + original_mean ) # Use overscan_axis=None with a FITS section @@ -212,7 +222,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_fits_section_overscan_auto.data[science_region].mean(), sky + original_mean, ) @@ -237,7 +247,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) + np_testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) # A more substantial test of overscan modeling @@ -273,7 +283,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) # Set the overscan_axis explicitly to None, and let the routine # figure it out. ccd_data = subtract_overscan( @@ -283,7 +293,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) def test_subtract_overscan_fails(): @@ -326,7 +336,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): trimmed = trim_image(ccd_data, fits_section="[20:40,:]") # FITS reverse order, bounds are inclusive and starting index is 1-based assert trimmed.shape == (50, 21) - np.testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) if mask_data: assert trimmed.shape == trimmed.mask.shape if uncertainty: @@ -337,7 +347,7 @@ def test_trim_image_no_section(): ccd_data = ccd_data_func(data_size=50) trimmed = trim_image(ccd_data[:, 19:40]) assert trimmed.shape == (50, 21) - np.testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) def test_trim_with_wcs_alters_wcs(): @@ -367,7 +377,7 @@ def test_subtract_bias(): master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? - np.testing.assert_almost_equal(no_bias.data.mean(), data_avg) + np_testing.assert_almost_equal(no_bias.data.mean(), data_avg) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header del no_bias.header["key"] @@ -434,7 +444,9 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np.testing.assert_allclose(ccd_data.data - dark_scale * dark_level, dark_sub.data) + np_testing.assert_allclose( + ccd_data.data - dark_scale * dark_level, dark_sub.data + ) # Headers should have the same content...do they? 
assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -535,10 +547,10 @@ def test_flat_correct(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() ) - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data.data / flat_data.data, flat.data / flat.data.mean() ) @@ -563,11 +575,11 @@ def test_flat_correct_min_value(): # Check that the flat was normalized. The asserts below, which look a # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( (flat_corrected_data.data * flat_with_min.data).mean(), (ccd_data.data * flat_with_min.data.mean()).mean(), ) - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data.data / flat_corrected_data.data, flat_with_min.data / flat_with_min.data.mean(), ) @@ -593,10 +605,10 @@ def test_flat_correct_norm_value(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean ) - np.testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) + np_testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) def test_flat_correct_norm_value_bad_value(): @@ -641,7 +653,7 @@ def test_gain_correct(): ccd_data = ccd_data_func() init_data = ccd_data.data gain_data = gain_correct(ccd_data, gain=3, add_keyword=None) - np.testing.assert_allclose(gain_data.data, 3 * init_data) + np_testing.assert_allclose(gain_data.data, 3 * init_data) assert ccd_data.meta == gain_data.meta @@ -651,7 +663,7 @@ def test_gain_correct_quantity(): g = Quantity(3, u.electron / u.adu) ccd_data = gain_correct(ccd_data, gain=g) - np.testing.assert_allclose(ccd_data.data, 3 * init_data) + np_testing.assert_allclose(ccd_data.data, 3 * init_data) assert ccd_data.unit == u.electron @@ -699,13 +711,13 @@ def tran(arr): tran = transform_image(ccd_data, tran) - np.testing.assert_allclose(10 * ccd_data.data, tran.data) + np_testing.assert_allclose(10 * ccd_data.data, tran.data) if mask_data: assert tran.shape == tran.mask.shape - np.testing.assert_array_equal(ccd_data.mask, tran.mask) + np_testing.assert_allclose(ccd_data.mask, tran.mask) if uncertainty: assert tran.shape == tran.uncertainty.array.shape - np.testing.assert_allclose( + np_testing.assert_allclose( 10 * ccd_data.uncertainty.array, tran.uncertainty.array ) @@ -814,7 +826,7 @@ def test__overscan_schange(): old_data = ccd_data.copy() new_data = subtract_overscan(ccd_data, overscan=ccd_data[:, 1], overscan_axis=0) assert not np.allclose(old_data.data, new_data.data) - np.testing.assert_allclose(old_data.data, ccd_data.data) + np_testing.assert_allclose(old_data.data, ccd_data.data) def test_create_deviation_does_not_change_input(): @@ -823,7 +835,7 @@ def test_create_deviation_does_not_change_input(): _ = create_deviation( ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron ) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ 
-835,7 +847,7 @@ def test_cosmicray_median_does_not_change_input(): _ = cosmicray_median( ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0 ) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -843,7 +855,7 @@ def test_cosmicray_lacosmic_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = cosmicray_lacosmic(ccd_data) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -853,7 +865,7 @@ def test_flat_correct_does_not_change_input(): flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) with np.errstate(invalid="ignore"): _ = flat_correct(ccd_data, flat=flat) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -861,7 +873,7 @@ def test_gain_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = gain_correct(ccd_data, gain=1, gain_unit=ccd_data.unit) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -870,7 +882,7 @@ def test_subtract_bias_does_not_change_input(): original = ccd_data.copy() master_frame = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) _ = subtract_bias(ccd_data, master=master_frame) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -878,7 +890,7 @@ def test_trim_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = trim_image(ccd_data, fits_section=None) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -887,7 +899,7 @@ def test_transform_image_does_not_change_input(): original = ccd_data.copy() with np.errstate(invalid="ignore"): _ = transform_image(ccd_data, np.sqrt) - np.testing.assert_allclose(original.data, ccd_data) + np_testing.assert_allclose(original.data, ccd_data) assert original.unit == ccd_data.unit @@ -922,7 +934,7 @@ def test_wcs_project_onto_same_wcs(): assert new_ccd.wcs.wcs.compare(target_wcs.wcs) # Make sure data matches within some reasonable tolerance. - np.testing.assert_allclose(ccd_data.data, new_ccd.data, rtol=1e-5) + np_testing.assert_allclose(ccd_data.data, new_ccd.data, rtol=1e-5) def test_wcs_project_onto_same_wcs_remove_headers(): @@ -958,10 +970,10 @@ def test_wcs_project_onto_shifted_wcs(): # that the pixels should all be shifted. masked_input = np.ma.array(ccd_data.data, mask=ccd_data.mask) masked_output = np.ma.array(new_ccd.data, mask=new_ccd.mask) - np.testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) + np_testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) # The masks should all be shifted too. 
- np.testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) + np_testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) # We should have more values that are masked in the output array # than on input because some on output were not in the footprint @@ -1017,7 +1029,7 @@ def test_wcs_project_onto_scale_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels have been scaled. - np.testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) + np_testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. @@ -1038,7 +1050,7 @@ def test_ccd_process_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = ccd_process(ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -1111,9 +1123,9 @@ def test_ccd_process(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np.testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array) - np.testing.assert_array_equal(mask, occd.mask) + np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 assert occd.meta["testkw"] == 100 @@ -1157,9 +1169,9 @@ def test_ccd_process_gain_corrected(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np.testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array) - np.testing.assert_array_equal(mask, occd.mask) + np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 assert occd.meta["testkw"] == 100 From 1d2d171b9a68f419a2e28de8f3539ced2001b0c5 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 09:46:36 -0600 Subject: [PATCH 07/59] Change more almost_equal to allclose --- ccdproc/tests/test_ccdproc.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 54ee069a..76b345ed 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -174,8 +174,8 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np_testing.assert_almost_equal( - ccd_data_overscan.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_overscan.data[science_region].mean(), sky + original_mean, rtol=1e-6 ) # Is the overscan region zero? 
assert (ccd_data_overscan.data[oscan_region] == 0).all() @@ -191,8 +191,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np_testing.assert_almost_equal( - ccd_data_fits_section.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_fits_section.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, ) # Is the overscan region zero? assert (ccd_data_fits_section.data[oscan_region] == 0).all() @@ -211,8 +213,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np_testing.assert_almost_equal( - ccd_data_overscan_auto.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_overscan_auto.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, ) # Use overscan_axis=None with a FITS section ccd_data_fits_section_overscan_auto = subtract_overscan( @@ -222,9 +226,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np_testing.assert_almost_equal( + np_testing.assert_allclose( ccd_data_fits_section_overscan_auto.data[science_region].mean(), sky + original_mean, + rtol=1e-6, ) # Overscan_axis should be 1 for a square overscan region # This test only works for a non-square data region, but the @@ -283,7 +288,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) # Set the overscan_axis explicitly to None, and let the routine # figure it out. ccd_data = subtract_overscan( @@ -293,7 +298,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) def test_subtract_overscan_fails(): @@ -377,7 +382,7 @@ def test_subtract_bias(): master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? - np_testing.assert_almost_equal(no_bias.data.mean(), data_avg) + np_testing.assert_allclose(no_bias.data.mean(), data_avg) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header del no_bias.header["key"] @@ -444,9 +449,7 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np_testing.assert_allclose( - ccd_data.data - dark_scale * dark_level, dark_sub.data - ) + np_testing.assert_allclose(ccd_data.data - dark_scale * dark_level, dark_sub.data) # Headers should have the same content...do they? assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -547,7 +550,7 @@ def test_flat_correct(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. 
- np_testing.assert_almost_equal( + np_testing.assert_allclose( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() ) np_testing.assert_allclose( @@ -575,7 +578,7 @@ def test_flat_correct_min_value(): # Check that the flat was normalized. The asserts below, which look a # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) - np_testing.assert_almost_equal( + np_testing.assert_allclose( (flat_corrected_data.data * flat_with_min.data).mean(), (ccd_data.data * flat_with_min.data.mean()).mean(), ) @@ -605,7 +608,7 @@ def test_flat_correct_norm_value(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. - np_testing.assert_almost_equal( + np_testing.assert_allclose( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean ) np_testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) From 22bc04d95156a2afb10779753aa4d13c362a7420 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 10:19:14 -0600 Subject: [PATCH 08/59] Use the random number generator from numpy --- ccdproc/tests/test_ccdproc.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 76b345ed..2dd82365 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -12,6 +12,7 @@ from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS +from numpy import random as np_random from numpy import testing as np_testing from ccdproc.core import ( @@ -40,6 +41,7 @@ _NUMPY_COPY_IF_NEEDED = None # False if np.__version__.startswith("1.") else None +RNG = np_random.default_rng # import dask.array as da # import numpy @@ -335,7 +337,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): if mask_data: ccd_data.mask = np.zeros_like(ccd_data) if uncertainty: - err = np.random.default_rng().normal(size=ccd_data.shape) + err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) trimmed = trim_image(ccd_data, fits_section="[20:40,:]") @@ -543,7 +545,7 @@ def test_flat_correct(): ccd_data.header["my_key"] = 42 size = ccd_data.shape[0] # create the flat, with some scatter - data = 2 * np.random.default_rng().normal(loc=1.0, scale=0.05, size=(size, size)) + data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None) @@ -567,7 +569,7 @@ def test_flat_correct_min_value(): size = ccd_data.shape[0] # Create the flat - data = 2 * np.random.default_rng().normal(loc=1.0, scale=0.05, size=(size, size)) + data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat_orig_data = flat.data.copy() min_value = 2.1 # Should replace some, but not all, values @@ -601,7 +603,7 @@ def test_flat_correct_norm_value(): # Note that mean value of flat is set below and is different than # the mean of the flat data. 
flat_mean = 5.0 - data = np.random.default_rng().normal(loc=1.0, scale=0.05, size=ccd_data.shape) + data = RNG().normal(loc=1.0, scale=0.05, size=ccd_data.shape) flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None, norm_value=flat_mean) @@ -620,7 +622,7 @@ def test_flat_correct_norm_value_bad_value(): # it is given a bad norm_value. Bad means <=0. # Create the flat, with some scatter - data = np.random.default_rng().normal(loc=1.0, scale=0.05, size=ccd_data.shape) + data = RNG().normal(loc=1.0, scale=0.05, size=ccd_data.shape) flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) with pytest.raises(ValueError) as e: flat_correct(ccd_data, flat, add_keyword=None, norm_value=-7) @@ -706,7 +708,7 @@ def test_transform_image(mask_data, uncertainty): ccd_data.mask = np.zeros_like(ccd_data) ccd_data.mask[10, 10] = 1 if uncertainty: - err = np.random.default_rng().normal(size=ccd_data.shape) + err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) def tran(arr): @@ -962,7 +964,7 @@ def test_wcs_project_onto_shifted_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) target_wcs.wcs.crpix += [1, 1] - ccd_data.mask = np.random.default_rng().choice([0, 1], size=ccd_data.shape) + ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape) new_ccd = wcs_project(ccd_data, target_wcs) From 245d641109c9d99417e09fbe4fff14ca1941f427 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 10:20:33 -0600 Subject: [PATCH 09/59] Remove more in-place array operations --- ccdproc/tests/test_ccdproc.py | 38 +++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 2dd82365..ae5c11cb 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -112,7 +112,10 @@ def test_create_deviation_from_negative_2(): ccd_data, gain=None, readnoise=readnoise, disregard_nan=True ) mask = ccd_data.data < 0 - ccd_data.data[mask] = 0 + # Set the variance to zero where the data is negative + # In-place replacement of values does not work in some array + # libraries. + ccd_data.data = ccd_data.data * ~mask expected_var = np.sqrt(ccd_data.data + readnoise.value**2) np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) @@ -160,12 +163,19 @@ def test_subtract_overscan(median, transpose, data_rectangle): science_region = science_region[::-1] overscan_axis = 0 - ccd_data.data[oscan_region] = oscan + # Since some array libraries do not support in-place operations, we + # work on the science and overscan regions separately. + science_data = ccd_data.data[science_region].copy() + overscan_data = 0 * ccd_data.data[oscan_region].copy() + oscan + # Add a fake sky background so the "science" part of the image has a # different average than the "overscan" part. 
sky = 10.0 - original_mean = ccd_data.data[science_region].mean() - ccd_data.data[science_region] += oscan + sky + original_mean = science_data.mean() + science_data = science_data + oscan + sky + + # Reconstruct the full image + ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) # Test once using the overscan argument to specify the overscan region ccd_data_overscan = subtract_overscan( ccd_data, @@ -280,8 +290,12 @@ def test_subtract_overscan_model(transpose): original_mean = ccd_data.data[science_region].mean() - ccd_data.data[oscan_region] = 0.0 # Only want overscan in that region - ccd_data.data = ccd_data.data + scan + science_data = ccd_data.data[science_region].copy() + # Set any existing overscan to zero. Overscan is stored for the entire + # image, so we need to do this before we add the new overscan. + overscan_data = 0 * ccd_data.data[oscan_region].copy() + # Reconstruct the full image + ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) + scan ccd_data = subtract_overscan( ccd_data, @@ -766,14 +780,22 @@ def test_block_reduce(): reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_average(): + data = np.array( + [ + [2.0, 1.0, 2.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + [2.0, 1.0, 2.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + ] + ) ccd = CCDData( - np.ones((4, 4)), + data, unit="adu", meta={"testkw": 1}, mask=np.zeros((4, 4), dtype=bool), uncertainty=StdDevUncertainty(np.ones((4, 4))), ) - ccd.data[::2, ::2] = 2 + with pytest.warns(AstropyUserWarning) as w: ccd_avgd = block_average(ccd, (2, 2)) assert len(w) == 1 From 55b90bec2499e5b68a43508587e0b8fc15fcbd96 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 10:21:06 -0600 Subject: [PATCH 10/59] Set a reasonable tolerance value in float comparisons --- ccdproc/tests/test_ccdproc.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index ae5c11cb..e8210a3e 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -304,7 +304,9 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose( + ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + ) # Set the overscan_axis explicitly to None, and let the routine # figure it out. ccd_data = subtract_overscan( @@ -314,7 +316,9 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose( + ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + ) def test_subtract_overscan_fails(): @@ -465,7 +469,9 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np_testing.assert_allclose(ccd_data.data - dark_scale * dark_level, dark_sub.data) + np_testing.assert_allclose( + ccd_data.data - dark_scale * dark_level, dark_sub.data, rtol=1e-6 + ) # Headers should have the same content...do they? assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -567,10 +573,14 @@ def test_flat_correct(): # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. 
np_testing.assert_allclose( - (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat.data.mean(), + rtol=1e-6, ) np_testing.assert_allclose( - ccd_data.data / flat_data.data, flat.data / flat.data.mean() + ccd_data.data / flat_data.data, + flat.data / flat.data.mean(), + rtol=1e-6, ) # Check that metadata is unchanged (since logging is turned off) @@ -597,10 +607,12 @@ def test_flat_correct_min_value(): np_testing.assert_allclose( (flat_corrected_data.data * flat_with_min.data).mean(), (ccd_data.data * flat_with_min.data.mean()).mean(), + rtol=1e-6, ) np_testing.assert_allclose( ccd_data.data / flat_corrected_data.data, flat_with_min.data / flat_with_min.data.mean(), + rtol=1e-6, ) # Test that flat is not modified. @@ -625,9 +637,13 @@ def test_flat_correct_norm_value(): # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. np_testing.assert_allclose( - (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat_mean, + rtol=1e-6, + ) + np_testing.assert_allclose( + ccd_data.data / flat_data.data, flat.data / flat_mean, rtol=1e-6 ) - np_testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) def test_flat_correct_norm_value_bad_value(): From 23de261afb08209ded492ec8c26aae21b9b44597 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 10:58:12 -0600 Subject: [PATCH 11/59] Continue to use numpy arrays in a few places These are necessary because upstream dependencies (astropy) explicitly check for numpy arrays rather than using the array API. --- ccdproc/tests/test_ccdproc.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index e8210a3e..4f4ca5bd 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -12,6 +12,7 @@ from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS +from numpy import array as np_array from numpy import random as np_random from numpy import testing as np_testing @@ -675,7 +676,11 @@ def test_flat_correct_deviation(): # Test the uncertainty on the data after flat correction def test_flat_correct_data_uncertainty(): # Regression test for #345 - dat = CCDData(np.ones([100, 100]), unit="adu", uncertainty=np.ones([100, 100])) + # Temporarily work around the fact that NDUncertainty explicitly checks + # whether the value is a numpy array. + dat = CCDData( + np.ones([100, 100]), unit="adu", uncertainty=np_array(np.ones([100, 100])) + ) # Note flat is set to 10, error, if present, is set to one. flat = CCDData(10 * np.ones([100, 100]), unit="adu") res = flat_correct(dat, flat) @@ -971,6 +976,8 @@ def test_wcs_project_onto_same_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) ccd_data.wcs = wcs_for_testing(ccd_data.shape) + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) # Make sure new image has correct WCS. 
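
An illustrative aside (not part of the patch series): the "ugly hack" above is
the boundary-conversion pattern this commit applies by hand at each call site.
A minimal sketch with a hypothetical helper name, assuming numpy plus an
array-API array that supports __array__ (jax arrays do; arrays living on an
accelerator device may need an explicit host transfer first):

    import numpy as np

    def ensure_numpy(arr):
        # astropy internals (e.g. NDUncertainty and astropy.wcs) check for
        # numpy ndarrays explicitly rather than using the array API, so
        # convert only at those boundaries and keep the array-API array
        # everywhere else in the pipeline.
        return np.asarray(arr)
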
@@ -987,6 +994,8 @@ def test_wcs_project_onto_same_wcs_remove_headers(): ccd_data.wcs = wcs_for_testing(ccd_data.shape) ccd_data.header = ccd_data.wcs.to_header() + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) for k in ccd_data.wcs.to_header(): @@ -1004,6 +1013,8 @@ def test_wcs_project_onto_shifted_wcs(): ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape) + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) # Make sure new image has correct WCS. @@ -1054,6 +1065,8 @@ def test_wcs_project_onto_scale_wcs(): target_shape = 2 * np.array(ccd_data.shape) + 1 target_wcs.wcs.crpix = 2 * target_wcs.wcs.crpix + 1 + 0.5 + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) # Explicitly set the interpolation method so we know what to # expect for the mass. new_ccd = wcs_project( From dffadce3011c34fd39972da882c18c724ff2f79e Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 10:58:59 -0600 Subject: [PATCH 12/59] Refactor warning handling for numerical warnings --- ccdproc/tests/test_ccdproc.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 4f4ca5bd..da13e574 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1,5 +1,7 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst +import warnings + # import array_api_compat import astropy import astropy.units as u @@ -891,10 +893,7 @@ def test_cosmicray_median_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() error = np.zeros_like(ccd_data) - with np.errstate(invalid="ignore", divide="ignore"): - _ = cosmicray_median( - ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0 - ) + _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -911,7 +910,8 @@ def test_flat_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) - with np.errstate(invalid="ignore"): + # Ignore the divide by zero warning that is raised when the flat is zero. 
+ with warnings.catch_warnings(action="ignore", category=RuntimeWarning): _ = flat_correct(ccd_data, flat=flat) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -945,8 +945,7 @@ def test_trim_image_does_not_change_input(): def test_transform_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - with np.errstate(invalid="ignore"): - _ = transform_image(ccd_data, np.sqrt) + _ = transform_image(ccd_data, np.sqrt) np_testing.assert_allclose(original.data, ccd_data) assert original.unit == ccd_data.unit From 12ff8b06804716e4a684d73737e50f323b342df0 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 11:00:03 -0600 Subject: [PATCH 13/59] Avoid explicit use of numpy masked arrays --- ccdproc/tests/test_ccdproc.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index da13e574..0de65df9 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1021,9 +1021,7 @@ def test_wcs_project_onto_shifted_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels should all be shifted. - masked_input = np.ma.array(ccd_data.data, mask=ccd_data.mask) - masked_output = np.ma.array(new_ccd.data, mask=new_ccd.mask) - np_testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) + np_testing.assert_allclose(ccd_data.data[:-1, :-1], new_ccd[1:, 1:], rtol=1e-5) # The masks should all be shifted too. np_testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) @@ -1035,7 +1033,7 @@ def test_wcs_project_onto_shifted_wcs(): # In the case of a shift, one row and one column should be nan, and they # will share one common nan where they intersect, so we know how many nan # there should be. - assert np.isnan(new_ccd.data).sum() == np.sum(new_ccd.shape) - 1 + assert np.sum(np.isnan(new_ccd.data)) == np.sum(np.array(new_ccd.shape)) - 1 # Use an odd number of pixels to make a well-defined center pixel From 63f81ba5550730266f7798832e7edce7088b55f2 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 11:00:22 -0600 Subject: [PATCH 14/59] Rewrite test to not modify array in-place --- ccdproc/tests/test_ccdproc.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 0de65df9..ab4ebc60 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1142,7 +1142,9 @@ def test_ccd_process_parameters_are_appropriate(): def test_ccd_process(): # Test the through ccd_process ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) - ccd_data.data[:, -10:] = 2 + # Rewrite to not change data in-place. + ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) + ccd_data.meta["testkw"] = 100 mask = np.zeros((100, 90)) @@ -1187,7 +1189,9 @@ def test_ccd_process(): def test_ccd_process_gain_corrected(): # Test the through ccd_process with gain_corrected as False ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) - ccd_data.data[:, -10:] = 2 + + # Rewrite to not change data in-place. 
+ ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) ccd_data.meta["testkw"] = 100 mask = np.zeros((100, 90)) From 4ae3fc63f61ddead151ba6cfe2e509222b9fb2f9 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 7 Mar 2025 11:49:51 -0600 Subject: [PATCH 15/59] Use assert_allclose instead of older alternatives --- ccdproc/tests/test_combiner.py | 36 +++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index b5936f4f..1cde6535 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -428,7 +428,7 @@ def test_combine_average_fitsimages(): fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combine_numpyndarray(): @@ -446,7 +446,7 @@ def test_combine_numpyndarray(): fitsfilename_list = np.array([fitsfile] * 3) avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combiner_result_dtype(): @@ -459,13 +459,13 @@ def test_combiner_result_dtype(): # The default dtype of Combiner is float64 assert res.data.dtype == np.float64 ref = np.ones((3, 3)) * 1.5 - np.testing.assert_array_almost_equal(res.data, ref) + np.testing.assert_allclose(res.data, ref) res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int) # The result dtype should be integer: assert res.data.dtype == np.int_ ref = np.ones((3, 3)) * 2 - np.testing.assert_array_almost_equal(res.data, ref) + np.testing.assert_allclose(res.data, ref) def test_combiner_image_file_collection_input(tmp_path): @@ -476,7 +476,7 @@ def test_combiner_image_file_collection_input(tmp_path): ifc = ImageFileCollection(tmp_path) comb = Combiner(ifc.ccds()) - np.testing.assert_array_almost_equal(ccd.data, comb.average_combine().data) + np.testing.assert_allclose(ccd.data, comb.average_combine().data) def test_combine_image_file_collection_input(tmp_path): @@ -492,8 +492,8 @@ def test_combine_image_file_collection_input(tmp_path): comb_ccds = combine(ifc.ccds(), method="average") - np.testing.assert_array_almost_equal(ccd.data, comb_files.data) - np.testing.assert_array_almost_equal(ccd.data, comb_ccds.data) + np.testing.assert_allclose(ccd.data, comb_files.data) + np.testing.assert_allclose(ccd.data, comb_ccds.data) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the @@ -511,7 +511,7 @@ def test_combine_average_ccddata(): avgccd = combine(ccd_list, output_file=None, method="average", unit=u.adu) # averaging same ccdData should give back same images - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -528,7 +528,7 @@ def test_combine_limitedmem_fitsimages(): fitsfilename_list, output_file=None, method="average", mem_limit=1e6, unit=u.adu ) # averaging same ccdData should give back same images - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + 
np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -553,7 +553,7 @@ def test_combine_limitedmem_scale_fitsimages(): unit=u.adu, ) - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data, decimal=4) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test the optional uncertainty function in average_combine @@ -593,7 +593,7 @@ def test_sum_combine_uncertainty(): c = Combiner(ccd_list) ccd = c.sum_combine(uncertainty_func=np.sum) uncert_ref = np.sum(c.data_arr, 0) * np.sqrt(3) - np.testing.assert_almost_equal(ccd.uncertainty.array, uncert_ref) + np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=np.sum) @@ -639,7 +639,7 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): ccd_list, method=combine_method_name, minmax_clip=True, minmax_clip_min=-100 ) - np.testing.assert_array_almost_equal( + np.testing.assert_allclose( ccd_comb.uncertainty.array, expected_result.uncertainty.array ) @@ -690,7 +690,7 @@ def test_combiner_uncertainty_average(): ref_uncertainty = np.ones((10, 10)) / 2 # Correction because we combined two images. ref_uncertainty /= np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -710,7 +710,7 @@ def test_combiner_uncertainty_average_mask(): # Correction because we combined two images. ref_uncertainty /= np.sqrt(3) ref_uncertainty[5, 5] = np.std([2, 3]) / np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -731,7 +731,7 @@ def test_combiner_uncertainty_median_mask(): # Correction because we combined two images. 
ref_uncertainty /= np.sqrt(3) # 0.855980789955 ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2) # 0.524179041254 - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -750,7 +750,7 @@ def test_combiner_uncertainty_sum_mask(): ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3]) ref_uncertainty *= np.sqrt(3) ref_uncertainty[5, 5] = np.std([2, 3]) * np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) def test_combiner_3d(): @@ -762,11 +762,11 @@ def test_combiner_3d(): c = Combiner(ccd_list) assert c.data_arr.shape == (3, 5, 5, 5) - assert c.data_arr.mask.shape == (3, 5, 5, 5) + assert c.data_arr_mask.shape == (3, 5, 5, 5) ccd = c.average_combine() assert ccd.shape == (5, 5, 5) - np.testing.assert_array_almost_equal(ccd.data, data1, decimal=4) + np.testing.assert_allclose(ccd.data, data1) def test_3d_combiner_with_scaling(): From 7b2ed18a9218ba6224d0e07bca37384d0ec46ff7 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 7 Mar 2025 11:50:56 -0600 Subject: [PATCH 16/59] Initial attempt to adopt array API in combiner --- ccdproc/combiner.py | 240 +++++++++++++++++++++------------ ccdproc/tests/test_combiner.py | 36 ++--- 2 files changed, 173 insertions(+), 103 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 615f4cc5..deea6a32 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -3,7 +3,8 @@ """This module implements the combiner class.""" import numpy as np -from numpy import ma + +# from numpy import ma try: import bottleneck as bn @@ -12,6 +13,7 @@ else: HAS_BOTTLENECK = True +import array_api_compat from astropy import log from astropy.nddata import CCDData, StdDevUncertainty from astropy.stats import sigma_clip @@ -105,9 +107,6 @@ def __init__(self, ccd_iter, dtype=None): "ccd_iter should be a list or a generator of CCDData objects." ) - if dtype is None: - dtype = np.float64 - default_shape = None default_unit = None @@ -132,22 +131,28 @@ def __init__(self, ccd_iter, dtype=None): if not (default_unit == ccd.unit): raise TypeError("CCDData objects don't have the same unit.") + # Set array namespace + xp = array_api_compat.array_namespace(ccd_list[0].data) + if dtype is None: + dtype = xp.float64 self.ccd_list = ccd_list self.unit = default_unit self.weights = None self._dtype = dtype # set up the data array - new_shape = (len(ccd_list),) + default_shape - self.data_arr = ma.masked_all(new_shape, dtype=dtype) + # new_shape = (len(ccd_list),) + default_shape + self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) # populate self.data_arr - for i, ccd in enumerate(ccd_list): - self.data_arr[i] = ccd.data + mask_list = [] + for ccd in ccd_list: if ccd.mask is not None: - self.data_arr.mask[i] = ccd.mask + mask_list.append(ccd.mask) else: - self.data_arr.mask[i] = ma.zeros(default_shape) + mask_list.append(xp.zeros(default_shape)) + + self.data_arr_mask = xp.array(mask_list, dtype=bool) # Must be after self.data_arr is defined because it checks the # length of the data array. 
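The hunk above replaces the numpy masked array with a plain data stack plus a separate boolean mask array. A minimal sketch of the resulting layout, assuming a numpy backend; the 5x5 frames and variable names below are illustrative, not part of the patch:

import array_api_compat
import numpy as np

# Stand-ins for the .data and .mask attributes of a list of CCDData objects.
images = [np.ones((5, 5)), 3 * np.ones((5, 5))]
masks = [np.zeros((5, 5), dtype=bool), np.zeros((5, 5), dtype=bool)]

# array_namespace returns the array API namespace the inputs belong to, so
# the same construction works unchanged for other array libraries.
xp = array_api_compat.array_namespace(images[0])

data_arr = xp.asarray(images, dtype=xp.float64)  # stack, shape (2, 5, 5)
data_arr_mask = xp.asarray(masks, dtype=bool)    # mask, shape (2, 5, 5)

# Rejection and combination then work by NaN substitution instead of numpy.ma:
nan_filled = xp.where(data_arr_mask, xp.nan, data_arr)
combined = np.nanmedian(nan_filled, axis=0)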
@@ -173,20 +178,23 @@ def weights(self): @weights.setter def weights(self, value): if value is not None: - if isinstance(value, np.ndarray): - if value.shape != self.data_arr.data.shape: - if value.ndim != 1: - raise ValueError( - "1D weights expected when shapes of the " - "data and weights differ." - ) - if value.shape[0] != self.data_arr.data.shape[0]: - raise ValueError( - "Length of weights not compatible with specified axis." - ) - self._weights = value - else: - raise TypeError("weights must be a numpy.ndarray.") + try: + _ = array_api_compat.array_namespace(value) + except TypeError as err: + raise TypeError("weights must be an array.") from err + + if value.shape != self.data_arr.shape: + if value.ndim != 1: + raise ValueError( + "1D weights expected when shapes of the " + "data and weights differ." + ) + if value.shape[0] != self.data_arr.shape[0]: + raise ValueError( + "Length of weights not compatible with specified axis." + ) + self._weights = value + else: self._weights = None @@ -207,13 +215,14 @@ def scaling(self): @scaling.setter def scaling(self, value): + xp = array_api_compat.array_namespace(self.data_arr) if value is None: self._scaling = value else: - n_images = self.data_arr.data.shape[0] + n_images = self.data_arr.shape[0] if callable(value): self._scaling = [value(self.data_arr[i]) for i in range(n_images)] - self._scaling = np.array(self._scaling) + self._scaling = xp.array(self._scaling) else: try: len(value) @@ -227,10 +236,10 @@ def scaling(self, value): "scaling must be a function or an array " "the same length as the number of images." ) - self._scaling = np.array(value) + self._scaling = xp.array(value) # reshape so that broadcasting occurs properly - for _ in range(len(self.data_arr.data.shape) - 1): - self._scaling = self.scaling[:, np.newaxis] + for _ in range(len(self.data_arr.shape) - 1): + self._scaling = self.scaling[:, xp.newaxis] # set up IRAF-like minmax clipping def clip_extrema(self, nlow=0, nhigh=0): @@ -275,20 +284,20 @@ def clip_extrema(self, nlow=0, nhigh=0): .. [0] image.imcombine help text. 
http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?imcombine
 """
-
+ xp = array_api_compat.array_namespace(self.data_arr)
 if nlow is None:
 nlow = 0
 if nhigh is None:
 nhigh = 0

- argsorted = np.argsort(self.data_arr.data, axis=0)
- mg = np.mgrid[
+ argsorted = xp.argsort(self.data_arr, axis=0)
+ mg = xp.mgrid[
 [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0]
 ]
 for i in range(-1 * nhigh, nlow):
 # create a tuple with the indices
 where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg])
- self.data_arr.mask[where] = True
+ self.data_arr_mask[where] = True

 # set up min/max clipping algorithms
 def minmax_clipping(self, min_clip=None, max_clip=None):
@@ -306,10 +315,12 @@ def minmax_clipping(self, min_clip=None, max_clip=None):
 """
 if min_clip is not None:
 mask = self.data_arr < min_clip
- self.data_arr.mask[mask] = True
+ # Written to avoid in-place modification of array
+ self.data_arr_mask = self.data_arr_mask | mask
 if max_clip is not None:
 mask = self.data_arr > max_clip
- self.data_arr.mask[mask] = True
+ # Written to avoid in-place modification of array
+ self.data_arr_mask = self.data_arr_mask | mask

 # set up sigma clipping algorithms
 @deprecated_renamed_argument(
@@ -368,18 +379,21 @@ def sigma_clipping(
 # Remove in 3.0
 _ = kwd.pop("use_astropy", True)

- self.data_arr.mask |= sigma_clip(
- self.data_arr.data,
- sigma_lower=low_thresh,
- sigma_upper=high_thresh,
- axis=kwd.get("axis", 0),
- copy=kwd.get("copy", False),
- maxiters=kwd.get("maxiters", 1),
- cenfunc=func,
- stdfunc=dev_func,
- masked=True,
- **kwd,
- ).mask
+ self.data_arr_mask = (
+ self.data_arr_mask
+ | sigma_clip(
+ self.data_arr,
+ sigma_lower=low_thresh,
+ sigma_upper=high_thresh,
+ axis=kwd.get("axis", 0),
+ copy=kwd.get("copy", False),
+ maxiters=kwd.get("maxiters", 1),
+ cenfunc=func,
+ stdfunc=dev_func,
+ masked=True,
+ **kwd,
+ ).mask
+ )

 def _get_scaled_data(self, scale_arg):
 if scale_arg is not None:
@@ -389,11 +403,17 @@ def _get_scaled_data(self, scale_arg):
 return self.data_arr

 def _get_nan_substituted_data(self, data):
+ xp = array_api_compat.array_namespace(self.data_arr)
+
 # Get the data as an unmasked array with masked values filled as NaN
- if self.data_arr.mask.any():
- data = np.ma.filled(data, fill_value=np.nan)
+ if self.data_arr_mask.any():
+ # Workaround for array libraries that do not allow in-place modification
+ try:
+ data[self.data_arr_mask] = xp.nan
+ except TypeError:
+ data = data.at[self.data_arr_mask].set(xp.nan)
 else:
- data = data.data
+ data = data
 return data

 def _combination_setup(self, user_func, default_func, scale_to):
 """
 Handle the common pieces of image combination data/mask setup.
 """
 data = self._get_scaled_data(scale_to)
-
+ xp = array_api_compat.array_namespace(data)
 # Play it safe for now and only do the nan thing if the user is using
 # the default combination function.
 if user_func is None:
 combo_func = default_func
 # Substitute NaN for masked entries
 data = self._get_nan_substituted_data(data)
- masked_values = np.isnan(data).sum(axis=0)
+ masked_values = xp.isnan(data).sum(axis=0)
 else:
- masked_values = self.data_arr.mask.sum(axis=0)
+ masked_values = self.data_arr_mask.sum(axis=0)
 combo_func = user_func

 return data, masked_values, combo_func
@@ -458,7 +478,7 @@ def median_combine(
 data, masked_values, median_func = self._combination_setup(
 median_func, _default_median(), scale_to
 )
-
+ xp = array_api_compat.array_namespace(data)
 medianed = median_func(data, axis=0)

 # set the mask
@@ -476,17 +496,17 @@
 else:
 uncertainty = uncertainty_func(data, axis=0)
 # Divide uncertainty by the number of pixel (#309)
- uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
+ uncertainty /= xp.sqrt(len(self.data_arr) - masked_values)
 # Convert uncertainty to plain numpy array (#351)
 # There is no need to care about potential masks because the
 # uncertainty was calculated based on the data so potential masked
 # elements are also masked in the data. No need to keep two identical
 # masks.
- uncertainty = np.asarray(uncertainty)
+ uncertainty = xp.asarray(uncertainty)

 # create the combined image with a dtype matching the combiner
 combined_image = CCDData(
- np.asarray(medianed, dtype=self.dtype),
+ xp.asarray(medianed, dtype=self.dtype),
 mask=mask,
 unit=self.unit,
 uncertainty=StdDevUncertainty(uncertainty),
@@ -503,9 +523,10 @@ def _weighted_sum(self, data, sum_func):
 Perform weighted sum, used by both ``sum_combine`` and in some cases
 by ``average_combine``.
 """
+ xp = array_api_compat.array_namespace(data)
 if self.weights.shape != data.shape:
 # Add extra axes to the weights for broadcasting
- weights = np.reshape(self.weights, [len(self.weights), 1, 1])
+ weights = xp.reshape(self.weights, [len(self.weights), 1, 1])
 else:
 weights = self.weights

@@ -560,6 +581,8 @@ def average_combine(
 scale_func, _default_average(), scale_to
 )

+ xp = array_api_compat.array_namespace(data)
+
 # Do NOT modify data after this -- we need it to be intact when we
 # get to the uncertainty calculation.
if self.weights is not None: @@ -575,13 +598,13 @@ def average_combine( # set up the deviation uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(data) - masked_values) + uncertainty /= xp.sqrt(len(data) - masked_values) # Convert uncertainty to plain numpy array (#351) - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # create the combined image with a dtype that matches the combiner combined_image = CCDData( - np.asarray(mean, dtype=self.dtype), + xp.asarray(mean, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -633,6 +656,8 @@ def sum_combine( sum_func, _default_sum(), scale_to ) + xp = array_api_compat.array_namespace(data) + if self.weights is not None: summed, weights = self._weighted_sum(data, sum_func) else: @@ -644,15 +669,15 @@ def sum_combine( # set up the deviation uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(data) - masked_values) + uncertainty /= xp.sqrt(len(data) - masked_values) # Convert uncertainty to plain numpy array (#351) - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # Multiply uncertainty by square root of the number of images uncertainty *= len(data) - masked_values # create the combined image with a dtype that matches the combiner combined_image = CCDData( - np.asarray(summed, dtype=self.dtype), + xp.asarray(summed, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -696,8 +721,9 @@ def _calculate_size_of_image(ccd, combine_uncertainty_function): # If uncertainty_func is given for combine this will create an uncertainty # even if the originals did not have one. In that case we need to create # an empty placeholder. + xp = array_api_compat.array_namespace(ccd.data) if ccd.uncertainty is None and combine_uncertainty_function is not None: - ccd.uncertainty = StdDevUncertainty(np.zeros(ccd.data.shape)) + ccd.uncertainty = StdDevUncertainty(xp.zeros(ccd.data.shape)) size_of_an_img = ccd.data.nbytes try: @@ -737,8 +763,8 @@ def combine( sigma_clip=False, sigma_clip_low_thresh=3, sigma_clip_high_thresh=3, - sigma_clip_func=ma.mean, - sigma_clip_dev_func=ma.std, + sigma_clip_func=None, + sigma_clip_dev_func=None, dtype=None, combine_uncertainty_function=None, overwrite_output=False, @@ -852,21 +878,30 @@ def combine( combined_image : `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. """ + # Handle case where the input is an array of file names first if not isinstance(img_list, list): - # If not a list, check whether it is a numpy ndarray or string of - # filenames separated by comma - if isinstance(img_list, np.ndarray): - img_list = img_list.tolist() - elif isinstance(img_list, str) and ("," in img_list): - img_list = img_list.split(",") + try: + _ = array_api_compat.array_namespace(img_list) + except TypeError: + pass else: - try: - # Maybe the input can be made into a list, so try that - img_list = list(img_list) - except TypeError as err: - raise ValueError( - "unrecognised input for list of images to combine." 
- ) from err + # If it is an array, convert it to a list + img_list = list(img_list) + if ( + not isinstance(img_list, list) + and isinstance(img_list, str) + and ("," in img_list) + ): + # Handle case where the input is a string of file names separated by comma + img_list = img_list.split(",") + else: + try: + # Maybe the input can be made into a list, so try that + img_list = list(img_list) + except TypeError as err: + raise ValueError( + "unrecognised input for list of images to combine." + ) from err # Select Combine function to call in Combiner if method == "average": @@ -885,24 +920,32 @@ def combine( # User has provided fits filenames to read from ccd = CCDData.read(img_list[0], **ccdkwargs) + # Get the array namespace + xp = array_api_compat.array_namespace(ccd.data) + if dtype is None: - dtype = np.float64 + dtype = xp.float64 + + if sigma_clip_func is None: + sigma_clip_func = xp.mean + if sigma_clip_dev_func is None: + sigma_clip_dev_func = xp.std # Convert the master image to the appropriate dtype so when overwriting it # later the data is not downcast and the memory consumption calculation # uses the internally used dtype instead of the original dtype. #391 if ccd.data.dtype != dtype: - ccd.data = ccd.data.astype(dtype) + ccd.data = xp.astype(ccd.data, dtype) # If the template image doesn't have an uncertainty, add one, because the # result always has an uncertainty. if ccd.uncertainty is None: - ccd.uncertainty = StdDevUncertainty(np.zeros_like(ccd.data)) + ccd.uncertainty = StdDevUncertainty(xp.zeros_like(ccd.data)) # If the template doesn't have a mask, add one, because the result may have # a mask if ccd.mask is None: - ccd.mask = np.zeros_like(ccd.data, dtype=bool) + ccd.mask = xp.zeros_like(ccd.data, dtype=bool) size_of_an_img = _calculate_size_of_image(ccd, combine_uncertainty_function) @@ -951,7 +994,7 @@ def combine( scalevalues.append(scale(imgccd.data)) - to_set_in_combiner["scaling"] = np.array(scalevalues) + to_set_in_combiner["scaling"] = xp.array(scalevalues) else: to_set_in_combiner["scaling"] = scale @@ -1008,11 +1051,32 @@ def combine( comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds) # add it back into the master image - ccd.data[x:xend, y:yend] = comb_tile.data + # The try/except below is to handle array libraries that may not + # allow in-place operations. + try: + ccd.data[x:xend, y:yend] = comb_tile.data + except TypeError: + ccd.data = ccd.data.at[x:xend, y:yend].set(comb_tile.data) + if ccd.mask is not None: - ccd.mask[x:xend, y:yend] = comb_tile.mask + # Maybe temporary workaround for the mask not being writeable... + ccd.mask = ccd.mask.copy() + # The try/except below is to handle array libraries that may not + # allow in-place operations. + try: + ccd.mask[x:xend, y:yend] = comb_tile.mask + except (TypeError, ValueError): + ccd.mask = ccd.mask.at[x:xend, y:yend].set(comb_tile.mask) + if ccd.uncertainty is not None: - ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array + # The try/except below is to handle array libraries that may not + # allow in-place operations. 
+ try: + ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array + except TypeError: + ccd.uncertainty.array = ccd.uncertainty.array.at[ + x:xend, y:yend + ].set(comb_tile.uncertainty.array) # Free up memory to try to stay under user's limit del comb_tile del tile_combiner diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 1cde6535..6f3fd440 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -83,7 +83,7 @@ def test_combiner_create(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) assert c.data_arr.shape == (3, 100, 100) - assert c.data_arr.mask.shape == (3, 100, 100) + assert c.data_arr_mask.shape == (3, 100, 100) # test if dtype matches the value that is passed @@ -112,8 +112,8 @@ def test_combiner_mask(): ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) assert c.data_arr.shape == (3, 10, 10) - assert c.data_arr.mask.shape == (3, 10, 10) - assert not c.data_arr.mask[0, 5, 5] + assert c.data_arr_mask.shape == (3, 10, 10) + assert not c.data_arr_mask[0, 5, 5] def test_weights(): @@ -185,7 +185,7 @@ def test_combiner_minmax_max(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=None, max_clip=500) - assert c.data_arr[2].mask.all() + assert c.data_arr_mask[2].all() def test_combiner_minmax_min(): @@ -197,7 +197,7 @@ def test_combiner_minmax_min(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=-500, max_clip=None) - assert c.data_arr[1].mask.all() + assert c.data_arr_mask[1].all() def test_combiner_sigmaclip_high(): @@ -213,7 +213,7 @@ def test_combiner_sigmaclip_high(): c = Combiner(ccd_list) # using mad for more robust statistics vs. std c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) - assert c.data_arr[5].mask.all() + assert c.data_arr_mask[5].all() def test_combiner_sigmaclip_single_pix(): @@ -234,7 +234,7 @@ def test_combiner_sigmaclip_single_pix(): c.data_arr[3, 5, 5] = -5 c.data_arr[4, 5, 5] = 25 c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) - assert c.data_arr.mask[4, 5, 5] + assert c.data_arr_mask[4, 5, 5] def test_combiner_sigmaclip_low(): @@ -250,7 +250,7 @@ def test_combiner_sigmaclip_low(): c = Combiner(ccd_list) # using mad for more robust statistics vs. std c.sigma_clipping(high_thresh=None, low_thresh=3, func=np.ma.median, dev_func=mad) - assert c.data_arr[5].mask.all() + assert c.data_arr_mask[5].all() # test that the median combination works and returns a ccddata object @@ -334,6 +334,7 @@ def test_combiner_mask_average(): # are masked?! # assert ccd.data[0, 0] == 0 assert ccd.data[5, 5] == 1 + # THE LINE BELOW IS CATCHING A REAL ERROR assert ccd.mask[0, 0] assert not ccd.mask[5, 5] @@ -623,7 +624,11 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): if mask_point: # Make one pixel really negative so we can clip it and guarantee a resulting # pixel is masked. 
- ccd_data.data[0, 0] = -1000
+ # Handle case where array is immutable
+ try:
+ ccd_data.data[0, 0] = -1000
+ except TypeError:
+ ccd_data.data = ccd_data.data.at[0, 0].set(-1000)

 ccd_list = [ccd_data, ccd_data, ccd_data]
 c = Combiner(ccd_list)
@@ -889,9 +894,9 @@ def test_clip_extrema_with_other_rejection():
 ccdlist[1].data[2, 0] = 100.1
 c = Combiner(ccdlist)
 # Reject ccdlist[1].data[1,2] by other means
- c.data_arr.mask[1, 1, 2] = True
+ c.data_arr_mask[1, 1, 2] = True
 # Reject ccdlist[1].data[1,2] by other means
- c.data_arr.mask[3, 0, 0] = True
+ c.data_arr_mask[3, 0, 0] = True

 c.clip_extrema(nlow=1, nhigh=1)
 result = c.average_combine()
@@ -934,7 +939,7 @@ def create_gen():
 c = Combiner(create_gen())
 assert c.data_arr.shape == (3, 100, 100)
- assert c.data_arr.mask.shape == (3, 100, 100)
+ assert c.data_arr_mask.shape == (3, 100, 100)

@pytest.mark.parametrize(
@@ -996,16 +1001,17 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func):
 if comb_func == "sum_combine":
 expected_result = 3 * data
- actual_result = c.sum_combine(sum_func=np.ma.sum)
+ actual_result = c.sum_combine(sum_func=np.sum)
 elif comb_func == "average_combine":
 expected_result = data
- actual_result = c.average_combine(scale_func=np.ma.mean)
+ actual_result = c.average_combine(scale_func=np.mean)
 elif comb_func == "median_combine":
 expected_result = data
- actual_result = c.median_combine(median_func=np.ma.median)
+ actual_result = c.median_combine(median_func=np.median)
 # Two of the three values are masked, so no matter what the combination
 # method is the result in this pixel should be 2.
 expected_result[5, 5] = 2
+ # THIS IS A REAL TEST FAILURE!!!
 np.testing.assert_almost_equal(expected_result, actual_result)

From 1cbb7bc40538b39c05a511da2292e0c19ede183f Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Mon, 17 Mar 2025 11:09:27 -0500
Subject: [PATCH 17/59] Write a mask-aware sum function that is array API
 compatible

---
 ccdproc/tests/test_combiner.py | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 6f3fd440..1729da43 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -1,4 +1,5 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
+import array_api_compat
 import astropy
 import astropy.units as u
 import numpy as np
@@ -1000,8 +1001,23 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func):
 c = Combiner(ccd_list)

 if comb_func == "sum_combine":
+
+ def my_summer(data, mask, axis=None):
+ xp = array_api_compat.array_namespace(data)
+ new_data = []
+ for i in range(data.shape[0]):
+ if mask[i] is not None:
+ new_data.append(data[i] * ~mask[i])
+ else:
+ new_data.append(xp.zeros_like(data[i]))
+
+ new_data = xp.array(new_data)
+
+ def sum_func(_, axis=axis):
+ return xp.sum(new_data, axis=axis)
+
+ # Return the closure; without this my_summer returns None and the
+ # inner sum_func is never used.
+ return sum_func
+
 expected_result = 3 * data
- actual_result = c.sum_combine(sum_func=np.sum)
+ actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask))
 elif comb_func == "average_combine":
 expected_result = data
 actual_result = c.average_combine(scale_func=np.mean)

From da9035060cfe94074cf5cb3237fbc1fba851190e Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Wed, 19 Mar 2025 10:05:49 -0500
Subject: [PATCH 18/59] Eliminate most use of numpy in tests

---
 ccdproc/tests/test_cosmicray.py | 98 ++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 31 deletions(-)

diff --git a/ccdproc/tests/test_cosmicray.py
b/ccdproc/tests/test_cosmicray.py index cfccc747..dba0ea74 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,9 +1,13 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -import numpy as np +import array_api_compat import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning +from numpy import array as np_array +from numpy.ma import array as np_ma_array +from numpy.random import default_rng +from numpy.testing import assert_allclose from ccdproc.core import ( background_deviation_box, @@ -21,17 +25,23 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): size = data.shape[0] - rng = np.random.default_rng(99) + rng = default_rng(99) crrays = rng.integers(0, size, size=(ncrays, 2)) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns # add_cosmicrays is highly sensitive to the seed # ideally threshold should be set so it is not sensitive to seed, but # this is not working right now - crflux = 10 * scale * rng.random(NCRAYS) + (threshold + 15) * scale + crflux = 10 * scale * rng.random(ncrays) + (threshold + 15) * scale for i in range(ncrays): y, x = crrays[i] - data.data[y, x] = crflux[i] + try: + data.data[y, x] = crflux[i] + except TypeError as e: + try: + data.data = data.data.at[y, x].set(crflux[i]) + except AttributeError as other_e: + raise other_e from e def test_cosmicray_lacosmic(): @@ -48,10 +58,13 @@ def test_cosmicray_lacosmic(): def test_cosmicray_lacosmic_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9) # check the number of cosmic rays detected @@ -62,8 +75,9 @@ def test_cosmicray_lacosmic_ccddata(): def test_cosmicray_lacosmic_check_data(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) with pytest.raises(TypeError): - noise = DATA_SCALE * np.ones_like(ccd_data.data) + noise = DATA_SCALE * xp.ones_like(ccd_data.data) cosmicray_lacosmic(10, noise) @@ -76,10 +90,13 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): # data and returns that gain corrected data. That is not the # intent... ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) # No units here on purpose. gain = 2.0 @@ -93,22 +110,26 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): cr_mask = new_ccd.mask # Fill masked locations with 0 since there is no simple relationship # between the original value and the corrected value. - orig_data = np.ma.array(ccd_data.data, mask=cr_mask).filled(0) - new_data = np.ma.array(new_data.data, mask=cr_mask).filled(0) + # Masking using numpy is a handy way to check the results here. 
+ orig_data = xp.array(np_ma_array(ccd_data.data, mask=cr_mask).filled(0)) + new_data = xp.array(np_ma_array(new_data.data, mask=cr_mask).filled(0)) if gain_correct_data: gain_for_test = gain else: gain_for_test = 1.0 - np.testing.assert_allclose(gain_for_test * orig_data, new_data) + assert_allclose(gain_for_test * orig_data, new_data) def test_cosmicray_lacosmic_accepts_quantity_gain(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) # The units below are the point of the test gain = 2.0 * u.electron / u.adu @@ -117,10 +138,13 @@ def test_cosmicray_lacosmic_accepts_quantity_gain(): def test_cosmicray_lacosmic_accepts_quantity_readnoise(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) gain = 2.0 * u.electron / u.adu # The units below are the point of this test readnoise = 6.5 * u.electron @@ -132,11 +156,14 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): # of adu, a readnoise in electrons and a gain in adu / electron. # That is not internally inconsistent. ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) ccd_data.unit = "adu" threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) readnoise = 6.5 * u.electron # The units below are deliberately incorrect. @@ -149,13 +176,16 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(): # Check that an input ccd in electrons raises a warning. ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) # The unit below is important for the test; this unit on # input is supposed to raise an error. ccd_data.unit = u.electron threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) # No units here on purpose. gain = 2.0 # Don't really need to set this (6.5 is the default value) but want to @@ -176,10 +206,13 @@ def test_cosmicray_lacosmic_invar_inbkg(new_args): # that calling with the new keyword arguments to astroscrappy # 1.1.0 raises no error. 
ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) with pytest.raises(TypeError): cosmicray_lacosmic(ccd_data, sigclip=5.9, **new_args) @@ -206,7 +239,9 @@ def test_cosmicray_median_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - ccd_data.uncertainty = ccd_data.data * 0.0 + DATA_SCALE + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(ccd_data.data * 0.0 + DATA_SCALE) nccd = cosmicray_median(ccd_data, thresh=5, mbox=11, error_image=None) # check the number of cosmic rays detected @@ -217,7 +252,7 @@ def test_cosmicray_median_masked(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - data = np.ma.masked_array(ccd_data.data, (ccd_data.data > -1e6)) + data = np_ma_array(ccd_data.data, (ccd_data.data > -1e6)) ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, error_image=DATA_SCALE) # check the number of cosmic rays detected @@ -243,7 +278,7 @@ def test_cosmicray_median_gbox(): data, crarr = cosmicray_median( ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=0, gbox=5 ) - data = np.ma.masked_array(data, crarr) + data = np_ma_array(data, crarr) assert crarr.sum() > NCRAYS assert abs(data.std() - scale) < 0.1 @@ -269,28 +304,28 @@ def test_cosmicray_median_background_deviation(): def test_background_deviation_box(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) bd = background_deviation_box(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_box_fail(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) with pytest.raises(ValueError): background_deviation_box(cd, 0.5) def test_background_deviation_filter(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) bd = background_deviation_filter(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_filter_fail(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) with pytest.raises(ValueError): background_deviation_filter(cd, 0.5) @@ -321,10 +356,11 @@ def test_cosmicray_lacosmic_pssl_does_not_fail(): # to make sure that passing in pssl does not lead to an error # since the new interface does not include pssl. 
ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + ccd_data.uncertainty = np_array(noise) with pytest.warns(AstropyDeprecationWarning): # The deprecation warning is expected and should be captured nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, pssl=0.0001) From bddb2de76d4f293b5be2fb11758e561fe4b04d56 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 10:15:12 -0500 Subject: [PATCH 19/59] Fix a couple calls to numpy masked array --- ccdproc/tests/test_cosmicray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index dba0ea74..f41d52dd 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -252,7 +252,7 @@ def test_cosmicray_median_masked(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - data = np_ma_array(ccd_data.data, (ccd_data.data > -1e6)) + data = np_ma_array(ccd_data.data, mask=(ccd_data.data > -1e6)) ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, error_image=DATA_SCALE) # check the number of cosmic rays detected @@ -278,7 +278,7 @@ def test_cosmicray_median_gbox(): data, crarr = cosmicray_median( ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=0, gbox=5 ) - data = np_ma_array(data, crarr) + data = np_ma_array(data, mask=crarr) assert crarr.sum() > NCRAYS assert abs(data.std() - scale) < 0.1 From 8ac481a26031b0bfac5b8f424f5813aab29ba672 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 10:15:25 -0500 Subject: [PATCH 20/59] One more workaround for immutable arrays --- ccdproc/core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index a6ee524e..bf02ef48 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1904,7 +1904,10 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # bother with it. 
# data = np.ma.masked_array(data, (crarr == 1)) mdata = ndimage.median_filter(data, rbox) - ndata[crarr == 1] = mdata[crarr == 1] + try: + ndata[crarr == 1] = mdata[crarr == 1] + except TypeError: + ndata = ndata.at[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): From 220a96e1fad076a7b050754b65aea776a786f210 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 10:25:41 -0500 Subject: [PATCH 21/59] Fix up dependencies and test environment setup --- pyproject.toml | 2 ++ tox.ini | 1 + 2 files changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 2d43ba32..0c9118a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,7 @@ authors = [ { name = "and Michael Seifert" }, ] dependencies = [ + "array_api_compat", "astropy>=5.0.1", "astroscrappy>=1.1.0", "numpy>=1.24", @@ -34,6 +35,7 @@ test = [ "pre-commit", "pytest-astropy>=0.10.0", "ruff", + "jax", ] [project.urls] diff --git a/tox.ini b/tox.ini index 62ff0d73..b35840ae 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,7 @@ isolated_build = true [testenv] setenv = + test: JAX_ENABLE_X64 = True devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple extras = test From 7378297774fd5a97617b48f71c454abf00ecf4a9 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 12:57:40 -0500 Subject: [PATCH 22/59] Update minimum python to 3.10 --- .github/workflows/ci_tests.yml | 12 +++--------- pyproject.toml | 2 +- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 614fc2cc..e80353cb 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -27,18 +27,12 @@ jobs: strategy: matrix: include: - - name: 'ubuntu-py38-oldestdeps' + - name: 'ubuntu-py310-oldestdeps' os: ubuntu-latest - python: '3.8' + python: '3.10' # Test the oldest supported dependencies on the oldest supported Python - tox_env: 'py38-test-oldestdeps' + tox_env: 'py310-test-oldestdeps' - - name: 'macos-py310-astroscrappy11' - # Keep this test until astroscrappy 1.1.0 is the oldest supported - # version. - os: macos-latest - python: '3.10' - tox_env: 'py310-test-astroscrappy11' - name: 'ubuntu-py312-bottleneck' os: ubuntu-latest diff --git a/pyproject.toml b/pyproject.toml index 0c9118a5..ca507d6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ dynamic = ["version"] description = "Astropy affiliated package" readme = "README.rst" license = { text = "BSD-3-Clause" } -requires-python = ">=3.8" +requires-python = ">=3.10" authors = [ { name = "Steve Crawford", email = "ccdproc@gmail.com" }, { name = "Matt Craig" }, From 44858390b3caedb177e117f49e6d599423f629a3 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:14:39 -0500 Subject: [PATCH 23/59] Fix mask access --- docs/image_combination.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/image_combination.rst b/docs/image_combination.rst index eed5ae1a..460a1570 100644 --- a/docs/image_combination.rst +++ b/docs/image_combination.rst @@ -104,11 +104,11 @@ To clip iteratively, continuing the clipping process until no more pixels are rejected, loop in the code calling the clipping method: >>> old_n_masked = 0 # dummy value to make loop execute at least once - >>> new_n_masked = combiner.data_arr.mask.sum() + >>> new_n_masked = combiner.data_arr_mask.sum() >>> while (new_n_masked > old_n_masked): ... combiner.sigma_clipping(func=np.ma.median) ... old_n_masked = new_n_masked - ... 
new_n_masked = combiner.data_arr.mask.sum() + ... new_n_masked = combiner.data_arr_mask.sum() Note that the default values for the high and low thresholds for rejection are 3 standard deviations. From 30e9e558caaf5e3d33b8d057e242ff2bed0dfd45 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:18:11 -0500 Subject: [PATCH 24/59] Ignore warnings about negative values in square root --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index ca507d6e..d7c0139f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -147,6 +147,7 @@ filterwarnings= [ "ignore:numpy\\.ufunc size changed:RuntimeWarning", "ignore:numpy.ndarray size changed:RuntimeWarning", "ignore:`np.bool` is a deprecated alias for the builtin `bool`:DeprecationWarning", + "ignore:invalid value encountered in sqrt:RuntimeWarning", ] markers = [ "data_size(N): set dimension of square data array for ccd_data fixture", From a45d2cd9e48d6b9e58704dae9953d380cd545816 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:36:38 -0500 Subject: [PATCH 25/59] Update several minimum dependencies --- .github/workflows/ci_tests.yml | 6 +++--- pyproject.toml | 8 ++++---- tox.ini | 10 ++++------ 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index e80353cb..4fcb269d 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -27,11 +27,11 @@ jobs: strategy: matrix: include: - - name: 'ubuntu-py310-oldestdeps' + - name: 'ubuntu-py311-oldestdeps' os: ubuntu-latest - python: '3.10' + python: '3.11' # Test the oldest supported dependencies on the oldest supported Python - tox_env: 'py310-test-oldestdeps' + tox_env: 'py311-test-oldestdeps' - name: 'ubuntu-py312-bottleneck' diff --git a/pyproject.toml b/pyproject.toml index d7c0139f..cc346c72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ dynamic = ["version"] description = "Astropy affiliated package" readme = "README.rst" license = { text = "BSD-3-Clause" } -requires-python = ">=3.10" +requires-python = ">=3.11" authors = [ { name = "Steve Crawford", email = "ccdproc@gmail.com" }, { name = "Matt Craig" }, @@ -16,10 +16,10 @@ authors = [ ] dependencies = [ "array_api_compat", - "astropy>=5.0.1", + "astropy>=6.0.1", "astroscrappy>=1.1.0", - "numpy>=1.24", - "reproject>=0.7", + "numpy>=1.26", + "reproject>=0.9.1", "scikit-image", "scipy", ] diff --git a/tox.ini b/tox.ini index b35840ae..0ab4ee81 100644 --- a/tox.ini +++ b/tox.ini @@ -33,8 +33,7 @@ description = deps = cov: coverage - numpy124: numpy==1.24.* # current oldest suppported numpy - numpy126: numpy==1.26.* + numpy126: numpy==1.26.* # currently oldest support numpy version numpy200: numpy==2.0.* numpy210: numpy==2.1.* @@ -48,10 +47,9 @@ deps = # Remember to transfer any changes here to setup.cfg also. 
Only listing # packages which are constrained in the setup.cfg - oldestdeps: numpy==1.24.* - oldestdeps: astropy==5.0.* - oldestdeps: reproject==0.7 - oldestdeps: cython + oldestdeps: numpy==1.26.* + oldestdeps: astropy==6.0.* + oldestdeps: reproject==0.9.1 commands = pip freeze From e4afebcfb806ae414267d9479928c59f9080dfd1 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:46:00 -0500 Subject: [PATCH 26/59] Fix linting errors --- ccdproc/core.py | 5 ++++- ccdproc/image_collection.py | 2 +- ccdproc/log_meta.py | 4 ++-- ccdproc/tests/test_combiner.py | 2 +- ccdproc/tests/test_image_collection.py | 6 +++--- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index bf02ef48..5d799980 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1299,7 +1299,10 @@ def rebin(ccd, newshape): if len(ccd.shape) != len(newshape): raise ValueError("newshape does not have the same dimensions as " "ccd.") - slices = [slice(0, old, old / new) for old, new in zip(ccd.shape, newshape)] + slices = [ + slice(0, old, old / new) + for old, new in zip(ccd.shape, newshape, strict=True) + ] coordinates = xp.mgrid[slices] indices = coordinates.astype("i") return ccd[tuple(indices)] diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py index 35a8acda..4d9eb575 100644 --- a/ccdproc/image_collection.py +++ b/ccdproc/image_collection.py @@ -696,7 +696,7 @@ def _find_keywords_by_values(self, **kwd): use_info = self._fits_summary(header_keywords=keywords) matches = np.ones(len(use_info), dtype=bool) - for key, value in zip(keywords, values): + for key, value in zip(keywords, values, strict=True): logger.debug("key %s, value %s", key, value) logger.debug("value in table %s", use_info[key]) value_missing = use_info[key].mask diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index 67a469c0..ebafa320 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -102,7 +102,7 @@ def wrapper(*args, **kwd): # been called as keywords. positional_args = original_args[: len(args)] - all_args = chain(zip(positional_args, args), kwd.items()) + all_args = chain(zip(positional_args, args, strict=True), kwd.items()) all_args = [ f"{name}={_replace_array_with_placeholder(val)}" for name, val in all_args @@ -134,7 +134,7 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, (NDData, np.ndarray)): + elif isinstance(value, NDData | np.ndarray): try: length = len(value) except TypeError: diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 1729da43..d7e3d4d0 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -297,7 +297,7 @@ def test_combiner_sum_weighted(): c = Combiner(ccd_list) c.weights = np.array([1, 2, 3]) ccd = c.sum_combine() - expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list)) + expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list, strict=True)) np.testing.assert_almost_equal(ccd, expected_result) diff --git a/ccdproc/tests/test_image_collection.py b/ccdproc/tests/test_image_collection.py index f165dc92..4b79afc0 100644 --- a/ccdproc/tests/test_image_collection.py +++ b/ccdproc/tests/test_image_collection.py @@ -160,7 +160,7 @@ def test_filtered_files_have_proper_path(self, triage_setup): plain_biases = list(plain_biases) # Same subset, but with full path. 
path_biases = ic.files_filtered(imagetyp="bias", include_path=True) - for path_b, plain_b in zip(path_biases, plain_biases): + for path_b, plain_b in zip(path_biases, plain_biases, strict=True): # If the path munging has been done properly, this will succeed. assert os.path.basename(path_b) == plain_b @@ -207,7 +207,7 @@ def test_generator_full_path(self, triage_setup): location=triage_setup.test_dir, keywords=["imagetyp"] ) - for path, file_name in zip(collection._paths(), collection.files): + for path, file_name in zip(collection._paths(), collection.files, strict=True): assert path == os.path.join(triage_setup.test_dir, file_name) def test_hdus(self, triage_setup): @@ -278,7 +278,7 @@ def test_multiple_extensions(self, triage_setup, extension): ccd_kwargs = {"unit": "adu"} for data, hdr, hdu, ccd in zip( - ic2.data(), ic2.headers(), ic2.hdus(), ic2.ccds(ccd_kwargs) + ic2.data(), ic2.headers(), ic2.hdus(), ic2.ccds(ccd_kwargs), strict=True ): np.testing.assert_allclose(data, ext2.data) assert hdr == ext2.header From 3724f0acfa3b69fc2f14369dc151eb48b4f83d63 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:47:45 -0500 Subject: [PATCH 27/59] Drop unnecessary import --- docs/conf.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index cb0fa5fe..68a64539 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -39,10 +39,7 @@ ) sys.exit(1) -if sys.version_info < (3, 11): - import tomli as tomllib -else: - import tomllib +import tomllib # Grab minversion from pyproject.toml with (Path(__file__).parents[1] / "pyproject.toml").open("rb") as f: From c0928c884feb0d80d271665ba99faf6637e56ff5 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:50:16 -0500 Subject: [PATCH 28/59] Drop unneeded test --- .github/workflows/ci_tests.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 4fcb269d..99d41a90 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -39,11 +39,6 @@ jobs: python: '3.12' tox_env: 'py312-test-alldeps-bottleneck-cov' - - name: 'ubuntu-py310' - os: ubuntu-latest - python: '3.10' - tox_env: 'py310-test-alldeps-numpy124' - - name: 'ubuntu-py311' os: ubuntu-latest python: '3.11' From c04f84292abd5eb1cb3af17f000c311c5fd9aa4e Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 10:02:24 -0500 Subject: [PATCH 29/59] Skip memory tests if jax is installed --- ccdproc/tests/test_memory_use.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ccdproc/tests/test_memory_use.py b/ccdproc/tests/test_memory_use.py index 5c22248f..173e0f96 100644 --- a/ccdproc/tests/test_memory_use.py +++ b/ccdproc/tests/test_memory_use.py @@ -15,6 +15,13 @@ else: memory_profile_present = True +try: + import jax # noqa +except ImportError: + JAX_PRESENT = False +else: + JAX_PRESENT = True + image_size = 2000 # Square image, so 4000 x 4000 num_files = 10 @@ -30,8 +37,10 @@ def teardown_module(): fil.unlink() +@pytest.mark.skipif(JAX_PRESENT, reason="JAX is present, and does not allow os.fork") @pytest.mark.skipif( - not platform.startswith("linux"), reason="memory tests only work on linux" + not platform.startswith("linux"), + reason="memory tests only work on linux", ) @pytest.mark.skipif(not memory_profile_present, reason="memory_profiler not installed") @pytest.mark.parametrize("combine_method", ["average", "sum", "median"]) From 9fcc655419bb228879069d8c78d8bdf876694eaa Mon Sep 17 00:00:00 2001 
From: Matt Craig
Date: Fri, 21 Mar 2025 10:22:46 -0500
Subject: [PATCH 30/59] Explain why numpy is still used in image_collection

---
 ccdproc/image_collection.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py
index 4d9eb575..6a01066b 100644
--- a/ccdproc/image_collection.py
+++ b/ccdproc/image_collection.py
@@ -8,13 +8,20 @@
 from os import listdir, path

 import astropy.io.fits as fits
-import numpy as np
+import numpy as np # see numpy comment below
 import numpy.ma as ma
 from astropy.table import MaskedColumn, Table
 from astropy.utils.exceptions import AstropyUserWarning

 from .ccddata import _recognized_fits_file_extensions, fits_ccddata_reader

+# ==> numpy comment <==
+# numpy is used internally to keep track of masking in the summary
+# table. It is not used for any CCD processing, so there is no need
+# to implement the array API here. In other words, ImageFileCollection
+# is fine to implement its internal tables however it wants, regardless
+# of what the user is using for their data arrays.
+
 logger = logging.getLogger(__name__)

 __all__ = ["ImageFileCollection"]

From 1fe1180a1ec8f2abb96be87b2a8ca8c5cd78749b Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Mon, 24 Mar 2025 10:24:06 -0500
Subject: [PATCH 31/59] Drop numpy import in combiner

---
 ccdproc/combiner.py | 88 ++++++++++++++++++++++++++--------
 ccdproc/tests/test_combiner.py | 5 +-
 2 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py
index deea6a32..77fb735c 100644
--- a/ccdproc/combiner.py
+++ b/ccdproc/combiner.py
@@ -2,10 +2,6 @@

 """This module implements the combiner class."""

-import numpy as np
-
-# from numpy import ma
-
 try:
 import bottleneck as bn
 except ImportError:
@@ -24,32 +20,68 @@

 __all__ = ["Combiner", "combine"]

-def _default_median(): # pragma: no cover
+def _default_median(xp=None): # pragma: no cover
 if HAS_BOTTLENECK:
 return bn.nanmedian
 else:
- return np.nanmedian
+ if xp is None:
+ return None
+
+ # No bottleneck, but we have a namespace.
+ try:
+ return xp.nanmedian
+ except AttributeError as e:
+ raise RuntimeError(
+ "No NaN-aware median function available. Please install bottleneck."
+ ) from e

-def _default_average(): # pragma: no cover
+def _default_average(xp=None): # pragma: no cover
 if HAS_BOTTLENECK:
 return bn.nanmean
 else:
- return np.nanmean
+ if xp is None:
+ return None
+
+ # No bottleneck, but we have a namespace.
+ try:
+ return xp.nanmean
+ except AttributeError as e:
+ raise RuntimeError(
+ "No NaN-aware mean function available. Please install bottleneck."
+ ) from e

-def _default_sum(): # pragma: no cover
+def _default_sum(xp=None): # pragma: no cover
 if HAS_BOTTLENECK:
 return bn.nansum
 else:
- return np.nansum
+ if xp is None:
+ return None
+
+ # No bottleneck, but we have a namespace.
+ try:
+ return xp.nansum
+ except AttributeError as e:
+ raise RuntimeError(
+ "No NaN-aware sum function available. Please install bottleneck."
+ ) from e

-def _default_std(): # pragma: no cover
+def _default_std(xp=None): # pragma: no cover
 if HAS_BOTTLENECK:
 return bn.nanstd
 else:
- return np.nanstd
+ if xp is None:
+ return None
+
+ # No bottleneck, but we have a namespace.
+ try:
+ return xp.nanstd
+ except AttributeError as e:
+ raise RuntimeError(
+ "No NaN-aware std function available. Please install bottleneck."
) from e

 _default_sum_func = _default_sum()

@@ -474,11 +506,14 @@ def median_combine(
 The uncertainty currently calculated using the median absolute
 deviation does not account for rejected pixels.
 """
+ xp = array_api_compat.array_namespace(self.data_arr)
+
+ _default_median_func = _default_median(xp=xp)
 data, masked_values, median_func = self._combination_setup(
- median_func, _default_median(), scale_to
+ median_func, _default_median_func, scale_to
 )
- xp = array_api_compat.array_namespace(data)
+
 medianed = median_func(data, axis=0)

 # set the mask
@@ -577,12 +612,20 @@ def average_combine(
 combined_image: `~astropy.nddata.CCDData`
 CCDData object based on the combined input of CCDData objects.
 """
+ xp = array_api_compat.array_namespace(self.data_arr)
+
+ _default_average_func = _default_average(xp=xp)
+
+ if sum_func is None:
+ sum_func = _default_sum(xp=xp)
+
+ if uncertainty_func is None:
+ uncertainty_func = _default_std(xp=xp)
+
 data, masked_values, scale_func = self._combination_setup(
- scale_func, _default_average(), scale_to
+ scale_func, _default_average_func, scale_to
 )
- xp = array_api_compat.array_namespace(data)
-
 # Do NOT modify data after this -- we need it to be intact when we
 # get to the uncertainty calculation.
 if self.weights is not None:
@@ -652,12 +695,17 @@ def sum_combine(
 CCDData object based on the combined input of CCDData objects.
 """
+ xp = array_api_compat.array_namespace(self.data_arr)
+
+ _default_sum_func = _default_sum(xp=xp)
+
+ if uncertainty_func is None:
+ uncertainty_func = _default_std(xp=xp)
+
 data, masked_values, sum_func = self._combination_setup(
- sum_func, _default_sum(), scale_to
+ sum_func, _default_sum_func, scale_to
 )
- xp = array_api_compat.array_namespace(data)
-
 if self.weights is not None:
 summed, weights = self._weighted_sum(data, sum_func)
 else:
diff --git a/ccdproc/tests/test_combiner.py
index d7e3d4d0..7838cfe9 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -973,13 +973,14 @@ def test_combiner_with_scaling_uncertainty(comb_func):
 avg_ccd = getattr(combiner, comb_func)()
 if comb_func != "median_combine":
- uncertainty_func = _default_std()
+ xp = array_api_compat.array_namespace(ccd_data.data)
+ uncertainty_func = _default_std(xp=xp)
 else:
 uncertainty_func = sigma_func

 expected_unc = uncertainty_func(scaled_ccds, axis=0)
- np.testing.assert_almost_equal(avg_ccd.uncertainty.array, expected_unc)
+ np.testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10)

@pytest.mark.parametrize(

From 282eaee873d251fe1889f33e8b21ff975d19272c Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Thu, 27 Mar 2025 10:31:45 -0500
Subject: [PATCH 32/59] Use array_api_extra to handle immutable arrays

---
 ccdproc/combiner.py | 16 +++++-----------
 ccdproc/core.py | 6 ++----
 ccdproc/tests/test_combiner.py | 9 ++++-----
 ccdproc/tests/test_cosmicray.py | 9 ++-------
 pyproject.toml | 1 +
 5 files changed, 14 insertions(+), 27 deletions(-)

diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py
index 77fb735c..fc03094d 100644
--- a/ccdproc/combiner.py
+++ b/ccdproc/combiner.py
@@ -10,6 +10,7 @@
 HAS_BOTTLENECK = True

 import array_api_compat
+import array_api_extra as xpx
 from astropy import log
 from astropy.nddata import CCDData, StdDevUncertainty
 from astropy.stats import sigma_clip
@@ -439,11 +440,8 @@ def _get_nan_substituted_data(self, data):

 # Get the data as an unmasked array with masked values filled as NaN
 if self.data_arr_mask.any():
- # Workaround for array
libraries that do not allow in-place modification - try: - data[self.data_arr_mask] = xp.nan - except TypeError: - data = data.at[self.data_arr_mask].set(xp.nan) + # Use array_api_extra so that we can use at with all array libraries + data = xpx.at(data)[self.data_arr_mask].set(xp.nan) else: data = data return data @@ -1099,12 +1097,8 @@ def combine( comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds) # add it back into the master image - # The try/except below is to handle array libraries that may not - # allow in-place operations. - try: - ccd.data[x:xend, y:yend] = comb_tile.data - except TypeError: - ccd.data = ccd.data.at[x:xend, y:yend].set(comb_tile.data) + # Use array_api_extra so that we can use at with all array libraries + ccd.data = xpx.at(ccd.data)[x:xend, y:yend].set(comb_tile.data) if ccd.mask is not None: # Maybe temporary workaround for the mask not being writeable... diff --git a/ccdproc/core.py b/ccdproc/core.py index 5d799980..b15e253d 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -8,6 +8,7 @@ import warnings import array_api_compat +import array_api_extra as xpx from astropy import nddata, stats from astropy import units as u from astropy.modeling import fitting @@ -1907,10 +1908,7 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # bother with it. # data = np.ma.masked_array(data, (crarr == 1)) mdata = ndimage.median_filter(data, rbox) - try: - ndata[crarr == 1] = mdata[crarr == 1] - except TypeError: - ndata = ndata.at[crarr == 1].set(mdata[crarr == 1]) + ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 7838cfe9..7c9ecae2 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat +import array_api_extra as xpx import astropy import astropy.units as u import numpy as np @@ -625,11 +626,9 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): if mask_point: # Make one pixel really negative so we can clip it and guarantee a resulting # pixel is masked. - # Handle case where array is immutable - try: - ccd_data.data[0, 0] = -1000 - except TypeError: - ccd_data.data = ccd_data.data.at[0, 0].set(-1000) + # Handle case where array is immutable by using array_api_extra, + # which provides at for all array libraries.
+ ccd_data.data = xpx.at(ccd_data.data)[0, 0].set(-1000) ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index f41d52dd..ced95aa3 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,6 +1,7 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat +import array_api_extra as xpx import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning @@ -35,13 +36,7 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): crflux = 10 * scale * rng.random(ncrays) + (threshold + 15) * scale for i in range(ncrays): y, x = crrays[i] - try: - data.data[y, x] = crflux[i] - except TypeError as e: - try: - data.data = data.data.at[y, x].set(crflux[i]) - except AttributeError as other_e: - raise other_e from e + data.data = xpx.at(data.data)[y, x].set(crflux[i]) def test_cosmicray_lacosmic(): diff --git a/pyproject.toml b/pyproject.toml index cc346c72..2d205637 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ authors = [ ] dependencies = [ "array_api_compat", + "array_api_extra>=0.7.0", "astropy>=6.0.1", "astroscrappy>=1.1.0", "numpy>=1.26", From 5f27cd40cc24115da661376bca1006ee4796c22c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 10:40:44 -0500 Subject: [PATCH 33/59] Use a consistent namespace for arrays --- ccdproc/core.py | 4 +++- ccdproc/tests/test_cosmicray.py | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index b15e253d..2c7176a5 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1907,7 +1907,9 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # Fun fact: scipy.ndimage ignores the mask, so may as well not # bother with it. 
# data = np.ma.masked_array(data, (crarr == 1)) - mdata = ndimage.median_filter(data, rbox) + + # make sure that mdata is the same type as data + mdata = xp.asarray(ndimage.median_filter(data, rbox)) ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index ced95aa3..177a7f64 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -27,13 +27,14 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): size = data.shape[0] rng = default_rng(99) - crrays = rng.integers(0, size, size=(ncrays, 2)) + xp = array_api_compat.array_namespace(data.data) + crrays = xp.asarray(rng.integers(0, size, size=(ncrays, 2))) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns # add_cosmicrays is highly sensitive to the seed # ideally threshold should be set so it is not sensitive to seed, but # this is not working right now - crflux = 10 * scale * rng.random(ncrays) + (threshold + 15) * scale + crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale) for i in range(ncrays): y, x = crrays[i] data.data = xpx.at(data.data)[y, x].set(crflux[i]) From 5f7f3e0fa0fbc773ad71ea7bfbb29f080f40d375 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:06:17 -0500 Subject: [PATCH 34/59] Clean up a couple more cases to use array_api_extra --- ccdproc/combiner.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index fc03094d..9b77315c 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -1103,22 +1103,14 @@ def combine( if ccd.mask is not None: # Maybe temporary workaround for the mask not being writeable... ccd.mask = ccd.mask.copy() - # The try/except below is to handle array libraries that may not - # allow in-place operations. - try: - ccd.mask[x:xend, y:yend] = comb_tile.mask - except (TypeError, ValueError): - ccd.mask = ccd.mask.at[x:xend, y:yend].set(comb_tile.mask) + # Handle immutable arrays with array_api_extra + ccd.mask = xpx.at(ccd.mask)[x:xend, y:yend].set(comb_tile.mask) if ccd.uncertainty is not None: - # The try/except below is to handle array libraries that may not - # allow in-place operations. - try: - ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array - except TypeError: - ccd.uncertainty.array = ccd.uncertainty.array.at[ - x:xend, y:yend - ].set(comb_tile.uncertainty.array) + # Handle immutable arrays with array_api_extra + ccd.uncertainty.array = xpx.at(ccd.uncertainty.array)[ + x:xend, y:yend + ].set(comb_tile.uncertainty.array) # Free up memory to try to stay under user's limit del comb_tile del tile_combiner From b98f8f59c1cab08ca7d90276ba8e7cc8cfec0652 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:38:23 -0500 Subject: [PATCH 35/59] Change where bottleneck is tested on GitHub Actions --- .github/workflows/ci_tests.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 99d41a90..fd71ca45 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -33,18 +33,21 @@ jobs: # Test the oldest supported dependencies on the oldest supported Python tox_env: 'py311-test-oldestdeps' - - - name: 'ubuntu-py312-bottleneck' + # Do not include bottleneck in this coverage test.
By not including + # it we get a better measure of how well we are covered when using the + # array API, which bottleneck short-circuits. + - name: 'ubuntu-py312-coverage' os: ubuntu-latest python: '3.12' - tox_env: 'py312-test-alldeps-bottleneck-cov' + tox_env: 'py312-test-alldeps-cov' - name: 'ubuntu-py311' os: ubuntu-latest python: '3.11' tox_env: 'py311-test-alldeps-numpy124' - - name: 'ubuntu-py312' + # Move the bottleneck test to a run without coverage + - name: 'ubuntu-py312-bottleneck' os: ubuntu-latest python: '3.12' tox_env: 'py312-test-alldeps-numpy126' From 6ce2309228ec66d3010ced8e8c0351e3b09e0fc2 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:38:34 -0500 Subject: [PATCH 36/59] Skip coverage of one function --- ccdproc/core.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 2c7176a5..daaf7be5 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -91,7 +91,9 @@ def _is_array(arr): return True -def _percentile_fallback(array, percentiles): +# Ideally this would eventually be covered by tests. Looks like Sparse +# could be used to test this, since it has no percentile... +def _percentile_fallback(array, percentiles): # pragma: no cover """ Try calculating percentile using namespace, otherwise fall back to an implementation that uses sort. As of the 2023 version of the array API From b66abbc3078b407de1c67292e7652b1878f9a798 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:59:03 -0500 Subject: [PATCH 37/59] Remove unused argument and logic --- ccdproc/combiner.py | 7 ++----- ccdproc/tests/run_for_memory_profile.py | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 9b77315c..4f24ba59 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -763,13 +763,10 @@ def _calculate_step_sizes(x_size, y_size, num_chunks): return xstep, ystep -def _calculate_size_of_image(ccd, combine_uncertainty_function): +def _calculate_size_of_image(ccd): # If uncertainty_func is given for combine this will create an uncertainty # even if the originals did not have one. In that case we need to create # an empty placeholder.
- xp = array_api_compat.array_namespace(ccd.data) - if ccd.uncertainty is None and combine_uncertainty_function is not None: - ccd.uncertainty = StdDevUncertainty(xp.zeros(ccd.data.shape)) size_of_an_img = ccd.data.nbytes try: @@ -993,7 +990,7 @@ def combine( if ccd.mask is None: ccd.mask = xp.zeros_like(ccd.data, dtype=bool) - size_of_an_img = _calculate_size_of_image(ccd, combine_uncertainty_function) + size_of_an_img = _calculate_size_of_image(ccd) no_of_img = len(img_list) diff --git a/ccdproc/tests/run_for_memory_profile.py b/ccdproc/tests/run_for_memory_profile.py index d9b445a2..50e39532 100644 --- a/ccdproc/tests/run_for_memory_profile.py +++ b/ccdproc/tests/run_for_memory_profile.py @@ -109,7 +109,7 @@ def run_memory_profile( ) ccd = CCDData.read(files[0]) - expected_img_size = _calculate_size_of_image(ccd, None) + expected_img_size = _calculate_size_of_image(ccd) if memory_limit: kwargs["mem_limit"] = memory_limit From e67ac8bc27dc257b035a6a4d5adcc2f20037f8ca Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 13:04:42 -0500 Subject: [PATCH 38/59] Add a test --- ccdproc/tests/test_combiner.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 7c9ecae2..5c2bbb06 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -495,8 +495,13 @@ def test_combine_image_file_collection_input(tmp_path): comb_ccds = combine(ifc.ccds(), method="average") + comb_string = combine( + ",".join(ifc.files_filtered(include_path=True)), method="average" + ) + np.testing.assert_allclose(ccd.data, comb_files.data) np.testing.assert_allclose(ccd.data, comb_ccds.data) + np.testing.assert_allclose(ccd.data, comb_string.data) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the From cdb9c45c4a1f3ca81c973b50a445b1649443b6be Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 18 Jun 2025 16:42:52 -0500 Subject: [PATCH 39/59] Use tox environment to handle testing of different array libraries --- .github/workflows/ci_tests.yml | 7 +- ccdproc/conftest.py | 20 ++++ ccdproc/tests/pytest_fixtures.py | 7 +- ccdproc/tests/test_ccdproc.py | 152 +++++++++++++++---------------- ccdproc/tests/test_memory_use.py | 16 ++-- tox.ini | 6 +- 6 files changed, 117 insertions(+), 91 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index fd71ca45..ea5b783a 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -41,10 +41,11 @@ jobs: python: '3.12' tox_env: 'py312-test-alldeps-cov' - - name: 'ubuntu-py311' + # Run at least one non-numpy array library + - name: 'ubuntu-py313-jax' os: ubuntu-latest - python: '3.11' - tox_env: 'py311-test-alldeps-numpy124' + python: '3.13' + tox_env: 'py313-jax' # Move bottleneck test a test without coverage - name: 'ubuntu-py312-bottleneck' diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py index 38a3277b..cf8af327 100644 --- a/ccdproc/conftest.py +++ b/ccdproc/conftest.py @@ -3,6 +3,7 @@ # this contains imports plugins that configure py.test for astropy tests. # by importing them here in conftest.py they are discoverable by py.test # no matter how it is invoked within the source tree. 
+import os try: # When the pytest_astropy_header package is installed @@ -33,3 +34,22 @@ def pytest_configure(config): PYTEST_HEADER_MODULES["astroscrappy"] = "astroscrappy" PYTEST_HEADER_MODULES["reproject"] = "reproject" PYTEST_HEADER_MODULES.pop("h5py", None) + +# Set up the array library to be used in tests +# What happens here is controlled by an environmental variable +array_library = os.environ.get("CCDPROC_ARRAY_LIBRARY", "numpy").lower() + +match array_library: + case "numpy": + import numpy as testing_array_library # noqa: F401 + + case "jax": + import jax.numpy as testing_array_library # noqa: F401 + + PYTEST_HEADER_MODULES["jax"] = "jax" + + case _: + raise ValueError( + f"Unsupported array library: {array_library}. " + "Supported libraries are 'numpy' and 'jax'." + ) diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index 0c27a94a..9b56290a 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -2,8 +2,6 @@ from shutil import rmtree -# import dask.array as da -import jax.numpy as jnp import numpy as np import pytest from astropy import units as u @@ -51,6 +49,9 @@ def ccd_data( The mean can be changed with the marker @pytest.marker.scale(m) on the test function, where m is the desired mean. """ + # Need the import here to avoid circular import issues + from ..conftest import testing_array_library as array_library + size = data_size scale = data_scale mean = data_mean @@ -61,7 +62,7 @@ def ccd_data( data = rng.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {"my_key": 42, "your_key": "not 42"} - ccd = CCDData(jnp.array(data), unit=u.adu) + ccd = CCDData(array_library.array(data), unit=u.adu) ccd.header = fake_meta return ccd diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index ab4ebc60..f987150a 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -5,7 +5,6 @@ # import array_api_compat import astropy import astropy.units as u -import jax.numpy as np import pytest import skimage from astropy.io import fits @@ -18,6 +17,7 @@ from numpy import random as np_random from numpy import testing as np_testing +from ccdproc.conftest import testing_array_library as xp from ccdproc.core import ( Keyword, ccd_process, @@ -42,7 +42,7 @@ except ImportError: HAS_BLOCK_X_FUNCS = False -_NUMPY_COPY_IF_NEEDED = None # False if np.__version__.startswith("1.") else None +_NUMPY_COPY_IF_NEEDED = None # False if xp.__version__.startswith("1.") else None RNG = np_random.default_rng @@ -80,11 +80,11 @@ def test_create_deviation(u_image, u_gain, u_readnoise, expect_success): ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise) assert ccd_var.uncertainty.array.shape == (10, 10) assert ccd_var.uncertainty.array.size == 100 - assert np.isdtype(ccd_var.uncertainty.array.dtype, "real floating") + assert xp.isdtype(ccd_var.uncertainty.array.dtype, "real floating") if gain is not None: - expected_var = np.sqrt(2 * ccd_data.data + 5**2) / 2 + expected_var = xp.sqrt(2 * ccd_data.data + 5**2) / 2 else: - expected_var = np.sqrt(ccd_data.data + 5**2) + expected_var = xp.sqrt(ccd_data.data + 5**2) np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) assert ccd_var.unit == ccd_data.unit # Uncertainty should *not* have any units -- does it? 
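A minimal sketch (illustrative, not part of the diff above) of how a test module picks up the array namespace selected in conftest.py; CCDPROC_ARRAY_LIBRARY has to be set before pytest imports ccdproc, and the test body below is hypothetical:

import os

# Select the backend before anything imports ccdproc.conftest;
# "numpy" is the default when the variable is unset.
os.environ.setdefault("CCDPROC_ARRAY_LIBRARY", "jax")

from ccdproc.conftest import testing_array_library as xp

def test_namespace_roundtrip_sketch():
    # The same test source runs against numpy or jax.numpy.
    data = xp.ones((4, 4))
    assert float(data.sum()) == 16.0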
@@ -103,7 +103,7 @@ def test_create_deviation_from_negative(): ccd_data, gain=None, readnoise=readnoise, disregard_nan=False ) np_testing.assert_array_equal( - ccd_data.data < 0, np.isnan(ccd_var.uncertainty.array) + ccd_data.data < 0, xp.isnan(ccd_var.uncertainty.array) ) @@ -119,7 +119,7 @@ def test_create_deviation_from_negative_2(): # In-place replacement of values does not work in some array # libraries. ccd_data.data = ccd_data.data * ~mask - expected_var = np.sqrt(ccd_data.data + readnoise.value**2) + expected_var = xp.sqrt(ccd_data.data + readnoise.value**2) np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) @@ -178,7 +178,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): science_data = science_data + oscan + sky # Reconstruct the full image - ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) + ccd_data.data = xp.concat([overscan_data, science_data], axis=overscan_axis) # Test once using the overscan argument to specify the overscan region ccd_data_overscan = subtract_overscan( ccd_data, @@ -280,7 +280,7 @@ def test_subtract_overscan_model(transpose): oscan_region = (slice(None), slice(0, 10)) science_region = (slice(None), slice(10, None)) - yscan, xscan = np.mgrid[0:size, 0:size] / 10.0 + 300.0 + yscan, xscan = xp.mgrid[0:size, 0:size] / 10.0 + 300.0 if transpose: oscan_region = oscan_region[::-1] @@ -298,7 +298,7 @@ def test_subtract_overscan_model(transpose): # image, so we need to do this before we add the new overscan. overscan_data = 0 * ccd_data.data[oscan_region].copy() # Reconstruct the full image - ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) + scan + ccd_data.data = xp.concat([overscan_data, science_data], axis=overscan_axis) + scan ccd_data = subtract_overscan( ccd_data, @@ -328,10 +328,10 @@ def test_subtract_overscan_fails(): ccd_data = ccd_data_func() # Do we get an error if the *image* is neither CCDData nor an array? with pytest.raises(TypeError): - subtract_overscan(3, np.zeros((5, 5))) + subtract_overscan(3, xp.zeros((5, 5))) # Do we get an error if the *overscan* is not an image or an array? with pytest.raises(TypeError): - subtract_overscan(np.zeros((10, 10)), 3, median=False, model=None) + subtract_overscan(xp.zeros((10, 10)), 3, median=False, model=None) # Do we get an error if we specify both overscan and fits_section? with pytest.raises(TypeError): subtract_overscan(ccd_data, overscan=ccd_data[0:10], fits_section="[1:10]") @@ -343,7 +343,7 @@ def test_subtract_overscan_fails(): subtract_overscan(ccd_data, fits_section=5) # Do we raise an error if the input is a plain array? 
with pytest.raises(TypeError): - subtract_overscan(np.zeros((10, 10)), fits_section="[1:10]") + subtract_overscan(xp.zeros((10, 10)), fits_section="[1:10]") def test_trim_image_fits_section_requires_string(): @@ -356,7 +356,7 @@ def test_trim_image_fits_section_requires_string(): def test_trim_image_fits_section(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = np.zeros_like(ccd_data) + ccd_data.mask = xp.zeros_like(ccd_data) if uncertainty: err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) @@ -382,8 +382,8 @@ def test_trim_with_wcs_alters_wcs(): ccd_data = ccd_data_func() # WCS construction example pulled form astropy.wcs docs wcs = WCS(naxis=2) - wcs.wcs.crpix = np.array(ccd_data.shape) / 2 - wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) + wcs.wcs.crpix = xp.array(ccd_data.shape) / 2 + wcs.wcs.cdelt = xp.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) @@ -401,7 +401,7 @@ def test_subtract_bias(): bias_level = 5.0 ccd_data.data = ccd_data.data + bias_level ccd_data.header["key"] = "value" - master_bias_array = np.zeros_like(ccd_data.data) + bias_level + master_bias_array = xp.zeros_like(ccd_data.data) + bias_level master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? @@ -416,11 +416,11 @@ def test_subtract_bias(): def test_subtract_bias_fails(): ccd_data = ccd_data_func(data_size=50) # Should fail if shapes don't match - bias = CCDData(np.array([200, 200]), unit=u.adu) + bias = CCDData(xp.array([200, 200]), unit=u.adu) with pytest.raises(ValueError): subtract_bias(ccd_data, bias) # Should fail because units don't match - bias = CCDData(np.zeros_like(ccd_data), unit=u.meter) + bias = CCDData(xp.zeros_like(ccd_data), unit=u.meter) with pytest.raises(u.UnitsError): subtract_bias(ccd_data, bias) @@ -434,7 +434,7 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): exptime_key = "exposure" exposure_unit = u.second dark_level = 1.7 - master_dark_data = np.zeros_like(ccd_data.data) + dark_level + master_dark_data = xp.zeros_like(ccd_data.data) + dark_level master_dark = CCDData(master_dark_data, unit=u.adu) master_dark.header[exptime_key] = 2 * exptime dark_exptime = master_dark.header[exptime_key] @@ -531,7 +531,7 @@ def test_subtract_dark_fails(): # Fail when the arrays are not the same size with pytest.raises(ValueError): small_master = CCDData(ccd_data) - small_master.data = np.zeros((1, 1)) + small_master.data = xp.zeros((1, 1)) subtract_dark(ccd_data, small_master) @@ -669,7 +669,7 @@ def test_flat_correct_deviation(): ccd_data.unit = u.electron ccd_data = create_deviation(ccd_data, readnoise=5 * u.electron) # Create the flat - data = 2 * np.ones((size, size)) + data = 2 * xp.ones((size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat = create_deviation(flat, readnoise=0.5 * u.electron) ccd_data = flat_correct(ccd_data, flat) @@ -681,10 +681,10 @@ def test_flat_correct_data_uncertainty(): # Temporarily work around the fact that NDUncertainty explicitly checks # whether the value is a numpy array. dat = CCDData( - np.ones([100, 100]), unit="adu", uncertainty=np_array(np.ones([100, 100])) + xp.ones([100, 100]), unit="adu", uncertainty=np_array(xp.ones([100, 100])) ) # Note flat is set to 10, error, if present, is set to one. 
- flat = CCDData(10 * np.ones([100, 100]), unit="adu") + flat = CCDData(10 * xp.ones([100, 100]), unit="adu") res = flat_correct(dat, flat) assert (res.data == dat.data).all() assert (res.uncertainty.array == dat.uncertainty.array).all() @@ -742,7 +742,7 @@ def tran(arr): def test_transform_image(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = np.zeros_like(ccd_data) + ccd_data.mask = xp.zeros_like(ccd_data) ccd_data.mask[10, 10] = 1 if uncertainty: err = RNG().normal(size=ccd_data.shape) @@ -767,23 +767,23 @@ def tran(arr): # Test block_reduce and block_replicate wrapper @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") @pytest.mark.skipif( - (skimage.__version__ < "0.14.2") and ("dev" in np.__version__), + (skimage.__version__ < "0.14.2") and ("dev" in xp.__version__), reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_reduce(): ccd = CCDData( - np.ones((4, 4)), + xp.ones((4, 4)), unit="adu", meta={"testkw": 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4))), + mask=xp.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(xp.ones((4, 4))), ) with pytest.warns(AstropyUserWarning) as w: ccd_summed = block_reduce(ccd, (2, 2)) assert len(w) == 1 assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_summed, CCDData) - assert np.all(ccd_summed.data == 4) + assert xp.all(ccd_summed.data == 4) assert ccd_summed.data.shape == (2, 2) assert ccd_summed.unit == u.adu # Other attributes are set to None. In case the function is modified to @@ -799,11 +799,11 @@ def test_block_reduce(): @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") @pytest.mark.skipif( - (skimage.__version__ < "0.14.2") and ("dev" in np.__version__), + (skimage.__version__ < "0.14.2") and ("dev" in xp.__version__), reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_average(): - data = np.array( + data = xp.array( [ [2.0, 1.0, 2.0, 1.0], [1.0, 1.0, 1.0, 1.0], @@ -815,8 +815,8 @@ def test_block_average(): data, unit="adu", meta={"testkw": 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4))), + mask=xp.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(xp.ones((4, 4))), ) with pytest.warns(AstropyUserWarning) as w: @@ -825,7 +825,7 @@ def test_block_average(): assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_avgd, CCDData) - assert np.all(ccd_avgd.data == 1.25) + assert xp.all(ccd_avgd.data == 1.25) assert ccd_avgd.data.shape == (2, 2) assert ccd_avgd.unit == u.adu # Other attributes are set to None. 
In case the function is modified to @@ -843,11 +843,11 @@ def test_block_average(): @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") def test_block_replicate(): ccd = CCDData( - np.ones((4, 4)), + xp.ones((4, 4)), unit="adu", meta={"testkw": 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4))), + mask=xp.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(xp.ones((4, 4))), ) with pytest.warns(AstropyUserWarning) as w: ccd_repl = block_replicate(ccd, (2, 2)) @@ -855,7 +855,7 @@ def test_block_replicate(): assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_repl, CCDData) - assert np.all(ccd_repl.data == 0.25) + assert xp.all(ccd_repl.data == 0.25) assert ccd_repl.data.shape == (8, 8) assert ccd_repl.unit == u.adu # Other attributes are set to None. In case the function is modified to @@ -875,7 +875,7 @@ def test__overscan_schange(): ccd_data = ccd_data_func() old_data = ccd_data.copy() new_data = subtract_overscan(ccd_data, overscan=ccd_data[:, 1], overscan_axis=0) - assert not np.allclose(old_data.data, new_data.data) + assert not xp.allclose(old_data.data, new_data.data) np_testing.assert_allclose(old_data.data, ccd_data.data) @@ -892,7 +892,7 @@ def test_create_deviation_does_not_change_input(): def test_cosmicray_median_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - error = np.zeros_like(ccd_data) + error = xp.ones_like(ccd_data) _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -909,7 +909,7 @@ def test_cosmicray_lacosmic_does_not_change_input(): def test_flat_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) + flat = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit) # Ignore the divide by zero warning that is raised when the flat is zero. 
with warnings.catch_warnings(action="ignore", category=RuntimeWarning): _ = flat_correct(ccd_data, flat=flat) @@ -928,7 +928,7 @@ def test_gain_correct_does_not_change_input(): def test_subtract_bias_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - master_frame = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) + master_frame = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit) _ = subtract_bias(ccd_data, master=master_frame) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -945,7 +945,7 @@ def test_trim_image_does_not_change_input(): def test_transform_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - _ = transform_image(ccd_data, np.sqrt) + _ = transform_image(ccd_data, xp.sqrt) np_testing.assert_allclose(original.data, ccd_data) assert original.unit == ccd_data.unit @@ -961,7 +961,7 @@ def wcs_for_testing(shape): # Set up an "Airy's zenithal" projection # Vector properties may be set with Python lists, or Numpy arrays w.wcs.crpix = [shape[0] // 2, shape[1] // 2] - w.wcs.cdelt = np.array([-0.066667, 0.066667]) + w.wcs.cdelt = xp.array([-0.066667, 0.066667]) w.wcs.crval = [0, -90] w.wcs.ctype = ["RA---AIR", "DEC--AIR"] w.wcs.set_pv([(2, 1, 45.0)]) @@ -1033,7 +1033,7 @@ def test_wcs_project_onto_shifted_wcs(): # In the case of a shift, one row and one column should be nan, and they # will share one common nan where they intersect, so we know how many nan # there should be. - assert np.sum(np.isnan(new_ccd.data)) == np.sum(np.array(new_ccd.shape)) - 1 + assert xp.sum(xp.isnan(new_ccd.data)) == xp.sum(xp.array(new_ccd.shape)) - 1 # Use an odd number of pixels to make a well-defined center pixel @@ -1048,10 +1048,10 @@ def test_wcs_project_onto_scale_wcs(): ccd_data.wcs.wcs.crpix += 0.5 # Use uniform input data value for simplicity. - ccd_data.data = np.ones_like(ccd_data.data) + ccd_data.data = xp.ones_like(ccd_data.data) # Make mask zero... - ccd_data.mask = np.zeros_like(ccd_data.data) + ccd_data.mask = xp.zeros_like(ccd_data.data) # ...except the center pixel, which is one. ccd_data.mask[int(ccd_data.wcs.wcs.crpix[0]), int(ccd_data.wcs.wcs.crpix[1])] = 1 @@ -1059,7 +1059,7 @@ def test_wcs_project_onto_scale_wcs(): target_wcs.wcs.cdelt /= 2 # Choice below ensures we are really at the center pixel of an odd range. - target_shape = 2 * np.array(ccd_data.shape) + 1 + target_shape = 2 * xp.array(ccd_data.shape) + 1 target_wcs.wcs.crpix = 2 * target_wcs.wcs.crpix + 1 + 0.5 # Ugly hack for numpy-specific check in astropy.wcs @@ -1074,8 +1074,8 @@ def test_wcs_project_onto_scale_wcs(): assert new_ccd.wcs.wcs.compare(target_wcs.wcs) # Define a cutout from the new array that should match the old. - new_lower_bound = (np.array(new_ccd.shape) - np.array(ccd_data.shape)) // 2 - new_upper_bound = (np.array(new_ccd.shape) + np.array(ccd_data.shape)) // 2 + new_lower_bound = (xp.array(new_ccd.shape) - xp.array(ccd_data.shape)) // 2 + new_upper_bound = (xp.array(new_ccd.shape) + xp.array(ccd_data.shape)) // 2 data_cutout = new_ccd.data[ new_lower_bound[0] : new_upper_bound[0], new_lower_bound[1] : new_upper_bound[1] ] @@ -1086,8 +1086,8 @@ def test_wcs_project_onto_scale_wcs(): # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. 
- new_center = np.array(new_ccd.wcs.wcs.crpix, dtype=int, copy=_NUMPY_COPY_IF_NEEDED) - assert np.all( + new_center = xp.array(new_ccd.wcs.wcs.crpix, dtype=int, copy=_NUMPY_COPY_IF_NEEDED) + assert xp.all( new_ccd.mask[ new_center[0] : new_center[0] + 2, new_center[1] : new_center[1] + 2 ] @@ -1096,7 +1096,7 @@ def test_wcs_project_onto_scale_wcs(): # Those four, and any that reproject made nan because they draw on # pixels outside the footprint of the original image, are the only # pixels that should be masked. - assert new_ccd.mask.sum() == 4 + np.isnan(new_ccd.data).sum() + assert new_ccd.mask.sum() == 4 + xp.isnan(new_ccd.data).sum() def test_ccd_process_does_not_change_input(): @@ -1141,22 +1141,22 @@ def test_ccd_process_parameters_are_appropriate(): def test_ccd_process(): # Test the through ccd_process - ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) + ccd_data = CCDData(10.0 * xp.ones((100, 100)), unit=u.adu) # Rewrite to not change data in-place. - ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) + ccd_data.data = xp.concat([ccd_data.data[:, :-10], 2 * xp.ones((100, 10))], axis=1) ccd_data.meta["testkw"] = 100 - mask = np.zeros((100, 90)) + mask = xp.zeros((100, 90)) - masterbias = CCDData(2.0 * np.ones((100, 90)), unit=u.electron) - masterbias.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterbias = CCDData(2.0 * xp.ones((100, 90)), unit=u.electron) + masterbias.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - dark_frame = CCDData(0.0 * np.ones((100, 90)), unit=u.electron) - dark_frame.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + dark_frame = CCDData(0.0 * xp.ones((100, 90)), unit=u.electron) + dark_frame.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - masterflat = CCDData(10.0 * np.ones((100, 90)), unit=u.electron) - masterflat.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterflat = CCDData(10.0 * xp.ones((100, 90)), unit=u.electron) + masterflat.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) occd = ccd_process( ccd_data, @@ -1178,8 +1178,8 @@ def test_ccd_process(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_allclose(2.0 * xp.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array) np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 @@ -1188,22 +1188,22 @@ def test_ccd_process(): def test_ccd_process_gain_corrected(): # Test the through ccd_process with gain_corrected as False - ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) + ccd_data = CCDData(10.0 * xp.ones((100, 100)), unit=u.adu) # Rewrite to not change data in-place. 
- ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) + ccd_data.data = xp.concat([ccd_data.data[:, :-10], 2 * xp.ones((100, 10))], axis=1) ccd_data.meta["testkw"] = 100 - mask = np.zeros((100, 90)) + mask = xp.zeros((100, 90)) - masterbias = CCDData(4.0 * np.ones((100, 90)), unit=u.adu) - masterbias.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterbias = CCDData(4.0 * xp.ones((100, 90)), unit=u.adu) + masterbias.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - dark_frame = CCDData(0.0 * np.ones((100, 90)), unit=u.adu) - dark_frame.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + dark_frame = CCDData(0.0 * xp.ones((100, 90)), unit=u.adu) + dark_frame.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - masterflat = CCDData(5.0 * np.ones((100, 90)), unit=u.adu) - masterflat.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterflat = CCDData(5.0 * xp.ones((100, 90)), unit=u.adu) + masterflat.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) occd = ccd_process( ccd_data, @@ -1226,8 +1226,8 @@ def test_ccd_process_gain_corrected(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_allclose(2.0 * xp.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array) np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 diff --git a/ccdproc/tests/test_memory_use.py b/ccdproc/tests/test_memory_use.py index 173e0f96..1e56b709 100644 --- a/ccdproc/tests/test_memory_use.py +++ b/ccdproc/tests/test_memory_use.py @@ -15,12 +15,12 @@ else: memory_profile_present = True -try: - import jax # noqa -except ImportError: - JAX_PRESENT = False -else: - JAX_PRESENT = True + +# Only run memory tests if numpy is the testing array library +from ccdproc.conftest import testing_array_library + +USING_NUMPY_ARRAY_LIBRARY = testing_array_library.__name__ == "numpy" + image_size = 2000 # Square image, so 4000 x 4000 num_files = 10 @@ -37,7 +37,9 @@ def teardown_module(): fil.unlink() -@pytest.mark.skipif(JAX_PRESENT, reason="JAX is present, and does not allow os.fork") +@pytest.mark.skipif( + not USING_NUMPY_ARRAY_LIBRARY, reason="Memory test only done with numpy" +) @pytest.mark.skipif( not platform.startswith("linux"), reason="memory tests only work on linux", diff --git a/tox.ini b/tox.ini index 0ab4ee81..3a859ca5 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,8 @@ isolated_build = true [testenv] setenv = - test: JAX_ENABLE_X64 = True + jax: CCDPROC_ARRAY_LIBRARY = jax + jax: JAX_ENABLE_X64 = True devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple extras = test @@ -23,11 +24,12 @@ description = devdeps: with the latest developer version of key dependencies oldestdeps: with the oldest supported version of key dependencies cov: and test coverage - numpy124: with numpy 1.24.* numpy126: with numpy 1.26.* numpy200: with numpy 2.0.* numpy210: with numpy 2.1.* bottleneck: with bottleneck + jax: with JAX as the array library + # The following provides some specific pinnings for key packages deps = From ef302301aa6bfb3d62a99d9b0e72ef9eece6bffd Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 18 Jun 2025 16:43:54 -0500 Subject: [PATCH 40/59] Convert combiner tests to use Array API --- 
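A sketch of the recipe every conversion below follows (illustrative only, not applied by this patch): take xp from ccdproc.conftest so one test source runs against numpy or JAX, and replace in-place item assignment with array_api_extra's functional update, which works whether or not the backing array is mutable:

import array_api_extra as xpx

from ccdproc.conftest import testing_array_library as xp

data = xp.zeros((10, 10))
# Returns the updated array rather than writing in place, so it is
# safe for immutable backends such as jax.numpy.
data = xpx.at(data)[5, 5].set(1.0)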
ccdproc/combiner.py | 4 +- ccdproc/tests/test_combiner.py | 427 +++++++++++++++++---------------- 2 files changed, 219 insertions(+), 212 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 4f24ba59..0f855965 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -330,7 +330,7 @@ def clip_extrema(self, nlow=0, nhigh=0): for i in range(-1 * nhigh, nlow): # create a tuple with the indices where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg]) - self.data_arr_mask[where] = True + self.data_arr_mask = xpx.at(self.data_arr_mask)[where].set(True) # set up min/max clipping algorithms def minmax_clipping(self, min_clip=None, max_clip=None): @@ -1107,7 +1107,7 @@ def combine( # Handle immutable arrays with array_api_extra ccd.uncertainty.array = xpx.at(ccd.uncertainty.array)[ x:xend, y:yend - ].set(comb_tile.uncertainty.array) + ].set(xp.astype(comb_tile.uncertainty.array, ccd.dtype)) # Free up memory to try to stay under user's limit del comb_tile del tile_combiner diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 5c2bbb06..6dd5d71f 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -1,14 +1,13 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat import array_api_extra as xpx -import astropy import astropy.units as u -import numpy as np +import numpy.ma as np_ma import pytest from astropy.nddata import CCDData from astropy.stats import median_absolute_deviation as mad from astropy.utils.data import get_pkg_data_filename -from packaging.version import Version, parse +from numpy import testing as np_testing from ccdproc.combiner import ( Combiner, @@ -17,11 +16,12 @@ combine, sigma_func, ) + +# Set up the array library to be used in tests +from ccdproc.conftest import testing_array_library as xp from ccdproc.image_collection import ImageFileCollection from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func -SUPER_OLD_ASTROPY = parse(astropy.__version__) < Version("4.3.0") - # Several tests have many more NaNs in them than real data. numpy generates # lots of warnings in those cases and it makes more sense to suppress them # than to generate them. 
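For context, a short sketch of the scaling hook the tests below exercise. Combiner.scaling accepts either a callable applied to each image or an explicit list of per-image factors; the standalone helper here mirrors the _make_mean_scaler fixture touched in the next hunk and is otherwise hypothetical:

import numpy.ma as np_ma

def make_mean_scaler(reference_ccd):
    # Scale every frame to the mean of the reference frame.
    # numpy.ma.average is used because the combiner hands the
    # callable masked data.
    ref_mean = reference_ccd.data.mean()

    def scale_by_mean(x):
        return ref_mean / np_ma.average(x)

    return scale_by_mean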
@@ -33,7 +33,7 @@ def _make_mean_scaler(ccd_data): def scale_by_mean(x): # scale each array to the mean of the first image - return ccd_data.data.mean() / np.ma.average(x) + return ccd_data.data.mean() / np_ma.average(x) return scale_by_mean @@ -63,7 +63,7 @@ def test_ccddata_combiner_objects(): # objects do not have the same size def test_ccddata_combiner_size(): ccd_data = ccd_data_func() - ccd_large = CCDData(np.zeros((200, 100)), unit=u.adu) + ccd_large = CCDData(xp.zeros((200, 100)), unit=u.adu) ccd_list = [ccd_data, ccd_data, ccd_large] with pytest.raises(TypeError): Combiner(ccd_list) # arrays of different sizes should fail @@ -73,7 +73,7 @@ def test_ccddata_combiner_size(): # objects do not have the same units def test_ccddata_combiner_units(): ccd_data = ccd_data_func() - ccd_large = CCDData(np.zeros((100, 100)), unit=u.second) + ccd_large = CCDData(xp.zeros((100, 100)), unit=u.second) ccd_list = [ccd_data, ccd_data, ccd_large] with pytest.raises(TypeError): Combiner(ccd_list) @@ -92,8 +92,8 @@ def test_combiner_create(): def test_combiner_dtype(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] - c = Combiner(ccd_list, dtype=np.float32) - assert c.data_arr.dtype == np.float32 + c = Combiner(ccd_list, dtype=xp.float32) + assert c.data_arr.dtype == xp.float32 avg = c.average_combine() # dtype of average should match input dtype assert avg.dtype == c.dtype @@ -107,8 +107,8 @@ def test_combiner_dtype(): # test mask is created from ccd.data def test_combiner_mask(): - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -136,40 +136,40 @@ def test_weights_shape(): def test_1Dweights(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] - c = Combiner(ccd_list) - c.weights = np.array([1, 5, 10]) - ccd = c.average_combine() - np.testing.assert_almost_equal(ccd.data, 312.5) + combo = Combiner(ccd_list) + combo.weights = xp.array([1, 5, 10]) + ccd = combo.average_combine() + np_testing.assert_allclose(ccd.data, 312.5) with pytest.raises(ValueError): - c.weights = np.array([1, 5, 10, 20]) + combo.weights = xp.array([1, 5, 10, 20]) def test_pixelwise_weights(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] - c = Combiner(ccd_list) - c.weights = np.ones_like(c.data_arr) - c.weights[:, 5, 5] = [1, 5, 10] - ccd = c.average_combine() - np.testing.assert_almost_equal(ccd.data[5, 5], 312.5) - np.testing.assert_almost_equal(ccd.data[0, 0], 0) + combo = Combiner(ccd_list) + combo.weights = xp.ones_like(combo.data_arr) + combo.weights = xpx.at(combo.weights)[:, 5, 5].set(xp.array([1, 5, 10])) + ccd = combo.average_combine() + np_testing.assert_allclose(ccd.data[5, 5], 312.5) + np_testing.assert_allclose(ccd.data[0, 0], 0) # test the min-max rejection def test_combiner_minmax(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + 
CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) @@ -180,9 +180,9 @@ def test_combiner_minmax(): def test_combiner_minmax_max(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) @@ -192,9 +192,9 @@ def test_combiner_minmax_max(): def test_combiner_minmax_min(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) @@ -204,54 +204,54 @@ def test_combiner_minmax_min(): def test_combiner_sigmaclip_high(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) # using mad for more robust statistics vs. std - c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) + c.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) assert c.data_arr_mask[5].all() def test_combiner_sigmaclip_single_pix(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), ] - c = Combiner(ccd_list) + combo = Combiner(ccd_list) # add a single pixel in another array to check that # that one gets rejected - c.data_arr[0, 5, 5] = 0 - c.data_arr[1, 5, 5] = -5 - c.data_arr[2, 5, 5] = 5 - c.data_arr[3, 5, 5] = -5 - c.data_arr[4, 5, 5] = 25 - c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) - assert c.data_arr_mask[4, 5, 5] + combo.data_arr = xpx.at(combo.data_arr)[0, 5, 5].set(0) + combo.data_arr = xpx.at(combo.data_arr)[1, 5, 5].set(-5) + combo.data_arr = xpx.at(combo.data_arr)[2, 5, 5].set(5) + combo.data_arr = xpx.at(combo.data_arr)[3, 5, 5].set(-5) + combo.data_arr = xpx.at(combo.data_arr)[4, 5, 5].set(25) + combo.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) + assert combo.data_arr_mask[4, 5, 5] def test_combiner_sigmaclip_low(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) 
- 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), ] c = Combiner(ccd_list) # using mad for more robust statistics vs. std - c.sigma_clipping(high_thresh=None, low_thresh=3, func=np.ma.median, dev_func=mad) + c.sigma_clipping(high_thresh=None, low_thresh=3, func="median", dev_func=mad) assert c.data_arr_mask[5].all() @@ -296,10 +296,10 @@ def test_combiner_sum_weighted(): ccd_data = CCDData(data=[[0, 1], [2, 3]], unit="adu") ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - c.weights = np.array([1, 2, 3]) + c.weights = xp.array([1, 2, 3]) ccd = c.sum_combine() expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list, strict=True)) - np.testing.assert_almost_equal(ccd, expected_result) + np_testing.assert_allclose(ccd, expected_result) # test weighted sum @@ -309,10 +309,10 @@ def test_combiner_sum_weighted_by_pixel(): c = Combiner(ccd_list) # Weights below are chosen so that every entry in weights_pixel = [[8, 4], [2, 1]] - c.weights = np.array([weights_pixel] * 3) + c.weights = xp.array([weights_pixel] * 3) ccd = c.sum_combine() expected_result = [[24, 24], [24, 24]] - np.testing.assert_almost_equal(ccd, expected_result) + np_testing.assert_allclose(ccd, expected_result) # This warning is generated by numpy and is expected when @@ -323,8 +323,8 @@ def test_combiner_sum_weighted_by_pixel(): ) def test_combiner_mask_average(): # test data combined with mask is created correctly - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -353,16 +353,16 @@ def test_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np.testing.assert_almost_equal(np.median(median_ccd.data), np.median(ccd_data.data)) + np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data)) # Set the scaling manually... 
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape @@ -380,8 +380,8 @@ def test_combiner_scaling_fails(): # test data combined with mask is created correctly def test_combiner_mask_median(): - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -398,8 +398,8 @@ def test_combiner_mask_median(): @pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0:RuntimeWarning") def test_combiner_mask_sum(): # test data combined with mask is created correctly - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -431,7 +431,7 @@ def test_combine_average_fitsimages(): fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combine_numpyndarray(): @@ -446,10 +446,10 @@ def test_combine_numpyndarray(): c = Combiner(ccd_list) ccd_by_combiner = c.average_combine() - fitsfilename_list = np.array([fitsfile] * 3) + fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combiner_result_dtype(): @@ -457,18 +457,17 @@ def test_combiner_result_dtype(): The result should have the appropriate dtype not the dtype of the first input.""" - ccd = CCDData(np.ones((3, 3), dtype=np.uint16), unit="adu") + ccd = CCDData(xp.ones((3, 3), dtype=xp.uint16), unit="adu") res = combine([ccd, ccd.multiply(2)]) # The default dtype of Combiner is float64 - assert res.data.dtype == np.float64 - ref = np.ones((3, 3)) * 1.5 - np.testing.assert_allclose(res.data, ref) - + assert res.data.dtype == xp.float64 + ref = xp.ones((3, 3)) * 1.5 + np_testing.assert_allclose(res.data, ref) res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int) # The result dtype should be integer: - assert res.data.dtype == np.int_ - ref = np.ones((3, 3)) * 2 - np.testing.assert_allclose(res.data, ref) + assert res.data.dtype == xp.int_ + ref = xp.ones((3, 3)) * 2 + np_testing.assert_allclose(res.data, ref) def test_combiner_image_file_collection_input(tmp_path): @@ -479,7 +478,7 @@ def test_combiner_image_file_collection_input(tmp_path): ifc = ImageFileCollection(tmp_path) comb = Combiner(ifc.ccds()) - np.testing.assert_allclose(ccd.data, comb.average_combine().data) + np_testing.assert_allclose(ccd.data, comb.average_combine().data) def test_combine_image_file_collection_input(tmp_path): @@ -499,9 +498,9 @@ def test_combine_image_file_collection_input(tmp_path): ",".join(ifc.files_filtered(include_path=True)), method="average" ) - np.testing.assert_allclose(ccd.data, comb_files.data) - np.testing.assert_allclose(ccd.data, comb_ccds.data) - np.testing.assert_allclose(ccd.data, comb_string.data) + 
np_testing.assert_allclose(ccd.data, comb_files.data) + np_testing.assert_allclose(ccd.data, comb_ccds.data) + np_testing.assert_allclose(ccd.data, comb_string.data) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the @@ -519,7 +518,7 @@ def test_combine_average_ccddata(): avgccd = combine(ccd_list, output_file=None, method="average", unit=u.adu) # averaging same ccdData should give back same images - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -536,7 +535,7 @@ def test_combine_limitedmem_fitsimages(): fitsfilename_list, output_file=None, method="average", mem_limit=1e6, unit=u.adu ) # averaging same ccdData should give back same images - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -561,7 +560,7 @@ def test_combine_limitedmem_scale_fitsimages(): unit=u.adu, ) - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test the optional uncertainty function in average_combine @@ -569,14 +568,14 @@ def test_average_combine_uncertainty(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - ccd = c.average_combine(uncertainty_func=np.sum) - uncert_ref = np.sum(c.data_arr, 0) / np.sqrt(3) - np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + ccd = c.average_combine(uncertainty_func=xp.sum) + uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method="average", combine_uncertainty_function=np.sum) - np.testing.assert_allclose(ccd.data, ccd2.data) - np.testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + ccd2 = combine(ccd_list, method="average", combine_uncertainty_function=xp.sum) + np_testing.assert_allclose(ccd.data, ccd2.data) + np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) # test the optional uncertainty function in median_combine @@ -584,14 +583,14 @@ def test_median_combine_uncertainty(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - ccd = c.median_combine(uncertainty_func=np.sum) - uncert_ref = np.sum(c.data_arr, 0) / np.sqrt(3) - np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + ccd = c.median_combine(uncertainty_func=xp.sum) + uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method="median", combine_uncertainty_function=np.sum) - np.testing.assert_allclose(ccd.data, ccd2.data) - np.testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + ccd2 = combine(ccd_list, method="median", combine_uncertainty_function=xp.sum) + np_testing.assert_allclose(ccd.data, ccd2.data) + np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) # test the optional uncertainty function in sum_combine @@ -599,14 +598,14 @@ def test_sum_combine_uncertainty(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - ccd = c.sum_combine(uncertainty_func=np.sum) - uncert_ref = np.sum(c.data_arr, 0) * np.sqrt(3) - 
np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + ccd = c.sum_combine(uncertainty_func=xp.sum) + uncert_ref = xp.sum(c.data_arr, 0) * xp.sqrt(3) + np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=np.sum) - np.testing.assert_allclose(ccd.data, ccd2.data) - np.testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=xp.sum) + np_testing.assert_allclose(ccd.data, ccd2.data) + np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) # Ignore warnings generated because most values are masked @@ -649,7 +648,7 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): ccd_list, method=combine_method_name, minmax_clip=True, minmax_clip_min=-100 ) - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_comb.uncertainty.array, expected_result.uncertainty.array ) @@ -668,7 +667,7 @@ def test_combine_overwrite_output(tmp_path): """ output_file = tmp_path / "fake.fits" - ccd = CCDData(np.ones((3, 3)), unit="adu") + ccd = CCDData(xp.ones((3, 3)), unit="adu") # Make sure we have a file to overwrite ccd.write(output_file) @@ -685,88 +684,94 @@ def test_combine_overwrite_output(tmp_path): res_from_disk = CCDData.read(output_file) # Data should be the same - np.testing.assert_allclose(res.data, res_from_disk.data) + np_testing.assert_allclose(res.data, res_from_disk.data) # test resulting uncertainty is corrected for the number of images def test_combiner_uncertainty_average(): ccd_list = [ - CCDData(np.ones((10, 10)), unit=u.adu), - CCDData(np.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)), unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.average_combine() # Just the standard deviation of ccd data. - ref_uncertainty = np.ones((10, 10)) / 2 + ref_uncertainty = xp.ones((10, 10)) / 2 # Correction because we combined two images. - ref_uncertainty /= np.sqrt(2) - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty /= xp.sqrt(2) + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) def test_combiner_uncertainty_average_mask(): - mask = np.zeros((10, 10), dtype=np.bool_) - mask[5, 5] = True - ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) + mask = xp.zeros((10, 10), dtype=bool) + mask = xpx.at(mask)[5, 5].set(True) + ccd_with_mask = CCDData(xp.ones((10, 10)), unit=u.adu, mask=mask) ccd_list = [ ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)) * 3, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.average_combine() # Just the standard deviation of ccd data. - ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3]) + ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.array([1, 2, 3])) # Correction because we combined two images. 
- ref_uncertainty /= np.sqrt(3) - ref_uncertainty[5, 5] = np.std([2, 3]) / np.sqrt(2) - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty /= xp.sqrt(3) + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( + xp.std(xp.array([2, 3])) / xp.sqrt(2) + ) + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) def test_combiner_uncertainty_median_mask(): mad_to_sigma = 1.482602218505602 - mask = np.zeros((10, 10), dtype=np.bool_) - mask[5, 5] = True - ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) + mask = xp.zeros((10, 10), dtype=bool) + mask = xpx.at(mask)[5, 5].set(True) + ccd_with_mask = CCDData(xp.ones((10, 10)), unit=u.adu, mask=mask) ccd_list = [ ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)) * 3, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.median_combine() # Just the standard deviation of ccd data. - ref_uncertainty = np.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3]) + ref_uncertainty = xp.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3]) # Correction because we combined two images. - ref_uncertainty /= np.sqrt(3) # 0.855980789955 - ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2) # 0.524179041254 - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty /= xp.sqrt(3) # 0.855980789955 + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( + mad_to_sigma * mad([2, 3]) / xp.sqrt(2) + ) # 0.524179041254 + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) def test_combiner_uncertainty_sum_mask(): - mask = np.zeros((10, 10), dtype=np.bool_) - mask[5, 5] = True - ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) + mask = xp.zeros((10, 10), dtype=bool) + mask = xpx.at(mask)[5, 5].set(True) + ccd_with_mask = CCDData(xp.ones((10, 10)), unit=u.adu, mask=mask) ccd_list = [ ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)) * 3, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.sum_combine() # Just the standard deviation of ccd data. 
- ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3]) - ref_uncertainty *= np.sqrt(3) - ref_uncertainty[5, 5] = np.std([2, 3]) * np.sqrt(2) - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.array([1, 2, 3])) + ref_uncertainty *= xp.sqrt(3) + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( + xp.std(xp.array([2, 3])) * xp.sqrt(2) + ) + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) def test_combiner_3d(): - data1 = CCDData(3 * np.ones((5, 5, 5)), unit=u.adu) - data2 = CCDData(2 * np.ones((5, 5, 5)), unit=u.adu) - data3 = CCDData(4 * np.ones((5, 5, 5)), unit=u.adu) + data1 = CCDData(3 * xp.ones((5, 5, 5)), unit=u.adu) + data2 = CCDData(2 * xp.ones((5, 5, 5)), unit=u.adu) + data3 = CCDData(4 * xp.ones((5, 5, 5)), unit=u.adu) ccd_list = [data1, data2, data3] @@ -776,16 +781,16 @@ def test_combiner_3d(): ccd = c.average_combine() assert ccd.shape == (5, 5, 5) - np.testing.assert_allclose(ccd.data, data1) + np_testing.assert_allclose(ccd.data, data1) def test_3d_combiner_with_scaling(): ccd_data = ccd_data_func() # The factors below are not particularly important; just avoid anything # whose average is 1. - ccd_data = CCDData(np.ones((5, 5, 5)), unit=u.adu) - ccd_data_lower = CCDData(3 * np.ones((5, 5, 5)), unit=u.adu) - ccd_data_higher = CCDData(0.9 * np.ones((5, 5, 5)), unit=u.adu) + ccd_data = CCDData(xp.ones((5, 5, 5)), unit=u.adu) + ccd_data_lower = CCDData(3 * xp.ones((5, 5, 5)), unit=u.adu) + ccd_data_higher = CCDData(0.9 * xp.ones((5, 5, 5)), unit=u.adu) combiner = Combiner([ccd_data, ccd_data_higher, ccd_data_lower]) scale_by_mean = _make_mean_scaler(ccd_data) @@ -793,33 +798,33 @@ def test_3d_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np.testing.assert_almost_equal(np.median(median_ccd.data), np.median(ccd_data.data)) + np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data)) # Set the scaling manually... 
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape def test_clip_extrema_3d(): ccdlist = [ - CCDData(np.ones((3, 3, 3)) * 90.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 20.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 10.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 40.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 25.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 35.0, unit="adu"), ] c = Combiner(ccdlist) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() - expected = CCDData(np.ones((3, 3, 3)) * 30, unit="adu") - np.testing.assert_allclose(result, expected) + expected = CCDData(xp.ones((3, 3, 3)) * 30, unit="adu") + np_testing.assert_allclose(result, expected) @pytest.mark.parametrize( @@ -838,16 +843,16 @@ def test_writeable_after_combine(tmpdir, comb_func): def test_clip_extrema(): ccdlist = [ - CCDData(np.ones((3, 5)) * 90.0, unit="adu"), - CCDData(np.ones((3, 5)) * 20.0, unit="adu"), - CCDData(np.ones((3, 5)) * 10.0, unit="adu"), - CCDData(np.ones((3, 5)) * 40.0, unit="adu"), - CCDData(np.ones((3, 5)) * 25.0, unit="adu"), - CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 35.0, unit="adu"), ] - ccdlist[0].data[0, 1] = 3.1 - ccdlist[1].data[1, 2] = 100.1 - ccdlist[1].data[2, 0] = 100.1 + ccdlist[0].data = xpx.at(ccdlist[0].data)[0, 1].set(3.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[1, 2].set(100.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) c = Combiner(ccdlist) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() @@ -856,21 +861,21 @@ def test_clip_extrema(): [30.0, 30.0, 47.5, 30.0, 30.0], [47.5, 30.0, 30.0, 30.0, 30.0], ] - np.testing.assert_allclose(result, expected) + np_testing.assert_allclose(result, expected) def test_clip_extrema_via_combine(): ccdlist = [ - CCDData(np.ones((3, 5)) * 90.0, unit="adu"), - CCDData(np.ones((3, 5)) * 20.0, unit="adu"), - CCDData(np.ones((3, 5)) * 10.0, unit="adu"), - CCDData(np.ones((3, 5)) * 40.0, unit="adu"), - CCDData(np.ones((3, 5)) * 25.0, unit="adu"), - CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 35.0, unit="adu"), ] - ccdlist[0].data[0, 1] = 3.1 - ccdlist[1].data[1, 2] = 100.1 - ccdlist[1].data[2, 0] = 100.1 + ccdlist[0].data = xpx.at(ccdlist[0].data)[0, 1].set(3.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[1, 2].set(100.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) result = combine( ccdlist, clip_extrema=True, @@ -882,26 +887,26 @@ def test_clip_extrema_via_combine(): [30.0, 30.0, 47.5, 30.0, 30.0], [47.5, 30.0, 30.0, 30.0, 
30.0], ] - np.testing.assert_allclose(result, expected) + np_testing.assert_allclose(result, expected) def test_clip_extrema_with_other_rejection(): ccdlist = [ - CCDData(np.ones((3, 5)) * 90.0, unit="adu"), - CCDData(np.ones((3, 5)) * 20.0, unit="adu"), - CCDData(np.ones((3, 5)) * 10.0, unit="adu"), - CCDData(np.ones((3, 5)) * 40.0, unit="adu"), - CCDData(np.ones((3, 5)) * 25.0, unit="adu"), - CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 35.0, unit="adu"), ] - ccdlist[0].data[0, 1] = 3.1 - ccdlist[1].data[1, 2] = 100.1 - ccdlist[1].data[2, 0] = 100.1 + ccdlist[0].data = xpx.at(ccdlist[0].data)[0, 1].set(3.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[1, 2].set(100.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) c = Combiner(ccdlist) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask[1, 1, 2] = True + c.data_arr_mask = xpx.at(c.data_arr_mask)[1, 1, 2].set(True) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask[3, 0, 0] = True + c.data_arr_mask = xpx.at(c.data_arr_mask)[3, 0, 0].set(True) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() @@ -910,7 +915,7 @@ def test_clip_extrema_with_other_rejection(): [30.0, 30.0, 47.5, 30.0, 30.0], [47.5, 30.0, 30.0, 30.0, 30.0], ] - np.testing.assert_allclose(result, expected) + np_testing.assert_allclose(result, expected) # The expected values below assume an image that is 2000x2000 @@ -966,7 +971,7 @@ def test_combiner_with_scaling_uncertainty(comb_func): scale_by_mean = _make_mean_scaler(ccd_data) combiner.scaling = scale_by_mean - scaled_ccds = np.array( + scaled_ccds = xp.array( [ ccd_data.data * scale_by_mean(ccd_data.data), ccd_data_lower.data * scale_by_mean(ccd_data_lower.data), @@ -977,14 +982,13 @@ def test_combiner_with_scaling_uncertainty(comb_func): avg_ccd = getattr(combiner, comb_func)() if comb_func != "median_combine": - xp = array_api_compat.array_namespace(ccd_data.data) uncertainty_func = _default_std(xp=xp) else: uncertainty_func = sigma_func expected_unc = uncertainty_func(scaled_ccds, axis=0) - np.testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10) + np_testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10) @pytest.mark.parametrize( @@ -995,8 +999,8 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func): # does not affect results when the user supplies a function that # uses masks to screen out bad data. - data = np.ones((10, 10)) - data[5, 5] = 2 + data = xp.ones((10, 10)) + data = xpx.at(data)[5, 5].set(2) mask = data == 2 ccd = CCDData(data, unit=u.adu, mask=mask) # Same, but no mask @@ -1025,14 +1029,17 @@ def sum_func(_, axis=axis): actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask)) elif comb_func == "average_combine": expected_result = data - actual_result = c.average_combine(scale_func=np.mean) + actual_result = c.average_combine(scale_func=xp.mean) elif comb_func == "median_combine": expected_result = data - actual_result = c.median_combine(median_func=np.median) + if not hasattr(xp, "median"): + # If the array API does not have a median function, we + # cannot test this. 
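+            # (median is not part of the array API standard, so whether it
+            # exists depends on the library)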
+                pytest.skip("The array library does not support median")
+        actual_result = c.median_combine(median_func=xp.median)

     # Two of the three values are masked, so no matter what the combination
     # method is the result in this pixel should be 2.
-    expected_result[5, 5] = 2
+    expected_result = xpx.at(expected_result)[5, 5].set(2)

-    # THIS IS A REAL TEST FAILURE!!!
-    np.testing.assert_almost_equal(expected_result, actual_result)
+    np_testing.assert_allclose(expected_result, actual_result)

From fda5ad46a8752d205d3a6e1046944f347ce61a34 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Wed, 18 Jun 2025 17:04:46 -0500
Subject: [PATCH 41/59] Remove unnecessary copy argument

---
 ccdproc/tests/test_ccdproc.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index f987150a..b784a3da 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -42,8 +42,6 @@
 except ImportError:
     HAS_BLOCK_X_FUNCS = False

-_NUMPY_COPY_IF_NEEDED = None  # False if xp.__version__.startswith("1.") else None
-
 RNG = np_random.default_rng

 # import dask.array as da
@@ -1086,7 +1084,7 @@ def test_wcs_project_onto_scale_wcs():

     # Mask should be true for four pixels (all nearest neighbors)
     # of the single pixel we masked initially.
-    new_center = xp.array(new_ccd.wcs.wcs.crpix, dtype=int, copy=_NUMPY_COPY_IF_NEEDED)
+    new_center = xp.array(new_ccd.wcs.wcs.crpix, dtype=int)
     assert xp.all(
         new_ccd.mask[
             new_center[0] : new_center[0] + 2, new_center[1] : new_center[1] + 2

From 03d413ac2bea16afbf019ed2eb77e733ddf51509 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Wed, 18 Jun 2025 17:05:32 -0500
Subject: [PATCH 42/59] Use the array_api_compat numpy namespace instead of
 numpy

This is really only needed for numpy 1.X since numpy 2.X is array API
compliant.

---
 ccdproc/conftest.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py
index cf8af327..8a082dc4 100644
--- a/ccdproc/conftest.py
+++ b/ccdproc/conftest.py
@@ -5,6 +5,8 @@
 # no matter how it is invoked within the source tree.
import os +import array_api_compat # noqa: F401 + try: # When the pytest_astropy_header package is installed from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS @@ -41,7 +43,7 @@ def pytest_configure(config): match array_library: case "numpy": - import numpy as testing_array_library # noqa: F401 + import array_api_compat.numpy as testing_array_library # noqa: F401 case "jax": import jax.numpy as testing_array_library # noqa: F401 From dfc98cf9aeb59287abc23559fe2f6532d8185666 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 19 Jun 2025 21:21:27 -0500 Subject: [PATCH 43/59] Add test against dask and fix bugs uncovered by tests --- ccdproc/combiner.py | 27 +++++++++++++--- ccdproc/conftest.py | 5 +++ ccdproc/core.py | 53 ++++++++++++++++++++------------ ccdproc/tests/pytest_fixtures.py | 2 +- ccdproc/tests/test_ccdproc.py | 5 +-- ccdproc/tests/test_combiner.py | 43 +++++++++++++++++++++----- ccdproc/tests/test_cosmicray.py | 8 +++-- ccdproc/tests/test_rebin.py | 27 +++++++++++++--- tox.ini | 3 ++ 9 files changed, 130 insertions(+), 43 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 0f855965..db74a7c8 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -15,6 +15,8 @@ from astropy.nddata import CCDData, StdDevUncertainty from astropy.stats import sigma_clip from astropy.utils import deprecated_renamed_argument +from numpy import mgrid as np_mgrid +from numpy import ravel_multi_index as np_ravel_multi_index from .core import sigma_func @@ -324,13 +326,25 @@ def clip_extrema(self, nlow=0, nhigh=0): nhigh = 0 argsorted = xp.argsort(self.data_arr, axis=0) - mg = xp.mgrid[ - [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] - ] + # Not every array package has mgrid, so make it in numpy and convert it to the + # array package used for the data. + mg = xp.asarray( + np_mgrid[ + [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] + ] + ) for i in range(-1 * nhigh, nlow): # create a tuple with the indices - where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg]) - self.data_arr_mask = xpx.at(self.data_arr_mask)[where].set(True) + where = tuple([argsorted[i, :, :].ravel()] + [v.ravel() for v in mg]) + # Some array libraries don't support indexing with arrays in multiple + # dimensions, so we need to flatten the mask array, set the mask + # values for a flattened array, and then reshape it back to the + # original shape. 
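+            # np_ravel_multi_index turns the per-axis index arrays into flat
+            # offsets; for example, with shape (3, 5) the index pair (1, 2)
+            # maps to flat offset 1 * 5 + 2 == 7.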
+            flat_index = np_ravel_multi_index(where, self.data_arr.shape)
+            self.data_arr_mask = xp.reshape(
+                xpx.at(xp.reshape(self.data_arr_mask, (-1,)))[flat_index].set(True),
+                self.data_arr.shape,
+            )

     # set up min/max clipping algorithms
     def minmax_clipping(self, min_clip=None, max_clip=None):
@@ -528,6 +542,9 @@ def median_combine(
             uncertainty = uncertainty_func(data, axis=0, ignore_nan=True)
         else:
             uncertainty = uncertainty_func(data, axis=0)
+        # Depending on how the uncertainty was calculated it may or may not
+        # be an array of the same class as the data, so make sure it is
+        uncertainty = xp.asarray(uncertainty)
         # Divide uncertainty by the number of pixel (#309)
         uncertainty /= xp.sqrt(len(self.data_arr) - masked_values)
         # Convert uncertainty to plain numpy array (#351)
diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py
index 8a082dc4..2361270a 100644
--- a/ccdproc/conftest.py
+++ b/ccdproc/conftest.py
@@ -50,6 +50,11 @@ def pytest_configure(config):

             PYTEST_HEADER_MODULES["jax"] = "jax"

+        case "dask":
+            import array_api_compat.dask.array as testing_array_library  # noqa: F401
+
+            PYTEST_HEADER_MODULES["dask"] = "dask"
+
         case _:
             raise ValueError(
                 f"Unsupported array library: {array_library}. "
diff --git a/ccdproc/core.py b/ccdproc/core.py
index daaf7be5..b80d27b1 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -8,7 +8,6 @@
 import warnings

 import array_api_compat
-import array_api_extra as xpx
 from astropy import nddata, stats
 from astropy import units as u
 from astropy.modeling import fitting
@@ -19,6 +18,7 @@
 from astropy.units.quantity import Quantity
 from astropy.utils import deprecated, deprecated_renamed_argument
 from astropy.wcs.utils import proj_plane_pixel_area
+from numpy import mgrid as np_mgrid
 from packaging import version as pkgversion
 from scipy import ndimage

@@ -573,7 +573,9 @@ def subtract_overscan(
         of = fitting.LinearLSQFitter()
         yarr = xp.arange(len(oscan))
         oscan = of(model, yarr, oscan)
-        oscan = oscan(yarr)
+        # The model will return something array-like but it may not be the same array
+        # library that we started with, so convert it back to the original
+        oscan = xp.asarray(oscan(yarr))
         if overscan_axis == 1:
             oscan = xp.reshape(oscan, (oscan.size, 1))
         else:
@@ -878,6 +880,8 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None):
     ccd : `~astropy.nddata.CCDData`
         CCDData object with flat corrected.
     """
+    # Get the array namespace
+    xp = array_api_compat.array_namespace(ccd.data)
     # Use the min_value to replace any values in the flat
     use_flat = flat
     if min_value is not None:
@@ -895,7 +899,7 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None):
         raise ValueError("norm_value must be greater than zero.")
     else:
         # norm_value was not set, use mean of the image.
-        flat_mean = use_flat.data.mean() * use_flat.unit
+        flat_mean = xp.mean(use_flat.data) * use_flat.unit

     # Normalize the flat.
flat_normed = use_flat.divide(flat_mean) @@ -1296,21 +1300,7 @@ def rebin(ccd, newshape): except AttributeError as e: raise TypeError("ccd is not an ndarray or a CCDData object.") from e - if isinstance(ccd, xp.ndarray): - - # check to see that the two arrays are going to be the same length - if len(ccd.shape) != len(newshape): - raise ValueError("newshape does not have the same dimensions as " "ccd.") - - slices = [ - slice(0, old, old / new) - for old, new in zip(ccd.shape, newshape, strict=True) - ] - coordinates = xp.mgrid[slices] - indices = coordinates.astype("i") - return ccd[tuple(indices)] - - elif isinstance(ccd, CCDData): + if isinstance(ccd, CCDData): # check to see that the two arrays are going to be the same length if len(ccd.shape) != len(newshape): raise ValueError("newshape does not have the same dimensions as ccd.") @@ -1329,7 +1319,29 @@ def rebin(ccd, newshape): return nccd else: - raise TypeError("ccd is not an ndarray or a CCDData object.") + # check to see that the two arrays are going to be the same length + if len(ccd.shape) != len(newshape): + raise ValueError("newshape does not have the same dimensions as " "ccd.") + + slices = [ + slice(0, old, old / new) + for old, new in zip(ccd.shape, newshape, strict=True) + ] + + # Not every array package has mgrid, so we do the mgrid with + # numpy and convert to the array package used by ccd.data. + + coordinates = xp.asarray(np_mgrid[slices]) + indices = coordinates.astype("i") + + try: + result = ccd[tuple(indices)] + except Exception as e: + raise TypeError( + f"The array library {xp.__name__} does not support this method of " + "rebinning. Please use block_reduce or block_replicate instead." + ) from e + return result def block_reduce(ccd, block_size, func=None): @@ -1912,7 +1924,8 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # make sure that mdata is the same type as data mdata = xp.asarray(ndimage.median_filter(data, rbox)) - ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) + ndata = xp.where(crarr == 1, mdata, data) + # ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index 9b56290a..7fa65f44 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -62,7 +62,7 @@ def ccd_data( data = rng.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {"my_key": 42, "your_key": "not 42"} - ccd = CCDData(array_library.array(data), unit=u.adu) + ccd = CCDData(array_library.asarray(data), unit=u.adu) ccd.header = fake_meta return ccd diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index b784a3da..5f1629b2 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -14,6 +14,7 @@ from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS from numpy import array as np_array +from numpy import mgrid as np_mgrid from numpy import random as np_random from numpy import testing as np_testing @@ -278,7 +279,7 @@ def test_subtract_overscan_model(transpose): oscan_region = (slice(None), slice(0, 10)) science_region = (slice(None), slice(10, None)) - yscan, xscan = xp.mgrid[0:size, 0:size] / 10.0 + 300.0 + yscan, xscan = xp.asarray(np_mgrid[0:size, 0:size]) / 10.0 + 300.0 if transpose: oscan_region = oscan_region[::-1] @@ -907,7 +908,7 @@ def test_cosmicray_lacosmic_does_not_change_input(): def test_flat_correct_does_not_change_input(): ccd_data = 
ccd_data_func()
     original = ccd_data.copy()
-    flat = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit)
+    flat = CCDData(xp.zeros_like(ccd_data.data), unit=ccd_data.unit)
     # Ignore the divide by zero warning that is raised when the flat is zero.
     with warnings.catch_warnings(action="ignore", category=RuntimeWarning):
         _ = flat_correct(ccd_data, flat=flat)
diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 6dd5d71f..c20fb030 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -7,6 +7,7 @@
 from astropy.nddata import CCDData
 from astropy.stats import median_absolute_deviation as mad
 from astropy.utils.data import get_pkg_data_filename
+from numpy import median as np_median
 from numpy import testing as np_testing

 from ccdproc.combiner import (
@@ -357,7 +358,21 @@ def test_combiner_with_scaling():
     assert avg_ccd.shape == ccd_data.shape
     median_ccd = combiner.median_combine()
     # Does median also scale to the correct value?
-    np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data))
+    # Some array libraries do not have a median, and median is not part of the
+    # standard array API, so we use numpy's median here.
+    # Odd; for dask, which does not have a full median, even falling back to numpy does
+    # not work. For some reason the call to np_median fails. I suppose this is maybe
+    # because dask just adds a median to its task list/compute graph thingy
+    # and then tries to evaluate it itself?

+    # Try doing a compute on the data first, and if that fails it is no big deal
+    try:
+        med_ccd = median_ccd.data.compute()
+        med_inp_data = ccd_data.data.compute()
+    except AttributeError:
+        pass
+
+    np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data))

     # Set the scaling manually...
     combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)]
@@ -465,7 +480,7 @@ def test_combiner_result_dtype():
     np_testing.assert_allclose(res.data, ref)
     res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int)
     # The result dtype should be integer:
-    assert res.data.dtype == xp.int_
+    assert xp.isdtype(res.data, "integral")
     ref = xp.ones((3, 3)) * 2
     np_testing.assert_allclose(res.data, ref)
@@ -608,10 +623,12 @@ def test_sum_combine_uncertainty():
     np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array)


-# Ignore warnings generated because most values are masked
+# Ignore warnings generated because most values are masked and we divide
+# by zero in at least one place
 @pytest.mark.filterwarnings(
     "ignore:Mean of empty slice:RuntimeWarning",
     "ignore:Degrees of freedom <= 0:RuntimeWarning",
+    "ignore:invalid value encountered in divide:RuntimeWarning",
 )
 @pytest.mark.parametrize("mask_point", [True, False])
 @pytest.mark.parametrize(
@@ -741,9 +758,12 @@ def test_combiner_uncertainty_median_mask():
     ref_uncertainty = xp.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3])
     # Correction because we combined two images.
     ref_uncertainty /= xp.sqrt(3)  # 0.855980789955
-    ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set(
-        mad_to_sigma * mad([2, 3]) / xp.sqrt(2)
-    )  # 0.524179041254
+    # It turns out that the expression below evaluates to a np.float64, which
+    # introduces numpy into the array namespace, which raises an error
+    # when array_api_compat tries to figure out the namespace. Casting
+    # it to a regular float fixes that.
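+    # (array_api_compat.array_namespace raises a TypeError when it is given
+    # arrays from more than one namespace at once.)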
+ med_value = float(mad_to_sigma * mad([2, 3]) / xp.sqrt(2)) + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set(med_value) # 0.524179041254 np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) @@ -802,7 +822,14 @@ def test_3d_combiner_with_scaling(): assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data)) + # Once again, use numpy to find the median + try: + med_ccd = median_ccd.data.compute() + med_inp_data = ccd_data.data.compute() + except AttributeError: + pass + + np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) # Set the scaling manually... combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] @@ -841,7 +868,7 @@ def test_writeable_after_combine(tmpdir, comb_func): ccd2.write(tmp_file.strpath) -def test_clip_extrema(): +def test_clip_extrema_moo(): ccdlist = [ CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index 177a7f64..ab506d03 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,7 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat -import array_api_extra as xpx import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning @@ -35,9 +34,14 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): # ideally threshold should be set so it is not sensitive to seed, but # this is not working right now crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale) + + # Some array libraris (Dask) do not support setting individual elements, + # so use nuympy. 
+ data_as_np = np_array(data.data) for i in range(ncrays): y, x = crrays[i] - data.data = xpx.at(data.data)[y, x].set(crflux[i]) + data_as_np[y, x] = crflux[i] + data.data = xp.asarray(data_as_np) def test_cosmicray_lacosmic(): diff --git a/ccdproc/tests/test_rebin.py b/ccdproc/tests/test_rebin.py index 4dda68f6..d240f169 100644 --- a/ccdproc/tests/test_rebin.py +++ b/ccdproc/tests/test_rebin.py @@ -34,7 +34,11 @@ def test_rebin_larger(): ccd_data = ccd_data_func(data_size=10) a = ccd_data.data with pytest.warns(AstropyDeprecationWarning): - b = rebin(a, (20, 20)) + try: + b = rebin(a, (20, 20)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") assert b.shape == (20, 20) np.testing.assert_almost_equal(b.sum(), 4 * a.sum()) @@ -45,8 +49,12 @@ def test_rebin_smaller(): ccd_data = ccd_data_func(data_size=10) a = ccd_data.data with pytest.warns(AstropyDeprecationWarning): - b = rebin(a, (20, 20)) - c = rebin(b, (10, 10)) + try: + b = rebin(a, (20, 20)) + c = rebin(b, (10, 10)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") assert c.shape == (10, 10) assert (c - a).sum() == 0 @@ -63,7 +71,11 @@ def test_rebin_ccddata(mask_data, uncertainty): ccd_data.uncertainty = StdDevUncertainty(err) with pytest.warns(AstropyDeprecationWarning): - b = rebin(ccd_data, (20, 20)) + try: + b = rebin(ccd_data, (20, 20)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") assert b.shape == (20, 20) if mask_data: @@ -76,6 +88,11 @@ def test_rebin_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() with pytest.warns(AstropyDeprecationWarning): - _ = rebin(ccd_data, (20, 20)) + try: + _ = rebin(ccd_data, (20, 20)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") + np.testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit diff --git a/tox.ini b/tox.ini index 3a859ca5..4d2e63c4 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,7 @@ isolated_build = true setenv = jax: CCDPROC_ARRAY_LIBRARY = jax jax: JAX_ENABLE_X64 = True + dask: CCDPROC_ARRAY_LIBRARY = dask devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple extras = test @@ -53,6 +54,8 @@ deps = oldestdeps: astropy==6.0.* oldestdeps: reproject==0.9.1 + dask: dask + commands = pip freeze !cov-!oldestdeps: pytest --pyargs ccdproc {toxinidir}/docs {posargs} From 191da4e3d7887fadae41f886f4d44f275a653203 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 19 Jun 2025 21:22:52 -0500 Subject: [PATCH 44/59] Add dask test to CI --- .github/workflows/ci_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index ea5b783a..e75b4d0b 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -53,10 +53,10 @@ jobs: python: '3.12' tox_env: 'py312-test-alldeps-numpy126' - - name: 'macos-py312' + - name: 'macos-py312-dask' os: macos-latest python: '3.12' - tox_env: 'py312-test-alldeps' + tox_env: 'py312-alldeps-dask' - name: 'windows-py312' os: windows-latest From 8fc85a5b9c6030447c6bc8d1e194bcb75ab154fe Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 19 Jun 2025 21:32:00 -0500 Subject: [PATCH 45/59] Fix some 
errors introduced when changing the tests for dask --- ccdproc/tests/test_combiner.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index c20fb030..25168973 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -365,10 +365,12 @@ def test_combiner_with_scaling(): # because dask just adds a median to its task list/compute graph thingy # and then tries to evaluate it itself? + med_ccd = median_ccd.data + med_inp_data = ccd_data.data # Try doing a compute on the data first, and if that fails it is no big deal try: - med_ccd = median_ccd.data.compute() - med_inp_data = ccd_data.data.compute() + med_ccd = med_ccd.compute() + med_inp_data = med_inp_data.compute() except AttributeError: pass @@ -480,7 +482,7 @@ def test_combiner_result_dtype(): np_testing.assert_allclose(res.data, ref) res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int) # The result dtype should be integer: - assert xp.isdtype(res.data, "integral") + assert xp.isdtype(res.data.dtype, "integral") ref = xp.ones((3, 3)) * 2 np_testing.assert_allclose(res.data, ref) @@ -823,9 +825,11 @@ def test_3d_combiner_with_scaling(): median_ccd = combiner.median_combine() # Does median also scale to the correct value? # Once again, use numpy to find the median + med_ccd = median_ccd.data + med_inp_data = ccd_data.data try: - med_ccd = median_ccd.data.compute() - med_inp_data = ccd_data.data.compute() + med_ccd = med_ccd.compute() + med_inp_data = med_inp_data.compute() except AttributeError: pass From 73cc0b08bc957589a3723618551a3e5ac20d5c25 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 24 Jun 2025 12:05:30 -0700 Subject: [PATCH 46/59] Allow cupy for testing --- ccdproc/conftest.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py index 2361270a..0a9022d9 100644 --- a/ccdproc/conftest.py +++ b/ccdproc/conftest.py @@ -55,6 +55,11 @@ def pytest_configure(config): PYTEST_HEADER_MODULES["dask"] = "dask" + case "cupy": + import array_api_compat.cupy as testing_array_library # noqa: F401 + + PYTEST_HEADER_MODULES["cupy"] = "cupy" + case _: raise ValueError( f"Unsupported array library: {array_library}. " From 53fb79ab0fc1e081e370bd488925831a290dac2d Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 26 Jun 2025 15:51:35 -0700 Subject: [PATCH 47/59] Suppress square root warning generated in some array libraries A negative value in the square root will not be infrequent, so suppress this. One test was modified to avoid taking a square root since the point of the test was not to test the square root. 
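For reference, a minimal standalone sketch of the suppression pattern the
diff below adopts. Plain NumPy stands in for the array namespace here, and
the variable names are illustrative, not part of the patch:

    import warnings

    import numpy as np

    data = np.array([4.0, -1.0, 9.0])  # the negative entry triggers the warning

    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="invalid value encountered in sqrt",
            category=RuntimeWarning,
        )
        # sqrt(-1.0) quietly becomes NaN instead of emitting a RuntimeWarning
        deviation = np.sqrt(data)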
---
 ccdproc/core.py                | 8 +++++++-
 ccdproc/tests/test_ccdproc.py  | 5 ++++-
 ccdproc/tests/test_combiner.py | 2 +-
 pyproject.toml                 | 3 +++
 4 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index b80d27b1..d74a850e 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -441,7 +441,13 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False):
         logging.warning("Negative values in array will be replaced with nan")

     # calculate the deviation
-    var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message="invalid value encountered in sqrt",
+            category=RuntimeWarning,
+        )
+        var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5

     # ensure uncertainty and image data have same unit
     ccd = ccd_data.copy()
diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index 5f1629b2..31e42c56 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -944,7 +944,10 @@ def test_trim_image_does_not_change_input():
 def test_transform_image_does_not_change_input():
     ccd_data = ccd_data_func()
     original = ccd_data.copy()
-    _ = transform_image(ccd_data, xp.sqrt)
+    # Using a function here that is really unlikely to produce
+    # an invalid value (like square root does) to avoid
+    # warning messages.
+    _ = transform_image(ccd_data, xp.positive)
     np_testing.assert_allclose(original.data, ccd_data)
     assert original.unit == ccd_data.unit
diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 25168973..eadde7b8 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -872,7 +872,7 @@ def test_writeable_after_combine(tmpdir, comb_func):
         ccd2.write(tmp_file.strpath)


-def test_clip_extrema_moo():
+def test_clip_extrema_alone():
     ccdlist = [
         CCDData(xp.ones((3, 5)) * 90.0, unit="adu"),
         CCDData(xp.ones((3, 5)) * 20.0, unit="adu"),
diff --git a/pyproject.toml b/pyproject.toml
index 2d205637..ced4109f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -148,6 +148,9 @@ filterwarnings= [
     "ignore:numpy\\.ufunc size changed:RuntimeWarning",
     "ignore:numpy.ndarray size changed:RuntimeWarning",
     "ignore:`np.bool` is a deprecated alias for the builtin `bool`:DeprecationWarning",
+    # This is here because dask can end up generating this warning much
+    # later than the spot in create_deviation where the square root is
+    # calculated.
+    "ignore:invalid value encountered in sqrt:RuntimeWarning",
 ]
 markers = [

From 4bef390707a70abc14f26d347ebe00c90eafe223 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Thu, 26 Jun 2025 18:54:06 -0500
Subject: [PATCH 48/59] Apply suggestions from code review

Co-authored-by: Nathaniel Starkman
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 ccdproc/core.py                 | 2 +-
 ccdproc/tests/test_cosmicray.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index d74a850e..67e5b45f 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -447,7 +447,7 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False):
             message="invalid value encountered in sqrt",
             category=RuntimeWarning,
         )
-        var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5
+        var = xp.sqrt(xp.abs(data) + readnoise_value**2)

     # ensure uncertainty and image data have same unit
     ccd = ccd_data.copy()
diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py
index ab506d03..7e0ea6c2 100644
--- a/ccdproc/tests/test_cosmicray.py
+++ b/ccdproc/tests/test_cosmicray.py
@@ -35,8 +35,8 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS):
     # this is not working right now
     crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale)

-    # Some array libraris (Dask) do not support setting individual elements,
-    # so use nuympy.
+    # Some array libraries (Dask) do not support setting individual elements,
+    # so use NumPy.
     data_as_np = np_array(data.data)
     for i in range(ncrays):
         y, x = crrays[i]

From efeac769aa4a29c197d1dd9e1c295ed2fe90f09e Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 1 Jul 2025 16:38:47 -0700
Subject: [PATCH 49/59] Undo suggested edit

---
 ccdproc/core.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index 67e5b45f..324f4148 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -447,7 +447,11 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False):
             message="invalid value encountered in sqrt",
             category=RuntimeWarning,
         )
-        var = xp.sqrt(xp.abs(data) + readnoise_value**2)
+        # The term below in which the square root of the data is calculated
+        # and then squared MUST stay the way it is so that negative values
+        # in the data end up as NaN. Do not replace it with an absolute
+        # value.
+ var = xp.sqrt(xp.sqrt(data) ** 2 + readnoise_value**2) # ensure uncertainty and image data have same unit ccd = ccd_data.copy() From 69a79d520fb10741070da18b159abf00ce47dd31 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 16:46:38 -0700 Subject: [PATCH 50/59] Shorten up a loop with a comprehension --- ccdproc/combiner.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index db74a7c8..fe75cd42 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -180,13 +180,10 @@ def __init__(self, ccd_iter, dtype=None): self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) # populate self.data_arr - mask_list = [] - for ccd in ccd_list: - if ccd.mask is not None: - mask_list.append(ccd.mask) - else: - mask_list.append(xp.zeros(default_shape)) - + mask_list = [ + ccd.mask if ccd.mask is not None else xp.zeros(default_shape) + for ccd in ccd_list + ] self.data_arr_mask = xp.array(mask_list, dtype=bool) # Must be after self.data_arr is defined because it checks the From f7ea0a9c6c4da3c10cc3600f67c146b7be107e1a Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 16:48:44 -0700 Subject: [PATCH 51/59] Add minimum pin for dependency --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ced4109f..02faefaf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ authors = [ { name = "and Michael Seifert" }, ] dependencies = [ - "array_api_compat", + "array_api_compat>=1.12.0", "array_api_extra>=0.7.0", "astropy>=6.0.1", "astroscrappy>=1.1.0", From d3a732b558eec3dc24e258a758777a088d73d163 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 16:50:00 -0700 Subject: [PATCH 52/59] Update black target versions --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 02faefaf..bc0203f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ include = [ [tool.black] line-length = 88 -target-version = ['py310', 'py311', 'py312'] +target-version = ['py311', 'py312', 'py313'] include = '\.pyi?$|\.ipynb$' # 'extend-exclude' excludes files or directories in addition to the defaults extend-exclude = ''' From 309bbf42b53a254e7ed226952d60fd20b0490f1c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 21:34:24 -0500 Subject: [PATCH 53/59] Store array namespace when Combiner is created --- ccdproc/combiner.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index fe75cd42..2143d582 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -109,6 +109,12 @@ class Combiner: description. If ``None`` it uses ``np.float64``. Default is ``None``. + xp : array namespace, optional + The array namespace to use for the data. If not provided, it will + be inferred from the first `~astropy.nddata.CCDData` object in + ``ccd_iter``. + Default is ``None``. + Raises ------ TypeError @@ -136,7 +142,7 @@ class Combiner: [ 0.66666667, 0.66666667, 0.66666667, 0.66666667]]...) """ - def __init__(self, ccd_iter, dtype=None): + def __init__(self, ccd_iter, dtype=None, xp=None): if ccd_iter is None: raise TypeError( "ccd_iter should be a list or a generator of CCDData objects." 
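For reference, a minimal sketch of the pattern this patch applies: resolve
the array namespace once at construction and reuse the stored namespace in
later methods. The class and names below are hypothetical, for illustration
only:

    import array_api_compat
    import numpy as np

    class NamespaceCaching:
        def __init__(self, data, xp=None):
            # Fall back to inferring the namespace from the data itself.
            self._xp = xp or array_api_compat.array_namespace(data)
            self.data = data

        def total(self):
            # Reuse the cached namespace instead of re-deriving it here.
            return self._xp.sum(self.data)

    print(NamespaceCaching(np.ones((3, 3))).total())  # 9.0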
@@ -167,7 +173,8 @@ def __init__(self, ccd_iter, dtype=None): raise TypeError("CCDData objects don't have the same unit.") # Set array namespace - xp = array_api_compat.array_namespace(ccd_list[0].data) + xp = xp or array_api_compat.array_namespace(ccd_list[0].data) + self._xp = xp if dtype is None: dtype = xp.float64 self.ccd_list = ccd_list @@ -247,7 +254,7 @@ def scaling(self): @scaling.setter def scaling(self, value): - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp if value is None: self._scaling = value else: @@ -316,7 +323,7 @@ def clip_extrema(self, nlow=0, nhigh=0): .. [0] image.imcombine help text. http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?imcombine """ - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp if nlow is None: nlow = 0 if nhigh is None: @@ -447,7 +454,7 @@ def _get_scaled_data(self, scale_arg): return self.data_arr def _get_nan_substituted_data(self, data): - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp # Get the data as an unmasked array with masked values filled as NaN if self.data_arr_mask.any(): @@ -462,7 +469,7 @@ def _combination_setup(self, user_func, default_func, scale_to): Handle the common pieces of image combination data/mask setup. """ data = self._get_scaled_data(scale_to) - xp = array_api_compat.array_namespace(data) + xp = self._xp # Play it safe for now and only do the nan thing if the user is using # the default combination function. if user_func is None: @@ -515,7 +522,7 @@ def median_combine( The uncertainty currently calculated using the median absolute deviation does not account for rejected pixels. """ - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp _default_median_func = _default_median(xp=xp) @@ -565,12 +572,12 @@ def median_combine( # return the combined image return combined_image - def _weighted_sum(self, data, sum_func): + def _weighted_sum(self, data, sum_func, xp=None): """ Perform weighted sum, used by both ``sum_combine`` and in some cases by ``average_combine``. """ - xp = array_api_compat.array_namespace(data) + xp = xp or array_api_compat.array_namespace(data) if self.weights.shape != data.shape: # Add extra axes to the weights for broadcasting weights = xp.reshape(self.weights, [len(self.weights), 1, 1]) @@ -624,7 +631,7 @@ def average_combine( combined_image: `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. """ - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp _default_average_func = _default_average(xp=xp) @@ -641,7 +648,7 @@ def average_combine( # Do NOT modify data after this -- we need it to be intact when we # we get to the uncertainty calculation. if self.weights is not None: - weighted_sum, weights = self._weighted_sum(data, sum_func) + weighted_sum, weights = self._weighted_sum(data, sum_func, xp=xp) mean = weighted_sum / sum_func(weights, axis=0) else: mean = scale_func(data, axis=0) @@ -707,7 +714,7 @@ def sum_combine( CCDData object based on the combined input of CCDData objects. 
""" - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp _default_sum_func = _default_sum(xp=xp) @@ -719,7 +726,7 @@ def sum_combine( ) if self.weights is not None: - summed, weights = self._weighted_sum(data, sum_func) + summed, weights = self._weighted_sum(data, sum_func, xp=xp) else: summed = sum_func(data, axis=0) From bd23ac441077e985d9ba51e2beca6ecf482f7e7c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 21:34:54 -0500 Subject: [PATCH 54/59] Add optional namespace argument to several functions --- ccdproc/core.py | 82 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 21 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 324f4148..84cbb5a4 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -93,7 +93,7 @@ def _is_array(arr): # Ideally this would eventually be covered by tests. Looks like Sparse # could be used to test this, since it has no percentile... -def _percentile_fallback(array, percentiles): # pragma: no cover +def _percentile_fallback(array, percentiles, xp=None): # pragma: no cover """ Try calculating percentile using namespace, otherwise fall back to an implmentation that uses sort. As of the 2023 version of the array API @@ -107,12 +107,16 @@ def _percentile_fallback(array, percentiles): # pragma: no cover percentiles : float or list-like Percentile to calculate. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. + Returns ------- percentile : float or list-like Calculated percentile. """ - xp = array_api_compat.array_namespace(array) + xp = xp or array_api_compat.array_namespace(array) try: return xp.percentile(array, percentiles) except AttributeError: @@ -285,11 +289,11 @@ def ccd_process( # apply the overscan correction if isinstance(oscan, CCDData): nccd = subtract_overscan( - nccd, overscan=oscan, median=oscan_median, model=oscan_model + nccd, overscan=oscan, median=oscan_median, model=oscan_model, xp=xp ) elif isinstance(oscan, str): nccd = subtract_overscan( - nccd, fits_section=oscan, median=oscan_median, model=oscan_model + nccd, fits_section=oscan, median=oscan_median, model=oscan_model, xp=xp ) elif oscan is None: pass @@ -298,6 +302,7 @@ def ccd_process( # apply the trim correction if isinstance(trim, str): + # No xp=... 
here because slicing can be done without knowing the array namespace nccd = trim_image(nccd, fits_section=trim) elif trim is None: pass @@ -306,7 +311,7 @@ def ccd_process( # create the error frame if error and gain is not None and readnoise is not None: - nccd = create_deviation(nccd, gain=gain, readnoise=readnoise) + nccd = create_deviation(nccd, gain=gain, readnoise=readnoise, xp=xp) elif error and (gain is None or readnoise is None): raise ValueError("gain and readnoise must be specified to create error frame.") @@ -324,10 +329,12 @@ def ccd_process( raise TypeError("gain is not None or astropy.units.Quantity.") if gain is not None and gain_corrected: + # No need for xp here because gain_correct does not need the namespace nccd = gain_correct(nccd, gain) # subtracting the master bias if isinstance(master_bias, CCDData): + # No need for xp here because subtract_bias does not need the namespace nccd = subtract_bias(nccd, master_bias) elif master_bias is None: pass @@ -336,6 +343,7 @@ def ccd_process( # subtract the dark frame if isinstance(dark_frame, CCDData): + # No need for xp here because subtract_dark does not need the namespace nccd = subtract_dark( nccd, dark_frame, @@ -352,7 +360,7 @@ def ccd_process( # test dividing the master flat if isinstance(master_flat, CCDData): - nccd = flat_correct(nccd, master_flat, min_value=min_value) + nccd = flat_correct(nccd, master_flat, min_value=min_value, xp=xp) elif master_flat is None: pass else: @@ -360,13 +368,14 @@ def ccd_process( # apply the gain correction only at the end if gain_corrected is False if gain is not None and not gain_corrected: + # No need for xp here because gain_correct does not need the namespace nccd = gain_correct(nccd, gain) return nccd @log_to_metadata -def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): +def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False, xp=None): """ Create a uncertainty frame. The function will update the uncertainty plane which gives the standard deviation for the data. Gain is used in @@ -393,6 +402,10 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): If ``True``, any value of nan in the output array will be replaced by the readnoise. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. + {log} Raises @@ -409,7 +422,7 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): """ # Get array namespace - xp = array_api_compat.array_namespace(ccd_data.data) + xp = xp or array_api_compat.array_namespace(ccd_data.data) if gain is not None and not isinstance(gain, Quantity): raise TypeError("gain must be a astropy.units.Quantity.") @@ -462,7 +475,13 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): @log_to_metadata def subtract_overscan( - ccd, overscan=None, overscan_axis=1, fits_section=None, median=False, model=None + ccd, + overscan=None, + overscan_axis=1, + fits_section=None, + median=False, + model=None, + xp=None, ): """ Subtract the overscan region from an image. @@ -502,6 +521,10 @@ def subtract_overscan( by the median or the mean. Default is ``None``. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. 
+ {log} Raises @@ -555,7 +578,7 @@ def subtract_overscan( raise TypeError("ccddata is not a CCDData object.") # Set array namespace - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) if (overscan is not None and fits_section is not None) or ( overscan is None and fits_section is None @@ -857,7 +880,7 @@ def gain_correct(ccd, gain, gain_unit=None): @log_to_metadata -def flat_correct(ccd, flat, min_value=None, norm_value=None): +def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): """Correct the image for flat fielding. The flat field image is normalized by its mean or a user-supplied value @@ -883,6 +906,10 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None): have the same scale. If this value is negative or 0, a ``ValueError`` is raised. Default is ``None``. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. + {log} Returns @@ -891,7 +918,7 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None): CCDData object with flat corrected. """ # Get the array namespace - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) # Use the min_value to replace any values in the flat use_flat = flat if min_value is not None: @@ -1008,7 +1035,7 @@ def transform_image(ccd, transform_func, **kwargs): @log_to_metadata -def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): +def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear", xp=None): """ Given a CCDData image with WCS, project it onto a target WCS and return the reprojected data as a new CCDData image. @@ -1039,6 +1066,10 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): Default is ``'bilinear'``. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. 
+ {log} Returns @@ -1050,7 +1081,7 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): from reproject import reproject_interp # Set array namespace - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) if not (ccd.wcs.is_celestial and target_wcs.is_celestial): raise ValueError("one or both WCS is not celestial.") @@ -1354,10 +1385,10 @@ def rebin(ccd, newshape): return result -def block_reduce(ccd, block_size, func=None): +def block_reduce(ccd, block_size, func=None, xp=None): """Thin wrapper around `astropy.nddata.block_reduce`.""" if func is None: - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) func = xp.sum data = nddata.block_reduce(ccd, block_size, func) if isinstance(ccd, CCDData): @@ -1367,10 +1398,10 @@ def block_reduce(ccd, block_size, func=None): return data -def block_average(ccd, block_size): +def block_average(ccd, block_size, xp=None): """Like `block_reduce` but with predefined ``func=np.mean``.""" - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) data = nddata.block_reduce(ccd, block_size, xp.mean) # Like in block_reduce: @@ -1822,7 +1853,7 @@ def _astroscrappy_gain_apply_helper(cleaned_data, gain, gain_apply, old_interfac return cleaned_data -def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): +def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0, xp=None): """ Identify cosmic rays through median technique. The median technique identifies cosmic rays by identifying pixels by subtracting a median image @@ -1856,6 +1887,10 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): be replaced. Default is ``0``. + xp : array namespace, optional + The array namespace to use for the calculations. If not provided, the + array namespace of the input data will be used. + Notes ----- Similar implementation to crmedian in iraf.imred.crutil.crmedian. @@ -1896,7 +1931,7 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): updated with the detected cosmic rays. """ if _is_array(ccd): - xp = array_api_compat.array_namespace(ccd) + xp = xp or array_api_compat.array_namespace(ccd) # Masked data is not part of the array API so remove mask if present. # Only look at the data array, guessing that if there is a .mask then @@ -1979,6 +2014,7 @@ def ccdmask( lsigma=9, hsigma=9, ngood=5, + xp=None, ): """ Uses method based on the IRAF ccdmask task to generate a mask based on the @@ -2035,6 +2071,10 @@ def ccdmask( pixels masked in that column. Default is ``5``. + xp : array namespace, optional + The array namespace to use for the calculations. If not provided, the + array namespace of the input data will be used. 
+ Returns ------- mask : `numpy.ndarray` @@ -2092,7 +2132,7 @@ def ccdmask( raise ValueError('"ratio" should be a "CCDData".') from err # Get array namespace - xp = array_api_compat.array_namespace(ratio.data) + xp = xp or array_api_compat.array_namespace(ratio.data) def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): """Helper function to mask values outside of the specified sigma range.""" From fcac48f8f32391ac54391506683b4f9d1791fa1c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 3 Jul 2025 13:22:16 -0500 Subject: [PATCH 55/59] Use array API in all cosmic ray tests --- ccdproc/combiner.py | 12 +++++------- ccdproc/core.py | 31 ++++++++++++++++++++----------- ccdproc/tests/test_cosmicray.py | 21 ++++++--------------- 3 files changed, 31 insertions(+), 33 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 2143d582..986b75e9 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -500,8 +500,7 @@ def median_combine( Parameters ---------- median_func : function, optional - Function that calculates median of a `numpy.ma.MaskedArray`. - Default is `numpy.ma.median`. + Function that calculates median of an array. scale_to : float or None, optional Scaling factor used in the average combined image. If given, @@ -612,19 +611,18 @@ def average_combine( Parameters ---------- scale_func : function, optional - Function to calculate the average. Defaults to - `numpy.nanmean`. + Function to calculate the average. scale_to : float or None, optional Scaling factor used in the average combined image. If given, it overrides `scaling`. Defaults to ``None``. uncertainty_func : function, optional - Function to calculate uncertainty. Defaults to `numpy.ma.std`. + Function to calculate uncertainty. sum_func : function, optional Function used to calculate sums, including the one done to - find the weighted average. Defaults to `numpy.nansum`. + find the weighted average. Returns ------- @@ -706,7 +704,7 @@ def sum_combine( it overrides `scaling`. Defaults to ``None``. uncertainty_func : function, optional - Function to calculate uncertainty. Defaults to `numpy.ma.std`. + Function to calculate uncertainty. Returns ------- diff --git a/ccdproc/core.py b/ccdproc/core.py index 84cbb5a4..69a5334b 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -8,6 +8,7 @@ import warnings import array_api_compat +import array_api_extra as xpx from astropy import nddata, stats from astropy import units as u from astropy.modeling import fitting @@ -101,7 +102,7 @@ def _percentile_fallback(array, percentiles, xp=None): # pragma: no cover Parameters ---------- - array : array-like + array : array_like Array from which to calculate the percentile. percentiles : float or list-like @@ -1205,7 +1206,7 @@ def setbox(x, y, mbox, xmax, ymax): return x1, x2, y1, y2 -def background_deviation_box(data, bbox): +def background_deviation_box(data, bbox, xp=None): """ Determine the background deviation with a box size of bbox. The algorithm steps through the image and calculates the deviation within each box. @@ -1214,12 +1215,16 @@ def background_deviation_box(data, bbox): Parameters ---------- - data : `numpy.ndarray` or `numpy.ma.MaskedArray` + data : `numpy.ndarray` or other array_like Data to measure background deviation. bbox : int Box size for calculating background deviation. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. 
+
     Raises
     ------
     ValueError
@@ -1227,7 +1232,7 @@
 
     Returns
     -------
-    background : `numpy.ndarray` or `numpy.ma.MaskedArray`
+    background : array_like
         An array with the measured background deviation in each pixel.
     """
     # Check to make sure the background box is an appropriate size
@@ -1235,13 +1240,18 @@
     if bbox < 1:
         raise ValueError("bbox must be greater than 1.")
 
+    if xp is None:
+        # Get the array namespace
+        xp = array_api_compat.array_namespace(data)
     # make the background image
-    barr = data * 0.0 + data.std()
+    barr = data * 0.0 + xp.std(data)
     ylen, xlen = data.shape
     for i in range(int(0.5 * bbox), xlen, bbox):
         for j in range(int(0.5 * bbox), ylen, bbox):
             x1, x2, y1, y2 = setbox(i, j, bbox, xlen, ylen)
-            barr[y1:y2, x1:x2] = sigma_func(data[y1:y2, x1:x2])
+            barr = xpx.at(barr)[y1:y2, x1:x2].set(
+                xp.astype(sigma_func(data[y1:y2, x1:x2]), data.dtype)
+            )
     return barr
 
 
@@ -1266,7 +1276,7 @@
 
     Returns
    -------
-    background : `numpy.ndarray` or `numpy.ma.MaskedArray`
+    background : `numpy.ndarray`
         An array with the measured background deviation in each pixel.
     """
     # Check to make sure the background box is an appropriate size
@@ -1861,7 +1871,7 @@
 
     Parameters
     ----------
-    ccd : `~astropy.nddata.CCDData`, `numpy.ndarray` or `numpy.ma.MaskedArray`
+    ccd : `~astropy.nddata.CCDData`, `numpy.ndarray` or other array_like
         Data to have cosmic ray cleaned.
 
     thresh : float, optional
@@ -2079,7 +2089,7 @@
     -------
     mask : `numpy.ndarray`
         A boolean ndarray where the bad pixels have a value of 1 (True) and
-        valid pixels 0 (False), following the numpy.ma conventions.
+        valid pixels 0 (False), following the numpy convention for masking.
 
     Notes
     -----
@@ -2235,8 +2245,7 @@
     Returns
     -------
     mask : `numpy.ndarray` of boolean dtype
-        The bitfield converted to a boolean mask that can be used for
-        `numpy.ma.MaskedArray` or `~astropy.nddata.CCDData`.
+        The bitfield converted to a boolean mask.
Examples -------- diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index 7e0ea6c2..b3aaaf95 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,6 +1,5 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -import array_api_compat import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning @@ -9,6 +8,8 @@ from numpy.random import default_rng from numpy.testing import assert_allclose +# Set up the array library to be used in tests +from ccdproc.conftest import testing_array_library as xp from ccdproc.core import ( background_deviation_box, background_deviation_filter, @@ -26,7 +27,6 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): size = data.shape[0] rng = default_rng(99) - xp = array_api_compat.array_namespace(data.data) crrays = xp.asarray(rng.integers(0, size, size=(ncrays, 2))) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns @@ -58,7 +58,6 @@ def test_cosmicray_lacosmic(): def test_cosmicray_lacosmic_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -75,7 +74,6 @@ def test_cosmicray_lacosmic_ccddata(): def test_cosmicray_lacosmic_check_data(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) with pytest.raises(TypeError): noise = DATA_SCALE * xp.ones_like(ccd_data.data) cosmicray_lacosmic(10, noise) @@ -90,7 +88,6 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): # data and returns that gain corrected data. That is not the # intent... ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -123,7 +120,6 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): def test_cosmicray_lacosmic_accepts_quantity_gain(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -138,7 +134,6 @@ def test_cosmicray_lacosmic_accepts_quantity_gain(): def test_cosmicray_lacosmic_accepts_quantity_readnoise(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -156,7 +151,6 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): # of adu, a readnoise in electrons and a gain in adu / electron. # That is not internally inconsistent. ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) ccd_data.unit = "adu" threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) @@ -176,7 +170,6 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(): # Check that an input ccd in electrons raises a warning. 
ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) # The unit below is important for the test; this unit on # input is supposed to raise an error. ccd_data.unit = u.electron @@ -206,7 +199,6 @@ def test_cosmicray_lacosmic_invar_inbkg(new_args): # that calling with the new keyword arguments to astroscrappy # 1.1.0 raises no error. ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -304,28 +296,28 @@ def test_cosmicray_median_background_deviation(): def test_background_deviation_box(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) bd = background_deviation_box(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_box_fail(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) with pytest.raises(ValueError): background_deviation_box(cd, 0.5) def test_background_deviation_filter(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) bd = background_deviation_filter(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_filter_fail(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) with pytest.raises(ValueError): background_deviation_filter(cd, 0.5) @@ -356,7 +348,6 @@ def test_cosmicray_lacosmic_pssl_does_not_fail(): # to make sure that passing in pssl does not lead to an error # since the new interface does not include pssl. 
     ccd_data = ccd_data_func(data_scale=DATA_SCALE)
-    xp = array_api_compat.array_namespace(ccd_data.data)
     threshold = 5
     add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
     noise = DATA_SCALE * xp.ones_like(ccd_data.data)

From d0bcdad5dcfc5c4b3efce005a67a8200715194ba Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Thu, 3 Jul 2025 13:51:08 -0500
Subject: [PATCH 56/59] Cast number to float to avoid multiple namespaces

---
 ccdproc/core.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index 69a5334b..02b82696 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -1249,9 +1249,7 @@ def background_deviation_box(data, bbox, xp=None):
     for i in range(int(0.5 * bbox), xlen, bbox):
         for j in range(int(0.5 * bbox), ylen, bbox):
             x1, x2, y1, y2 = setbox(i, j, bbox, xlen, ylen)
-            barr = xpx.at(barr)[y1:y2, x1:x2].set(
-                xp.astype(sigma_func(data[y1:y2, x1:x2]), data.dtype)
-            )
+            barr = xpx.at(barr)[y1:y2, x1:x2].set(float(sigma_func(data[y1:y2, x1:x2])))
     return barr

From baae84fbf4da3044a814ae7d27f13e87fea7bf2f Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Sat, 5 Jul 2025 10:04:58 -0500
Subject: [PATCH 57/59] Change internal data and mask to private properties

---
 ccdproc/combiner.py            | 64 +++++++++++++++++-----------------
 ccdproc/tests/test_combiner.py | 58 +++++++++++++++---------------
 2 files changed, 61 insertions(+), 61 deletions(-)

diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py
index 986b75e9..e1ecfe88 100644
--- a/ccdproc/combiner.py
+++ b/ccdproc/combiner.py
@@ -184,14 +184,14 @@ def __init__(self, ccd_iter, dtype=None, xp=None):
 
         # set up the data array
         # new_shape = (len(ccd_list),) + default_shape
-        self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype)
+        self._data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype)
 
         # populate self.data_arr
         mask_list = [
             ccd.mask if ccd.mask is not None else xp.zeros(default_shape)
             for ccd in ccd_list
         ]
-        self.data_arr_mask = xp.array(mask_list, dtype=bool)
+        self._data_arr_mask = xp.array(mask_list, dtype=bool)
 
         # Must be after self.data_arr is defined because it checks the
         # length of the data array.
@@ -222,13 +222,13 @@ def weights(self, value):
         except TypeError as err:
             raise TypeError("weights must be an array.") from err
 
-        if value.shape != self.data_arr.shape:
+        if value.shape != self._data_arr.shape:
             if value.ndim != 1:
                 raise ValueError(
                     "1D weights expected when shapes of the "
                     "data and weights differ."
                 )
-            if value.shape[0] != self.data_arr.shape[0]:
+            if value.shape[0] != self._data_arr.shape[0]:
                 raise ValueError(
                     "Length of weights not compatible with specified axis."
) @@ -258,9 +258,9 @@ def scaling(self, value): if value is None: self._scaling = value else: - n_images = self.data_arr.shape[0] + n_images = self._data_arr.shape[0] if callable(value): - self._scaling = [value(self.data_arr[i]) for i in range(n_images)] + self._scaling = [value(self._data_arr[i]) for i in range(n_images)] self._scaling = xp.array(self._scaling) else: try: @@ -277,7 +277,7 @@ def scaling(self, value): ) self._scaling = xp.array(value) # reshape so that broadcasting occurs properly - for _ in range(len(self.data_arr.shape) - 1): + for _ in range(len(self._data_arr.shape) - 1): self._scaling = self.scaling[:, xp.newaxis] # set up IRAF-like minmax clipping @@ -329,12 +329,12 @@ def clip_extrema(self, nlow=0, nhigh=0): if nhigh is None: nhigh = 0 - argsorted = xp.argsort(self.data_arr, axis=0) + argsorted = xp.argsort(self._data_arr, axis=0) # Not every array package has mgrid, so make it in numpy and convert it to the # array package used for the data. mg = xp.asarray( np_mgrid[ - [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] + [slice(ndim) for i, ndim in enumerate(self._data_arr.shape) if i > 0] ] ) for i in range(-1 * nhigh, nlow): @@ -344,10 +344,10 @@ def clip_extrema(self, nlow=0, nhigh=0): # dimensions, so we need to flatten the mask array, set the mask # values for a flattened array, and then reshape it back to the # original shape. - flat_index = np_ravel_multi_index(where, self.data_arr.shape) - self.data_arr_mask = xp.reshape( - xpx.at(xp.reshape(self.data_arr_mask, (-1,)))[flat_index].set(True), - self.data_arr.shape, + flat_index = np_ravel_multi_index(where, self._data_arr.shape) + self._data_arr_mask = xp.reshape( + xpx.at(xp.reshape(self._data_arr_mask, (-1,)))[flat_index].set(True), + self._data_arr.shape, ) # set up min/max clipping algorithms @@ -365,13 +365,13 @@ def minmax_clipping(self, min_clip=None, max_clip=None): Default is ``None``. 
""" if min_clip is not None: - mask = self.data_arr < min_clip + mask = self._data_arr < min_clip # Written to avoid in-place modification of array - self.data_arr_mask = self.data_arr_mask | mask + self._data_arr_mask = self._data_arr_mask | mask if max_clip is not None: - mask = self.data_arr > max_clip + mask = self._data_arr > max_clip # Written to avoid in-place modification of array - self.data_arr_mask = self.data_arr_mask | mask + self._data_arr_mask = self._data_arr_mask | mask # set up sigma clipping algorithms @deprecated_renamed_argument( @@ -430,10 +430,10 @@ def sigma_clipping( # Remove in 3.0 _ = kwd.pop("use_astropy", True) - self.data_arr_mask = ( - self.data_arr_mask + self._data_arr_mask = ( + self._data_arr_mask | sigma_clip( - self.data_arr, + self._data_arr, sigma_lower=low_thresh, sigma_upper=high_thresh, axis=kwd.get("axis", 0), @@ -448,18 +448,18 @@ def sigma_clipping( def _get_scaled_data(self, scale_arg): if scale_arg is not None: - return self.data_arr * scale_arg + return self._data_arr * scale_arg if self.scaling is not None: - return self.data_arr * self.scaling - return self.data_arr + return self._data_arr * self.scaling + return self._data_arr def _get_nan_substituted_data(self, data): xp = self._xp # Get the data as an unmasked array with masked values filled as NaN - if self.data_arr_mask.any(): + if self._data_arr_mask.any(): # Use array_api_extra so that we can use at with all array libraries - data = xpx.at(data)[self.data_arr_mask].set(xp.nan) + data = xpx.at(data)[self._data_arr_mask].set(xp.nan) else: data = data return data @@ -478,7 +478,7 @@ def _combination_setup(self, user_func, default_func, scale_to): data = self._get_nan_substituted_data(data) masked_values = xp.isnan(data).sum(axis=0) else: - masked_values = self.data_arr_mask.sum(axis=0) + masked_values = self._data_arr_mask.sum(axis=0) combo_func = user_func return data, masked_values, combo_func @@ -532,7 +532,7 @@ def median_combine( medianed = median_func(data, axis=0) # set the mask - mask = masked_values == len(self.data_arr) + mask = masked_values == len(self._data_arr) # set the uncertainty @@ -549,7 +549,7 @@ def median_combine( # be an array of the same class as the data, so make sure it is uncertainty = xp.asarray(uncertainty) # Divide uncertainty by the number of pixel (#309) - uncertainty /= xp.sqrt(len(self.data_arr) - masked_values) + uncertainty /= xp.sqrt(len(self._data_arr) - masked_values) # Convert uncertainty to plain numpy array (#351) # There is no need to care about potential masks because the # uncertainty was calculated based on the data so potential masked @@ -566,7 +566,7 @@ def median_combine( ) # update the meta data - combined_image.meta["NCOMBINE"] = len(self.data_arr) + combined_image.meta["NCOMBINE"] = len(self._data_arr) # return the combined image return combined_image @@ -653,7 +653,7 @@ def average_combine( # calculate the mask - mask = masked_values == len(self.data_arr) + mask = masked_values == len(self._data_arr) # set up the deviation uncertainty = uncertainty_func(data, axis=0) @@ -729,7 +729,7 @@ def sum_combine( summed = sum_func(data, axis=0) # set up the mask - mask = masked_values == len(self.data_arr) + mask = masked_values == len(self._data_arr) # set up the deviation uncertainty = uncertainty_func(data, axis=0) @@ -749,7 +749,7 @@ def sum_combine( ) # update the meta data - combined_image.meta["NCOMBINE"] = len(self.data_arr) + combined_image.meta["NCOMBINE"] = len(self._data_arr) # return the combined image return combined_image diff 
--git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index eadde7b8..ddd67412 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -85,8 +85,8 @@ def test_combiner_create(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - assert c.data_arr.shape == (3, 100, 100) - assert c.data_arr_mask.shape == (3, 100, 100) + assert c._data_arr.shape == (3, 100, 100) + assert c._data_arr_mask.shape == (3, 100, 100) # test if dtype matches the value that is passed @@ -94,7 +94,7 @@ def test_combiner_dtype(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list, dtype=xp.float32) - assert c.data_arr.dtype == xp.float32 + assert c._data_arr.dtype == xp.float32 avg = c.average_combine() # dtype of average should match input dtype assert avg.dtype == c.dtype @@ -114,9 +114,9 @@ def test_combiner_mask(): ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) - assert c.data_arr.shape == (3, 10, 10) - assert c.data_arr_mask.shape == (3, 10, 10) - assert not c.data_arr_mask[0, 5, 5] + assert c._data_arr.shape == (3, 10, 10) + assert c._data_arr_mask.shape == (3, 10, 10) + assert not c._data_arr_mask[0, 5, 5] def test_weights(): @@ -158,7 +158,7 @@ def test_pixelwise_weights(): CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] combo = Combiner(ccd_list) - combo.weights = xp.ones_like(combo.data_arr) + combo.weights = xp.ones_like(combo._data_arr) combo.weights = xpx.at(combo.weights)[:, 5, 5].set(xp.array([1, 5, 10])) ccd = combo.average_combine() np_testing.assert_allclose(ccd.data[5, 5], 312.5) @@ -188,7 +188,7 @@ def test_combiner_minmax_max(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=None, max_clip=500) - assert c.data_arr_mask[2].all() + assert c._data_arr_mask[2].all() def test_combiner_minmax_min(): @@ -200,7 +200,7 @@ def test_combiner_minmax_min(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=-500, max_clip=None) - assert c.data_arr_mask[1].all() + assert c._data_arr_mask[1].all() def test_combiner_sigmaclip_high(): @@ -216,7 +216,7 @@ def test_combiner_sigmaclip_high(): c = Combiner(ccd_list) # using mad for more robust statistics vs. std c.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) - assert c.data_arr_mask[5].all() + assert c._data_arr_mask[5].all() def test_combiner_sigmaclip_single_pix(): @@ -231,13 +231,13 @@ def test_combiner_sigmaclip_single_pix(): combo = Combiner(ccd_list) # add a single pixel in another array to check that # that one gets rejected - combo.data_arr = xpx.at(combo.data_arr)[0, 5, 5].set(0) - combo.data_arr = xpx.at(combo.data_arr)[1, 5, 5].set(-5) - combo.data_arr = xpx.at(combo.data_arr)[2, 5, 5].set(5) - combo.data_arr = xpx.at(combo.data_arr)[3, 5, 5].set(-5) - combo.data_arr = xpx.at(combo.data_arr)[4, 5, 5].set(25) + combo._data_arr = xpx.at(combo._data_arr)[0, 5, 5].set(0) + combo._data_arr = xpx.at(combo._data_arr)[1, 5, 5].set(-5) + combo._data_arr = xpx.at(combo._data_arr)[2, 5, 5].set(5) + combo._data_arr = xpx.at(combo._data_arr)[3, 5, 5].set(-5) + combo._data_arr = xpx.at(combo._data_arr)[4, 5, 5].set(25) combo.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) - assert combo.data_arr_mask[4, 5, 5] + assert combo._data_arr_mask[4, 5, 5] def test_combiner_sigmaclip_low(): @@ -253,7 +253,7 @@ def test_combiner_sigmaclip_low(): c = Combiner(ccd_list) # using mad for more robust statistics vs. 
std c.sigma_clipping(high_thresh=None, low_thresh=3, func="median", dev_func=mad) - assert c.data_arr_mask[5].all() + assert c._data_arr_mask[5].all() # test that the median combination works and returns a ccddata object @@ -377,7 +377,7 @@ def test_combiner_with_scaling(): np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) # Set the scaling manually... - combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] + combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape @@ -586,7 +586,7 @@ def test_average_combine_uncertainty(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) ccd = c.average_combine(uncertainty_func=xp.sum) - uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3) np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call @@ -601,7 +601,7 @@ def test_median_combine_uncertainty(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) ccd = c.median_combine(uncertainty_func=xp.sum) - uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3) np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call @@ -616,7 +616,7 @@ def test_sum_combine_uncertainty(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) ccd = c.sum_combine(uncertainty_func=xp.sum) - uncert_ref = xp.sum(c.data_arr, 0) * xp.sqrt(3) + uncert_ref = xp.sum(c._data_arr, 0) * xp.sqrt(3) np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call @@ -798,8 +798,8 @@ def test_combiner_3d(): ccd_list = [data1, data2, data3] c = Combiner(ccd_list) - assert c.data_arr.shape == (3, 5, 5, 5) - assert c.data_arr_mask.shape == (3, 5, 5, 5) + assert c._data_arr.shape == (3, 5, 5, 5) + assert c._data_arr_mask.shape == (3, 5, 5, 5) ccd = c.average_combine() assert ccd.shape == (5, 5, 5) @@ -836,7 +836,7 @@ def test_3d_combiner_with_scaling(): np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) # Set the scaling manually... 
- combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] + combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape @@ -935,9 +935,9 @@ def test_clip_extrema_with_other_rejection(): ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) c = Combiner(ccdlist) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask = xpx.at(c.data_arr_mask)[1, 1, 2].set(True) + c._data_arr_mask = xpx.at(c._data_arr_mask)[1, 1, 2].set(True) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask = xpx.at(c.data_arr_mask)[3, 0, 0].set(True) + c._data_arr_mask = xpx.at(c._data_arr_mask)[3, 0, 0].set(True) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() @@ -979,8 +979,8 @@ def create_gen(): yield ccd_data c = Combiner(create_gen()) - assert c.data_arr.shape == (3, 100, 100) - assert c.data_arr_mask.shape == (3, 100, 100) + assert c._data_arr.shape == (3, 100, 100) + assert c._data_arr_mask.shape == (3, 100, 100) @pytest.mark.parametrize( @@ -1057,7 +1057,7 @@ def sum_func(_, axis=axis): return xp.sum(new_data, axis=axis) expected_result = 3 * data - actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask)) + actual_result = c.sum_combine(sum_func=my_summer(c._data_arr, c._data_arr_mask)) elif comb_func == "average_combine": expected_result = data actual_result = c.average_combine(scale_func=xp.mean) From 655e8443ef97163649c2da088ef38fe615f56564 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 5 Jul 2025 10:23:29 -0500 Subject: [PATCH 58/59] Add properties for accessing the data and mask to be used in combination --- ccdproc/combiner.py | 12 ++++++++++++ docs/image_combination.rst | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index e1ecfe88..ed496453 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -199,8 +199,20 @@ def __init__(self, ccd_iter, dtype=None, xp=None): @property def dtype(self): + """The dtype of the data array to be combined.""" return self._dtype + @property + def data(self): + """The data array to be combined.""" + return self._data_arr + + @property + def mask(self): + """The mask array to be used in image combination. This is *not* the mask + of the combined image, but the mask of the data array to be combined.""" + return self._data_arr_mask + @property def weights(self): """ diff --git a/docs/image_combination.rst b/docs/image_combination.rst index 460a1570..67e79ebb 100644 --- a/docs/image_combination.rst +++ b/docs/image_combination.rst @@ -104,11 +104,11 @@ To clip iteratively, continuing the clipping process until no more pixels are rejected, loop in the code calling the clipping method: >>> old_n_masked = 0 # dummy value to make loop execute at least once - >>> new_n_masked = combiner.data_arr_mask.sum() + >>> new_n_masked = combiner.mask.sum() >>> while (new_n_masked > old_n_masked): ... combiner.sigma_clipping(func=np.ma.median) ... old_n_masked = new_n_masked - ... new_n_masked = combiner.data_arr_mask.sum() + ... new_n_masked = combiner.mask.sum() Note that the default values for the high and low thresholds for rejection are 3 standard deviations. 
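The documentation loop above is easiest to follow with the new properties spelled
out in runnable form. The following is a minimal sketch, not part of any patch in
this series: it assumes a plain numpy backend and reuses the ``sigma_clipping``
keywords exercised in the tests earlier in the series.

    import numpy as np
    from astropy.nddata import CCDData
    from ccdproc import Combiner

    rng = np.random.default_rng(42)
    images = [
        CCDData(rng.normal(loc=100, scale=5, size=(10, 10)), unit="adu")
        for _ in range(5)
    ]
    combiner = Combiner(images)

    # ``data`` is the (n_images, ny, nx) stack; ``mask`` flags rejected pixels.
    assert combiner.data.shape == (5, 10, 10)

    # Clip until no more pixels are rejected, counting rejections via ``mask``.
    old_n_masked = 0
    new_n_masked = int(combiner.mask.sum())
    while new_n_masked > old_n_masked:
        combiner.sigma_clipping(low_thresh=3, high_thresh=3, func="median")
        old_n_masked = new_n_masked
        new_n_masked = int(combiner.mask.sum())

    combined = combiner.average_combine()
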
From 17c0dcaf4087fdc32a1ad4c8a5be9555aeca1dbc Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 5 Jul 2025 16:09:16 -0500 Subject: [PATCH 59/59] Apply suggestions from code review Co-authored-by: Nathaniel Starkman --- ccdproc/combiner.py | 4 ++-- ccdproc/log_meta.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index ed496453..a0e387fc 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -110,10 +110,10 @@ class Combiner: Default is ``None``. xp : array namespace, optional - The array namespace to use for the data. If not provided, it will + The array namespace to use for the data. If `None` or not provided, it will be inferred from the first `~astropy.nddata.CCDData` object in ``ccd_iter``. - Default is ``None``. + Default is `None`. Raises ------ diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index ebafa320..21dfab2b 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -134,7 +134,7 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, NDData | np.ndarray): + elif isinstance(value, (NDData, np.ndarray)): try: length = len(value) except TypeError:
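
Taken together, the series leaves the entry points able to accept an explicit
array namespace instead of inferring one. A hypothetical end-to-end sketch,
assuming this branch is installed and using ``array_api_compat.numpy`` as the
backend (any array-API-compatible namespace should work the same way):

    import array_api_compat.numpy as xp
    from astropy.nddata import CCDData
    from ccdproc import Combiner
    from ccdproc.core import background_deviation_box

    images = [CCDData(xp.ones((16, 16)), unit="adu") for _ in range(3)]

    # Passing xp skips namespace inference; the result matches Combiner(images).
    combiner = Combiner(images, xp=xp)
    stacked = combiner.average_combine()

    # The same keyword threads through the core helpers changed in this series.
    bkg_dev = background_deviation_box(images[0].data, bbox=4, xp=xp)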