From becc645b53fa30853c45509bbf9a8df9e56e814a Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sun, 11 Aug 2024 18:36:05 -0500 Subject: [PATCH 1/3] Add black as a test requirement and set up rules exclude extern files from black --- pyproject.toml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 3e93aaf6..1fd50086 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ docs = [ "sphinx-astropy", ] test = [ + "black", "memory_profiler", "pytest-astropy>=0.10.0", ] @@ -48,6 +49,19 @@ include = [ "/ccdproc", ] +[tool.black] +line-length = 88 +target-version = ['py310', 'py311'] +include = '\.pyi?$|\.ipynb$' +# 'extend-exclude' excludes files or directories in addition to the defaults +extend-exclude = ''' +# A regex preceded with ^/ will apply only to files and directories +# in the root of the project. +( + ^/ccdproc/extern +) +''' + [tool.coverage] [tool.coverage.run] source = ["ccdproc"] From f76a29df2b570e9f42bf2e14c15bbb9223bfa14f Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sun, 11 Aug 2024 18:35:43 -0500 Subject: [PATCH 2/3] blackify the code --- ccdproc/__init__.py | 10 +- ccdproc/_astropy_init.py | 5 +- ccdproc/ccddata.py | 4 +- ccdproc/combiner.py | 290 +++---- ccdproc/conftest.py | 14 +- ccdproc/core.py | 569 ++++++++------ ccdproc/image_collection.py | 293 ++++---- ccdproc/log_meta.py | 40 +- ccdproc/tests/make_mef.py | 28 +- ccdproc/tests/pytest_fixtures.py | 20 +- ccdproc/tests/run_for_memory_profile.py | 135 ++-- ccdproc/tests/run_with_file_number_limit.py | 133 ++-- ccdproc/tests/test_bitfield.py | 12 +- ccdproc/tests/test_ccdmask.py | 12 +- ccdproc/tests/test_ccdproc.py | 516 +++++++------ ccdproc/tests/test_ccdproc_logging.py | 68 +- ccdproc/tests/test_combine_open_files.py | 30 +- ccdproc/tests/test_combiner.py | 471 ++++++------ ccdproc/tests/test_cosmicray.py | 138 ++-- ccdproc/tests/test_gain.py | 24 +- ccdproc/tests/test_image_collection.py | 753 ++++++++++--------- ccdproc/tests/test_keyword.py | 31 +- ccdproc/tests/test_memory_use.py | 31 +- ccdproc/tests/test_rebin.py | 4 +- ccdproc/tests/test_wrapped_external_funcs.py | 77 +- ccdproc/utils/sample_directory.py | 52 +- ccdproc/utils/slices.py | 28 +- ccdproc/utils/tests/test_slices.py | 81 +- 28 files changed, 2135 insertions(+), 1734 deletions(-) diff --git a/ccdproc/__init__.py b/ccdproc/__init__.py index 73c00cac..85898bec 100644 --- a/ccdproc/__init__.py +++ b/ccdproc/__init__.py @@ -9,6 +9,7 @@ # should keep this content at the top. # ---------------------------------------------------------------------------- from ._astropy_init import * # noqa + # ---------------------------------------------------------------------------- # set up namespace @@ -21,12 +22,13 @@ class Conf(_config.ConfigNamespace): """Configuration parameters for ccdproc.""" + auto_logging = _config.ConfigItem( True, - 'Whether to automatically log operations to metadata' - 'If set to False, there is no need to specify add_keyword=False' - 'when calling processing operations.' 
- ) + "Whether to automatically log operations to metadata" + "If set to False, there is no need to specify add_keyword=False" + "when calling processing operations.", + ) conf = Conf() diff --git a/ccdproc/_astropy_init.py b/ccdproc/_astropy_init.py index bd453245..fa5b5c76 100644 --- a/ccdproc/_astropy_init.py +++ b/ccdproc/_astropy_init.py @@ -1,13 +1,14 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import os -__all__ = ['__version__', 'test'] +__all__ = ["__version__", "test"] try: from .version import version as __version__ except ImportError: - __version__ = '' + __version__ = "" # Create the test function for self test from astropy.tests.runner import TestRunner + test = TestRunner.make_test_runner_in(os.path.dirname(__file__)) diff --git a/ccdproc/ccddata.py b/ccdproc/ccddata.py index c7b42880..052634ba 100644 --- a/ccdproc/ccddata.py +++ b/ccdproc/ccddata.py @@ -5,9 +5,9 @@ from astropy.nddata import fits_ccddata_reader, fits_ccddata_writer, CCDData -__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer'] +__all__ = ["CCDData", "fits_ccddata_reader", "fits_ccddata_writer"] # This should be be a tuple to ensure it isn't inadvertently changed # elsewhere. -_recognized_fits_file_extensions = ('fit', 'fits', 'fts') +_recognized_fits_file_extensions = ("fit", "fits", "fts") diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index e068437c..fbc8c294 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -19,31 +19,31 @@ from astropy.utils import deprecated_renamed_argument from astropy import log -__all__ = ['Combiner', 'combine'] +__all__ = ["Combiner", "combine"] -def _default_median(): # pragma: no cover +def _default_median(): # pragma: no cover if HAS_BOTTLENECK: return bn.nanmedian else: return np.nanmedian -def _default_average(): # pragma: no cover +def _default_average(): # pragma: no cover if HAS_BOTTLENECK: return bn.nanmean else: return np.nanmean -def _default_sum(): # pragma: no cover +def _default_sum(): # pragma: no cover if HAS_BOTTLENECK: return bn.nansum else: return np.nansum -def _default_std(): # pragma: no cover +def _default_std(): # pragma: no cover if HAS_BOTTLENECK: return bn.nanstd else: @@ -94,9 +94,12 @@ class Combiner: [ 0.66666667, 0.66666667, 0.66666667, 0.66666667], [ 0.66666667, 0.66666667, 0.66666667, 0.66666667]]...) """ + def __init__(self, ccd_iter, dtype=None): if ccd_iter is None: - raise TypeError("ccd_iter should be a list or a generator of CCDData objects.") + raise TypeError( + "ccd_iter should be a list or a generator of CCDData objects." + ) if dtype is None: dtype = np.float64 @@ -109,8 +112,7 @@ def __init__(self, ccd_iter, dtype=None): for ccd in ccd_list: # raise an error if the objects aren't CCDData objects if not isinstance(ccd, CCDData): - raise TypeError( - "ccd_list should only contain CCDData objects.") + raise TypeError("ccd_list should only contain CCDData objects.") # raise an error if the shape is different if default_shape is None: @@ -170,9 +172,13 @@ def weights(self, value): if isinstance(value, np.ndarray): if value.shape != self.data_arr.data.shape: if value.ndim != 1: - raise ValueError("1D weights expected when shapes of the data and weights differ.") + raise ValueError( + "1D weights expected when shapes of the data and weights differ." + ) if value.shape[0] != self.data_arr.data.shape[0]: - raise ValueError("Length of weights not compatible with specified axis.") + raise ValueError( + "Length of weights not compatible with specified axis." 
+ ) self._weights = value else: raise TypeError("weights must be a numpy.ndarray.") @@ -201,18 +207,19 @@ def scaling(self, value): else: n_images = self.data_arr.data.shape[0] if callable(value): - self._scaling = [value(self.data_arr[i]) for - i in range(n_images)] + self._scaling = [value(self.data_arr[i]) for i in range(n_images)] self._scaling = np.array(self._scaling) else: try: len(value) == n_images self._scaling = np.array(value) except TypeError: - raise TypeError("scaling must be a function or an array " - "the same length as the number of images.") + raise TypeError( + "scaling must be a function or an array " + "the same length as the number of images." + ) # reshape so that broadcasting occurs properly - for i in range(len(self.data_arr.data.shape)-1): + for i in range(len(self.data_arr.data.shape) - 1): self._scaling = self.scaling[:, np.newaxis] # set up IRAF-like minmax clipping @@ -265,43 +272,47 @@ def clip_extrema(self, nlow=0, nhigh=0): nhigh = 0 argsorted = np.argsort(self.data_arr.data, axis=0) - mg = np.mgrid[[slice(ndim) - for i, ndim in enumerate(self.data_arr.shape) if i > 0]] - for i in range(-1*nhigh, nlow): + mg = np.mgrid[ + [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] + ] + for i in range(-1 * nhigh, nlow): # create a tuple with the indices - where = tuple([argsorted[i, :, :].ravel()] + - [i.ravel() for i in mg]) + where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg]) self.data_arr.mask[where] = True # set up min/max clipping algorithms def minmax_clipping(self, min_clip=None, max_clip=None): """Mask all pixels that are below min_clip or above max_clip. - Parameters - ----------- - min_clip : float or None, optional - If not None, all pixels with values below min_clip will be masked. - Default is ``None``. + Parameters + ----------- + min_clip : float or None, optional + If not None, all pixels with values below min_clip will be masked. + Default is ``None``. - max_clip : float or None, optional - If not None, all pixels with values above min_clip will be masked. - Default is ``None``. + max_clip : float or None, optional + If not None, all pixels with values above min_clip will be masked. + Default is ``None``. """ if min_clip is not None: - mask = (self.data_arr < min_clip) + mask = self.data_arr < min_clip self.data_arr.mask[mask] = True if max_clip is not None: - mask = (self.data_arr > max_clip) + mask = self.data_arr > max_clip self.data_arr.mask[mask] = True # set up sigma clipping algorithms - @deprecated_renamed_argument('use_astropy', None, arg_in_kwargs=True, - since='2.4.0', - message='The use_astropy argument has been removed because ' - 'astropy sigma clipping is now always used.' - ) - def sigma_clipping(self, low_thresh=3, high_thresh=3, - func='mean', dev_func='std', **kwd): + @deprecated_renamed_argument( + "use_astropy", + None, + arg_in_kwargs=True, + since="2.4.0", + message="The use_astropy argument has been removed because " + "astropy sigma clipping is now always used.", + ) + def sigma_clipping( + self, low_thresh=3, high_thresh=3, func="mean", dev_func="std", **kwd + ): """ Pixels will be rejected if they have deviations greater than those set by the threshold values. 
The algorithm will first calculated @@ -346,18 +357,20 @@ def sigma_clipping(self, low_thresh=3, high_thresh=3, """ # Remove in 3.0 - _ = kwd.pop('use_astropy', True) - - self.data_arr.mask = sigma_clip(self.data_arr.data, - sigma_lower=low_thresh, - sigma_upper=high_thresh, - axis=kwd.get('axis', 0), - copy=kwd.get('copy', False), - maxiters=kwd.get('maxiters', 1), - cenfunc=func, - stdfunc=dev_func, - masked=True, - **kwd).mask + _ = kwd.pop("use_astropy", True) + + self.data_arr.mask = sigma_clip( + self.data_arr.data, + sigma_lower=low_thresh, + sigma_upper=high_thresh, + axis=kwd.get("axis", 0), + copy=kwd.get("copy", False), + maxiters=kwd.get("maxiters", 1), + cenfunc=func, + stdfunc=dev_func, + masked=True, + **kwd, + ).mask def _get_scaled_data(self, scale_arg): if scale_arg is not None: @@ -374,10 +387,7 @@ def _get_nan_substituted_data(self, data): data = data.data return data - def _combination_setup(self, - user_func, - default_func, - scale_to): + def _combination_setup(self, user_func, default_func, scale_to): """ Handle the common pieces of image combination data/mask setup. """ @@ -397,8 +407,9 @@ def _combination_setup(self, return data, masked_values, combo_func # set up the combining algorithms - def median_combine(self, median_func=None, scale_to=None, - uncertainty_func=sigma_func): + def median_combine( + self, median_func=None, scale_to=None, uncertainty_func=sigma_func + ): """ Median combine a set of arrays. @@ -435,15 +446,14 @@ def median_combine(self, median_func=None, scale_to=None, deviation does not account for rejected pixels. """ - data, masked_values, median_func = \ - self._combination_setup(median_func, - _default_median(), - scale_to) + data, masked_values, median_func = self._combination_setup( + median_func, _default_median(), scale_to + ) medianed = median_func(data, axis=0) # set the mask - mask = (masked_values == len(self.data_arr)) + mask = masked_values == len(self.data_arr) # set the uncertainty @@ -466,12 +476,15 @@ def median_combine(self, median_func=None, scale_to=None, uncertainty = np.asarray(uncertainty) # create the combined image with a dtype matching the combiner - combined_image = CCDData(np.asarray(medianed, dtype=self.dtype), - mask=mask, unit=self.unit, - uncertainty=StdDevUncertainty(uncertainty)) + combined_image = CCDData( + np.asarray(medianed, dtype=self.dtype), + mask=mask, + unit=self.unit, + uncertainty=StdDevUncertainty(uncertainty), + ) # update the meta data - combined_image.meta['NCOMBINE'] = len(self.data_arr) + combined_image.meta["NCOMBINE"] = len(self.data_arr) # return the combined image return combined_image @@ -490,12 +503,17 @@ def _weighted_sum(self, data, sum_func): # Turns out bn.nansum has an implementation that is not # precise enough for float32 sums. Doing this should # ensure the sums are carried out as float64 - weights = weights.astype('float64') + weights = weights.astype("float64") weighted_sum = sum_func(data * weights, axis=0) return weighted_sum, weights - def average_combine(self, scale_func=None, scale_to=None, - uncertainty_func=_default_std(), sum_func=_default_sum()): + def average_combine( + self, + scale_func=None, + scale_to=None, + uncertainty_func=_default_std(), + sum_func=_default_sum(), + ): """ Average combine together a set of arrays. @@ -529,10 +547,9 @@ def average_combine(self, scale_func=None, scale_to=None, combined_image: `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. 
""" - data, masked_values, scale_func = \ - self._combination_setup(scale_func, - _default_average(), - scale_to) + data, masked_values, scale_func = self._combination_setup( + scale_func, _default_average(), scale_to + ) # # set up the data # data = self._get_scaled_data(scale_to) @@ -549,7 +566,7 @@ def average_combine(self, scale_func=None, scale_to=None, # calculate the mask - mask = (masked_values == len(self.data_arr)) + mask = masked_values == len(self.data_arr) # set up the deviation uncertainty = uncertainty_func(data, axis=0) @@ -559,18 +576,22 @@ def average_combine(self, scale_func=None, scale_to=None, uncertainty = np.asarray(uncertainty) # create the combined image with a dtype that matches the combiner - combined_image = CCDData(np.asarray(mean, dtype=self.dtype), - mask=mask, unit=self.unit, - uncertainty=StdDevUncertainty(uncertainty)) + combined_image = CCDData( + np.asarray(mean, dtype=self.dtype), + mask=mask, + unit=self.unit, + uncertainty=StdDevUncertainty(uncertainty), + ) # update the meta data - combined_image.meta['NCOMBINE'] = len(data) + combined_image.meta["NCOMBINE"] = len(data) # return the combined image return combined_image - def sum_combine(self, sum_func=None, scale_to=None, - uncertainty_func=_default_std()): + def sum_combine( + self, sum_func=None, scale_to=None, uncertainty_func=_default_std() + ): """ Sum combine together a set of arrays. @@ -604,10 +625,9 @@ def sum_combine(self, sum_func=None, scale_to=None, CCDData object based on the combined input of CCDData objects. """ - data, masked_values, sum_func = \ - self._combination_setup(sum_func, - _default_sum(), - scale_to) + data, masked_values, sum_func = self._combination_setup( + sum_func, _default_sum(), scale_to + ) if self.weights is not None: summed, weights = self._weighted_sum(data, sum_func) @@ -615,7 +635,7 @@ def sum_combine(self, sum_func=None, scale_to=None, summed = sum_func(data, axis=0) # set up the mask - mask = (masked_values == len(self.data_arr)) + mask = masked_values == len(self.data_arr) # set up the deviation uncertainty = uncertainty_func(data, axis=0) @@ -627,12 +647,15 @@ def sum_combine(self, sum_func=None, scale_to=None, uncertainty *= len(data) - masked_values # create the combined image with a dtype that matches the combiner - combined_image = CCDData(np.asarray(summed, dtype=self.dtype), - mask=mask, unit=self.unit, - uncertainty=StdDevUncertainty(uncertainty)) + combined_image = CCDData( + np.asarray(summed, dtype=self.dtype), + mask=mask, + unit=self.unit, + uncertainty=StdDevUncertainty(uncertainty), + ) # update the meta data - combined_image.meta['NCOMBINE'] = len(self.data_arr) + combined_image.meta["NCOMBINE"] = len(self.data_arr) # return the combined image return combined_image @@ -665,8 +688,7 @@ def _calculate_step_sizes(x_size, y_size, num_chunks): return xstep, ystep -def _calculate_size_of_image(ccd, - combine_uncertainty_function): +def _calculate_size_of_image(ccd, combine_uncertainty_function): # If uncertainty_func is given for combine this will create an uncertainty # even if the originals did not have one. In that case we need to create # an empty placeholder. 
@@ -695,15 +717,29 @@ def _calculate_size_of_image(ccd, return size_of_an_img -def combine(img_list, output_file=None, - method='average', weights=None, scale=None, mem_limit=16e9, - clip_extrema=False, nlow=1, nhigh=1, - minmax_clip=False, minmax_clip_min=None, minmax_clip_max=None, - sigma_clip=False, - sigma_clip_low_thresh=3, sigma_clip_high_thresh=3, - sigma_clip_func=ma.mean, sigma_clip_dev_func=ma.std, - dtype=None, combine_uncertainty_function=None, - overwrite_output=False, **ccdkwargs): +def combine( + img_list, + output_file=None, + method="average", + weights=None, + scale=None, + mem_limit=16e9, + clip_extrema=False, + nlow=1, + nhigh=1, + minmax_clip=False, + minmax_clip_min=None, + minmax_clip_max=None, + sigma_clip=False, + sigma_clip_low_thresh=3, + sigma_clip_high_thresh=3, + sigma_clip_func=ma.mean, + sigma_clip_dev_func=ma.std, + dtype=None, + combine_uncertainty_function=None, + overwrite_output=False, + **ccdkwargs, +): """ Convenience function for combining multiple images. @@ -818,23 +854,22 @@ def combine(img_list, output_file=None, # filenames separated by comma if isinstance(img_list, np.ndarray): img_list = img_list.tolist() - elif isinstance(img_list, str) and (',' in img_list): - img_list = img_list.split(',') + elif isinstance(img_list, str) and ("," in img_list): + img_list = img_list.split(",") else: try: # Maybe the input can be made into a list, so try that img_list = list(img_list) except TypeError: - raise ValueError( - "unrecognised input for list of images to combine.") + raise ValueError("unrecognised input for list of images to combine.") # Select Combine function to call in Combiner - if method == 'average': - combine_function = 'average_combine' - elif method == 'median': - combine_function = 'median_combine' - elif method == 'sum': - combine_function = 'sum_combine' + if method == "average": + combine_function = "average_combine" + elif method == "median": + combine_function = "median_combine" + elif method == "sum": + combine_function = "sum_combine" else: raise ValueError("unrecognised combine method : {0}.".format(method)) @@ -864,13 +899,12 @@ def combine(img_list, output_file=None, if ccd.mask is None: ccd.mask = np.zeros_like(ccd.data, dtype=bool) - size_of_an_img = _calculate_size_of_image(ccd, - combine_uncertainty_function) + size_of_an_img = _calculate_size_of_image(ccd, combine_uncertainty_function) no_of_img = len(img_list) # Set a memory use factor based on profiling - if method == 'median': + if method == "median": memory_factor = 3 else: memory_factor = 2 @@ -880,8 +914,10 @@ def combine(img_list, output_file=None, # determine the number of chunks to split the images into no_chunks = int((memory_factor * size_of_an_img * no_of_img) / mem_limit) + 1 if no_chunks > 1: - log.info('splitting each image into {0} chunks to limit memory usage ' - 'to {1} bytes.'.format(no_chunks, mem_limit)) + log.info( + "splitting each image into {0} chunks to limit memory usage " + "to {1} bytes.".format(no_chunks, mem_limit) + ) xs, ys = ccd.data.shape # Calculate strides for loop @@ -895,7 +931,7 @@ def combine(img_list, output_file=None, # Define all the Combiner properties one wants to apply before combining # images if weights is not None: - to_set_in_combiner['weights'] = weights + to_set_in_combiner["weights"] = weights if scale is not None: # If the scale is a function, then scaling function need to be applied @@ -910,24 +946,26 @@ def combine(img_list, output_file=None, scalevalues.append(scale(imgccd.data)) - to_set_in_combiner['scaling'] = 
np.array(scalevalues) + to_set_in_combiner["scaling"] = np.array(scalevalues) else: - to_set_in_combiner['scaling'] = scale + to_set_in_combiner["scaling"] = scale if clip_extrema: - to_call_in_combiner['clip_extrema'] = {'nlow': nlow, - 'nhigh': nhigh} + to_call_in_combiner["clip_extrema"] = {"nlow": nlow, "nhigh": nhigh} if minmax_clip: - to_call_in_combiner['minmax_clipping'] = {'min_clip': minmax_clip_min, - 'max_clip': minmax_clip_max} + to_call_in_combiner["minmax_clipping"] = { + "min_clip": minmax_clip_min, + "max_clip": minmax_clip_max, + } if sigma_clip: - to_call_in_combiner['sigma_clipping'] = { - 'low_thresh': sigma_clip_low_thresh, - 'high_thresh': sigma_clip_high_thresh, - 'func': sigma_clip_func, - 'dev_func': sigma_clip_dev_func} + to_call_in_combiner["sigma_clipping"] = { + "low_thresh": sigma_clip_low_thresh, + "high_thresh": sigma_clip_high_thresh, + "func": sigma_clip_func, + "dev_func": sigma_clip_dev_func, + } # Finally Run the input method on all the subsections of the image # and write final stitched image to ccd @@ -960,7 +998,7 @@ def combine(img_list, output_file=None, # Finally call the combine algorithm combine_kwds = {} if combine_uncertainty_function is not None: - combine_kwds['uncertainty_func'] = combine_uncertainty_function + combine_kwds["uncertainty_func"] = combine_uncertainty_function comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds) diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py index 2b662f45..a015ec23 100644 --- a/ccdproc/conftest.py +++ b/ccdproc/conftest.py @@ -8,11 +8,11 @@ try: # When the pytest_astropy_header package is installed - from pytest_astropy_header.display import (PYTEST_HEADER_MODULES, - TESTED_VERSIONS) + from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS def pytest_configure(config): config.option.astropy_header = True + except ImportError: PYTEST_HEADER_MODULES = {} TESTED_VERSIONS = {} @@ -24,7 +24,7 @@ def pytest_configure(config): try: from .version import version except ImportError: - version = 'dev' + version = "dev" packagename = os.path.basename(os.path.dirname(__file__)) TESTED_VERSIONS[packagename] = version @@ -32,9 +32,9 @@ def pytest_configure(config): # Add astropy to test header information and remove unused packages. 
try: - PYTEST_HEADER_MODULES['Astropy'] = 'astropy' - PYTEST_HEADER_MODULES['astroscrappy'] = 'astroscrappy' - PYTEST_HEADER_MODULES['reproject'] = 'reproject' - del PYTEST_HEADER_MODULES['h5py'] + PYTEST_HEADER_MODULES["Astropy"] = "astropy" + PYTEST_HEADER_MODULES["astroscrappy"] = "astroscrappy" + PYTEST_HEADER_MODULES["reproject"] = "reproject" + del PYTEST_HEADER_MODULES["h5py"] except KeyError: pass diff --git a/ccdproc/core.py b/ccdproc/core.py index a0a5f139..c7c3c349 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -27,39 +27,70 @@ logger = logging.getLogger(__name__) -__all__ = ['background_deviation_box', 'background_deviation_filter', - 'ccd_process', 'cosmicray_median', 'cosmicray_lacosmic', - 'create_deviation', 'flat_correct', 'gain_correct', 'rebin', - 'sigma_func', 'subtract_bias', 'subtract_dark', 'subtract_overscan', - 'transform_image', 'trim_image', 'wcs_project', 'Keyword', - 'median_filter', 'ccdmask', 'bitfield_to_boolean_mask'] +__all__ = [ + "background_deviation_box", + "background_deviation_filter", + "ccd_process", + "cosmicray_median", + "cosmicray_lacosmic", + "create_deviation", + "flat_correct", + "gain_correct", + "rebin", + "sigma_func", + "subtract_bias", + "subtract_dark", + "subtract_overscan", + "transform_image", + "trim_image", + "wcs_project", + "Keyword", + "median_filter", + "ccdmask", + "bitfield_to_boolean_mask", +] # The dictionary below is used to translate actual function names to names # that are FITS compliant, i.e. 8 characters or less. _short_names = { - 'background_deviation_box': 'bakdevbx', - 'background_deviation_filter': 'bakdfilt', - 'ccd_process': 'ccdproc', - 'cosmicray_median': 'crmedian', - 'create_deviation': 'creatvar', - 'flat_correct': 'flatcor', - 'gain_correct': 'gaincor', - 'subtract_bias': 'subbias', - 'subtract_dark': 'subdark', - 'subtract_overscan': 'suboscan', - 'trim_image': 'trimim', - 'transform_image': 'tranim', - 'wcs_project': 'wcsproj' + "background_deviation_box": "bakdevbx", + "background_deviation_filter": "bakdfilt", + "ccd_process": "ccdproc", + "cosmicray_median": "crmedian", + "create_deviation": "creatvar", + "flat_correct": "flatcor", + "gain_correct": "gaincor", + "subtract_bias": "subbias", + "subtract_dark": "subdark", + "subtract_overscan": "suboscan", + "trim_image": "trimim", + "transform_image": "tranim", + "wcs_project": "wcsproj", } @log_to_metadata -def ccd_process(ccd, oscan=None, trim=None, error=False, master_bias=None, - dark_frame=None, master_flat=None, bad_pixel_mask=None, - gain=None, readnoise=None, oscan_median=True, oscan_model=None, - min_value=None, dark_exposure=None, data_exposure=None, - exposure_key=None, exposure_unit=None, - dark_scale=False, gain_corrected=True): +def ccd_process( + ccd, + oscan=None, + trim=None, + error=False, + master_bias=None, + dark_frame=None, + master_flat=None, + bad_pixel_mask=None, + gain=None, + readnoise=None, + oscan_median=True, + oscan_model=None, + min_value=None, + dark_exposure=None, + data_exposure=None, + exposure_key=None, + exposure_unit=None, + dark_scale=False, + gain_corrected=True, +): """Perform basic processing on ccd data. 
The following steps can be included: @@ -194,17 +225,17 @@ def ccd_process(ccd, oscan=None, trim=None, error=False, master_bias=None, # apply the overscan correction if isinstance(oscan, CCDData): - nccd = subtract_overscan(nccd, overscan=oscan, - median=oscan_median, - model=oscan_model) + nccd = subtract_overscan( + nccd, overscan=oscan, median=oscan_median, model=oscan_model + ) elif isinstance(oscan, str): - nccd = subtract_overscan(nccd, fits_section=oscan, - median=oscan_median, - model=oscan_model) + nccd = subtract_overscan( + nccd, fits_section=oscan, median=oscan_median, model=oscan_model + ) elif oscan is None: pass else: - raise TypeError('oscan is not None, a string, or CCDData object.') + raise TypeError("oscan is not None, a string, or CCDData object.") # apply the trim correction if isinstance(trim, str): @@ -212,14 +243,13 @@ def ccd_process(ccd, oscan=None, trim=None, error=False, master_bias=None, elif trim is None: pass else: - raise TypeError('trim is not None or a string.') + raise TypeError("trim is not None or a string.") # create the error frame if error and gain is not None and readnoise is not None: nccd = create_deviation(nccd, gain=gain, readnoise=readnoise) elif error and (gain is None or readnoise is None): - raise ValueError( - 'gain and readnoise must be specified to create error frame.') + raise ValueError("gain and readnoise must be specified to create error frame.") # apply the bad pixel mask if isinstance(bad_pixel_mask, np.ndarray): @@ -227,11 +257,11 @@ def ccd_process(ccd, oscan=None, trim=None, error=False, master_bias=None, elif bad_pixel_mask is None: pass else: - raise TypeError('bad_pixel_mask is not None or numpy.ndarray.') + raise TypeError("bad_pixel_mask is not None or numpy.ndarray.") # apply the gain correction if not (gain is None or isinstance(gain, Quantity)): - raise TypeError('gain is not None or astropy.units.Quantity.') + raise TypeError("gain is not None or astropy.units.Quantity.") if gain is not None and gain_corrected: nccd = gain_correct(nccd, gain) @@ -242,21 +272,23 @@ def ccd_process(ccd, oscan=None, trim=None, error=False, master_bias=None, elif master_bias is None: pass else: - raise TypeError( - 'master_bias is not None or a CCDData object.') + raise TypeError("master_bias is not None or a CCDData object.") # subtract the dark frame if isinstance(dark_frame, CCDData): - nccd = subtract_dark(nccd, dark_frame, dark_exposure=dark_exposure, - data_exposure=data_exposure, - exposure_time=exposure_key, - exposure_unit=exposure_unit, - scale=dark_scale) + nccd = subtract_dark( + nccd, + dark_frame, + dark_exposure=dark_exposure, + data_exposure=data_exposure, + exposure_time=exposure_key, + exposure_unit=exposure_unit, + scale=dark_scale, + ) elif dark_frame is None: pass else: - raise TypeError( - 'dark_frame is not None or a CCDData object.') + raise TypeError("dark_frame is not None or a CCDData object.") # test dividing the master flat if isinstance(master_flat, CCDData): @@ -264,8 +296,7 @@ def ccd_process(ccd, oscan=None, trim=None, error=False, master_bias=None, elif master_flat is None: pass else: - raise TypeError( - 'master_flat is not None or a CCDData object.') + raise TypeError("master_flat is not None or a CCDData object.") # apply the gain correction only at the end if gain_corrected is False if gain is not None and not gain_corrected: @@ -318,13 +349,13 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): """ if gain is not None and not isinstance(gain, Quantity): - raise 
TypeError('gain must be a astropy.units.Quantity.') + raise TypeError("gain must be a astropy.units.Quantity.") if readnoise is None: - raise ValueError('must provide a readnoise.') + raise ValueError("must provide a readnoise.") if not isinstance(readnoise, Quantity): - raise TypeError('readnoise must be a astropy.units.Quantity.') + raise TypeError("readnoise must be a astropy.units.Quantity.") if gain is None: gain = 1.0 * u.dimensionless_unscaled @@ -339,15 +370,15 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): # remove values that might be negative or treat as nan data = gain_value * ccd_data.data - mask = (data < 0) + mask = data < 0 if disregard_nan: data[mask] = 0 else: data[mask] = np.nan - logging.warning('Negative values in array will be replaced with nan') + logging.warning("Negative values in array will be replaced with nan") # calculate the deviation - var = (data + readnoise_value ** 2) ** 0.5 + var = (data + readnoise_value**2) ** 0.5 # ensure uncertainty and image data have same unit ccd = ccd_data.copy() @@ -357,8 +388,9 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): @log_to_metadata -def subtract_overscan(ccd, overscan=None, overscan_axis=1, fits_section=None, - median=False, model=None): +def subtract_overscan( + ccd, overscan=None, overscan_axis=1, fits_section=None, median=False, model=None +): """ Subtract the overscan region from an image. @@ -447,19 +479,18 @@ def subtract_overscan(ccd, overscan=None, overscan_axis=1, fits_section=None, """ if not (isinstance(ccd, CCDData) or isinstance(ccd, np.ndarray)): - raise TypeError('ccddata is not a CCDData or ndarray object.') + raise TypeError("ccddata is not a CCDData or ndarray object.") - if ((overscan is not None and fits_section is not None) or - (overscan is None and fits_section is None)): - raise TypeError('specify either overscan or fits_section, but not ' - 'both.') + if (overscan is not None and fits_section is not None) or ( + overscan is None and fits_section is None + ): + raise TypeError("specify either overscan or fits_section, but not " "both.") if (overscan is not None) and (not isinstance(overscan, CCDData)): - raise TypeError('overscan is not a CCDData object.') + raise TypeError("overscan is not a CCDData object.") - if (fits_section is not None and - not isinstance(fits_section, str)): - raise TypeError('overscan is not a string.') + if fits_section is not None and not isinstance(fits_section, str): + raise TypeError("overscan is not a string.") if fits_section is not None: overscan = ccd[slice_from_string(fits_section, fits_convention=True)] @@ -546,8 +577,7 @@ def trim_image(ccd, fits_section=None): In this case, ``not_really_trimmed`` is a view of the underlying array ``arr1``, not a copy. 
""" - if (fits_section is not None and - not isinstance(fits_section, str)): + if fits_section is not None and not isinstance(fits_section, str): raise TypeError("fits_section must be a string.") trimmed = ccd.copy() if fits_section: @@ -581,10 +611,12 @@ def subtract_bias(ccd, master): try: result = ccd.subtract(master) except ValueError as e: - if 'operand units' in str(e): - raise u.UnitsError("Unit '{}' of the uncalibrated image does not " - "match unit '{}' of the calibration " - "image".format(ccd.unit, master.unit)) + if "operand units" in str(e): + raise u.UnitsError( + "Unit '{}' of the uncalibrated image does not " + "match unit '{}' of the calibration " + "image".format(ccd.unit, master.unit) + ) else: raise e @@ -593,9 +625,15 @@ def subtract_bias(ccd, master): @log_to_metadata -def subtract_dark(ccd, master, dark_exposure=None, data_exposure=None, - exposure_time=None, exposure_unit=None, - scale=False): +def subtract_dark( + ccd, + master, + dark_exposure=None, + data_exposure=None, + exposure_time=None, + exposure_unit=None, + scale=False, +): """ Subtract dark current from an image. @@ -638,22 +676,30 @@ def subtract_dark(ccd, master, dark_exposure=None, data_exposure=None, Dark-subtracted image. """ if ccd.shape != master.shape: - err_str = "operands could not be subtracted with shapes {} {}".format(ccd.shape, master.shape) + err_str = "operands could not be subtracted with shapes {} {}".format( + ccd.shape, master.shape + ) raise ValueError(err_str) if not (isinstance(ccd, CCDData) and isinstance(master, CCDData)): raise TypeError("ccd and master must both be CCDData objects.") - if (data_exposure is not None and - dark_exposure is not None and - exposure_time is not None): - raise TypeError("specify either exposure_time or " - "(dark_exposure and data_exposure), not both.") + if ( + data_exposure is not None + and dark_exposure is not None + and exposure_time is not None + ): + raise TypeError( + "specify either exposure_time or " + "(dark_exposure and data_exposure), not both." + ) if data_exposure is None and dark_exposure is None: if exposure_time is None: - raise TypeError("must specify either exposure_time or both " - "dark_exposure and data_exposure.") + raise TypeError( + "must specify either exposure_time or both " + "dark_exposure and data_exposure." 
+ ) if isinstance(exposure_time, Keyword): data_exposure = exposure_time.value_from(ccd.header) dark_exposure = exposure_time.value_from(master.header) @@ -661,8 +707,9 @@ def subtract_dark(ccd, master, dark_exposure=None, data_exposure=None, data_exposure = ccd.header[exposure_time] dark_exposure = master.header[exposure_time] - if not (isinstance(dark_exposure, Quantity) and - isinstance(data_exposure, Quantity)): + if not ( + isinstance(dark_exposure, Quantity) and isinstance(data_exposure, Quantity) + ): if exposure_time: try: data_exposure *= exposure_unit @@ -670,32 +717,34 @@ def subtract_dark(ccd, master, dark_exposure=None, data_exposure=None, except TypeError: raise TypeError("must provide unit for exposure time.") else: - raise TypeError("exposure times must be astropy.units.Quantity " - "objects.") + raise TypeError("exposure times must be astropy.units.Quantity objects.") try: if scale: master_scaled = master.copy() # data_exposure and dark_exposure are both quantities, # so we can just have subtract do the scaling - master_scaled = master_scaled.multiply(data_exposure / - dark_exposure) + master_scaled = master_scaled.multiply(data_exposure / dark_exposure) result = ccd.subtract(master_scaled) else: result = ccd.subtract(master) except (u.UnitsError, u.UnitConversionError, ValueError) as e: # Astropy LTS (v1) returns a ValueError, not a UnitsError, so catch # that if it appears to really be a UnitsError. - if (isinstance(e, ValueError) and - 'operand units' not in str(e) and - astropy.__version__.startswith('1.0')): + if ( + isinstance(e, ValueError) + and "operand units" not in str(e) + and astropy.__version__.startswith("1.0") + ): raise e # Make the error message a little more explicit than what is returned # by default. - raise u.UnitsError("Unit '{}' of the uncalibrated image does not " - "match unit '{}' of the calibration " - "image".format(ccd.unit, master.unit)) + raise u.UnitsError( + "Unit '{}' of the uncalibrated image does not " + "match unit '{}' of the calibration " + "image".format(ccd.unit, master.unit) + ) result.meta = ccd.meta.copy() return result @@ -783,7 +832,7 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None): flat_mean_val = norm_value elif norm_value is not None: # norm_value was set to a bad value - raise ValueError('norm_value must be greater than zero.') + raise ValueError("norm_value must be greater than zero.") else: # norm_value was not set, use mean of the image. 
flat_mean_val = use_flat.data.mean() @@ -850,7 +899,7 @@ def transform_image(ccd, transform_func, **kwargs): """ # check that it is a ccddata object if not isinstance(ccd, CCDData): - raise TypeError('ccd is not a CCDData.') + raise TypeError("ccd is not a CCDData.") # make a copy of the object nccd = ccd.copy() @@ -859,14 +908,13 @@ def transform_image(ccd, transform_func, **kwargs): try: nccd.data = transform_func(nccd.data, **kwargs) except TypeError as exc: - if 'is not callable' in str(exc): - raise TypeError('transform_func is not a callable.') + if "is not callable" in str(exc): + raise TypeError("transform_func is not a callable.") raise # transform the uncertainty plane if it exists if nccd.uncertainty is not None: - nccd.uncertainty.array = transform_func(nccd.uncertainty.array, - **kwargs) + nccd.uncertainty.array = transform_func(nccd.uncertainty.array, **kwargs) # transform the mask plane if nccd.mask is not None: @@ -874,14 +922,14 @@ def transform_image(ccd, transform_func, **kwargs): nccd.mask = mask > 0 if nccd.wcs is not None: - warn = 'WCS information may be incorrect as no transformation was applied to it' + warn = "WCS information may be incorrect as no transformation was applied to it" warnings.warn(warn, UserWarning) return nccd @log_to_metadata -def wcs_project(ccd, target_wcs, target_shape=None, order='bilinear'): +def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): """ Given a CCDData image with WCS, project it onto a target WCS and return the reprojected data as a new CCDData image. @@ -923,22 +971,20 @@ def wcs_project(ccd, target_wcs, target_shape=None, order='bilinear'): from reproject import reproject_interp if not (ccd.wcs.is_celestial and target_wcs.is_celestial): - raise ValueError('one or both WCS is not celestial.') + raise ValueError("one or both WCS is not celestial.") if target_shape is None: target_shape = ccd.shape - projected_image_raw, _ = reproject_interp((ccd.data, ccd.wcs), - target_wcs, - shape_out=target_shape, - order=order) + projected_image_raw, _ = reproject_interp( + (ccd.data, ccd.wcs), target_wcs, shape_out=target_shape, order=order + ) reprojected_mask = None if ccd.mask is not None: - reprojected_mask, _ = reproject_interp((ccd.mask, ccd.wcs), - target_wcs, - shape_out=target_shape, - order=order) + reprojected_mask, _ = reproject_interp( + (ccd.mask, ccd.wcs), target_wcs, shape_out=target_shape, order=order + ) # Make the mask 1 if the reprojected mask pixel value is non-zero. # A small threshold is included to allow for some rounding in # reproject_interp. @@ -952,8 +998,7 @@ def wcs_project(ccd, target_wcs, target_shape=None, order='bilinear'): output_mask = output_mask | reprojected_mask # Need to scale counts by ratio of pixel areas - area_ratio = (proj_plane_pixel_area(target_wcs) / - proj_plane_pixel_area(ccd.wcs)) + area_ratio = proj_plane_pixel_area(target_wcs) / proj_plane_pixel_area(ccd.wcs) # If nothing ended up masked, don't create a mask. 
if not output_mask.any(): @@ -962,9 +1007,13 @@ def wcs_project(ccd, target_wcs, target_shape=None, order='bilinear'): # If there are any wcs keywords in the header, remove them hdr, _ = _generate_wcs_and_update_header(ccd.header) - nccd = CCDData(area_ratio * projected_image_raw, wcs=target_wcs, - mask=output_mask, - header=hdr, unit=ccd.unit) + nccd = CCDData( + area_ratio * projected_image_raw, + wcs=target_wcs, + mask=output_mask, + header=hdr, + unit=ccd.unit, + ) return nccd @@ -991,9 +1040,10 @@ def sigma_func(arr, axis=None, ignore_nan=False): uncertainty : float uncertainty of array estimated from median absolute deviation. """ - return (stats.median_absolute_deviation(arr, axis=axis, - ignore_nan=ignore_nan) - * 1.482602218505602) + return ( + stats.median_absolute_deviation(arr, axis=axis, ignore_nan=ignore_nan) + * 1.482602218505602 + ) def setbox(x, y, mbox, xmax, ymax): @@ -1070,7 +1120,7 @@ def background_deviation_box(data, bbox): # Check to make sure the background box is an appropriate size # If it is too small, then insufficient statistics are generated if bbox < 1: - raise ValueError('bbox must be greater than 1.') + raise ValueError("bbox must be greater than 1.") # make the background image barr = data * 0.0 + data.std() @@ -1108,14 +1158,16 @@ def background_deviation_filter(data, bbox): """ # Check to make sure the background box is an appropriate size if bbox < 1: - raise ValueError('bbox must be greater than 1.') + raise ValueError("bbox must be greater than 1.") return ndimage.generic_filter(data, sigma_func, size=(bbox, bbox)) -@deprecated('1.1', - message='The rebin function will be removed in ccdproc 3.0 ' - 'Use block_reduce or block_replicate instead.') +@deprecated( + "1.1", + message="The rebin function will be removed in ccdproc 3.0 " + "Use block_reduce or block_replicate instead.", +) def rebin(ccd, newshape): """ Rebin an array to have a new shape. @@ -1170,20 +1222,17 @@ def rebin(ccd, newshape): # check to see that the two arrays are going to be the same length if len(ccd.shape) != len(newshape): - raise ValueError('newshape does not have the same dimensions as ' - 'ccd.') + raise ValueError("newshape does not have the same dimensions as " "ccd.") - slices = [slice(0, old, old/new) for old, new in - zip(ccd.shape, newshape)] + slices = [slice(0, old, old / new) for old, new in zip(ccd.shape, newshape)] coordinates = np.mgrid[slices] - indices = coordinates.astype('i') + indices = coordinates.astype("i") return ccd[tuple(indices)] elif isinstance(ccd, CCDData): # check to see that the two arrays are going to be the same length if len(ccd.shape) != len(newshape): - raise ValueError('newshape does not have the same dimensions as ' - 'ccd.') + raise ValueError("newshape does not have the same dimensions as ccd.") nccd = ccd.copy() # rebin the data plane @@ -1199,7 +1248,7 @@ def rebin(ccd, newshape): return nccd else: - raise TypeError('ccd is not an ndarray or a CCDData object.') + raise TypeError("ccd is not an ndarray or a CCDData object.") def block_reduce(ccd, block_size, func=np.sum): @@ -1213,8 +1262,7 @@ def block_reduce(ccd, block_size, func=np.sum): def block_average(ccd, block_size): - """Like `block_reduce` but with predefined ``func=np.mean``. 
- """ + """Like `block_reduce` but with predefined ``func=np.mean``.""" data = nddata.block_reduce(ccd, block_size, np.mean) # Like in block_reduce: if isinstance(ccd, CCDData): @@ -1235,7 +1283,7 @@ def block_replicate(ccd, block_size, conserve_sum=True): # Append original docstring to docstrings of these functions block_reduce.__doc__ += nddata.block_reduce.__doc__ block_replicate.__doc__ += nddata.block_replicate.__doc__ - __all__ += ['block_average', 'block_reduce', 'block_replicate'] + __all__ += ["block_average", "block_reduce", "block_replicate"] except AttributeError: # Astropy 1.0 has no block_reduce, block_average del block_reduce, block_average, block_replicate @@ -1274,21 +1322,24 @@ def _blkavg(data, newshape): """ # check to see that is in a nddata type if not isinstance(data, np.ndarray): - raise TypeError('data is not a ndarray object.') + raise TypeError("data is not a ndarray object.") # check to see that the two arrays are going to be the same length if len(data.shape) != len(newshape): - raise ValueError('newshape does not have the same dimensions as data.') + raise ValueError("newshape does not have the same dimensions as data.") shape = data.shape lenShape = len(shape) - factor = np.asarray(shape)/np.asarray(newshape) + factor = np.asarray(shape) / np.asarray(newshape) - evList = ['data.reshape('] + \ - ['newshape[%d],int(factor[%d]),' % (i, i) for i in range(lenShape)] + \ - [')'] + ['.mean(%d)' % (i + 1) for i in range(lenShape)] + evList = ( + ["data.reshape("] + + ["newshape[%d],int(factor[%d])," % (i, i) for i in range(lenShape)] + + [")"] + + [".mean(%d)" % (i + 1) for i in range(lenShape)] + ) - return eval(''.join(evList)) + return eval("".join(evList)) def median_filter(data, *args, **kwargs): @@ -1299,8 +1350,7 @@ def median_filter(data, *args, **kwargs): copied ``unit`` and ``meta``. """ if isinstance(data, CCDData): - out_kwargs = {'meta': data.meta.copy(), - 'unit': data.unit} + out_kwargs = {"meta": data.meta.copy(), "unit": data.unit} result = ndimage.median_filter(data.data, *args, **kwargs) return CCDData(result, **out_kwargs) else: @@ -1309,20 +1359,38 @@ def median_filter(data, *args, **kwargs): # This originally used the "message" argument but that is not # supported until astropy 5, so use alternative instead. -@deprecated_renamed_argument('pssl', None, '2.3.0', - arg_in_kwargs=True, - alternative='The pssl keyword will be removed in ' - 'ccdproc 3.0. Use inbkg instead to have ' - 'astroscrappy temporarily remove the background ' - 'during processing.') -def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, - objlim=5.0, gain=1.0, readnoise=6.5, - satlevel=65535.0, pssl=0.0, niter=4, - sepmed=True, cleantype='meanmask', fsmode='median', - psfmodel='gauss', psffwhm=2.5, psfsize=7, - psfk=None, psfbeta=4.765, verbose=False, - gain_apply=True, - inbkg=None, invar=None): +@deprecated_renamed_argument( + "pssl", + None, + "2.3.0", + arg_in_kwargs=True, + alternative="The pssl keyword will be removed in " + "ccdproc 3.0. Use inbkg instead to have astroscrappy temporarily remove the " + "background during processing.", +) +def cosmicray_lacosmic( + ccd, + sigclip=4.5, + sigfrac=0.3, + objlim=5.0, + gain=1.0, + readnoise=6.5, + satlevel=65535.0, + pssl=0.0, + niter=4, + sepmed=True, + cleantype="meanmask", + fsmode="median", + psfmodel="gauss", + psffwhm=2.5, + psfsize=7, + psfk=None, + psfbeta=4.765, + verbose=False, + gain_apply=True, + inbkg=None, + invar=None, +): r""" Identify cosmic rays through the L.A. Cosmic technique. The L.A. 
Cosmic technique identifies cosmic rays by identifying pixels based on a variation @@ -1518,8 +1586,9 @@ def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, readnoise = readnoise * u.electron # Handle transition from old astroscrappy interface to new - old_astroscrappy_interface = (pkgversion.parse(asy_version) < - pkgversion.parse('1.1.0')) + old_astroscrappy_interface = pkgversion.parse(asy_version) < pkgversion.parse( + "1.1.0" + ) # Use this dictionary to define which keyword arguments are actually # passed to astroscrappy. @@ -1529,7 +1598,7 @@ def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, data_offset = 0 # Handle setting up the keyword arguments for both interfaces - if old_astroscrappy_interface: # pragma: no cover + if old_astroscrappy_interface: # pragma: no cover new_args = dict(inbkg=inbkg, invar=invar) bad_args = [] for k, v in new_args.items(): @@ -1537,10 +1606,12 @@ def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, bad_args.append(k) if bad_args: - s = 's' if len(bad_args) > 1 else '' - bads = ', '.join(bad_args) - raise TypeError(f'The argument{s} {bads} only valid for astroscrappy ' - '1.1.0 or higher.') + s = "s" if len(bad_args) > 1 else "" + bads = ", ".join(bad_args) + raise TypeError( + f"The argument{s} {bads} only valid for astroscrappy " + "1.1.0 or higher." + ) if pssl != 0: asy_background_kwargs = dict(pssl=pssl) @@ -1548,7 +1619,7 @@ def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, else: if pssl != 0: if (inbkg is not None) or (invar is not None): - raise ValueError('Cannot set both pssl and inbkg') + raise ValueError("Cannot set both pssl and inbkg") # The old version of astroscrappy added the bkg back in # if pssl was provided. The new one does not, so set an offset @@ -1561,20 +1632,31 @@ def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, data = ccd crmask, cleanarr = detect_cosmics( - data + data_offset, inmask=None, sigclip=sigclip, - sigfrac=sigfrac, objlim=objlim, gain=gain.value, - readnoise=readnoise.value, satlevel=satlevel, - niter=niter, sepmed=sepmed, cleantype=cleantype, - fsmode=fsmode, psfmodel=psfmodel, psffwhm=psffwhm, - psfsize=psfsize, psfk=psfk, psfbeta=psfbeta, + data + data_offset, + inmask=None, + sigclip=sigclip, + sigfrac=sigfrac, + objlim=objlim, + gain=gain.value, + readnoise=readnoise.value, + satlevel=satlevel, + niter=niter, + sepmed=sepmed, + cleantype=cleantype, + fsmode=fsmode, + psfmodel=psfmodel, + psffwhm=psffwhm, + psfsize=psfsize, + psfk=psfk, + psfbeta=psfbeta, verbose=verbose, - **asy_background_kwargs) + **asy_background_kwargs, + ) cleanarr = cleanarr - data_offset - cleanarr = _astroscrappy_gain_apply_helper(cleanarr, - gain.value, - gain_apply, - old_astroscrappy_interface) + cleanarr = _astroscrappy_gain_apply_helper( + cleanarr, gain.value, gain_apply, old_astroscrappy_interface + ) return cleanarr, crmask @@ -1583,39 +1665,56 @@ def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, # gain and readnoise have no units. In that case we issue a warning # instead of raising an error to avoid crashing user's pipelines. if ccd.unit.is_equivalent(u.electron) and gain.value != 1.0: - warnings.warn("Image unit is electron but gain value " - "is not 1.0. Data maybe end up being gain " - "corrected twice.") + warnings.warn( + "Image unit is electron but gain value " + "is not 1.0. Data maybe end up being gain " + "corrected twice." 
+ ) else: - if ((readnoise.unit == u.electron) + if ( + (readnoise.unit == u.electron) and (ccd.unit == u.electron) - and (gain.value == 1.0)): + and (gain.value == 1.0) + ): gain = gain.value * u.one # Check unit consistency before taking the time to check for # cosmic rays. if not (gain * ccd).unit.is_equivalent(readnoise.unit): - raise ValueError('Inconsistent units for gain ({}) '.format(gain.unit) + - ' ccd ({}) and readnoise ({}).'.format(ccd.unit, - readnoise.unit)) + raise ValueError( + "Inconsistent units for gain ({}) ".format(gain.unit) + + " ccd ({}) and readnoise ({}).".format(ccd.unit, readnoise.unit) + ) crmask, cleanarr = detect_cosmics( - ccd.data + data_offset, inmask=ccd.mask, - sigclip=sigclip, sigfrac=sigfrac, objlim=objlim, gain=gain.value, - readnoise=readnoise.value, satlevel=satlevel, - niter=niter, sepmed=sepmed, cleantype=cleantype, - fsmode=fsmode, psfmodel=psfmodel, psffwhm=psffwhm, - psfsize=psfsize, psfk=psfk, psfbeta=psfbeta, verbose=verbose, - **asy_background_kwargs) + ccd.data + data_offset, + inmask=ccd.mask, + sigclip=sigclip, + sigfrac=sigfrac, + objlim=objlim, + gain=gain.value, + readnoise=readnoise.value, + satlevel=satlevel, + niter=niter, + sepmed=sepmed, + cleantype=cleantype, + fsmode=fsmode, + psfmodel=psfmodel, + psffwhm=psffwhm, + psfsize=psfsize, + psfk=psfk, + psfbeta=psfbeta, + verbose=verbose, + **asy_background_kwargs, + ) # create the new ccd data object nccd = ccd.copy() cleanarr = cleanarr - data_offset - cleanarr = _astroscrappy_gain_apply_helper(cleanarr, - gain.value, - gain_apply, - old_astroscrappy_interface) + cleanarr = _astroscrappy_gain_apply_helper( + cleanarr, gain.value, gain_apply, old_astroscrappy_interface + ) # Fix the units if the gain is being applied. nccd.unit = ccd.unit * gain.unit @@ -1629,11 +1728,10 @@ def cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3, return nccd else: - raise TypeError('ccd is not a CCDData or ndarray object.') + raise TypeError("ccd is not a CCDData or ndarray object.") -def _astroscrappy_gain_apply_helper(cleaned_data, gain, - gain_apply, old_interface): +def _astroscrappy_gain_apply_helper(cleaned_data, gain, gain_apply, old_interface): """ Helper function for logic determining how to apply gain to cleaned data. In the old astroscrappy interface cleaned data was always @@ -1668,8 +1766,7 @@ def _astroscrappy_gain_apply_helper(cleaned_data, gain, return cleaned_data -def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, - rbox=0): +def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): """ Identify cosmic rays through median technique. 
The median technique identifies cosmic rays by identifying pixels by subtracting a median image @@ -1749,7 +1846,7 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, error_image = data.std() else: if not isinstance(error_image, (float, np.ndarray)): - raise TypeError('error_image is not a float or ndarray.') + raise TypeError("error_image is not a float or ndarray.") # create the median image marr = ndimage.median_filter(data, size=(mbox, mbox)) @@ -1762,7 +1859,7 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rarr = (data - marr) / error_image # identify all sources - crarr = (rarr > thresh) + crarr = rarr > thresh # grow the pixels if gbox > 0: @@ -1782,11 +1879,16 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, if error_image is None and ccd.uncertainty is not None: error_image = ccd.uncertainty.array if ccd.data.shape != error_image.shape: - raise ValueError('error_image is not the same shape as data.') + raise ValueError("error_image is not the same shape as data.") - data, crarr = cosmicray_median(ccd.data, error_image=error_image, - thresh=thresh, mbox=mbox, gbox=gbox, - rbox=rbox) + data, crarr = cosmicray_median( + ccd.data, + error_image=error_image, + thresh=thresh, + mbox=mbox, + gbox=gbox, + rbox=rbox, + ) # create the new ccd data object nccd = ccd.copy() @@ -1798,11 +1900,21 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, return nccd else: - raise TypeError('ccd is not an numpy.ndarray or a CCDData object.') - - -def ccdmask(ratio, findbadcolumns=False, byblocks=False, ncmed=7, nlmed=7, - ncsig=15, nlsig=15, lsigma=9, hsigma=9, ngood=5): + raise TypeError("ccd is not an numpy.ndarray or a CCDData object.") + + +def ccdmask( + ratio, + findbadcolumns=False, + byblocks=False, + ncmed=7, + nlmed=7, + ncsig=15, + nlsig=15, + lsigma=9, + hsigma=9, + ngood=5, +): """ Uses method based on the IRAF ccdmask task to generate a mask based on the given input. @@ -1915,14 +2027,13 @@ def ccdmask(ratio, findbadcolumns=False, byblocks=False, ncmed=7, nlmed=7, raise ValueError('"ratio" should be a "CCDData".') def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): - """Helper function to mask values outside of the specified sigma range. 
- """ - return ((baseline < -lower_sigma * one_sigma_value) | - (baseline > upper_sigma * one_sigma_value)) + """Helper function to mask values outside of the specified sigma range.""" + return (baseline < -lower_sigma * one_sigma_value) | ( + baseline > upper_sigma * one_sigma_value + ) mask = ~np.isfinite(ratio.data) - medsub = (ratio.data - - ndimage.median_filter(ratio.data, size=(nlmed, ncmed))) + medsub = ratio.data - ndimage.median_filter(ratio.data, size=(nlmed, ncmed)) if byblocks: nlinesblock = int(math.ceil(nlines / nlsig)) @@ -1944,8 +2055,7 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): csum = np.ma.sum(mblock, axis=0) csum[csum <= 0] = 0 csum_sigma = np.ma.MaskedArray(np.sqrt(c2 - c1 - csum)) - colmask = _sigma_mask(csum.filled(1), csum_sigma, - lsigma, hsigma) + colmask = _sigma_mask(csum.filled(1), csum_sigma, lsigma, hsigma) block_mask[:, :] |= colmask[np.newaxis, :] mask[l1:l2, c1:c2] = block_mask @@ -1964,8 +2074,7 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): if mask[line, col]: for i in range(2, ngood + 2): lend = line + i - if (mask[lend, col] and - not np.all(mask[line:lend + 1, col])): + if mask[lend, col] and not np.all(mask[line : lend + 1, col]): mask[line:lend, col] = True return mask @@ -2055,13 +2164,13 @@ def bitfield_to_boolean_mask(bitfield, ignore_bits=0, flip_bits=None): """ return _bitfield_to_boolean_mask( - bitfield, ignore_bits, flip_bits=flip_bits, - good_mask_value=False, dtype=bool) + bitfield, ignore_bits, flip_bits=flip_bits, good_mask_value=False, dtype=bool + ) class Keyword: - """ - """ + """ """ + def __init__(self, name, unit=None, value=None): self._name = name self._unit = unit @@ -2088,14 +2197,14 @@ def value(self, value): self._value = value elif isinstance(value, str): if self.unit is not None: - raise ValueError("keyword with a unit cannot have a " - "string value.") + raise ValueError("keyword with a unit cannot have a " "string value.") else: self._value = value else: if self.unit is None: - raise ValueError("no unit provided. Set value with " - "an astropy.units.Quantity.") + raise ValueError( + "no unit provided. Set value with " "an astropy.units.Quantity." + ) self._value = value * self.unit def value_from(self, header): diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py index 84820900..c3a4be71 100644 --- a/ccdproc/image_collection.py +++ b/ccdproc/image_collection.py @@ -20,8 +20,8 @@ logger = logging.getLogger(__name__) -__all__ = ['ImageFileCollection'] -__doctest_skip__ = ['*'] +__all__ = ["ImageFileCollection"] +__doctest_skip__ = ["*"] class ImageFileCollection: @@ -82,9 +82,16 @@ class ImageFileCollection: value. 
""" - def __init__(self, location=None, keywords=None, - find_fits_by_reading=False, - filenames=None, glob_include=None, glob_exclude=None, ext=0): + def __init__( + self, + location=None, + keywords=None, + find_fits_by_reading=False, + filenames=None, + glob_include=None, + glob_exclude=None, + ext=0, + ): # Include or exclude files from the collection based on glob pattern # matching - has to go above call to _get_files() if glob_exclude is not None: @@ -98,7 +105,7 @@ def __init__(self, location=None, keywords=None, if location is not None: self._location = location else: - self._location = '' + self._location = "" self._find_fits_by_reading = find_fits_by_reading @@ -107,12 +114,11 @@ def __init__(self, location=None, keywords=None, self._files = self._get_files() if self._files == []: - warnings.warn("no FITS files in the collection.", - AstropyUserWarning) + warnings.warn("no FITS files in the collection.", AstropyUserWarning) self._summary = {} if keywords is None: # Use all keywords. - keywords = '*' + keywords = "*" # Used internally to keep track of whether the user asked for all # keywords or a specific list. The keywords setter takes care of @@ -137,12 +143,12 @@ def __repr__(self): kw = "keywords={!r}".format(self.keywords[1:]) if self.glob_exclude is None: - glob_exclude = '' + glob_exclude = "" else: glob_exclude = "glob_exclude={!r}".format(self.glob_exclude) if self.glob_include is None: - glob_include = '' + glob_include = "" else: glob_include = "glob_include={!r}".format(self.glob_include) @@ -157,10 +163,11 @@ def __repr__(self): filenames = "filenames={}".format(self._filenames) params = [location, kw, filenames, glob_include, glob_exclude, ext] - params = ', '.join([p for p in params if p]) + params = ", ".join([p for p in params if p]) str_repr = "{self.__class__.__name__}({params})".format( - self=self, params=params) + self=self, params=params + ) return str_repr @@ -220,34 +227,33 @@ def keywords(self, keywords): self._summary = [] return - if keywords == '*': + if keywords == "*": self._all_keywords = True else: self._all_keywords = False - logging.debug('keywords in setter before pruning: %s.', keywords) + logging.debug("keywords in setter before pruning: %s.", keywords) # remove duplicates and force a copy so we can sort the items later # by their given position. new_keys_set = set(keywords) new_keys_lst = list(new_keys_set) - new_keys_set.add('file') + new_keys_set.add("file") - logging.debug('keywords after pruning %s.', new_keys_lst) + logging.debug("keywords after pruning %s.", new_keys_lst) current_set = set(self.keywords) if new_keys_set.issubset(current_set): - logging.debug('table columns before trimming: %s.', - ' '.join(current_set)) + logging.debug("table columns before trimming: %s.", " ".join(current_set)) cut_keys = current_set.difference(new_keys_set) - logging.debug('will try removing columns: %s.', - ' '.join(cut_keys)) + logging.debug("will try removing columns: %s.", " ".join(cut_keys)) for key in cut_keys: self._summary.remove_column(key) - logging.debug('after removal column names are: %s.', - ' '.join(self.keywords)) + logging.debug( + "after removal column names are: %s.", " ".join(self.keywords) + ) else: - logging.debug('should be building new table...') + logging.debug("should be building new table...") # Reorder the keywords to match the initial ordering. new_keys_lst.sort(key=keywords.index) self._summary = self._fits_summary(new_keys_lst) @@ -308,8 +314,7 @@ def values(self, keyword, unique=False): Values as a list. 
""" if keyword not in self.keywords: - raise ValueError( - 'keyword %s is not in the current summary' % keyword) + raise ValueError("keyword %s is not in the current summary" % keyword) if unique: return list(set(self.summary[keyword].tolist())) else: @@ -368,23 +373,24 @@ def files_filtered(self, **kwd): return [] # force a copy by explicitly converting to a list - current_file_mask = self.summary['file'].mask.tolist() + current_file_mask = self.summary["file"].mask.tolist() - include_path = kwd.pop('include_path', False) + include_path = kwd.pop("include_path", False) self._find_keywords_by_values(**kwd) - filtered_files = self.summary['file'].compressed() - self.summary['file'].mask = current_file_mask + filtered_files = self.summary["file"].compressed() + self.summary["file"].mask = current_file_mask if include_path: - filtered_files = [path.join(self._location, f) - for f in filtered_files.tolist()] + filtered_files = [ + path.join(self._location, f) for f in filtered_files.tolist() + ] return filtered_files def refresh(self): """ Refresh the collection by re-reading headers. """ - keywords = '*' if self._all_keywords else self.keywords + keywords = "*" if self._all_keywords else self.keywords # Re-load list of files self._files = self._get_files() self._summary = self._fits_summary(header_keywords=keywords) @@ -402,7 +408,7 @@ def sort(self, keys): """ if len(self._summary) > 0: self._summary.sort(keys) - self._files = self.summary['file'].tolist() + self._files = self.summary["file"].tolist() def filter(self, **kwd): """ @@ -430,11 +436,10 @@ def filter(self, **kwd): to filter. """ files = self.files_filtered(include_path=True, **kwd) - return ImageFileCollection(filenames=files, - keywords=self.keywords) + return ImageFileCollection(filenames=files, keywords=self.keywords) def _get_files(self): - """ Helper method which checks whether ``files`` should be set + """Helper method which checks whether ``files`` should be set to a subset of file names or to all file names in a directory. 
Returns @@ -450,19 +455,23 @@ def _get_files(self): files = self._filenames else: # Check if self.location is set, otherwise proceed with empty list - if self.location != '': + if self.location != "": files = self._fits_files_in_directory() if self.glob_include is not None: files = fnmatch.filter(files, self.glob_include) if self.glob_exclude is not None: - files = [file for file in files - if not fnmatch.fnmatch(file, self.glob_exclude)] + files = [ + file + for file in files + if not fnmatch.fnmatch(file, self.glob_exclude) + ] return files - def _dict_from_fits_header(self, file_name, input_summary=None, - missing_marker=None): + def _dict_from_fits_header( + self, file_name, input_summary=None, missing_marker=None + ): """ Construct an ordered dictionary whose keys are the header keywords and values are a list of the values from this file and the input @@ -499,11 +508,11 @@ def _add_val_to_dict(key, value, tbl_dict, n_previous, missing_marker): n_previous = 0 else: summary = input_summary - n_previous = len(summary['file']) + n_previous = len(summary["file"]) h = fits.getheader(file_name, self.ext) - assert 'file' not in h + assert "file" not in h if self.location: # We have a location and can reconstruct the path using it @@ -515,24 +524,22 @@ def _add_val_to_dict(key, value, tbl_dict, n_previous, missing_marker): # Try opening header before this so that file name is only added if # file is valid FITS try: - summary['file'].append(name_for_file_column) + summary["file"].append(name_for_file_column) except KeyError: - summary['file'] = [name_for_file_column] + summary["file"] = [name_for_file_column] - missing_in_this_file = [k for k in summary if (k not in h and - k != 'file')] + missing_in_this_file = [k for k in summary if (k not in h and k != "file")] - multi_entry_keys = {'comment': [], - 'history': []} + multi_entry_keys = {"comment": [], "history": []} alreadyencountered = set() for k, v in h.items(): - if k == '': + if k == "": continue k = k.lower() - if k in ['comment', 'history']: + if k in ["comment", "history"]: multi_entry_keys[k].append(str(v)) # Accumulate these in a separate dictionary until the # end to avoid adding multiple entries to summary. @@ -545,8 +552,9 @@ def _add_val_to_dict(key, value, tbl_dict, n_previous, missing_marker): warnings.warn( 'Header from file "{f}" contains multiple entries for ' '"{k}", the pair "{k}={v}" will be ignored.' - ''.format(k=k, v=v, f=file_name), - UserWarning) + "".format(k=k, v=v, f=file_name), + UserWarning, + ) continue else: # Add the key to the already encountered keys so we don't add @@ -557,17 +565,15 @@ def _add_val_to_dict(key, value, tbl_dict, n_previous, missing_marker): for k, v in multi_entry_keys.items(): if v: - joined = ','.join(v) - _add_val_to_dict(k, joined, summary, n_previous, - missing_marker) + joined = ",".join(v) + _add_val_to_dict(k, joined, summary, n_previous, missing_marker) for missing in missing_in_this_file: summary[missing].append(missing_marker) return summary - def _set_column_name_case_to_match_keywords(self, header_keys, - summary_table): + def _set_column_name_case_to_match_keywords(self, header_keys, summary_table): for k in header_keys: k_lower = k.lower() if k_lower != k: @@ -596,11 +602,11 @@ def _fits_summary(self, header_keywords): # Get rid of any duplicate keywords, also forces a copy. 
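        # (A sketch of the round trip with hypothetical keywords: the set
        # drops duplicates and ordering, and the sort further down restores
        # the order the user asked for:
        #     original_keywords = ["exptime", "filter", "exptime"]
        #     set(original_keywords)                    -> {"filter", "exptime"}
        #     sorted(keys, key=original_keywords.index) -> ["exptime", "filter"])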
header_keys = set(original_keywords) - header_keys.add('file') + header_keys.add("file") - file_name_column = MaskedColumn(name='file', data=self.files) + file_name_column = MaskedColumn(name="file", data=self.files) - if not header_keys or (header_keys == {'file'}): + if not header_keys or (header_keys == {"file"}): summary_table = Table(masked=True) summary_table.add_column(file_name_column) return summary_table @@ -614,36 +620,37 @@ def _fits_summary(self, header_keywords): # Note: summary_dict is an OrderedDict, so should preserve # the order of the keywords in the FITS header. summary_dict = self._dict_from_fits_header( - file_path, input_summary=summary_dict, - missing_marker=missing_marker) + file_path, input_summary=summary_dict, missing_marker=missing_marker + ) except IOError as e: - logger.warning('unable to get FITS header for file %s: %s.', - file_path, e) + logger.warning( + "unable to get FITS header for file %s: %s.", file_path, e + ) continue summary_table = Table(summary_dict, masked=True) for column in summary_table.colnames: summary_table[column].mask = [ - v is missing_marker for v in summary_table[column].tolist()] + v is missing_marker for v in summary_table[column].tolist() + ] - self._set_column_name_case_to_match_keywords(header_keys, - summary_table) + self._set_column_name_case_to_match_keywords(header_keys, summary_table) missing_columns = header_keys - set(summary_table.colnames) - missing_columns -= {'*'} + missing_columns -= {"*"} length = len(summary_table) for column in missing_columns: - all_masked = MaskedColumn(name=column, data=np.zeros(length), - mask=np.ones(length)) + all_masked = MaskedColumn( + name=column, data=np.zeros(length), mask=np.ones(length) + ) summary_table.add_column(all_masked) - if '*' not in header_keys: + if "*" not in header_keys: # Rearrange table columns to match order of keywords. # File always comes first. - header_keys -= {'file'} - original_order = ['file'] + sorted(header_keys, - key=original_keywords.index) + header_keys -= {"file"} + original_order = ["file"] + sorted(header_keys, key=original_keywords.index) summary_table = summary_table[original_order] if not summary_table.masked: @@ -680,7 +687,7 @@ def _find_keywords_by_values(self, **kwd): NOTE: Value comparison is case *insensitive* for strings. """ - regex_match = kwd.pop('regex_match', False) + regex_match = kwd.pop("regex_match", False) keywords = kwd.keys() values = kwd.values() @@ -693,12 +700,12 @@ def _find_keywords_by_values(self, **kwd): matches = np.ones(len(use_info), dtype=bool) for key, value in zip(keywords, values): - logger.debug('key %s, value %s', key, value) - logger.debug('value in table %s', use_info[key]) + logger.debug("key %s, value %s", key, value) + logger.debug("value in table %s", use_info[key]) value_missing = use_info[key].mask - logger.debug('value missing: %s', value_missing) + logger.debug("value missing: %s", value_missing) value_not_missing = np.logical_not(value_missing) - if value == '*': + if value == "*": have_this_value = value_not_missing elif value is not None: if isinstance(value, str): @@ -708,20 +715,19 @@ def _find_keywords_by_values(self, **kwd): # We are going to do a regex match no matter what. if regex_match: - pattern = re.compile(value, - flags=re.IGNORECASE) + pattern = re.compile(value, flags=re.IGNORECASE) else: # Escape all special characters that might be present value = re.escape(value) # This pattern matches the prior behavior. 
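                        # (With a hypothetical value "light", re.escape plus
                        # the "^...$" anchors give r"^light$": "LIGHT" matches
                        # case-insensitively, "light frame" does not. With
                        # regex_match=True the value is compiled as-is above.)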
- pattern = re.compile('^' + value + '$', - flags=re.IGNORECASE) + pattern = re.compile("^" + value + "$", flags=re.IGNORECASE) for idx, file_key_value in enumerate(use_info[key].tolist()): if value_not_missing[idx]: try: value_matches = ( - pattern.search(file_key_value) is not None) + pattern.search(file_key_value) is not None + ) except TypeError: # In case we're dealing with an object column # there could be values other than strings in it @@ -730,11 +736,10 @@ def _find_keywords_by_values(self, **kwd): else: value_matches = False - have_this_value[idx] = (value_not_missing[idx] & - value_matches) + have_this_value[idx] = value_not_missing[idx] & value_matches else: have_this_value = value_not_missing - tmp = (use_info[key][value_not_missing] == value) + tmp = use_info[key][value_not_missing] == value have_this_value[value_not_missing] = tmp have_this_value &= value_not_missing else: @@ -746,12 +751,11 @@ def _find_keywords_by_values(self, **kwd): # the numpy convention is that the mask is True for values to # be omitted, hence use ~matches. - logger.debug('Matches: %s', matches) - self.summary['file'].mask = ma.nomask - self.summary['file'].mask[~matches] = True + logger.debug("Matches: %s", matches) + self.summary["file"].mask = ma.nomask + self.summary["file"].mask[~matches] = True - def _fits_files_in_directory(self, extensions=None, - compressed=True): + def _fits_files_in_directory(self, extensions=None, compressed=True): """ Get names of FITS files in directory, based on filename extension. @@ -777,7 +781,7 @@ def _fits_files_in_directory(self, extensions=None, # The common compressed fits image .fz is supported using ext=1 when calling ImageFileCollection if compressed: - for comp in ['.gz', '.bz2', '.Z', '.zip', '.fz']: + for comp in [".gz", ".bz2", ".Z", ".zip", ".fz"]: with_comp = [extension + comp for extension in full_extensions] full_extensions.extend(with_comp) @@ -785,26 +789,30 @@ def _fits_files_in_directory(self, extensions=None, files = [] if not self._find_fits_by_reading: for extension in full_extensions: - files.extend(fnmatch.filter(all_files, '*' + extension)) + files.extend(fnmatch.filter(all_files, "*" + extension)) else: for infile in all_files: inpath = path.join(self.location, infile) - with open(inpath, 'rb') as fp: + with open(inpath, "rb") as fp: # Hmm, first argument to is_fits is not actually used in # that function. *shrug* - if fits.connect.is_fits('just some junk', infile, fp): + if fits.connect.is_fits("just some junk", infile, fp): files.append(infile) files.sort() return files - def _generator(self, return_type, - save_with_name="", save_location='', - overwrite=False, - do_not_scale_image_data=True, - return_fname=False, - ccd_kwargs=None, - **kwd): + def _generator( + self, + return_type, + save_with_name="", + save_location="", + overwrite=False, + do_not_scale_image_data=True, + return_fname=False, + ccd_kwargs=None, + **kwd, + ): """ Generator that yields each {name} in the collection. @@ -890,28 +898,29 @@ def _generator(self, return_type, ccd_kwargs = ccd_kwargs or {} for full_path in self._paths(): - add_kwargs = {'do_not_scale_image_data': do_not_scale_image_data} + add_kwargs = {"do_not_scale_image_data": do_not_scale_image_data} # We need to open the file here, get the appropriate values and then # close it again before it "yields" otherwise it's not garantueed # that the generator actually advances and closes the file again. # For example if one uses "next" on the generator manually the # file handle could "leak". 
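        # (A sketch of the hazard with hypothetical usage: if the handle were
        # still open at the yield, then
        #     gen = collection.hdus()
        #     hdu = next(gen)   # caller takes one item and discards gen
        # would leave that file open until the generator is garbage
        # collected.)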
- if return_type == 'header': + if return_type == "header": return_thing = fits.getheader(full_path, self.ext) - elif return_type == 'data': + elif return_type == "data": return_thing = fits.getdata(full_path, self.ext, **add_kwargs) - elif return_type == 'ccd': + elif return_type == "ccd": return_thing = fits_ccddata_reader( - full_path, hdu=self.ext, **ccd_kwargs) - elif return_type == 'hdu': + full_path, hdu=self.ext, **ccd_kwargs + ) + elif return_type == "hdu": with fits.open(full_path, **add_kwargs) as hdulist: ext_index = hdulist.index_of(self.ext) # Need to copy the HDU to prevent lazy loading problems # and "IO operations on closed file" errors return_thing = hdulist[ext_index].copy() else: - raise ValueError('no generator for {}'.format(return_type)) + raise ValueError("no generator for {}".format(return_type)) file_name = path.basename(full_path) if return_fname: @@ -930,22 +939,22 @@ def _generator(self, return_type, new_path = path.join(destination_dir, basename) - if return_type == 'ccd': + if return_type == "ccd": pass elif (new_path != full_path) or overwrite: with fits.open(full_path, **add_kwargs) as hdulist: ext_index = hdulist.index_of(self.ext) - if return_type == 'hdu': + if return_type == "hdu": hdulist[ext_index] = return_thing - elif return_type == 'data': + elif return_type == "data": hdulist[ext_index].data = return_thing - elif return_type == 'header': + elif return_type == "header": hdulist[ext_index].header = return_thing try: hdulist.writeto(new_path, overwrite=overwrite) except IOError: - logger.error('error writing file %s', new_path) + logger.error("error writing file %s", new_path) raise # reset mask @@ -956,42 +965,52 @@ def _paths(self): """ Full path to each file. """ - unmasked_files = self.summary['file'].compressed().tolist() + unmasked_files = self.summary["file"].compressed().tolist() return [path.join(self.location, file_) for file_ in unmasked_files] def headers(self, do_not_scale_image_data=True, **kwd): - return self._generator('header', - do_not_scale_image_data=do_not_scale_image_data, - **kwd) + return self._generator( + "header", do_not_scale_image_data=do_not_scale_image_data, **kwd + ) + headers.__doc__ = _generator.__doc__.format( - name='header', default_scaling='True', - return_type='astropy.io.fits.Header') + name="header", default_scaling="True", return_type="astropy.io.fits.Header" + ) def hdus(self, do_not_scale_image_data=False, **kwd): - return self._generator('hdu', - do_not_scale_image_data=do_not_scale_image_data, - **kwd) + return self._generator( + "hdu", do_not_scale_image_data=do_not_scale_image_data, **kwd + ) + hdus.__doc__ = _generator.__doc__.format( - name='HDU', default_scaling='False', - return_type="`, ` ".join(('astropy.io.fits.PrimaryHDU', 'astropy.io.fits.ImageHDU'))) + name="HDU", + default_scaling="False", + return_type="`, ` ".join( + ("astropy.io.fits.PrimaryHDU", "astropy.io.fits.ImageHDU") + ), + ) def data(self, do_not_scale_image_data=False, **kwd): - return self._generator('data', - do_not_scale_image_data=do_not_scale_image_data, - **kwd) + return self._generator( + "data", do_not_scale_image_data=do_not_scale_image_data, **kwd + ) + data.__doc__ = _generator.__doc__.format( - name='image', default_scaling='False', return_type='numpy.ndarray') + name="image", default_scaling="False", return_type="numpy.ndarray" + ) def ccds(self, ccd_kwargs=None, **kwd): - if (clobber := kwd.get('clobber')) is not None: + if (clobber := kwd.get("clobber")) is not None: warnings.warn( "The 'clobber' keyword argument is 
a deprecated alias for 'overwrite'", category=DeprecationWarning, - stacklevel=2 + stacklevel=2, ) kwd["overwrite"] = clobber - if kwd.get('overwrite'): + if kwd.get("overwrite"): raise NotImplementedError("overwrite=True is not supported for CCDs.") - return self._generator('ccd', ccd_kwargs=ccd_kwargs, **kwd) + return self._generator("ccd", ccd_kwargs=ccd_kwargs, **kwd) + ccds.__doc__ = _generator.__doc__.format( - name='CCDData', default_scaling='True', return_type='astropy.nddata.CCDData') + name="CCDData", default_scaling="True", return_type="astropy.nddata.CCDData" + ) diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index 41a4a633..5a409a4f 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -14,10 +14,9 @@ __all__ = [] -_LOG_ARGUMENT = 'add_keyword' +_LOG_ARGUMENT = "add_keyword" -_LOG_ARG_HELP = \ - """ +_LOG_ARG_HELP = """ {arg} : str, `~ccdproc.Keyword` or dict-like, optional Item(s) to add to metadata of result. Set to False or None to completely disable logging. @@ -25,7 +24,9 @@ The key is the name of this function and the value is a string containing the arguments the function was called with, except the value of this argument. - """.format(arg=_LOG_ARGUMENT) + """.format( + arg=_LOG_ARGUMENT +) def _insert_in_metadata_fits_safe(ccd, key, value): @@ -41,11 +42,12 @@ def _insert_in_metadata_fits_safe(ccd, key, value): # Shorten, sort of... short_name = _short_names[key] if isinstance(ccd.meta, fits.Header): - ccd.meta['HIERARCH {0}'.format(key.upper())] = ( - short_name, "Shortened name for ccdproc command") + ccd.meta["HIERARCH {0}".format(key.upper())] = ( + short_name, + "Shortened name for ccdproc command", + ) else: - ccd.meta[key] = ( - short_name, "Shortened name for ccdproc command") + ccd.meta[key] = (short_name, "Shortened name for ccdproc command") ccd.meta[short_name] = value else: ccd.meta[key] = value @@ -64,8 +66,12 @@ def log_to_metadata(func): func.__doc__ = func.__doc__.format(log=_LOG_ARG_HELP) argspec = inspect.getfullargspec(func) - original_args, varargs, keywords, defaults = (argspec.args, argspec.varargs, - argspec.varkw, argspec.defaults) + original_args, varargs, keywords, defaults = ( + argspec.args, + argspec.varargs, + argspec.varkw, + argspec.defaults, + ) # original_args = argspec.args # varargs = argspec.varargs # keywords = argspec.varkw @@ -73,7 +79,7 @@ def log_to_metadata(func): # Grab the names of positional arguments for use in automatic logging try: - original_positional_args = original_args[:-len(defaults)] + original_positional_args = original_args[: -len(defaults)] except TypeError: original_positional_args = original_args @@ -86,8 +92,7 @@ def log_to_metadata(func): defaults.append(True) signature_with_arg_added = inspect.signature(func) - signature_with_arg_added = "{0}{1}".format(func.__name__, - signature_with_arg_added) + signature_with_arg_added = "{0}{1}".format(func.__name__, signature_with_arg_added) func.__doc__ = "\n".join([signature_with_arg_added, func.__doc__]) @wraps(func) @@ -108,12 +113,13 @@ def wrapper(*args, **kwd): key = func.__name__ # Get names of arguments, which may or may not have # been called as keywords. 
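        # (For example, a hypothetical call subtract_bias(ccd, master) would
        # be logged under the key "subtract_bias" with a value built here as
        # roughly "ccd=<CCDData>, master=<CCDData>", arrays having been
        # swapped out by _replace_array_with_placeholder.)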
- positional_args = original_args[:len(args)] + positional_args = original_args[: len(args)] all_args = chain(zip(positional_args, args), kwd.items()) - all_args = ["{0}={1}".format(name, - _replace_array_with_placeholder(val)) - for name, val in all_args] + all_args = [ + "{0}={1}".format(name, _replace_array_with_placeholder(val)) + for name, val in all_args + ] log_val = ", ".join(all_args) log_val = log_val.replace("\n", "") meta_dict = {key: log_val} diff --git a/ccdproc/tests/make_mef.py b/ccdproc/tests/make_mef.py index a871eaab..79f0a466 100644 --- a/ccdproc/tests/make_mef.py +++ b/ccdproc/tests/make_mef.py @@ -7,7 +7,7 @@ from ccdproc import flat_correct -def make_sample_mef(science_name, flat_name, size=10, dtype='float32'): +def make_sample_mef(science_name, flat_name, size=10, dtype="float32"): """ Make a multi-extension FITS image with random data and a MEF flat. @@ -34,24 +34,24 @@ def make_sample_mef(science_name, flat_name, size=10, dtype='float32'): for _ in range(number_of_image_extensions): # Simulate a cloudy night, average pixel # value of 100 with a read_noise of 1 electron. - data = np.random.normal(100., 1.0, [size, size]).astype(dtype) + data = np.random.normal(100.0, 1.0, [size, size]).astype(dtype) hdu = fits.ImageHDU(data=data) # Make a header that is at least somewhat realistic - hdu.header['unit'] = 'electron' - hdu.header['object'] = 'clouds' - hdu.header['exptime'] = 30.0 - hdu.header['date-obs'] = '1928-07-23T21:03:27' - hdu.header['filter'] = 'B' - hdu.header['imagetyp'] = 'LIGHT' + hdu.header["unit"] = "electron" + hdu.header["object"] = "clouds" + hdu.header["exptime"] = 30.0 + hdu.header["date-obs"] = "1928-07-23T21:03:27" + hdu.header["filter"] = "B" + hdu.header["imagetyp"] = "LIGHT" science_image.append(hdu) # Make a perfect flat flat = np.ones_like(data, dtype=dtype) flat_hdu = fits.ImageHDU(data=flat) - flat_hdu.header['unit'] = 'electron' - flat_hdu.header['filter'] = 'B' - flat_hdu.header['imagetyp'] = 'FLAT' - flat_hdu.header['date-obs'] = '1928-07-23T21:03:27' + flat_hdu.header["unit"] = "electron" + flat_hdu.header["filter"] = "B" + flat_hdu.header["imagetyp"] = "FLAT" + flat_hdu.header["date-obs"] = "1928-07-23T21:03:27" flat_image.append(flat_hdu) science_image = fits.HDUList(science_image) @@ -61,5 +61,5 @@ def make_sample_mef(science_name, flat_name, size=10, dtype='float32'): flat_image.writeto(flat_name) -if __name__ == '__main__': - make_sample_mef('data/science-mef.fits', 'data/flat-mef.fits') +if __name__ == "__main__": + make_sample_mef("data/science-mef.fits", "data/flat-mef.fits") diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index 3e6eae15..5ab7b1fc 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -13,12 +13,7 @@ # If additional pytest markers are defined the key in the dictionary below # should be the name of the marker. -DEFAULTS = { - 'seed': 123, - 'data_size': 100, - 'data_scale': 1.0, - 'data_mean': 0.0 -} +DEFAULTS = {"seed": 123, "data_size": 100, "data_scale": 1.0, "data_mean": 0.0} DEFAULT_SEED = 123 DEFAULT_DATA_SIZE = 100 @@ -34,10 +29,12 @@ def value_from_markers(key, request): return DEFAULTS[key] -def ccd_data(data_size=DEFAULT_DATA_SIZE, - data_scale=DEFAULT_DATA_SCALE, - data_mean=DEFAULT_DATA_MEAN, - rng_seed=DEFAULT_SEED): +def ccd_data( + data_size=DEFAULT_DATA_SIZE, + data_scale=DEFAULT_DATA_SCALE, + data_mean=DEFAULT_DATA_MEAN, + rng_seed=DEFAULT_SEED, +): """ Return a CCDData object with units of ADU. 
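A minimal usage sketch of the factory above, assuming the DEFAULTS shown
earlier in this file:

    ccd = ccd_data()                     # 100 x 100 Gaussian image, mean 0.0
    assert ccd.unit == u.adu             # unit is ADU
    assert ccd.data.shape == (100, 100)  # DEFAULT_DATA_SIZE on each side
    assert ccd.header["my_key"] == 42    # the fake metadata attached below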
@@ -61,7 +58,7 @@ def ccd_data(data_size=DEFAULT_DATA_SIZE, with NumpyRNGContext(rng_seed): data = np.random.normal(loc=mean, size=[size, size], scale=scale) - fake_meta = {'my_key': 42, 'your_key': 'not 42'} + fake_meta = {"my_key": 42, "your_key": "not 42"} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd @@ -85,4 +82,5 @@ class Result: def __init__(self, n, directory): self.n_test = n self.test_dir = directory + return Result(n_test, test_dir) diff --git a/ccdproc/tests/run_for_memory_profile.py b/ccdproc/tests/run_for_memory_profile.py index e43069a3..9dc9044f 100644 --- a/ccdproc/tests/run_for_memory_profile.py +++ b/ccdproc/tests/run_for_memory_profile.py @@ -1,5 +1,6 @@ import pytest -pytest.importorskip('memory_profiler') + +pytest.importorskip("memory_profiler") from argparse import ArgumentParser from tempfile import TemporaryDirectory @@ -23,8 +24,8 @@ try: from ccdproc.combiner import _calculate_size_of_image except ImportError: - def _calculate_size_of_image(ccd, - combine_uncertainty_function): + + def _calculate_size_of_image(ccd, combine_uncertainty_function): # If uncertainty_func is given for combine this will create an uncertainty # even if the originals did not have one. In that case we need to create # an empty placeholder. @@ -68,7 +69,7 @@ def generate_fits_files(n_images, size=None, seed=1523): np.random.seed(seed) - base_name = 'test-combine-{num:03d}.fits' + base_name = "test-combine-{num:03d}.fits" for num in range(n_images): data = np.random.normal(size=use_size) @@ -76,17 +77,24 @@ def generate_fits_files(n_images, size=None, seed=1523): n_bad = 50000 bad_x = np.random.randint(0, high=use_size[0] - 1, size=n_bad) bad_y = np.random.randint(0, high=use_size[1] - 1, size=n_bad) - data[bad_x, bad_y] = (np.random.choice([-1, 1], size=n_bad) * - (10 + np.random.rand(n_bad))) - hdu = fits.PrimaryHDU(data=np.asarray(data, dtype='float32')) - hdu.header['for_prof'] = 'yes' - hdu.header['bunit'] = 'adu' + data[bad_x, bad_y] = np.random.choice([-1, 1], size=n_bad) * ( + 10 + np.random.rand(n_bad) + ) + hdu = fits.PrimaryHDU(data=np.asarray(data, dtype="float32")) + hdu.header["for_prof"] = "yes" + hdu.header["bunit"] = "adu" path = TMPPATH.resolve() / base_name.format(num=num) hdu.writeto(path, overwrite=True) -def run_memory_profile(n_files, sampling_interval, size=None, sigma_clip=False, - combine_method=None, memory_limit=None): +def run_memory_profile( + n_files, + sampling_interval, + size=None, + sigma_clip=False, + combine_method=None, + memory_limit=None, +): """ Try opening a bunch of files with a relatively low limit on the number of open files. 
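A hypothetical direct call to the profiler defined above, mirroring the
command-line options parsed further down:

    mem_use, img_size = run_memory_profile(
        25,                          # combine 25 generated files
        0.05,                        # sampling interval in seconds
        size=1000,                   # 1000 x 1000 pixel test images
        sigma_clip=True,
        combine_method="average",
        memory_limit=1_000_000_000,  # forwarded to combine as mem_limit
    )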
@@ -119,59 +127,79 @@ def run_memory_profile(n_files, sampling_interval, size=None, sigma_clip=False, proc = psutil.Process() - print('Process ID is: ', proc.pid, flush=True) + print("Process ID is: ", proc.pid, flush=True) ic = ImageFileCollection(str(TMPPATH)) - files = ic.files_filtered(for_prof='yes', include_path=True) + files = ic.files_filtered(for_prof="yes", include_path=True) - kwargs = {'method': combine_method} + kwargs = {"method": combine_method} if sigma_clip: kwargs.update( - {'sigma_clip': True, - 'sigma_clip_low_thresh': 5, - 'sigma_clip_high_thresh': 5, - 'sigma_clip_func': np.ma.median, - 'sigma_clip_dev_func': median_absolute_deviation} + { + "sigma_clip": True, + "sigma_clip_low_thresh": 5, + "sigma_clip_high_thresh": 5, + "sigma_clip_func": np.ma.median, + "sigma_clip_dev_func": median_absolute_deviation, + } ) ccd = CCDData.read(files[0]) expected_img_size = _calculate_size_of_image(ccd, None) if memory_limit: - kwargs['mem_limit'] = memory_limit + kwargs["mem_limit"] = memory_limit pre_mem_use = memory_usage(-1, interval=sampling_interval, timeout=1) baseline = np.mean(pre_mem_use) - print('Subtracting baseline memory before profile: {}'.format(baseline)) - mem_use = memory_usage((combine, (files,), kwargs), - interval=sampling_interval, timeout=None) + print("Subtracting baseline memory before profile: {}".format(baseline)) + mem_use = memory_usage( + (combine, (files,), kwargs), interval=sampling_interval, timeout=None + ) mem_use = [m - baseline for m in mem_use] return mem_use, expected_img_size -if __name__ == '__main__': +if __name__ == "__main__": parser = ArgumentParser() - parser.add_argument('number', type=int, - help='Number of files to combine.') - parser.add_argument('--size', type=int, action='store', - help='Size of one side of image to create. ' - 'All images are square, so only give ' - 'a single number for the size.') - parser.add_argument('--combine-method', '-c', - choices=('average', 'median'), - help='Method to use to combine images.') - parser.add_argument('--memory-limit', type=int, - help='Limit combination to this amount of memory') - parser.add_argument('--sigma-clip', action='store_true', - help='If set, sigma-clip before combining. Clipping ' - 'will be done with high/low limit of 5. ' - 'The central function is the median, the ' - 'deviation is the median_absolute_deviation.') - parser.add_argument('--sampling-freq', type=float, default=0.05, - help='Time, in seconds, between memory samples.') - parser.add_argument('--frequent-gc', action='store_true', - help='If set, perform garbage collection ' - 'much more frequently than the default.') + parser.add_argument("number", type=int, help="Number of files to combine.") + parser.add_argument( + "--size", + type=int, + action="store", + help="Size of one side of image to create. " + "All images are square, so only give " + "a single number for the size.", + ) + parser.add_argument( + "--combine-method", + "-c", + choices=("average", "median"), + help="Method to use to combine images.", + ) + parser.add_argument( + "--memory-limit", type=int, help="Limit combination to this amount of memory" + ) + parser.add_argument( + "--sigma-clip", + action="store_true", + help="If set, sigma-clip before combining. Clipping " + "will be done with high/low limit of 5. 
" + "The central function is the median, the " + "deviation is the median_absolute_deviation.", + ) + parser.add_argument( + "--sampling-freq", + type=float, + default=0.05, + help="Time, in seconds, between memory samples.", + ) + parser.add_argument( + "--frequent-gc", + action="store_true", + help="If set, perform garbage collection " + "much more frequently than the default.", + ) args = parser.parse_args() if args.frequent_gc: @@ -179,10 +207,13 @@ def run_memory_profile(n_files, sampling_interval, size=None, sigma_clip=False, print("Garbage collection thresholds: ", gc.get_threshold()) - mem_use = run_with_limit(args.number, args.sampling_freq, - size=args.size, - sigma_clip=args.sigma_clip, - combine_method=args.combine_method, - memory_limit=args.memory_limit) - print('Max memory usage (MB): ', np.max(mem_use)) - print('Baseline memory usage (MB): ', mem_use[0]) + mem_use = run_with_limit( + args.number, + args.sampling_freq, + size=args.size, + sigma_clip=args.sigma_clip, + combine_method=args.combine_method, + memory_limit=args.memory_limit, + ) + print("Max memory usage (MB): ", np.max(mem_use)) + print("Baseline memory usage (MB): ", mem_use[0]) diff --git a/ccdproc/tests/run_with_file_number_limit.py b/ccdproc/tests/run_with_file_number_limit.py index 1d689c49..2d6188ae 100644 --- a/ccdproc/tests/run_with_file_number_limit.py +++ b/ccdproc/tests/run_with_file_number_limit.py @@ -18,10 +18,7 @@ _TMPDIR = TemporaryDirectory() TMPPATH = Path(_TMPDIR.name) -ALLOWED_EXTENSIONS = { - 'fits': 'fits', - 'plain': 'txt' -} +ALLOWED_EXTENSIONS = {"fits": "fits", "plain": "txt"} def generate_fits_files(number, size=None): @@ -31,12 +28,12 @@ def generate_fits_files(number, size=None): int_size = int(size) use_size = [int_size, int_size] - base_name = 'test-combine-{num:03d}.' + ALLOWED_EXTENSIONS['fits'] + base_name = "test-combine-{num:03d}." + ALLOWED_EXTENSIONS["fits"] for num in range(number): data = np.zeros(shape=use_size) hdu = fits.PrimaryHDU(data=data) - hdu.header['bunit'] = 'adu' + hdu.header["bunit"] = "adu" name = base_name.format(num=num) path = TMPPATH / name hdu.writeto(path, overwrite=True) @@ -44,7 +41,7 @@ def generate_fits_files(number, size=None): def generate_plain_files(number): for i in range(number): - file = TMPPATH / ("{i:03d}.".format(i=i) + ALLOWED_EXTENSIONS['plain']) + file = TMPPATH / ("{i:03d}.".format(i=i) + ALLOWED_EXTENSIONS["plain"]) file.write_bytes(np.random.random(100)) @@ -57,7 +54,7 @@ def open_files_with_open(kind): global fds fds = [] - paths = TMPPATH.glob('**/*.' + ALLOWED_EXTENSIONS[kind]) + paths = TMPPATH.glob("**/*." + ALLOWED_EXTENSIONS[kind]) for p in paths: fds.append(p.open()) @@ -72,7 +69,7 @@ def open_files_as_mmap(kind): global fds fds = [] - paths = TMPPATH.glob('**/*.' + ALLOWED_EXTENSIONS[kind]) + paths = TMPPATH.glob("**/*." + ALLOWED_EXTENSIONS[kind]) for p in paths: with p.open() as f: @@ -85,7 +82,7 @@ def open_files_ccdproc_combine_chunk(kind): task is broken into chunks. """ global combo - paths = sorted(list(TMPPATH.glob('**/*.' + ALLOWED_EXTENSIONS[kind]))) + paths = sorted(list(TMPPATH.glob("**/*." + ALLOWED_EXTENSIONS[kind]))) # We want to force combine to break the task into chunks even # if the task really would fit in memory; it is in that case that # we end up with too many open files. We'll open one file, determine @@ -104,7 +101,7 @@ def open_files_ccdproc_combine_nochunk(kind): task is not broken into chunks. """ global combo - paths = sorted(list(TMPPATH.glob('**/*.' 
+ ALLOWED_EXTENSIONS[kind]))) + paths = sorted(list(TMPPATH.glob("**/*." + ALLOWED_EXTENSIONS[kind]))) # We ensure there are no chunks by setting a memory limit large # enough to hold everything. @@ -118,15 +115,14 @@ def open_files_ccdproc_combine_nochunk(kind): ALLOWED_OPENERS = { - 'open': open_files_with_open, - 'mmap': open_files_as_mmap, - 'combine-chunk': open_files_ccdproc_combine_chunk, - 'combine-nochunk': open_files_ccdproc_combine_nochunk + "open": open_files_with_open, + "mmap": open_files_as_mmap, + "combine-chunk": open_files_ccdproc_combine_chunk, + "combine-nochunk": open_files_ccdproc_combine_nochunk, } -def run_with_limit(n, kind='fits', size=None, overhead=6, - open_method='mmap'): +def run_with_limit(n, kind="fits", size=None, overhead=6, open_method="mmap"): """ Try opening a bunch of files with a relatively low limit on the number of open files. @@ -174,8 +170,9 @@ def run_with_limit(n, kind='fits', size=None, overhead=6, raise ValueError("Argument 'n' must be a positive integer") if kind not in ALLOWED_EXTENSIONS.keys(): - raise ValueError("Argument 'kind' must be one of " - "{}".format(ALLOWED_EXTENSIONS.keys())) + raise ValueError( + "Argument 'kind' must be one of " "{}".format(ALLOWED_EXTENSIONS.keys()) + ) # Set the limit on the number of open files to n. The try/except # is the catch the case where this change would *increase*, rather than @@ -183,11 +180,12 @@ def run_with_limit(n, kind='fits', size=None, overhead=6, try: resource.setrlimit(resource.RLIMIT_NOFILE, (n, n)) except ValueError as e: - if 'not allowed to raise maximum limit' not in str(e): + if "not allowed to raise maximum limit" not in str(e): raise max_n_this_process = resource.getrlimit(resource.RLIMIT_NOFILE) - raise ValueError('Maximum number of open ' - 'files is {}'.format(max_n_this_process)) + raise ValueError( + "Maximum number of open " "files is {}".format(max_n_this_process) + ) # The "-1" is to leave a little wiggle room. overhead is based on the # the number of open files that a process running on linux has open. @@ -196,64 +194,87 @@ def run_with_limit(n, kind='fits', size=None, overhead=6, proc = psutil.Process() - print('Process ID is: ', proc.pid, flush=True) + print("Process ID is: ", proc.pid, flush=True) print("Making {} files".format(n_files)) - if kind == 'plain': + if kind == "plain": generate_plain_files(n_files) - elif kind == 'fits': + elif kind == "fits": generate_fits_files(n_files, size=size) # Print number of open files before we try opening anything for debugging # purposes. - print("Before opening, files open is {}".format(len(proc.open_files())), - flush=True) + print("Before opening, files open is {}".format(len(proc.open_files())), flush=True) print(" Note well: this number is different than what lsof reports.") try: ALLOWED_OPENERS[open_method](kind) - # fds.append(p.open()) + # fds.append(p.open()) except OSError as e: # Capture the error and re-raise as a SystemExit because this is # run in a subprocess. This ensures that the original error message # is reported back to the calling process; we add on the number of # open files. 
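        # (With the format below, an EMFILE failure would surface to the
        # parent as something like "[Errno 24] Too many open files: ...;
        # number of open files: 8, with target 8" -- counts hypothetical.)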
- raise SystemExit(str(e) + '; number of open files: ' + - '{}, with target {}'.format(len(proc.open_files()), - n_files)) + raise SystemExit( + str(e) + + "; number of open files: " + + "{}, with target {}".format(len(proc.open_files()), n_files) + ) else: - print('Opens succeeded, files currently open:', - len(proc.open_files()), - flush=True) + print( + "Opens succeeded, files currently open:", len(proc.open_files()), flush=True + ) -if __name__ == '__main__': +if __name__ == "__main__": from argparse import ArgumentParser import psutil parser = ArgumentParser() - parser.add_argument('number', type=int, - help='Limit on number of open files.') - parser.add_argument('--kind', action='store', default='plain', - choices=ALLOWED_EXTENSIONS.keys(), - help='Kind of file to generate for test; ' - 'default is plain') - parser.add_argument('--overhead', type=int, action='store', - help='Number of files to assume the OS is using.', - default=6) - parser.add_argument('--open-by', action='store', default='mmap', - choices=ALLOWED_OPENERS.keys(), - help='How to open the files. Default is mmap') - parser.add_argument('--size', type=int, action='store', - help='Size of one side of image to create. ' - 'All images are square, so only give ' - 'a single number for the size.') - parser.add_argument('--frequent-gc', action='store_true', - help='If set, perform garbage collection ' - 'much more frequently than the default.') + parser.add_argument("number", type=int, help="Limit on number of open files.") + parser.add_argument( + "--kind", + action="store", + default="plain", + choices=ALLOWED_EXTENSIONS.keys(), + help="Kind of file to generate for test; " "default is plain", + ) + parser.add_argument( + "--overhead", + type=int, + action="store", + help="Number of files to assume the OS is using.", + default=6, + ) + parser.add_argument( + "--open-by", + action="store", + default="mmap", + choices=ALLOWED_OPENERS.keys(), + help="How to open the files. Default is mmap", + ) + parser.add_argument( + "--size", + type=int, + action="store", + help="Size of one side of image to create. 
" + "All images are square, so only give " + "a single number for the size.", + ) + parser.add_argument( + "--frequent-gc", + action="store_true", + help="If set, perform garbage collection " + "much more frequently than the default.", + ) args = parser.parse_args() if args.frequent_gc: gc.set_threshold(10, 10, 10) print("Garbage collection thresholds: ", gc.get_threshold()) - run_with_limit(args.number, kind=args.kind, overhead=args.overhead, - open_method=args.open_by, size=args.size) + run_with_limit( + args.number, + kind=args.kind, + overhead=args.overhead, + open_method=args.open_by, + size=args.size, + ) diff --git a/ccdproc/tests/test_bitfield.py b/ccdproc/tests/test_bitfield.py index 612b1e81..9ff98ff8 100644 --- a/ccdproc/tests/test_bitfield.py +++ b/ccdproc/tests/test_bitfield.py @@ -32,31 +32,31 @@ def test_bitfield_flipbits_when_no_bits(): def test_bitfield_flipbits_when_stringbits(): bm = np.random.randint(0, 10, (10, 10)) with pytest.raises(TypeError): - bitfield_to_boolean_mask(bm, '3', flip_bits=1) + bitfield_to_boolean_mask(bm, "3", flip_bits=1) def test_bitfield_string_flag_flip_not_start_of_string(): bm = np.random.randint(0, 10, (10, 10)) with pytest.raises(ValueError): - bitfield_to_boolean_mask(bm, '1, ~4') + bitfield_to_boolean_mask(bm, "1, ~4") def test_bitfield_string_flag_unbalanced_parens(): bm = np.random.randint(0, 10, (10, 10)) with pytest.raises(ValueError): - bitfield_to_boolean_mask(bm, '(1, 4))') + bitfield_to_boolean_mask(bm, "(1, 4))") def test_bitfield_string_flag_wrong_positioned_parens(): bm = np.random.randint(0, 10, (10, 10)) with pytest.raises(ValueError): - bitfield_to_boolean_mask(bm, '((1, )4)') + bitfield_to_boolean_mask(bm, "((1, )4)") def test_bitfield_string_flag_empty(): bm = np.random.randint(0, 10, (10, 10)) with pytest.raises(ValueError): - bitfield_to_boolean_mask(bm, '~') + bitfield_to_boolean_mask(bm, "~") def test_bitfield_flag_non_integer(): @@ -74,5 +74,5 @@ def test_bitfield_duplicate_flag_throws_warning(): def test_bitfield_none_identical_to_strNone(): bm = np.random.randint(0, 10, (10, 10)) m1 = bitfield_to_boolean_mask(bm, None) - m2 = bitfield_to_boolean_mask(bm, 'None') + m2 = bitfield_to_boolean_mask(bm, "None") np.testing.assert_array_equal(m1, m2) diff --git a/ccdproc/tests/test_ccdmask.py b/ccdproc/tests/test_ccdmask.py index f65d5e58..97451327 100644 --- a/ccdproc/tests/test_ccdmask.py +++ b/ccdproc/tests/test_ccdmask.py @@ -18,18 +18,19 @@ def test_ccdmask_no_ccddata(): def test_ccdmask_not_2d(): # Fails when a CCDData has less than 2 dimensions with pytest.raises(ValueError): - ccdmask(CCDData(np.ones(3), unit='adu')) + ccdmask(CCDData(np.ones(3), unit="adu")) # Fails when scalar with pytest.raises(ValueError): - ccdmask(CCDData(np.array(10), unit='adu')) + ccdmask(CCDData(np.array(10), unit="adu")) # Fails when more than 2d with pytest.raises(ValueError): - ccdmask(CCDData(np.ones((3, 3, 3)), unit='adu')) + ccdmask(CCDData(np.ones((3, 3, 3)), unit="adu")) def test_ccdmask_pixels(): + # fmt: off flat1 = CCDData(np.array([[ 20044, 19829, 19936, 20162, 19948, 19965, 19919, 20004, 19951, 20002, 19926, 20151, 19886, 20014, 19928, 20025, 19921, 19996, @@ -160,7 +161,7 @@ def test_ccdmask_pixels(): 20184, 19948, 20034, 19896, 19905, 20138, 19870, 19936, 20085, 19971, 20063, 19936, 19941, 19928, 19937, 19970, 19931, 20036, 19965, 19855, 19949, 19965, 19821]]), unit='adu') - + # fmt: on target_mask = np.zeros(flat1.shape, dtype=bool) # No bad pixels in this scenario @@ -198,8 +199,7 @@ def test_ccdmask_pixels(): mask = 
ccdmask(ratio, ncsig=11, nlsig=15, findbadcolumns=True) assert_array_equal(mask, target_mask) - mask = ccdmask(ratio, ncsig=11, nlsig=15, findbadcolumns=True, - byblocks=True) + mask = ccdmask(ratio, ncsig=11, nlsig=15, findbadcolumns=True, byblocks=True) assert_array_equal(mask, target_mask) # Add bad column with gaps diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 3811ed7c..cf6b5127 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -16,33 +16,48 @@ import skimage from ccdproc.core import ( - ccd_process, cosmicray_median, cosmicray_lacosmic, create_deviation, - flat_correct, gain_correct, subtract_bias, subtract_dark, subtract_overscan, - transform_image, trim_image, wcs_project, Keyword) + ccd_process, + cosmicray_median, + cosmicray_lacosmic, + create_deviation, + flat_correct, + gain_correct, + subtract_bias, + subtract_dark, + subtract_overscan, + transform_image, + trim_image, + wcs_project, + Keyword, +) from ccdproc.core import _blkavg from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func try: from ..core import block_reduce, block_average, block_replicate + HAS_BLOCK_X_FUNCS = True except ImportError: HAS_BLOCK_X_FUNCS = False _NUMPY_COPY_IF_NEEDED = False if np.__version__.startswith("1.") else None + # Test creating deviation # Success expected if u_image * u_gain = u_readnoise -@pytest.mark.parametrize('u_image,u_gain,u_readnoise,expect_success', [ - (u.electron, None, u.electron, True), - (u.electron, u.electron, u.electron, False), - (u.adu, u.electron / u.adu, u.electron, True), - (u.electron, None, u.dimensionless_unscaled, False), - (u.electron, u.dimensionless_unscaled, u.electron, True), - (u.adu, u.dimensionless_unscaled, u.electron, False), - (u.adu, u.photon / u.adu, u.electron, False), - ]) -def test_create_deviation(u_image, u_gain, u_readnoise, - expect_success): +@pytest.mark.parametrize( + "u_image,u_gain,u_readnoise,expect_success", + [ + (u.electron, None, u.electron, True), + (u.electron, u.electron, u.electron, False), + (u.adu, u.electron / u.adu, u.electron, True), + (u.electron, None, u.dimensionless_unscaled, False), + (u.electron, u.dimensionless_unscaled, u.electron, True), + (u.adu, u.dimensionless_unscaled, u.electron, False), + (u.adu, u.photon / u.adu, u.electron, False), + ], +) +def test_create_deviation(u_image, u_gain, u_readnoise, expect_success): ccd_data = ccd_data_func(data_size=10, data_mean=100) ccd_data.unit = u_image if u_gain is not None: @@ -56,11 +71,10 @@ def test_create_deviation(u_image, u_gain, u_readnoise, assert ccd_var.uncertainty.array.size == 100 assert ccd_var.uncertainty.array.dtype == np.dtype(float) if gain is not None: - expected_var = np.sqrt(2 * ccd_data.data + 5 ** 2) / 2 + expected_var = np.sqrt(2 * ccd_data.data + 5**2) / 2 else: - expected_var = np.sqrt(ccd_data.data + 5 ** 2) - np.testing.assert_array_equal(ccd_var.uncertainty.array, - expected_var) + expected_var = np.sqrt(ccd_data.data + 5**2) + np.testing.assert_array_equal(ccd_var.uncertainty.array, expected_var) assert ccd_var.unit == ccd_data.unit # Uncertainty should *not* have any units -- does it? 
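    # (The expected_var values above follow from standard error propagation:
    # with gain g and read noise r in electrons, the deviation in the image's
    # own unit is sqrt(g * data + r**2) / g, so g=2, r=5 gives the first form
    # and g=1, i.e. no gain, the second.)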
with pytest.raises(AttributeError): @@ -74,23 +88,25 @@ def test_create_deviation_from_negative(): ccd_data = ccd_data_func(data_mean=0, data_scale=10) ccd_data.unit = u.electron readnoise = 5 * u.electron - ccd_var = create_deviation(ccd_data, gain=None, readnoise=readnoise, - disregard_nan=False) - np.testing.assert_array_equal(ccd_data.data < 0, - np.isnan(ccd_var.uncertainty.array)) + ccd_var = create_deviation( + ccd_data, gain=None, readnoise=readnoise, disregard_nan=False + ) + np.testing.assert_array_equal( + ccd_data.data < 0, np.isnan(ccd_var.uncertainty.array) + ) def test_create_deviation_from_negative_2(): ccd_data = ccd_data_func(data_mean=0, data_scale=10) ccd_data.unit = u.electron readnoise = 5 * u.electron - ccd_var = create_deviation(ccd_data, gain=None, readnoise=readnoise, - disregard_nan=True) - mask = (ccd_data.data < 0) + ccd_var = create_deviation( + ccd_data, gain=None, readnoise=readnoise, disregard_nan=True + ) + mask = ccd_data.data < 0 ccd_data.data[mask] = 0 expected_var = np.sqrt(ccd_data.data + readnoise.value**2) - np.testing.assert_array_equal(ccd_var.uncertainty.array, - expected_var) + np.testing.assert_array_equal(ccd_var.uncertainty.array, expected_var) def test_create_deviation_keywords_must_have_unit(): @@ -107,11 +123,15 @@ def test_create_deviation_keywords_must_have_unit(): # Tests for overscan -@pytest.mark.parametrize('data_rectangle', [False, True]) -@pytest.mark.parametrize('median,transpose', [ - (False, False), - (False, True), - (True, False), ]) +@pytest.mark.parametrize("data_rectangle", [False, True]) +@pytest.mark.parametrize( + "median,transpose", + [ + (False, False), + (False, True), + (True, False), + ], +) def test_subtract_overscan(median, transpose, data_rectangle): ccd_data = ccd_data_func() # Make data non-square if desired @@ -119,71 +139,87 @@ def test_subtract_overscan(median, transpose, data_rectangle): ccd_data.data = ccd_data.data[:, :-30] # Create the overscan region - oscan = 300. + oscan = 300.0 oscan_region = (slice(None), slice(0, 10)) # Indices 0 through 9 - fits_section = '[1:10, :]' + fits_section = "[1:10, :]" science_region = (slice(None), slice(10, None)) overscan_axis = 1 if transpose: # Put overscan in first axis, not second, a test for #70 oscan_region = oscan_region[::-1] - fits_section = '[:, 1:10]' + fits_section = "[:, 1:10]" science_region = science_region[::-1] overscan_axis = 0 ccd_data.data[oscan_region] = oscan # Add a fake sky background so the "science" part of the image has a # different average than the "overscan" part. - sky = 10. + sky = 10.0 original_mean = ccd_data.data[science_region].mean() ccd_data.data[science_region] += oscan + sky # Test once using the overscan argument to specify the overscan region - ccd_data_overscan = subtract_overscan(ccd_data, - overscan=ccd_data[oscan_region], - overscan_axis=overscan_axis, - median=median, model=None) + ccd_data_overscan = subtract_overscan( + ccd_data, + overscan=ccd_data[oscan_region], + overscan_axis=overscan_axis, + median=median, + model=None, + ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? np.testing.assert_almost_equal( - ccd_data_overscan.data[science_region].mean(), - sky + original_mean) + ccd_data_overscan.data[science_region].mean(), sky + original_mean + ) # Is the overscan region zero? 
assert (ccd_data_overscan.data[oscan_region] == 0).all() # Now do what should be the same subtraction, with the overscan specified # with the fits_section - ccd_data_fits_section = subtract_overscan(ccd_data, - overscan_axis=overscan_axis, - fits_section=fits_section, - median=median, model=None) + ccd_data_fits_section = subtract_overscan( + ccd_data, + overscan_axis=overscan_axis, + fits_section=fits_section, + median=median, + model=None, + ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? np.testing.assert_almost_equal( - ccd_data_fits_section.data[science_region].mean(), - sky + original_mean) + ccd_data_fits_section.data[science_region].mean(), sky + original_mean + ) # Is the overscan region zero? assert (ccd_data_fits_section.data[oscan_region] == 0).all() # Do both ways of subtracting overscan give exactly the same result? - np.testing.assert_array_equal(ccd_data_overscan[science_region], - ccd_data_fits_section[science_region]) + np.testing.assert_array_equal( + ccd_data_overscan[science_region], ccd_data_fits_section[science_region] + ) # Set overscan_axis to None, and let the routine figure out the axis. # This should lead to the same results as before. ccd_data_overscan_auto = subtract_overscan( - ccd_data, overscan_axis=None, overscan=ccd_data[oscan_region], - median=median, model=None) + ccd_data, + overscan_axis=None, + overscan=ccd_data[oscan_region], + median=median, + model=None, + ) np.testing.assert_almost_equal( - ccd_data_overscan_auto.data[science_region].mean(), - sky + original_mean) + ccd_data_overscan_auto.data[science_region].mean(), sky + original_mean + ) # Use overscan_axis=None with a FITS section ccd_data_fits_section_overscan_auto = subtract_overscan( - ccd_data, overscan_axis=None, fits_section=fits_section, - median=median, model=None) + ccd_data, + overscan_axis=None, + fits_section=fits_section, + median=median, + model=None, + ) np.testing.assert_almost_equal( ccd_data_fits_section_overscan_auto.data[science_region].mean(), - sky + original_mean) + sky + original_mean, + ) # Overscan_axis should be 1 for a square overscan region # This test only works for a non-square data region, but the # default has the wrong axis. @@ -192,19 +228,24 @@ def test_subtract_overscan(median, transpose, data_rectangle): oscan_region = (slice(None), slice(0, -30)) science_region = (slice(None), slice(-30, None)) ccd_data_square_overscan_auto = subtract_overscan( - ccd_data, overscan_axis=None, overscan=ccd_data[oscan_region], - median=median, model=None) + ccd_data, + overscan_axis=None, + overscan=ccd_data[oscan_region], + median=median, + model=None, + ) ccd_data_square = subtract_overscan( - ccd_data, overscan_axis=1, overscan=ccd_data[oscan_region], - median=median, model=None) - np.testing.assert_allclose(ccd_data_square_overscan_auto, - ccd_data_square) + ccd_data, + overscan_axis=1, + overscan=ccd_data[oscan_region], + median=median, + model=None, + ) + np.testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) # A more substantial test of overscan modeling -@pytest.mark.parametrize('transpose', [ - True, - False]) +@pytest.mark.parametrize("transpose", [True, False]) def test_subtract_overscan_model(transpose): ccd_data = ccd_data_func() # Create the overscan region @@ -226,21 +267,27 @@ def test_subtract_overscan_model(transpose): original_mean = ccd_data.data[science_region].mean() - ccd_data.data[oscan_region] = 0. 
# Only want overscan in that region + ccd_data.data[oscan_region] = 0.0 # Only want overscan in that region ccd_data.data = ccd_data.data + scan - ccd_data = subtract_overscan(ccd_data, overscan=ccd_data[oscan_region], - overscan_axis=overscan_axis, - median=False, model=models.Polynomial1D(2)) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), - original_mean) + ccd_data = subtract_overscan( + ccd_data, + overscan=ccd_data[oscan_region], + overscan_axis=overscan_axis, + median=False, + model=models.Polynomial1D(2), + ) + np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) # Set the overscan_axis explicitly to None, and let the routine # figure it out. - ccd_data = subtract_overscan(ccd_data, overscan=ccd_data[oscan_region], - overscan_axis=None, - median=False, model=models.Polynomial1D(2)) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), - original_mean) + ccd_data = subtract_overscan( + ccd_data, + overscan=ccd_data[oscan_region], + overscan_axis=None, + median=False, + model=models.Polynomial1D(2), + ) + np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) def test_subtract_overscan_fails(): @@ -253,8 +300,7 @@ def test_subtract_overscan_fails(): subtract_overscan(np.zeros((10, 10)), 3, median=False, model=None) # Do we get an error if we specify both overscan and fits_section? with pytest.raises(TypeError): - subtract_overscan(ccd_data, overscan=ccd_data[0:10], - fits_section='[1:10]') + subtract_overscan(ccd_data, overscan=ccd_data[0:10], fits_section="[1:10]") # Do we raise an error if we specify neither overscan nor fits_section? with pytest.raises(TypeError): subtract_overscan(ccd_data) @@ -269,9 +315,7 @@ def test_trim_image_fits_section_requires_string(): trim_image(ccd_data, fits_section=5) -@pytest.mark.parametrize('mask_data, uncertainty', [ - (False, False), - (True, True)]) +@pytest.mark.parametrize("mask_data, uncertainty", [(False, False), (True, True)]) def test_trim_image_fits_section(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: @@ -280,7 +324,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): err = np.random.normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) - trimmed = trim_image(ccd_data, fits_section='[20:40,:]') + trimmed = trim_image(ccd_data, fits_section="[20:40,:]") # FITS reverse order, bounds are inclusive and starting index is 1-based assert trimmed.shape == (50, 21) np.testing.assert_array_equal(trimmed.data, ccd_data[:, 19:40]) @@ -301,7 +345,7 @@ def test_trim_with_wcs_alters_wcs(): ccd_data = ccd_data_func() # WCS construction example pulled form astropy.wcs docs wcs = WCS(naxis=2) - wcs.wcs.crpix = np.array(ccd_data.shape)/2 + wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] @@ -319,7 +363,7 @@ def test_subtract_bias(): data_avg = ccd_data.data.mean() bias_level = 5.0 ccd_data.data = ccd_data.data + bias_level - ccd_data.header['key'] = 'value' + ccd_data.header["key"] = "value" master_bias_array = np.zeros_like(ccd_data.data) + bias_level master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) @@ -327,8 +371,8 @@ def test_subtract_bias(): np.testing.assert_almost_equal(no_bias.data.mean(), data_avg) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header - del 
no_bias.header['key'] - assert 'key' in ccd_data.header + del no_bias.header["key"] + assert "key" in ccd_data.header assert no_bias.header is not ccd_data.header @@ -344,13 +388,13 @@ def test_subtract_bias_fails(): subtract_bias(ccd_data, bias) -@pytest.mark.parametrize('exposure_keyword', [True, False]) -@pytest.mark.parametrize('explicit_times', [True, False]) -@pytest.mark.parametrize('scale', [True, False]) +@pytest.mark.parametrize("exposure_keyword", [True, False]) +@pytest.mark.parametrize("explicit_times", [True, False]) +@pytest.mark.parametrize("scale", [True, False]) def test_subtract_dark(explicit_times, scale, exposure_keyword): ccd_data = ccd_data_func() exptime = 30.0 - exptime_key = 'exposure' + exptime_key = "exposure" exposure_unit = u.second dark_level = 1.7 master_dark_data = np.zeros_like(ccd_data.data) + dark_level @@ -362,28 +406,38 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): if explicit_times: # Test case when units of dark and data exposures are different dark_exposure_unit = u.minute - dark_sub = subtract_dark(ccd_data, master_dark, - dark_exposure=dark_exptime * dark_exposure_unit, - data_exposure=exptime * exposure_unit, - scale=scale, add_keyword=None) + dark_sub = subtract_dark( + ccd_data, + master_dark, + dark_exposure=dark_exptime * dark_exposure_unit, + data_exposure=exptime * exposure_unit, + scale=scale, + add_keyword=None, + ) elif exposure_keyword: key = Keyword(exptime_key, unit=u.second) - dark_sub = subtract_dark(ccd_data, master_dark, - exposure_time=key, - scale=scale, add_keyword=None) + dark_sub = subtract_dark( + ccd_data, master_dark, exposure_time=key, scale=scale, add_keyword=None + ) else: - dark_sub = subtract_dark(ccd_data, master_dark, - exposure_time=exptime_key, - exposure_unit=u.second, - scale=scale, add_keyword=None) + dark_sub = subtract_dark( + ccd_data, + master_dark, + exposure_time=exptime_key, + exposure_unit=u.second, + scale=scale, + add_keyword=None, + ) dark_scale = 1.0 if scale: - dark_scale = float((exptime / dark_exptime) * - (exposure_unit / dark_exposure_unit)) + dark_scale = float( + (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) + ) - np.testing.assert_array_equal(ccd_data.data - dark_scale * dark_level, - dark_sub.data) + np.testing.assert_array_equal( + ccd_data.data - dark_scale * dark_level, dark_sub.data + ) # Headers should have the same content...do they? assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -394,7 +448,7 @@ def test_subtract_dark_fails(): ccd_data = ccd_data_func() # None of these tests check a result so the content of the master # can be anything. - ccd_data.header['exptime'] = 30.0 + ccd_data.header["exptime"] = 30.0 master = ccd_data.copy() # Do we fail if we give one of dark_exposure, data_exposure but not both? @@ -405,9 +459,13 @@ def test_subtract_dark_fails(): # Do we fail if we supply dark_exposure and data_exposure and exposure_time with pytest.raises(TypeError): - subtract_dark(ccd_data, master, dark_exposure=10 * u.second, - data_exposure=10 * u.second, - exposure_time='exptime') + subtract_dark( + ccd_data, + master, + dark_exposure=10 * u.second, + data_exposure=10 * u.second, + exposure_time="exptime", + ) # Fail if we supply none of the exposure-related arguments? with pytest.raises(TypeError): @@ -415,13 +473,13 @@ def test_subtract_dark_fails(): # Fail if we supply exposure time but not a unit? 
with pytest.raises(TypeError): - subtract_dark(ccd_data, master, exposure_time='exptime') + subtract_dark(ccd_data, master, exposure_time="exptime") # Fail if ccd_data or master are not CCDData objects? with pytest.raises(TypeError): - subtract_dark(ccd_data.data, master, exposure_time='exptime') + subtract_dark(ccd_data.data, master, exposure_time="exptime") with pytest.raises(TypeError): - subtract_dark(ccd_data, master.data, exposure_time='exptime') + subtract_dark(ccd_data, master.data, exposure_time="exptime") # Fail if units do not match... @@ -430,8 +488,7 @@ def test_subtract_dark_fails(): master.unit = u.meter with pytest.raises(u.UnitsError) as e: - subtract_dark(ccd_data, master, exposure_time='exptime', - exposure_unit=u.second) + subtract_dark(ccd_data, master, exposure_time="exptime", exposure_unit=u.second) assert "uncalibrated image" in str(e.value) # Fail when the arrays are not the same size @@ -450,14 +507,14 @@ def test_unit_mismatch_behaves_as_expected(): bad_unit = ccd_data.copy() bad_unit.unit = u.meter - if astropy.__version__.startswith('1.0'): + if astropy.__version__.startswith("1.0"): expected_error = ValueError - expected_message = 'operand units' + expected_message = "operand units" else: expected_error = u.UnitConversionError # Make this an empty string, which always matches. In this case # we are really only checking by the type of error raised. - expected_message = '' + expected_message = "" # Did we raise the right error? with pytest.raises(expected_error) as e: @@ -471,7 +528,7 @@ def test_unit_mismatch_behaves_as_expected(): def test_flat_correct(): ccd_data = ccd_data_func(data_scale=10) # Add metadata to header for a test below... - ccd_data.header['my_key'] = 42 + ccd_data.header["my_key"] = 42 size = ccd_data.shape[0] # create the flat, with some scatter data = 2 * np.random.normal(loc=1.0, scale=0.05, size=(size, size)) @@ -481,10 +538,12 @@ def test_flat_correct(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. - np.testing.assert_almost_equal((flat_data.data * flat.data).mean(), - ccd_data.data.mean() * flat.data.mean()) - np.testing.assert_allclose(ccd_data.data / flat_data.data, - flat.data / flat.data.mean()) + np.testing.assert_almost_equal( + (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() + ) + np.testing.assert_allclose( + ccd_data.data / flat_data.data, flat.data / flat.data.mean() + ) # Check that metadata is unchanged (since logging is turned off) assert flat_data.header == ccd_data.header @@ -509,10 +568,12 @@ def test_flat_correct_min_value(data_scale=1, data_mean=5): # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) np.testing.assert_almost_equal( (flat_corrected_data.data * flat_with_min.data).mean(), - (ccd_data.data * flat_with_min.data.mean()).mean() + (ccd_data.data * flat_with_min.data.mean()).mean(), + ) + np.testing.assert_allclose( + ccd_data.data / flat_corrected_data.data, + flat_with_min.data / flat_with_min.data.mean(), ) - np.testing.assert_allclose(ccd_data.data / flat_corrected_data.data, - flat_with_min.data / flat_with_min.data.mean()) # Test that flat is not modified. 
assert (flat_orig_data == flat.data).all() @@ -530,16 +591,15 @@ def test_flat_correct_norm_value(): flat_mean = 5.0 data = np.random.normal(loc=1.0, scale=0.05, size=ccd_data.shape) flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) - flat_data = flat_correct(ccd_data, flat, add_keyword=None, - norm_value=flat_mean) + flat_data = flat_correct(ccd_data, flat, add_keyword=None, norm_value=flat_mean) # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. - np.testing.assert_almost_equal((flat_data.data * flat.data).mean(), - ccd_data.data.mean() * flat_mean) - np.testing.assert_allclose(ccd_data.data / flat_data.data, - flat.data / flat_mean) + np.testing.assert_almost_equal( + (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean + ) + np.testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) def test_flat_correct_norm_value_bad_value(): @@ -571,10 +631,9 @@ def test_flat_correct_deviation(): # Test the uncertainty on the data after flat correction def test_flat_correct_data_uncertainty(): # Regression test for #345 - dat = CCDData(np.ones([100, 100]), unit='adu', - uncertainty=np.ones([100, 100])) + dat = CCDData(np.ones([100, 100]), unit="adu", uncertainty=np.ones([100, 100])) # Note flat is set to 10, error, if present, is set to one. - flat = CCDData(10 * np.ones([100, 100]), unit='adu') + flat = CCDData(10 * np.ones([100, 100]), unit="adu") res = flat_correct(dat, flat) assert (res.data == dat.data).all() assert (res.uncertainty.array == dat.uncertainty.array).all() @@ -624,13 +683,11 @@ def tran(arr): # Issue warning when data has WCS. ccd_data.wcs = wcs_for_testing(ccd_data.shape) - with pytest.warns(UserWarning, match='WCS information may be incorrect'): + with pytest.warns(UserWarning, match="WCS information may be incorrect"): transform_image(ccd_data, tran) -@pytest.mark.parametrize('mask_data, uncertainty', [ - (False, False), - (True, True)]) +@pytest.mark.parametrize("mask_data, uncertainty", [(False, False), (True, True)]) def test_transform_image(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: @@ -651,54 +708,60 @@ def tran(arr): assert_array_equal(ccd_data.mask, tran.mask) if uncertainty: assert tran.shape == tran.uncertainty.array.shape - assert_array_equal(10 * ccd_data.uncertainty.array, - tran.uncertainty.array) + assert_array_equal(10 * ccd_data.uncertainty.array, tran.uncertainty.array) # Test block_reduce and block_replicate wrapper @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") -@pytest.mark.skipif((skimage.__version__ < '0.14.2') and - ('dev' in np.__version__), - reason="Incompatibility between scikit-image " - "and numpy 1.16") +@pytest.mark.skipif( + (skimage.__version__ < "0.14.2") and ("dev" in np.__version__), + reason="Incompatibility between scikit-image " "and numpy 1.16", +) def test_block_reduce(): - ccd = CCDData(np.ones((4, 4)), unit='adu', meta={'testkw': 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4))) - ) + ccd = CCDData( + np.ones((4, 4)), + unit="adu", + meta={"testkw": 1}, + mask=np.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(np.ones((4, 4))), + ) with pytest.warns(AstropyUserWarning) as w: ccd_summed = block_reduce(ccd, (2, 2)) assert len(w) == 1 - assert 'following attributes were set' in str(w[0].message) + assert "following attributes were set" in str(w[0].message) assert 
isinstance(ccd_summed, CCDData) assert np.all(ccd_summed.data == 4) assert ccd_summed.data.shape == (2, 2) assert ccd_summed.unit == u.adu # Other attributes are set to None. In case the function is modified to # work on these attributes correctly those tests need to be updated! - assert ccd_summed.meta == {'testkw': 1} + assert ccd_summed.meta == {"testkw": 1} assert ccd_summed.mask is None assert ccd_summed.uncertainty is None # Make sure meta is copied - ccd_summed.meta['testkw2'] = 10 - assert 'testkw2' not in ccd.meta + ccd_summed.meta["testkw2"] = 10 + assert "testkw2" not in ccd.meta @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") -@pytest.mark.skipif((skimage.__version__ < '0.14.2') and - ('dev' in np.__version__), - reason="Incompatibility between scikit-image " - "and numpy 1.16") +@pytest.mark.skipif( + (skimage.__version__ < "0.14.2") and ("dev" in np.__version__), + reason="Incompatibility between scikit-image " "and numpy 1.16", +) def test_block_average(): - ccd = CCDData(np.ones((4, 4)), unit='adu', meta={'testkw': 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4)))) + ccd = CCDData( + np.ones((4, 4)), + unit="adu", + meta={"testkw": 1}, + mask=np.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(np.ones((4, 4))), + ) ccd.data[::2, ::2] = 2 with pytest.warns(AstropyUserWarning) as w: ccd_avgd = block_average(ccd, (2, 2)) assert len(w) == 1 - assert 'following attributes were set' in str(w[0].message) + assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_avgd, CCDData) assert np.all(ccd_avgd.data == 1.25) @@ -706,25 +769,29 @@ def test_block_average(): assert ccd_avgd.unit == u.adu # Other attributes are set to None. In case the function is modified to # work on these attributes correctly those tests need to be updated! - assert ccd_avgd.meta == {'testkw': 1} + assert ccd_avgd.meta == {"testkw": 1} assert ccd_avgd.mask is None assert ccd_avgd.wcs is None assert ccd_avgd.uncertainty is None # Make sure meta is copied - ccd_avgd.meta['testkw2'] = 10 - assert 'testkw2' not in ccd.meta + ccd_avgd.meta["testkw2"] = 10 + assert "testkw2" not in ccd.meta @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") def test_block_replicate(): - ccd = CCDData(np.ones((4, 4)), unit='adu', meta={'testkw': 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4)))) + ccd = CCDData( + np.ones((4, 4)), + unit="adu", + meta={"testkw": 1}, + mask=np.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(np.ones((4, 4))), + ) with pytest.warns(AstropyUserWarning) as w: ccd_repl = block_replicate(ccd, (2, 2)) assert len(w) == 1 - assert 'following attributes were set' in str(w[0].message) + assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_repl, CCDData) assert np.all(ccd_repl.data == 0.25) @@ -732,14 +799,14 @@ def test_block_replicate(): assert ccd_repl.unit == u.adu # Other attributes are set to None. In case the function is modified to # work on these attributes correctly those tests need to be updated! 
- assert ccd_repl.meta == {'testkw': 1} + assert ccd_repl.meta == {"testkw": 1} assert ccd_repl.mask is None assert ccd_repl.wcs is None assert ccd_repl.uncertainty is None # Make sure meta is copied - ccd_repl.meta['testkw2'] = 10 - assert 'testkw2' not in ccd.meta + ccd_repl.meta["testkw2"] = 10 + assert "testkw2" not in ccd.meta # Test blockaveraging ndarray @@ -777,7 +844,9 @@ def test__overscan_schange(): def test_create_deviation_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - _ = create_deviation(ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron) + _ = create_deviation( + ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron + ) np.testing.assert_array_equal(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -787,7 +856,9 @@ def test_cosmicray_median_does_not_change_input(): original = ccd_data.copy() error = np.zeros_like(ccd_data) with np.errstate(invalid="ignore", divide="ignore"): - _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0) + _ = cosmicray_median( + ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0 + ) np.testing.assert_array_equal(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -911,12 +982,10 @@ def test_wcs_project_onto_shifted_wcs(): # that the pixels should all be shifted. masked_input = np.ma.array(ccd_data.data, mask=ccd_data.mask) masked_output = np.ma.array(new_ccd.data, mask=new_ccd.mask) - np.testing.assert_allclose(masked_input[:-1, :-1], - masked_output[1:, 1:], rtol=1e-5) + np.testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) # The masks should all be shifted too. - np.testing.assert_array_equal(ccd_data.mask[:-1, :-1], - new_ccd.mask[1:, 1:]) + np.testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) # We should have more values that are masked in the output array # than on input because some on output were not in the footprint @@ -945,8 +1014,7 @@ def test_wcs_project_onto_scale_wcs(): # Make mask zero... ccd_data.mask = np.zeros_like(ccd_data.data) # ...except the center pixel, which is one. - ccd_data.mask[int(ccd_data.wcs.wcs.crpix[0]), - int(ccd_data.wcs.wcs.crpix[1])] = 1 + ccd_data.mask[int(ccd_data.wcs.wcs.crpix[0]), int(ccd_data.wcs.wcs.crpix[1])] = 1 target_wcs = wcs_for_testing(ccd_data.shape) target_wcs.wcs.cdelt /= 2 @@ -957,9 +1025,9 @@ def test_wcs_project_onto_scale_wcs(): # Explicitly set the interpolation method so we know what to # expect for the mass. - new_ccd = wcs_project(ccd_data, target_wcs, - target_shape=target_shape, - order='nearest-neighbor') + new_ccd = wcs_project( + ccd_data, target_wcs, target_shape=target_shape, order="nearest-neighbor" + ) # Make sure new image has correct WCS. assert new_ccd.wcs.wcs.compare(target_wcs.wcs) @@ -967,20 +1035,22 @@ def test_wcs_project_onto_scale_wcs(): # Define a cutout from the new array that should match the old. new_lower_bound = (np.array(new_ccd.shape) - np.array(ccd_data.shape)) // 2 new_upper_bound = (np.array(new_ccd.shape) + np.array(ccd_data.shape)) // 2 - data_cutout = new_ccd.data[new_lower_bound[0]:new_upper_bound[0], - new_lower_bound[1]:new_upper_bound[1]] + data_cutout = new_ccd.data[ + new_lower_bound[0] : new_upper_bound[0], new_lower_bound[1] : new_upper_bound[1] + ] # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels have been scaled. 
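    # (Added note: the factor of 4 in the comparison below follows directly
    # from halving cdelt above -- each original pixel now spans four target
    # pixels.)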
- np.testing.assert_allclose(ccd_data.data / 4, - data_cutout, - rtol=1e-5) + np.testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. new_center = np.array(new_ccd.wcs.wcs.crpix, dtype=int, copy=_NUMPY_COPY_IF_NEEDED) - assert np.all(new_ccd.mask[new_center[0]:new_center[0]+2, - new_center[1]:new_center[1]+2]) + assert np.all( + new_ccd.mask[ + new_center[0] : new_center[0] + 2, new_center[1] : new_center[1] + 2 + ] + ) # Those four, and any that reproject made nan because they draw on # pixels outside the footprint of the original image, are the only @@ -991,8 +1061,7 @@ def test_wcs_project_onto_scale_wcs(): def test_ccd_process_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - _ = ccd_process(ccd_data, gain=5 * u.electron / u.adu, - readnoise=10 * u.electron) + _ = ccd_process(ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron) np.testing.assert_array_equal(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -1033,7 +1102,7 @@ def test_ccd_process(): # Test the through ccd_process ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) ccd_data.data[:, -10:] = 2 - ccd_data.meta['testkw'] = 100 + ccd_data.meta["testkw"] = 100 mask = np.zeros((100, 90)) @@ -1046,31 +1115,39 @@ def test_ccd_process(): masterflat = CCDData(10.0 * np.ones((100, 90)), unit=u.electron) masterflat.uncertainty = StdDevUncertainty(np.zeros((100, 90))) - occd = ccd_process(ccd_data, oscan=ccd_data[:, -10:], trim='[1:90,1:100]', - error=True, master_bias=masterbias, - master_flat=masterflat, dark_frame=dark_frame, - bad_pixel_mask=mask, gain=0.5 * u.electron/u.adu, - readnoise=5**0.5 * u.electron, oscan_median=True, - dark_scale=False, dark_exposure=1.*u.s, - data_exposure=1.*u.s) + occd = ccd_process( + ccd_data, + oscan=ccd_data[:, -10:], + trim="[1:90,1:100]", + error=True, + master_bias=masterbias, + master_flat=masterflat, + dark_frame=dark_frame, + bad_pixel_mask=mask, + gain=0.5 * u.electron / u.adu, + readnoise=5**0.5 * u.electron, + oscan_median=True, + dark_scale=False, + dark_exposure=1.0 * u.s, + data_exposure=1.0 * u.s, + ) # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 np.testing.assert_array_equal(2.0 * np.ones((100, 90)), occd.data) - np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), - occd.uncertainty.array) + np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array) np.testing.assert_array_equal(mask, occd.mask) - assert(occd.unit == u.electron) + assert occd.unit == u.electron # Make sure the original keyword is still present. 
Regression test for #401 - assert occd.meta['testkw'] == 100 + assert occd.meta["testkw"] == 100 def test_ccd_process_gain_corrected(): # Test the through ccd_process with gain_corrected as False ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) ccd_data.data[:, -10:] = 2 - ccd_data.meta['testkw'] = 100 + ccd_data.meta["testkw"] = 100 mask = np.zeros((100, 90)) @@ -1083,21 +1160,30 @@ def test_ccd_process_gain_corrected(): masterflat = CCDData(5.0 * np.ones((100, 90)), unit=u.adu) masterflat.uncertainty = StdDevUncertainty(np.zeros((100, 90))) - occd = ccd_process(ccd_data, oscan=ccd_data[:, -10:], trim='[1:90,1:100]', - error=True, master_bias=masterbias, - master_flat=masterflat, dark_frame=dark_frame, - bad_pixel_mask=mask, gain=0.5 * u.electron/u.adu, - readnoise=5**0.5 * u.electron, oscan_median=True, - dark_scale=False, dark_exposure=1.*u.s, - data_exposure=1.*u.s, gain_corrected=False) + occd = ccd_process( + ccd_data, + oscan=ccd_data[:, -10:], + trim="[1:90,1:100]", + error=True, + master_bias=masterbias, + master_flat=masterflat, + dark_frame=dark_frame, + bad_pixel_mask=mask, + gain=0.5 * u.electron / u.adu, + readnoise=5**0.5 * u.electron, + oscan_median=True, + dark_scale=False, + dark_exposure=1.0 * u.s, + data_exposure=1.0 * u.s, + gain_corrected=False, + ) # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 np.testing.assert_array_equal(2.0 * np.ones((100, 90)), occd.data) - np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), - occd.uncertainty.array) + np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array) np.testing.assert_array_equal(mask, occd.mask) - assert(occd.unit == u.electron) + assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 - assert occd.meta['testkw'] == 100 + assert occd.meta["testkw"] == 100 diff --git a/ccdproc/tests/test_ccdproc_logging.py b/ccdproc/tests/test_ccdproc_logging.py index 1af1896e..c040416f 100644 --- a/ccdproc/tests/test_ccdproc_logging.py +++ b/ccdproc/tests/test_ccdproc_logging.py @@ -10,14 +10,11 @@ from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func -@pytest.mark.parametrize('key', [ - 'short', - 'toolongforfits']) +@pytest.mark.parametrize("key", ["short", "toolongforfits"]) def test_log_string(key): ccd_data = ccd_data_func() add_key = key - new = create_deviation(ccd_data, readnoise=3 * ccd_data.unit, - add_keyword=add_key) + new = create_deviation(ccd_data, readnoise=3 * ccd_data.unit, add_keyword=add_key) # Keys should be added to new but not to ccd_data and should have # no value. assert add_key in new.meta @@ -29,11 +26,10 @@ def test_log_string(key): def test_log_keyword(): ccd_data = ccd_data_func() - key = 'filter' - key_val = 'V' + key = "filter" + key_val = "V" kwd = Keyword(key, value=key_val) - new = create_deviation(ccd_data, readnoise=3 * ccd_data.unit, - add_keyword=kwd) + new = create_deviation(ccd_data, readnoise=3 * ccd_data.unit, add_keyword=kwd) # Was the Keyword added with the correct value? 
assert kwd.name in new.meta assert kwd.name not in ccd_data.meta @@ -43,12 +39,13 @@ def test_log_keyword(): def test_log_dict(): ccd_data = ccd_data_func() keys_to_add = { - 'process': 'Added deviation', - 'n_images_input': 1, - 'current_temp': 42.9 + "process": "Added deviation", + "n_images_input": 1, + "current_temp": 42.9, } - new = create_deviation(ccd_data, readnoise=3 * ccd_data.unit, - add_keyword=keys_to_add) + new = create_deviation( + ccd_data, readnoise=3 * ccd_data.unit, add_keyword=keys_to_add + ) for k, v in keys_to_add.items(): # Were all dictionary items added? assert k in new.meta @@ -58,17 +55,15 @@ def test_log_dict(): def test_log_bad_type_fails(): ccd_data = ccd_data_func() - add_key = 15 # anything not string and not dict-like will work here + add_key = 15 # anything not string and not dict-like will work here # Do we fail with non-string, non-Keyword, non-dict-like value? with pytest.raises(AttributeError): - create_deviation(ccd_data, readnoise=3 * ccd_data.unit, - add_keyword=add_key) + create_deviation(ccd_data, readnoise=3 * ccd_data.unit, add_keyword=add_key) def test_log_set_to_None_does_not_change_header(): ccd_data = ccd_data_func() - new = create_deviation(ccd_data, readnoise=3 * ccd_data.unit, - add_keyword=None) + new = create_deviation(ccd_data, readnoise=3 * ccd_data.unit, add_keyword=None) assert new.meta.keys() == ccd_data.header.keys() @@ -81,34 +76,35 @@ def test_implicit_logging(): bias = CCDData(np.zeros_like(ccd_data.data), unit="adu") result = subtract_bias(ccd_data, bias) assert "subtract_bias" in result.header - assert result.header['subtract_bias'] == ( - 'subbias', 'Shortened name for ccdproc command') - assert result.header['subbias'] == "ccd=, master=" + assert result.header["subtract_bias"] == ( + "subbias", + "Shortened name for ccdproc command", + ) + assert result.header["subbias"] == "ccd=, master=" result = create_deviation(ccd_data, readnoise=3 * ccd_data.unit) - assert result.header['create_deviation'] == ( - 'creatvar', 'Shortened name for ccdproc command') - assert ("readnoise=" + str(3 * ccd_data.unit) in - result.header['creatvar']) + assert result.header["create_deviation"] == ( + "creatvar", + "Shortened name for ccdproc command", + ) + assert "readnoise=" + str(3 * ccd_data.unit) in result.header["creatvar"] def test_loggin_without_keyword_args(): # Regression test for the first failure in #704, which fails because # there is no "fits_section" keyword in the call to trim_image. 
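    # (Added note: the logged header value records the call's arguments --
    # compare the "ccd=, master=" entry asserted in test_implicit_logging
    # above -- so the logging machinery has to cope with arguments passed
    # positionally as well as by keyword.)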
- ccd = CCDData(data=np.arange(1000).reshape(20, 50), - header=None, - unit='adu') + ccd = CCDData(data=np.arange(1000).reshape(20, 50), header=None, unit="adu") section = "[10:20, 10:20]" trim_1 = trim_image(ccd, "[10:20, 10:20]") - assert section in trim_1.header[_short_names['trim_image']] + assert section in trim_1.header[_short_names["trim_image"]] def test_logging_with_really_long_parameter_value(): # Another regression test for the trim_3 case in #704 - ccd = CCDData(data=np.arange(1000).reshape(20, 50), - header=None, - unit='adu') - section = ("[10:2000000000000000000000000000000000000000000000000000000, " - "10:2000000000000000000000000000000]") + ccd = CCDData(data=np.arange(1000).reshape(20, 50), header=None, unit="adu") + section = ( + "[10:2000000000000000000000000000000000000000000000000000000, " + "10:2000000000000000000000000000000]" + ) trim_3 = trim_image(ccd, fits_section=section) - assert section in trim_3.header[_short_names['trim_image']] + assert section in trim_3.header[_short_names["trim_image"]] diff --git a/ccdproc/tests/test_combine_open_files.py b/ccdproc/tests/test_combine_open_files.py index 10f6e915..015228ea 100644 --- a/ccdproc/tests/test_combine_open_files.py +++ b/ccdproc/tests/test_combine_open_files.py @@ -11,15 +11,23 @@ # subprocess we can add that direction to sys.path. subprocess_dir = run_dir.parent.parent -OVERHEAD = '4' -NUM_FILE_LIMIT = '20' -common_args = [sys.executable, str(run_dir / 'run_with_file_number_limit.py'), - '--kind', 'fits', '--overhead', OVERHEAD] +OVERHEAD = "4" +NUM_FILE_LIMIT = "20" +common_args = [ + sys.executable, + str(run_dir / "run_with_file_number_limit.py"), + "--kind", + "fits", + "--overhead", + OVERHEAD, +] # Regression test for #629 -@pytest.mark.skipif(os.environ.get('APPVEYOR') or os.sys.platform == 'win32', - reason='Test relies on linux/osx features of psutil') +@pytest.mark.skipif( + os.environ.get("APPVEYOR") or os.sys.platform == "win32", + reason="Test relies on linux/osx features of psutil", +) def test_open_files_combine_no_chunks(): """ Test that we are not opening (much) more than the number of files @@ -27,7 +35,7 @@ def test_open_files_combine_no_chunks(): """ # Make a copy args = list(common_args) - args.extend(['--open-by', 'combine-nochunk', NUM_FILE_LIMIT]) + args.extend(["--open-by", "combine-nochunk", NUM_FILE_LIMIT]) p = subprocess.run(args=args, cwd=str(subprocess_dir)) # If we have succeeded the test passes. We are only checking that # we don't have too many files open. @@ -35,8 +43,10 @@ def test_open_files_combine_no_chunks(): # Regression test for #629 -@pytest.mark.skipif(os.environ.get('APPVEYOR') or os.sys.platform == 'win32', - reason='Test relies on linux/osx features of psutil') +@pytest.mark.skipif( + os.environ.get("APPVEYOR") or os.sys.platform == "win32", + reason="Test relies on linux/osx features of psutil", +) def test_open_files_combine_chunks(): """ Test that we are not opening (much) more than the number of files @@ -44,7 +54,7 @@ def test_open_files_combine_chunks(): """ # Make a copy args = list(common_args) - args.extend(['--open-by', 'combine-chunk', NUM_FILE_LIMIT]) + args.extend(["--open-by", "combine-chunk", NUM_FILE_LIMIT]) p = subprocess.run(args=args, cwd=str(subprocess_dir)) # If we have succeeded the test passes. We are only checking that # we don't have too many files open. 
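A note on the two open-file tests above: they only assert that the subprocess
exits cleanly, because both the descriptor counting and the limit itself live
in run_with_file_number_limit.py and have to be imposed on a fresh process.
As a minimal sketch of the counting half -- not code from that script, and
assuming psutil is available (the linux/osx skip condition above already
presupposes it):

    import psutil

    def n_open_fits():
        # psutil.Process() with no argument refers to the current process;
        # open_files() returns one entry per file descriptor it holds open.
        return sum(
            1
            for f in psutil.Process().open_files()
            if f.path.endswith((".fit", ".fits", ".fts"))
        )

On POSIX systems the limit half is typically imposed with
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard)) before combine is
called, which is presumably why these tests are restricted to linux/osx.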
diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index a2ab84fa..fb9b249a 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -11,18 +11,23 @@ from astropy.utils.data import get_pkg_data_filename from astropy.nddata import CCDData -from ccdproc.combiner import (Combiner, combine, _calculate_step_sizes, - _default_std, sigma_func) +from ccdproc.combiner import ( + Combiner, + combine, + _calculate_step_sizes, + _default_std, + sigma_func, +) from ccdproc.image_collection import ImageFileCollection from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func -SUPER_OLD_ASTROPY = parse(astropy.__version__) < Version('4.3.0') +SUPER_OLD_ASTROPY = parse(astropy.__version__) < Version("4.3.0") # Several tests have many more NaNs in them than real data. numpy generates # lots of warnings in those cases and it makes more sense to suppress them # than to generate them. pytestmark = pytest.mark.filterwarnings( - 'ignore:All-NaN slice encountered:RuntimeWarning' + "ignore:All-NaN slice encountered:RuntimeWarning" ) @@ -97,7 +102,7 @@ def test_combiner_dtype(): def test_combiner_mask(): data = np.zeros((10, 10)) data[5, 5] = 1 - mask = (data == 0) + mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) @@ -123,9 +128,11 @@ def test_weights_shape(): def test_1Dweights(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + ] c = Combiner(ccd_list) c.weights = np.array([1, 5, 10]) @@ -134,9 +141,11 @@ def test_1Dweights(): def test_pixelwise_weights(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + ] c = Combiner(ccd_list) c.weights = np.ones_like(c.data_arr) c.weights[:, 5, 5] = [1, 5, 10] @@ -147,9 +156,11 @@ def test_pixelwise_weights(): # test the min-max rejection def test_combiner_minmax(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + ] c = Combiner(ccd_list) c.minmax_clipping(min_clip=-500, max_clip=500) @@ -158,9 +169,11 @@ def test_combiner_minmax(): def test_combiner_minmax_max(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + ] c = Combiner(ccd_list) c.minmax_clipping(min_clip=None, max_clip=500) @@ -168,9 +181,11 @@ def test_combiner_minmax_max(): def test_combiner_minmax_min(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 1000, 
unit=u.adu), + CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + ] c = Combiner(ccd_list) c.minmax_clipping(min_clip=-500, max_clip=None) @@ -178,27 +193,30 @@ def test_combiner_minmax_min(): def test_combiner_sigmaclip_high(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(np.zeros((10, 10)) + 10, unit=u.adu), + CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(np.zeros((10, 10)) + 10, unit=u.adu), + CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + ] c = Combiner(ccd_list) # using mad for more robust statistics vs. std - c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, - dev_func=mad) + c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) assert c.data_arr[5].mask.all() def test_combiner_sigmaclip_single_pix(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(np.zeros((10, 10)) + 10, unit=u.adu), + CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(np.zeros((10, 10)) + 10, unit=u.adu), + CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + ] c = Combiner(ccd_list) # add a single pixel in another array to check that # that one gets rejected @@ -207,23 +225,23 @@ def test_combiner_sigmaclip_single_pix(): c.data_arr[2, 5, 5] = 5 c.data_arr[3, 5, 5] = -5 c.data_arr[4, 5, 5] = 25 - c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, - dev_func=mad) + c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) assert c.data_arr.mask[4, 5, 5] def test_combiner_sigmaclip_low(): - ccd_list = [CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu)] + ccd_list = [ + CCDData(np.zeros((10, 10)), unit=u.adu), + CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(np.zeros((10, 10)) + 10, unit=u.adu), + CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(np.zeros((10, 10)) + 10, unit=u.adu), + CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), + ] c = Combiner(ccd_list) # using mad for more robust statistics vs. 
std - c.sigma_clipping(high_thresh=None, low_thresh=3, func=np.ma.median, - dev_func=mad) + c.sigma_clipping(high_thresh=None, low_thresh=3, func=np.ma.median, dev_func=mad) assert c.data_arr[5].mask.all() @@ -236,7 +254,7 @@ def test_combiner_median(): assert isinstance(ccd, CCDData) assert ccd.shape == (100, 100) assert ccd.unit == u.adu - assert ccd.meta['NCOMBINE'] == len(ccd_list) + assert ccd.meta["NCOMBINE"] == len(ccd_list) # test that the average combination works and returns a ccddata object @@ -248,7 +266,7 @@ def test_combiner_average(): assert isinstance(ccd, CCDData) assert ccd.shape == (100, 100) assert ccd.unit == u.adu - assert ccd.meta['NCOMBINE'] == len(ccd_list) + assert ccd.meta["NCOMBINE"] == len(ccd_list) # test that the sum combination works and returns a ccddata object @@ -260,52 +278,44 @@ def test_combiner_sum(): assert isinstance(ccd, CCDData) assert ccd.shape == (100, 100) assert ccd.unit == u.adu - assert ccd.meta['NCOMBINE'] == len(ccd_list) + assert ccd.meta["NCOMBINE"] == len(ccd_list) # test weighted sum def test_combiner_sum_weighted(): - ccd_data = CCDData(data=[[0, 1], [2, 3]], unit='adu') + ccd_data = CCDData(data=[[0, 1], [2, 3]], unit="adu") ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) c.weights = np.array([1, 2, 3]) ccd = c.sum_combine() - expected_result = sum(w * d.data for w, d in - zip(c.weights, ccd_list)) - np.testing.assert_almost_equal(ccd, - expected_result) + expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list)) + np.testing.assert_almost_equal(ccd, expected_result) # test weighted sum def test_combiner_sum_weighted_by_pixel(): - ccd_data = CCDData(data=[[1, 2], [4, 8]], unit='adu') + ccd_data = CCDData(data=[[1, 2], [4, 8]], unit="adu") ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) # Weights below are chosen so that every entry in - weights_pixel = [ - [8, 4], - [2, 1] - ] + weights_pixel = [[8, 4], [2, 1]] c.weights = np.array([weights_pixel] * 3) ccd = c.sum_combine() - expected_result = [ - [24, 24], - [24, 24] - ] + expected_result = [[24, 24], [24, 24]] np.testing.assert_almost_equal(ccd, expected_result) # This warning is generated by numpy and is expected when # many pixels are masked. @pytest.mark.filterwarnings( - 'ignore:Mean of empty slice:RuntimeWarning', - 'ignore:Degrees of freedom <= 0:RuntimeWarning' + "ignore:Mean of empty slice:RuntimeWarning", + "ignore:Degrees of freedom <= 0:RuntimeWarning", ) def test_combiner_mask_average(): # test data combined with mask is created correctly data = np.zeros((10, 10)) data[5, 5] = 1 - mask = (data == 0) + mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) @@ -333,19 +343,16 @@ def test_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np.testing.assert_almost_equal(avg_ccd.data.mean(), - ccd_data.data.mean()) + np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np.testing.assert_almost_equal(np.median(median_ccd.data), - np.median(ccd_data.data)) + np.testing.assert_almost_equal(np.median(median_ccd.data), np.median(ccd_data.data)) # Set the scaling manually... 
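    # (Added note: Combiner.scaling accepts either a callable applied
    # image-by-image, as used earlier in this test, or -- as on the next
    # line -- a precomputed sequence with one factor per image.)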
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np.testing.assert_almost_equal(avg_ccd.data.mean(), - ccd_data.data.mean()) + np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape @@ -361,7 +368,7 @@ def test_combiner_scaling_fails(): def test_combiner_mask_median(): data = np.zeros((10, 10)) data[5, 5] = 1 - mask = (data == 0) + mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) @@ -374,14 +381,12 @@ def test_combiner_mask_median(): # Ignore warnings generated because most values are masked -@pytest.mark.filterwarnings( - 'ignore:Degrees of freedom <= 0:RuntimeWarning' -) +@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0:RuntimeWarning") def test_combiner_mask_sum(): # test data combined with mask is created correctly data = np.zeros((10, 10)) data[5, 5] = 1 - mask = (data == 0) + mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) @@ -394,34 +399,32 @@ def test_combiner_mask_sum(): # test combiner convenience function reads fits file and combine as expected def test_combine_average_fitsimages(): - fitsfile = get_pkg_data_filename('data/a8280271.fits', package='ccdproc.tests') + fitsfile = get_pkg_data_filename("data/a8280271.fits", package="ccdproc.tests") ccd = CCDData.read(fitsfile, unit=u.adu) ccd_list = [ccd] * 3 c = Combiner(ccd_list) ccd_by_combiner = c.average_combine() fitsfilename_list = [fitsfile] * 3 - avgccd = combine(fitsfilename_list, output_file=None, - method='average', unit=u.adu) + avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) def test_combine_numpyndarray(): - """ Test of numpy ndarray implementation: #493 + """Test of numpy ndarray implementation: #493 Test the average combine using ``Combiner`` and ``combine`` with input ``img_list`` in the format of ``numpy.ndarray``. 
""" - fitsfile = get_pkg_data_filename('data/a8280271.fits') + fitsfile = get_pkg_data_filename("data/a8280271.fits") ccd = CCDData.read(fitsfile, unit=u.adu) ccd_list = [ccd] * 3 c = Combiner(ccd_list) ccd_by_combiner = c.average_combine() fitsfilename_list = np.array([fitsfile] * 3) - avgccd = combine(fitsfilename_list, output_file=None, - method='average', unit=u.adu) + avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) @@ -431,7 +434,7 @@ def test_combiner_result_dtype(): The result should have the appropriate dtype not the dtype of the first input.""" - ccd = CCDData(np.ones((3, 3), dtype=np.uint16), unit='adu') + ccd = CCDData(np.ones((3, 3), dtype=np.uint16), unit="adu") res = combine([ccd, ccd.multiply(2)]) # The default dtype of Combiner is float64 assert res.data.dtype == np.float64 @@ -449,12 +452,11 @@ def test_combiner_image_file_collection_input(tmp_path): # Regression check for #754 ccd = ccd_data_func() for i in range(3): - ccd.write(tmp_path / f'ccd-{i}.fits') + ccd.write(tmp_path / f"ccd-{i}.fits") ifc = ImageFileCollection(tmp_path) comb = Combiner(ifc.ccds()) - np.testing.assert_array_almost_equal(ccd.data, - comb.average_combine().data) + np.testing.assert_array_almost_equal(ccd.data, comb.average_combine().data) def test_combine_image_file_collection_input(tmp_path): @@ -462,19 +464,16 @@ def test_combine_image_file_collection_input(tmp_path): # combine function instead of Combiner ccd = ccd_data_func() for i in range(3): - ccd.write(tmp_path / f'ccd-{i}.fits') + ccd.write(tmp_path / f"ccd-{i}.fits") ifc = ImageFileCollection(tmp_path) - comb_files = combine(ifc.files_filtered(include_path=True), - method='average') + comb_files = combine(ifc.files_filtered(include_path=True), method="average") - comb_ccds = combine(ifc.ccds(), method='average') + comb_ccds = combine(ifc.ccds(), method="average") - np.testing.assert_array_almost_equal(ccd.data, - comb_files.data) - np.testing.assert_array_almost_equal(ccd.data, - comb_ccds.data) + np.testing.assert_array_almost_equal(ccd.data, comb_files.data) + np.testing.assert_array_almost_equal(ccd.data, comb_ccds.data) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the @@ -484,13 +483,13 @@ def test_combine_image_file_collection_input(tmp_path): # test combiner convenience function works with list of ccddata objects def test_combine_average_ccddata(): - fitsfile = get_pkg_data_filename('data/a8280271.fits') + fitsfile = get_pkg_data_filename("data/a8280271.fits") ccd = CCDData.read(fitsfile, unit=u.adu) ccd_list = [ccd] * 3 c = Combiner(ccd_list) ccd_by_combiner = c.average_combine() - avgccd = combine(ccd_list, output_file=None, method='average', unit=u.adu) + avgccd = combine(ccd_list, output_file=None, method="average", unit=u.adu) # averaging same ccdData should give back same images np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) @@ -498,15 +497,16 @@ def test_combine_average_ccddata(): # test combiner convenience function reads fits file and # and combine as expected when asked to run in limited memory def test_combine_limitedmem_fitsimages(): - fitsfile = get_pkg_data_filename('data/a8280271.fits') + fitsfile = get_pkg_data_filename("data/a8280271.fits") ccd = CCDData.read(fitsfile, unit=u.adu) ccd_list = [ccd] * 5 c = Combiner(ccd_list) ccd_by_combiner = c.average_combine() 
fitsfilename_list = [fitsfile] * 5 - avgccd = combine(fitsfilename_list, output_file=None, method='average', - mem_limit=1e6, unit=u.adu) + avgccd = combine( + fitsfilename_list, output_file=None, method="average", mem_limit=1e6, unit=u.adu + ) # averaging same ccdData should give back same images np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) @@ -514,7 +514,7 @@ def test_combine_limitedmem_fitsimages(): # test combiner convenience function reads fits file and # and combine as expected when asked to run in limited memory with scaling def test_combine_limitedmem_scale_fitsimages(): - fitsfile = get_pkg_data_filename('data/a8280271.fits') + fitsfile = get_pkg_data_filename("data/a8280271.fits") ccd = CCDData.read(fitsfile, unit=u.adu) ccd_list = [ccd] * 5 c = Combiner(ccd_list) @@ -524,11 +524,16 @@ def test_combine_limitedmem_scale_fitsimages(): ccd_by_combiner = c.average_combine() fitsfilename_list = [fitsfile] * 5 - avgccd = combine(fitsfilename_list, output_file=None, method='average', - mem_limit=1e6, scale=scale_by_mean, unit=u.adu) + avgccd = combine( + fitsfilename_list, + output_file=None, + method="average", + mem_limit=1e6, + scale=scale_by_mean, + unit=u.adu, + ) - np.testing.assert_array_almost_equal( - avgccd.data, ccd_by_combiner.data, decimal=4) + np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data, decimal=4) # test the optional uncertainty function in average_combine @@ -541,11 +546,9 @@ def test_average_combine_uncertainty(): np.testing.assert_array_equal(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method='average', - combine_uncertainty_function=np.sum) + ccd2 = combine(ccd_list, method="average", combine_uncertainty_function=np.sum) np.testing.assert_array_equal(ccd.data, ccd2.data) - np.testing.assert_array_equal( - ccd.uncertainty.array, ccd2.uncertainty.array) + np.testing.assert_array_equal(ccd.uncertainty.array, ccd2.uncertainty.array) # test the optional uncertainty function in median_combine @@ -558,11 +561,9 @@ def test_median_combine_uncertainty(): np.testing.assert_array_equal(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method='median', - combine_uncertainty_function=np.sum) + ccd2 = combine(ccd_list, method="median", combine_uncertainty_function=np.sum) np.testing.assert_array_equal(ccd.data, ccd2.data) - np.testing.assert_array_equal( - ccd.uncertainty.array, ccd2.uncertainty.array) + np.testing.assert_array_equal(ccd.uncertainty.array, ccd2.uncertainty.array) # test the optional uncertainty function in sum_combine @@ -575,20 +576,20 @@ def test_sum_combine_uncertainty(): np.testing.assert_almost_equal(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method='sum', combine_uncertainty_function=np.sum) + ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=np.sum) np.testing.assert_array_equal(ccd.data, ccd2.data) - np.testing.assert_array_equal( - ccd.uncertainty.array, ccd2.uncertainty.array) + np.testing.assert_array_equal(ccd.uncertainty.array, ccd2.uncertainty.array) # Ignore warnings generated because most values are masked @pytest.mark.filterwarnings( - 'ignore:Mean of empty slice:RuntimeWarning', - 'ignore:Degrees of freedom <= 0:RuntimeWarning' + "ignore:Mean of empty slice:RuntimeWarning", + "ignore:Degrees of freedom <= 0:RuntimeWarning", +) +@pytest.mark.parametrize("mask_point", [True, False]) 
+@pytest.mark.parametrize( + "comb_func", ["average_combine", "median_combine", "sum_combine"] ) -@pytest.mark.parametrize('mask_point', [True, False]) -@pytest.mark.parametrize('comb_func', - ['average_combine', 'median_combine', 'sum_combine']) def test_combine_result_uncertainty_and_mask(comb_func, mask_point): # Regression test for #774 # Turns out combine does not return an uncertainty or mask if the input @@ -612,13 +613,15 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): expected_result = getattr(c, comb_func)() # Just need the first part of the name for the combine function - combine_method_name = comb_func.split('_')[0] + combine_method_name = comb_func.split("_")[0] - ccd_comb = combine(ccd_list, method=combine_method_name, - minmax_clip=True, minmax_clip_min=-100) + ccd_comb = combine( + ccd_list, method=combine_method_name, minmax_clip=True, minmax_clip_min=-100 + ) - np.testing.assert_array_almost_equal(ccd_comb.uncertainty.array, - expected_result.uncertainty.array) + np.testing.assert_array_almost_equal( + ccd_comb.uncertainty.array, expected_result.uncertainty.array + ) # Check that the right point is masked, and only one point is # masked @@ -635,7 +638,7 @@ def test_combine_overwrite_output(tmp_path): """ output_file = tmp_path / "fake.fits" - ccd = CCDData(np.ones((3, 3)), unit='adu') + ccd = CCDData(np.ones((3, 3)), unit="adu") # Make sure we have a file to overwrite ccd.write(output_file) @@ -645,9 +648,9 @@ def test_combine_overwrite_output(tmp_path): # Should be no error here... # The default dtype of Combiner is float64 - res = combine([ccd, ccd.multiply(2)], - output_file=output_file, - overwrite_output=True) + res = combine( + [ccd, ccd.multiply(2)], output_file=output_file, overwrite_output=True + ) res_from_disk = CCDData.read(output_file) @@ -657,16 +660,17 @@ def test_combine_overwrite_output(tmp_path): # test resulting uncertainty is corrected for the number of images def test_combiner_uncertainty_average(): - ccd_list = [CCDData(np.ones((10, 10)), unit=u.adu), - CCDData(np.ones((10, 10)) * 2, unit=u.adu)] + ccd_list = [ + CCDData(np.ones((10, 10)), unit=u.adu), + CCDData(np.ones((10, 10)) * 2, unit=u.adu), + ] c = Combiner(ccd_list) ccd = c.average_combine() # Just the standard deviation of ccd data. ref_uncertainty = np.ones((10, 10)) / 2 # Correction because we combined two images. ref_uncertainty /= np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, - ref_uncertainty) + np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -674,9 +678,11 @@ def test_combiner_uncertainty_average_mask(): mask = np.zeros((10, 10), dtype=np.bool_) mask[5, 5] = True ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) - ccd_list = [ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu)] + ccd_list = [ + ccd_with_mask, + CCDData(np.ones((10, 10)) * 2, unit=u.adu), + CCDData(np.ones((10, 10)) * 3, unit=u.adu), + ] c = Combiner(ccd_list) ccd = c.average_combine() # Just the standard deviation of ccd data. @@ -684,8 +690,7 @@ def test_combiner_uncertainty_average_mask(): # Correction because we combined two images. 
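    # (Added note: three images were stacked here, so the standard error of
    # the mean scales by 1/sqrt(3); at the masked pixel [5, 5] only two
    # values survive, hence the sqrt(2) used there on the following line.)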
ref_uncertainty /= np.sqrt(3) ref_uncertainty[5, 5] = np.std([2, 3]) / np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, - ref_uncertainty) + np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -694,19 +699,19 @@ def test_combiner_uncertainty_median_mask(): mask = np.zeros((10, 10), dtype=np.bool_) mask[5, 5] = True ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) - ccd_list = [ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu)] + ccd_list = [ + ccd_with_mask, + CCDData(np.ones((10, 10)) * 2, unit=u.adu), + CCDData(np.ones((10, 10)) * 3, unit=u.adu), + ] c = Combiner(ccd_list) ccd = c.median_combine() # Just the standard deviation of ccd data. ref_uncertainty = np.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3]) # Correction because we combined two images. ref_uncertainty /= np.sqrt(3) # 0.855980789955 - ref_uncertainty[5, 5] = mad_to_sigma * \ - mad([2, 3]) / np.sqrt(2) # 0.524179041254 - np.testing.assert_array_almost_equal(ccd.uncertainty.array, - ref_uncertainty) + ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2) # 0.524179041254 + np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -714,17 +719,18 @@ def test_combiner_uncertainty_sum_mask(): mask = np.zeros((10, 10), dtype=np.bool_) mask[5, 5] = True ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) - ccd_list = [ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu)] + ccd_list = [ + ccd_with_mask, + CCDData(np.ones((10, 10)) * 2, unit=u.adu), + CCDData(np.ones((10, 10)) * 3, unit=u.adu), + ] c = Combiner(ccd_list) ccd = c.sum_combine() # Just the standard deviation of ccd data. ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3]) ref_uncertainty *= np.sqrt(3) ref_uncertainty[5, 5] = np.std([2, 3]) * np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, - ref_uncertainty) + np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) def test_combiner_3d(): @@ -757,30 +763,28 @@ def test_3d_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np.testing.assert_almost_equal(avg_ccd.data.mean(), - ccd_data.data.mean()) + np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np.testing.assert_almost_equal(np.median(median_ccd.data), - np.median(ccd_data.data)) + np.testing.assert_almost_equal(np.median(median_ccd.data), np.median(ccd_data.data)) # Set the scaling manually... 
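    # (Added note: same pattern as the 2-D scaling test earlier in this
    # file -- a precomputed list of per-image factors replaces the callable.)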
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np.testing.assert_almost_equal(avg_ccd.data.mean(), - ccd_data.data.mean()) + np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape def test_clip_extrema_3d(): - ccdlist = [CCDData(np.ones((3, 3, 3)) * 90., unit="adu"), - CCDData(np.ones((3, 3, 3)) * 20., unit="adu"), - CCDData(np.ones((3, 3, 3)) * 10., unit="adu"), - CCDData(np.ones((3, 3, 3)) * 40., unit="adu"), - CCDData(np.ones((3, 3, 3)) * 25., unit="adu"), - CCDData(np.ones((3, 3, 3)) * 35., unit="adu"), - ] + ccdlist = [ + CCDData(np.ones((3, 3, 3)) * 90.0, unit="adu"), + CCDData(np.ones((3, 3, 3)) * 20.0, unit="adu"), + CCDData(np.ones((3, 3, 3)) * 10.0, unit="adu"), + CCDData(np.ones((3, 3, 3)) * 40.0, unit="adu"), + CCDData(np.ones((3, 3, 3)) * 25.0, unit="adu"), + CCDData(np.ones((3, 3, 3)) * 35.0, unit="adu"), + ] c = Combiner(ccdlist) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() @@ -788,12 +792,14 @@ def test_clip_extrema_3d(): np.testing.assert_array_equal(result, expected) -@pytest.mark.parametrize('comb_func', - ['average_combine', 'median_combine', 'sum_combine']) +@pytest.mark.parametrize( + "comb_func", ["average_combine", "median_combine", "sum_combine"] +) def test_writeable_after_combine(tmpdir, comb_func): ccd_data = ccd_data_func() - tmp_file = tmpdir.join('tmp.fits') + tmp_file = tmpdir.join("tmp.fits") from ..combiner import Combiner + combined = Combiner([ccd_data for _ in range(3)]) ccd2 = getattr(combined, comb_func)() # This should not fail because the resulting uncertainty has a mask @@ -801,51 +807,63 @@ def test_writeable_after_combine(tmpdir, comb_func): def test_clip_extrema(): - ccdlist = [CCDData(np.ones((3, 5)) * 90., unit="adu"), - CCDData(np.ones((3, 5)) * 20., unit="adu"), - CCDData(np.ones((3, 5)) * 10., unit="adu"), - CCDData(np.ones((3, 5)) * 40., unit="adu"), - CCDData(np.ones((3, 5)) * 25., unit="adu"), - CCDData(np.ones((3, 5)) * 35., unit="adu"), - ] + ccdlist = [ + CCDData(np.ones((3, 5)) * 90.0, unit="adu"), + CCDData(np.ones((3, 5)) * 20.0, unit="adu"), + CCDData(np.ones((3, 5)) * 10.0, unit="adu"), + CCDData(np.ones((3, 5)) * 40.0, unit="adu"), + CCDData(np.ones((3, 5)) * 25.0, unit="adu"), + CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + ] ccdlist[0].data[0, 1] = 3.1 ccdlist[1].data[1, 2] = 100.1 ccdlist[1].data[2, 0] = 100.1 c = Combiner(ccdlist) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() - expected = [[30.0, 22.5, 30.0, 30.0, 30.0], - [30.0, 30.0, 47.5, 30.0, 30.0], - [47.5, 30.0, 30.0, 30.0, 30.0]] + expected = [ + [30.0, 22.5, 30.0, 30.0, 30.0], + [30.0, 30.0, 47.5, 30.0, 30.0], + [47.5, 30.0, 30.0, 30.0, 30.0], + ] np.testing.assert_array_equal(result, expected) def test_clip_extrema_via_combine(): - ccdlist = [CCDData(np.ones((3, 5)) * 90., unit="adu"), - CCDData(np.ones((3, 5)) * 20., unit="adu"), - CCDData(np.ones((3, 5)) * 10., unit="adu"), - CCDData(np.ones((3, 5)) * 40., unit="adu"), - CCDData(np.ones((3, 5)) * 25., unit="adu"), - CCDData(np.ones((3, 5)) * 35., unit="adu"), - ] + ccdlist = [ + CCDData(np.ones((3, 5)) * 90.0, unit="adu"), + CCDData(np.ones((3, 5)) * 20.0, unit="adu"), + CCDData(np.ones((3, 5)) * 10.0, unit="adu"), + CCDData(np.ones((3, 5)) * 40.0, unit="adu"), + CCDData(np.ones((3, 5)) * 25.0, unit="adu"), + CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + ] ccdlist[0].data[0, 1] = 3.1 ccdlist[1].data[1, 2] = 100.1 ccdlist[1].data[2, 0] = 100.1 - result = 
combine(ccdlist, clip_extrema=True, nlow=1, nhigh=1,) - expected = [[30.0, 22.5, 30.0, 30.0, 30.0], - [30.0, 30.0, 47.5, 30.0, 30.0], - [47.5, 30.0, 30.0, 30.0, 30.0]] + result = combine( + ccdlist, + clip_extrema=True, + nlow=1, + nhigh=1, + ) + expected = [ + [30.0, 22.5, 30.0, 30.0, 30.0], + [30.0, 30.0, 47.5, 30.0, 30.0], + [47.5, 30.0, 30.0, 30.0, 30.0], + ] np.testing.assert_array_equal(result, expected) def test_clip_extrema_with_other_rejection(): - ccdlist = [CCDData(np.ones((3, 5)) * 90., unit="adu"), - CCDData(np.ones((3, 5)) * 20., unit="adu"), - CCDData(np.ones((3, 5)) * 10., unit="adu"), - CCDData(np.ones((3, 5)) * 40., unit="adu"), - CCDData(np.ones((3, 5)) * 25., unit="adu"), - CCDData(np.ones((3, 5)) * 35., unit="adu"), - ] + ccdlist = [ + CCDData(np.ones((3, 5)) * 90.0, unit="adu"), + CCDData(np.ones((3, 5)) * 20.0, unit="adu"), + CCDData(np.ones((3, 5)) * 10.0, unit="adu"), + CCDData(np.ones((3, 5)) * 40.0, unit="adu"), + CCDData(np.ones((3, 5)) * 25.0, unit="adu"), + CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + ] ccdlist[0].data[0, 1] = 3.1 ccdlist[1].data[1, 2] = 100.1 ccdlist[1].data[2, 0] = 100.1 @@ -857,20 +875,25 @@ def test_clip_extrema_with_other_rejection(): c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() - expected = [[80. / 3., 22.5, 30., 30., 30.], - [30., 30., 47.5, 30., 30.], - [47.5, 30., 30., 30., 30.]] + expected = [ + [80.0 / 3.0, 22.5, 30.0, 30.0, 30.0], + [30.0, 30.0, 47.5, 30.0, 30.0], + [47.5, 30.0, 30.0, 30.0, 30.0], + ] np.testing.assert_array_equal(result, expected) # The expected values below assume an image that is 2000x2000 -@pytest.mark.parametrize('num_chunks, expected', - [(53, (37, 2000)), - (1500, (1, 2000)), - (2001, (1, 1000)), - (2999, (1, 1000)), - (10000, (1, 333))] - ) +@pytest.mark.parametrize( + "num_chunks, expected", + [ + (53, (37, 2000)), + (1500, (1, 2000)), + (2001, (1, 1000)), + (2999, (1, 1000)), + (10000, (1, 333)), + ], +) def test_ystep_calculation(num_chunks, expected): # Regression test for # https://github.com/astropy/ccdproc/issues/639 @@ -880,19 +903,23 @@ def test_ystep_calculation(num_chunks, expected): xstep, ystep = _calculate_step_sizes(2000, 2000, num_chunks) assert xstep == expected[0] and ystep == expected[1] + def test_combiner_gen(): ccd_data = ccd_data_func() + def create_gen(): yield ccd_data yield ccd_data yield ccd_data + c = Combiner(create_gen()) assert c.data_arr.shape == (3, 100, 100) assert c.data_arr.mask.shape == (3, 100, 100) -@pytest.mark.parametrize('comb_func', - ['average_combine', 'median_combine', 'sum_combine']) +@pytest.mark.parametrize( + "comb_func", ["average_combine", "median_combine", "sum_combine"] +) def test_combiner_with_scaling_uncertainty(comb_func): # A regression test for #719, in which it was pointed out that the # uncertainty was not properly calculated from scaled data in @@ -909,26 +936,29 @@ def test_combiner_with_scaling_uncertainty(comb_func): scale_by_mean = lambda x: ccd_data.data.mean() / np.ma.average(x) combiner.scaling = scale_by_mean - scaled_ccds = np.array([ccd_data.data * scale_by_mean(ccd_data.data), - ccd_data_lower.data * scale_by_mean(ccd_data_lower.data), - ccd_data_higher.data * scale_by_mean(ccd_data_higher.data) - ]) + scaled_ccds = np.array( + [ + ccd_data.data * scale_by_mean(ccd_data.data), + ccd_data_lower.data * scale_by_mean(ccd_data_lower.data), + ccd_data_higher.data * scale_by_mean(ccd_data_higher.data), + ] + ) avg_ccd = getattr(combiner, comb_func)() - if comb_func != 'median_combine': + if comb_func != "median_combine": 
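For orientation, clip_extrema operates pixel by pixel down the stack: at each pixel it rejects the nlow smallest and nhigh largest values before the subsequent combine. The expected arrays in the clip_extrema tests above can be reproduced with a short NumPy sketch (illustrative only, not ccdproc's implementation):

import numpy as np

def trimmed_mean(pixel_stack, nlow=1, nhigh=1):
    # Sort the values one pixel takes across the stack and drop the
    # nlow smallest and nhigh largest before averaging the rest.
    kept = np.sort(np.asarray(pixel_stack))[nlow:len(pixel_stack) - nhigh]
    return kept.mean()

print(trimmed_mean([90, 20, 10, 40, 25, 35]))     # 30.0, the unperturbed pixels
print(trimmed_mean([3.1, 20, 10, 40, 25, 35]))    # 22.5, pixel [0, 1]
print(trimmed_mean([90, 100.1, 10, 40, 25, 35]))  # 47.5, pixels [1, 2] and [2, 0]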
uncertainty_func = _default_std() else: uncertainty_func = sigma_func expected_unc = uncertainty_func(scaled_ccds, axis=0) - np.testing.assert_almost_equal(avg_ccd.uncertainty.array, - expected_unc) + np.testing.assert_almost_equal(avg_ccd.uncertainty.array, expected_unc) -@pytest.mark.parametrize('comb_func', - ['average_combine', 'median_combine', 'sum_combine']) +@pytest.mark.parametrize( + "comb_func", ["average_combine", "median_combine", "sum_combine"] +) def test_user_supplied_combine_func_that_relies_on_masks(comb_func): # Test to make sure that setting some values to NaN internally # does not affect results when the user supplies a function that @@ -936,7 +966,7 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func): data = np.ones((10, 10)) data[5, 5] = 2 - mask = (data == 2) + mask = data == 2 ccd = CCDData(data, unit=u.adu, mask=mask) # Same, but no mask ccd2 = CCDData(data, unit=u.adu) @@ -944,13 +974,13 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func): ccd_list = [ccd, ccd, ccd2] c = Combiner(ccd_list) - if comb_func == 'sum_combine': + if comb_func == "sum_combine": expected_result = 3 * data actual_result = c.sum_combine(sum_func=np.ma.sum) - elif comb_func == 'average_combine': + elif comb_func == "average_combine": expected_result = data actual_result = c.average_combine(scale_func=np.ma.mean) - elif comb_func == 'median_combine': + elif comb_func == "median_combine": expected_result = data actual_result = c.median_combine(median_func=np.ma.median) @@ -958,5 +988,4 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func): # method is the result in this pixel should be 2. expected_result[5, 5] = 2 - np.testing.assert_almost_equal(expected_result, - actual_result) + np.testing.assert_almost_equal(expected_result, actual_result) diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index 278cd177..915f83cd 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -16,16 +16,21 @@ print("Oh no, no working astroscrappy") pytest.skip("skipping astroscrappy tests", allow_module_level=True) -from ccdproc.core import (cosmicray_lacosmic, cosmicray_median, - background_deviation_box, background_deviation_filter) +from ccdproc.core import ( + cosmicray_lacosmic, + cosmicray_median, + background_deviation_box, + background_deviation_filter, +) from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func DATA_SCALE = 5.3 NCRAYS = 30 -OLD_ASTROSCRAPPY = (packaging.version.parse(asy_version) < - packaging.version.parse('1.1.0')) +OLD_ASTROSCRAPPY = packaging.version.parse(asy_version) < packaging.version.parse( + "1.1.0" +) def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): @@ -34,8 +39,7 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): crrays = np.random.randint(0, size, size=(ncrays, 2)) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns - crflux = (10 * scale * np.random.random(NCRAYS) + - (threshold + 15) * scale) + crflux = 10 * scale * np.random.random(NCRAYS) + (threshold + 15) * scale for i in range(ncrays): y, x = crrays[i] data.data[y, x] = crflux[i] @@ -75,8 +79,8 @@ def test_cosmicray_lacosmic_check_data(): cosmicray_lacosmic(10, noise) -@pytest.mark.parametrize('array_input', [True, False]) -@pytest.mark.parametrize('gain_correct_data', [True, False]) +@pytest.mark.parametrize("array_input", [True, False]) +@pytest.mark.parametrize("gain_correct_data", [True, 
False]) def test_cosmicray_gain_correct(array_input, gain_correct_data): # Add regression check for #705 and for the new gain_correct # argument. @@ -94,13 +98,11 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): # make lack of units explicit. readnoise = 6.5 if array_input: - new_data, cr_mask = cosmicray_lacosmic(ccd_data.data, - gain=gain, - gain_apply=gain_correct_data) + new_data, cr_mask = cosmicray_lacosmic( + ccd_data.data, gain=gain, gain_apply=gain_correct_data + ) else: - new_ccd = cosmicray_lacosmic(ccd_data, - gain=gain, - gain_apply=gain_correct_data) + new_ccd = cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=gain_correct_data) new_data = new_ccd.data cr_mask = new_ccd.mask # Fill masked locations with 0 since there is no simple relationship @@ -126,9 +128,7 @@ def test_cosmicray_lacosmic_accepts_quantity_gain(): # Since gain and ccd_data have units, the readnoise should too. readnoise = 6.5 * u.electron - new_ccd = cosmicray_lacosmic(ccd_data, - gain=gain, - gain_apply=True) + new_ccd = cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=True) def test_cosmicray_lacosmic_accepts_quantity_readnoise(): @@ -140,10 +140,9 @@ def test_cosmicray_lacosmic_accepts_quantity_readnoise(): gain = 2.0 * u.electron / u.adu # The units below are the point of this test readnoise = 6.5 * u.electron - new_ccd = cosmicray_lacosmic(ccd_data, - gain=gain, - gain_apply=True, - readnoise=readnoise) + new_ccd = cosmicray_lacosmic( + ccd_data, gain=gain, gain_apply=True, readnoise=readnoise + ) def test_cosmicray_lacosmic_detects_inconsistent_units(): @@ -151,7 +150,7 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): # of adu, a readnoise in electrons and a gain in adu / electron. # That is not internally inconsistent. ccd_data = ccd_data_func(data_scale=DATA_SCALE) - ccd_data.unit = 'adu' + ccd_data.unit = "adu" threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * np.ones_like(ccd_data.data) @@ -161,18 +160,18 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): # The units below are deliberately incorrect. gain = 2.0 * u.adu / u.electron with pytest.raises(ValueError) as e: - cosmicray_lacosmic(ccd_data, - gain=gain, - gain_apply=True, - readnoise=readnoise) - assert 'Inconsistent units' in str(e.value) + cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=True, readnoise=readnoise) + assert "Inconsistent units" in str(e.value) if OLD_ASTROSCRAPPY: - decorator = pytest.mark.filterwarnings("ignore:`np.bool` is a deprecated alias:DeprecationWarning") + decorator = pytest.mark.filterwarnings( + "ignore:`np.bool` is a deprecated alias:DeprecationWarning" + ) else: decorator = lambda f: f + @decorator def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(): # Check that an input ccd in electrons raises a warning. @@ -190,25 +189,21 @@ def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(): # make lack of units explicit. 
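The unit handling these gain tests pin down reduces to one consistency rule: data in adu multiplied by a gain in electron / adu must land in the same unit as the read noise, and a deliberately inverted gain (adu / electron) must fail. A quick astropy.units sanity check of that rule (a sketch, independent of ccdproc):

import astropy.units as u

gain = 2.0 * u.electron / u.adu
readnoise = 6.5 * u.electron

# Gain-corrected data must be expressible in the read-noise unit; with
# the inverted gain used in
# test_cosmicray_lacosmic_detects_inconsistent_units this check fails.
assert ((1.0 * u.adu) * gain).unit.is_equivalent(readnoise.unit)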
readnoise = 6.5 with pytest.warns(UserWarning, match="Image unit is electron"): - cosmicray_lacosmic( - ccd_data, - gain=gain, - gain_apply=True, - readnoise=readnoise - ) + cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=True, readnoise=readnoise) # The skip can be removed when the oldest supported astroscrappy # is 1.1.0 or higher -@pytest.mark.skipif(OLD_ASTROSCRAPPY, - reason='astroscrappy < 1.1.0 does not support ' - 'this functionality') +@pytest.mark.skipif( + OLD_ASTROSCRAPPY, + reason="astroscrappy < 1.1.0 does not support " "this functionality", +) # The values for inbkg and invar are DELIBERATELY BAD. They are supposed to be # arrays, so if detect_cosmics is called with these bad values a ValueError # will be raised, which we can check for. -@pytest.mark.parametrize('new_args', [dict(inbkg=5), - dict(invar=5), - dict(inbkg=5, invar=5)]) +@pytest.mark.parametrize( + "new_args", [dict(inbkg=5), dict(invar=5), dict(inbkg=5, invar=5)] +) def test_cosmicray_lacosmic_invar_inbkg(new_args): # This IS NOT TESTING FUNCTIONALITY it is simply testing # that calling with the new keyword arguments to astroscrappy @@ -220,22 +215,21 @@ def test_cosmicray_lacosmic_invar_inbkg(new_args): ccd_data.uncertainty = noise with pytest.raises(TypeError): - nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, - **new_args) + nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, **new_args) def test_cosmicray_median_check_data(): with pytest.raises(TypeError): - ndata, crarr = cosmicray_median(10, thresh=5, mbox=11, - error_image=DATA_SCALE) + ndata, crarr = cosmicray_median(10, thresh=5, mbox=11, error_image=DATA_SCALE) def test_cosmicray_median(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - ndata, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11, - error_image=DATA_SCALE) + ndata, crarr = cosmicray_median( + ccd_data.data, thresh=5, mbox=11, error_image=DATA_SCALE + ) # check the number of cosmic rays detected assert crarr.sum() == NCRAYS @@ -245,9 +239,8 @@ def test_cosmicray_median_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - ccd_data.uncertainty = ccd_data.data*0.0+DATA_SCALE - nccd = cosmicray_median(ccd_data, thresh=5, mbox=11, - error_image=None) + ccd_data.uncertainty = ccd_data.data * 0.0 + DATA_SCALE + nccd = cosmicray_median(ccd_data, thresh=5, mbox=11, error_image=None) # check the number of cosmic rays detected assert nccd.mask.sum() == NCRAYS @@ -258,8 +251,7 @@ def test_cosmicray_median_masked(): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) data = np.ma.masked_array(ccd_data.data, (ccd_data.data > -1e6)) - ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, - error_image=DATA_SCALE) + ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, error_image=DATA_SCALE) # check the number of cosmic rays detected assert crarr.sum() == NCRAYS @@ -269,8 +261,7 @@ def test_cosmicray_median_background_None(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - data, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11, - error_image=None) + data, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11, error_image=None) # check the number of cosmic rays detected assert crarr.sum() == NCRAYS @@ -281,9 +272,10 @@ def test_cosmicray_median_gbox(): scale = DATA_SCALE # yuck. 
Maybe use pytest.parametrize? threshold = 5 add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS) - error = ccd_data.data*0.0+DATA_SCALE - data, crarr = cosmicray_median(ccd_data.data, error_image=error, - thresh=5, mbox=11, rbox=0, gbox=5) + error = ccd_data.data * 0.0 + DATA_SCALE + data, crarr = cosmicray_median( + ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=0, gbox=5 + ) data = np.ma.masked_array(data, crarr) assert crarr.sum() > NCRAYS assert abs(data.std() - scale) < 0.1 @@ -294,9 +286,10 @@ def test_cosmicray_median_rbox(): scale = DATA_SCALE # yuck. Maybe use pytest.parametrize? threshold = 5 add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS) - error = ccd_data.data*0.0+DATA_SCALE - data, crarr = cosmicray_median(ccd_data.data, error_image=error, - thresh=5, mbox=11, rbox=21, gbox=5) + error = ccd_data.data * 0.0 + DATA_SCALE + data, crarr = cosmicray_median( + ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=21, gbox=5 + ) assert data[crarr].mean() < ccd_data.data[crarr].mean() assert crarr.sum() > NCRAYS @@ -304,8 +297,7 @@ def test_cosmicray_median_rbox(): def test_cosmicray_median_background_deviation(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) with pytest.raises(TypeError): - cosmicray_median(ccd_data.data, thresh=5, mbox=11, - error_image='blank') + cosmicray_median(ccd_data.data, thresh=5, mbox=11, error_image="blank") def test_background_deviation_box(): @@ -350,11 +342,10 @@ def test_cosmicray_lacosmic_pssl_deprecation_warning(): # Remaining tests can be removed when the oldest supported version # of astroscrappy is 1.1.0 or higher. -@pytest.mark.skipif(not OLD_ASTROSCRAPPY, - reason='Should succeed for new astroscrappy') -@pytest.mark.parametrize('bad_args', [dict(inbkg=5), - dict(invar=5), - dict(inbkg=5, invar=5)]) +@pytest.mark.skipif(not OLD_ASTROSCRAPPY, reason="Should succeed for new astroscrappy") +@pytest.mark.parametrize( + "bad_args", [dict(inbkg=5), dict(invar=5), dict(inbkg=5, invar=5)] +) def test_error_raised_lacosmic_old_interface_new_args(bad_args): ccd_data = ccd_data_func(data_scale=DATA_SCALE) with pytest.raises(TypeError) as err: @@ -364,8 +355,9 @@ def test_error_raised_lacosmic_old_interface_new_args(bad_args): assert all(check_message) -@pytest.mark.skipif(OLD_ASTROSCRAPPY, - reason='Test is of new interface compatibility layer') +@pytest.mark.skipif( + OLD_ASTROSCRAPPY, reason="Test is of new interface compatibility layer" +) def test_cosmicray_lacosmic_pssl_and_inbkg_fails(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) with pytest.raises(ValueError) as err: @@ -374,11 +366,12 @@ def test_cosmicray_lacosmic_pssl_and_inbkg_fails(): # The deprecation warning is expected and should be captured cosmicray_lacosmic(ccd_data, pssl=3, inbkg=ccd_data.data) - assert 'pssl and inbkg' in str(err) + assert "pssl and inbkg" in str(err) -@pytest.mark.skipif(OLD_ASTROSCRAPPY, - reason='Test is of new interface compatibility layer') +@pytest.mark.skipif( + OLD_ASTROSCRAPPY, reason="Test is of new interface compatibility layer" +) def test_cosmicray_lacosmic_pssl_does_not_fail(): # This test is a copy/paste of test_cosmicray_lacosmic_ccddata # except with pssl=0.0001 as an argument. 
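Behind the mbox / gbox / rbox knobs exercised above: mbox sets the median-filter box used for detection, gbox grows the cosmic-ray mask (the gbox test expects more flagged pixels), and rbox controls replacement of flagged pixels (the rbox test expects their mean to drop). The detection stage is essentially a thresholded median filter; a rough sketch with scipy, not ccdproc's actual implementation:

import numpy as np
from scipy import ndimage

def median_detect(data, error, thresh=5, mbox=11):
    # Flag pixels that sit more than thresh * error above the local
    # median -- the detection idea that cosmicray_median builds on.
    local_median = ndimage.median_filter(data, size=mbox)
    return (data - local_median) > thresh * error

data = np.zeros((20, 20))
data[5, 5] = 100.0  # a single cosmic-ray-like spike
print(median_detect(data, error=1.0).sum())  # 1: only the spike is flagged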
Subtracting nearly zero from @@ -392,8 +385,7 @@ def test_cosmicray_lacosmic_pssl_does_not_fail(): ccd_data.uncertainty = noise with pytest.warns(AstropyDeprecationWarning): # The deprecation warning is expected and should be captured - nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, - pssl=0.0001) + nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, pssl=0.0001) # check the number of cosmic rays detected # Note that to get this to succeed reliably meant tuning diff --git a/ccdproc/tests/test_gain.py b/ccdproc/tests/test_gain.py index 756c0959..4791a25f 100644 --- a/ccdproc/tests/test_gain.py +++ b/ccdproc/tests/test_gain.py @@ -10,22 +10,26 @@ # tests for gain -@pytest.mark.parametrize('gain', [ - 3.0, - 3.0 * u.photon / u.adu, - 3.0 * u.electron / u.adu, - Keyword('gainval', unit=u.electron / u.adu)]) +@pytest.mark.parametrize( + "gain", + [ + 3.0, + 3.0 * u.photon / u.adu, + 3.0 * u.electron / u.adu, + Keyword("gainval", unit=u.electron / u.adu), + ], +) def test_linear_gain_correct(gain): ccd_data = ccd_data_func() # The data values should be positive, so the poisson noise calculation # works without throwing warnings ccd_data.data = np.absolute(ccd_data.data) ccd_data = create_deviation(ccd_data, readnoise=1.0 * u.adu) - ccd_data.meta['gainval'] = 3.0 + ccd_data.meta["gainval"] = 3.0 orig_data = ccd_data.data ccd = gain_correct(ccd_data, gain) if isinstance(gain, Keyword): - gain = gain.value # convert to Quantity... + gain = gain.value # convert to Quantity... try: gain_value = gain.value except AttributeError: @@ -33,7 +37,8 @@ def test_linear_gain_correct(gain): np.testing.assert_array_almost_equal_nulp(ccd.data, gain_value * orig_data) np.testing.assert_array_almost_equal_nulp( - ccd.uncertainty.array, gain_value * ccd_data.uncertainty.array) + ccd.uncertainty.array, gain_value * ccd_data.uncertainty.array + ) if isinstance(gain, u.Quantity): assert ccd.unit == ccd_data.unit * gain.unit @@ -55,5 +60,6 @@ def test_linear_gain_unit_keyword(): ccd = gain_correct(ccd_data, gain, gain_unit=gain_unit) np.testing.assert_array_almost_equal_nulp(ccd.data, gain * orig_data) np.testing.assert_array_almost_equal_nulp( - ccd.uncertainty.array, gain * ccd_data.uncertainty.array) + ccd.uncertainty.array, gain * ccd_data.uncertainty.array + ) assert ccd.unit == ccd_data.unit * gain_unit diff --git a/ccdproc/tests/test_image_collection.py b/ccdproc/tests/test_image_collection.py index 16abb828..7cdf92eb 100644 --- a/ccdproc/tests/test_image_collection.py +++ b/ccdproc/tests/test_image_collection.py @@ -21,73 +21,79 @@ from ccdproc.image_collection import ImageFileCollection _filters = [] -_original_dir = '' +_original_dir = "" def test_fits_summary(triage_setup): - keywords = ['imagetyp', 'filter'] - ic = ImageFileCollection(triage_setup.test_dir, - keywords=keywords) + keywords = ["imagetyp", "filter"] + ic = ImageFileCollection(triage_setup.test_dir, keywords=keywords) summary = ic._fits_summary(header_keywords=keywords) - assert len(summary['file']) == triage_setup.n_test['files'] + assert len(summary["file"]) == triage_setup.n_test["files"] for keyword in keywords: - assert len(summary[keyword]) == triage_setup.n_test['files'] + assert len(summary[keyword]) == triage_setup.n_test["files"] # Explicit conversion to array is needed to avoid astropy Table bug in # 0.2.4 - no_filter_no_object_row = np.array(summary['file'] == - 'no_filter_no_object_bias.fit') + no_filter_no_object_row = np.array( + summary["file"] == "no_filter_no_object_bias.fit" + ) # There should be no filter keyword in 
the bias file - assert summary['filter'][no_filter_no_object_row].mask + assert summary["filter"][no_filter_no_object_row].mask class TestImageFileCollectionRepresentation: def test_repr_location(self, triage_setup): ic = ImageFileCollection(location=triage_setup.test_dir) assert repr(ic) == "ImageFileCollection(location={0!r})".format( - triage_setup.test_dir) + triage_setup.test_dir + ) def test_repr_keywords(self, triage_setup): - ic = ImageFileCollection( - location=triage_setup.test_dir, keywords=['imagetyp']) - ref = ("ImageFileCollection(location={0!r}, keywords=['imagetyp'])" - .format(triage_setup.test_dir)) + ic = ImageFileCollection(location=triage_setup.test_dir, keywords=["imagetyp"]) + ref = "ImageFileCollection(location={0!r}, keywords=['imagetyp'])".format( + triage_setup.test_dir + ) assert repr(ic) == ref def test_repr_globs(self, triage_setup): ic = ImageFileCollection( - location=triage_setup.test_dir, glob_exclude="*no_filter*", - glob_include="*object_light*") - ref = ("ImageFileCollection(location={0!r}, " - "glob_include='*object_light*', " - "glob_exclude='*no_filter*')" - .format(triage_setup.test_dir)) + location=triage_setup.test_dir, + glob_exclude="*no_filter*", + glob_include="*object_light*", + ) + ref = ( + "ImageFileCollection(location={0!r}, " + "glob_include='*object_light*', " + "glob_exclude='*no_filter*')".format(triage_setup.test_dir) + ) assert repr(ic) == ref def test_repr_files(self, triage_setup): ic = ImageFileCollection( location=triage_setup.test_dir, - filenames=['no_filter_no_object_light.fit', - 'no_filter_no_object_bias.fit']) - ref = ("ImageFileCollection(location={0!r}, " - "filenames=['no_filter_no_object_light.fit', " - "'no_filter_no_object_bias.fit'])" - .format(triage_setup.test_dir)) + filenames=["no_filter_no_object_light.fit", "no_filter_no_object_bias.fit"], + ) + ref = ( + "ImageFileCollection(location={0!r}, " + "filenames=['no_filter_no_object_light.fit', " + "'no_filter_no_object_bias.fit'])".format(triage_setup.test_dir) + ) assert repr(ic) == ref def test_repr_ext(self, triage_setup): - hdul = fits.HDUList([fits.PrimaryHDU(np.ones((10, 10))), - fits.ImageHDU(np.ones((10, 10)))]) - hdul.writeto(os.path.join(triage_setup.test_dir, 'mef.fits')) + hdul = fits.HDUList( + [fits.PrimaryHDU(np.ones((10, 10))), fits.ImageHDU(np.ones((10, 10)))] + ) + hdul.writeto(os.path.join(triage_setup.test_dir, "mef.fits")) ic = ImageFileCollection( - location=triage_setup.test_dir, - filenames=['mef.fits'], - ext=1) - ref = ("ImageFileCollection(location={0!r}, " - "filenames=['mef.fits'], " - "ext=1)" - .format(triage_setup.test_dir)) + location=triage_setup.test_dir, filenames=["mef.fits"], ext=1 + ) + ref = ( + "ImageFileCollection(location={0!r}, " + "filenames=['mef.fits'], " + "ext=1)".format(triage_setup.test_dir) + ) assert repr(ic) == ref @@ -107,72 +113,75 @@ def _setup_logger(self, path, level=logging.WARN): def test_filter_files(self, triage_setup): img_collection = ImageFileCollection( - location=triage_setup.test_dir, keywords=['imagetyp', 'filter']) - assert len(img_collection.files_filtered( - imagetyp='bias')) == triage_setup.n_test['bias'] - assert len(img_collection.files) == triage_setup.n_test['files'] - assert ('filter' in img_collection.keywords) - assert ('flying monkeys' not in img_collection.keywords) - assert len(img_collection.values('imagetyp', unique=True)) == 2 + location=triage_setup.test_dir, keywords=["imagetyp", "filter"] + ) + assert ( + len(img_collection.files_filtered(imagetyp="bias")) + == 
triage_setup.n_test["bias"] + ) + assert len(img_collection.files) == triage_setup.n_test["files"] + assert "filter" in img_collection.keywords + assert "flying monkeys" not in img_collection.keywords + assert len(img_collection.values("imagetyp", unique=True)) == 2 def test_filter_files_whitespace_keys(self, triage_setup): - hdr = fits.Header([('HIERARCH a b', 2)]) + hdr = fits.Header([("HIERARCH a b", 2)]) hdul = fits.HDUList([fits.PrimaryHDU(np.ones((10, 10)), header=hdr)]) - hdul.writeto(os.path.join(triage_setup.test_dir, - 'hdr_with_whitespace.fits')) + hdul.writeto(os.path.join(triage_setup.test_dir, "hdr_with_whitespace.fits")) ic = ImageFileCollection(location=triage_setup.test_dir) # Using a dictionary and unpacking it should work - filtered = ic.files_filtered(**{'a b': 2}) + filtered = ic.files_filtered(**{"a b": 2}) assert len(filtered) == 1 - assert 'hdr_with_whitespace.fits' in filtered + assert "hdr_with_whitespace.fits" in filtered # Also check it's working with generators: - for _, filename in ic.data(a_b=2, replace_='_', - return_fname=True): - assert filename == 'hdr_with_whitespace.fits' + for _, filename in ic.data(a_b=2, replace_="_", return_fname=True): + assert filename == "hdr_with_whitespace.fits" def test_filter_files_with_str_on_nonstr_column(self, triage_setup): ic = ImageFileCollection(location=triage_setup.test_dir) # Filtering an integer column with a string - filtered = ic.files_filtered(naxis='2') + filtered = ic.files_filtered(naxis="2") assert len(filtered) == 0 def test_filter_fz_files(self, triage_setup): - fn = 'test.fits.fz' + fn = "test.fits.fz" ic = ImageFileCollection(location=triage_setup.test_dir, filenames=fn) # Get a subset of files with a specific header value filtered = ic.files_filtered(exposure=15.0) assert len(filtered) == 1 def test_filtered_files_have_proper_path(self, triage_setup): - ic = ImageFileCollection(location=triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(location=triage_setup.test_dir, keywords="*") # Get a subset of the files. - plain_biases = ic.files_filtered(imagetyp='bias') + plain_biases = ic.files_filtered(imagetyp="bias") # Force a copy... plain_biases = list(plain_biases) # Same subset, but with full path. - path_biases = ic.files_filtered(imagetyp='bias', include_path=True) + path_biases = ic.files_filtered(imagetyp="bias", include_path=True) for path_b, plain_b in zip(path_biases, plain_biases): # If the path munging has been done properly, this will succeed. 
assert os.path.basename(path_b) == plain_b def test_filenames_are_set_properly(self, triage_setup): - fn = ['filter_no_object_bias.fit', 'filter_object_light_foo.fit'] + fn = ["filter_no_object_bias.fit", "filter_object_light_foo.fit"] img_collection = ImageFileCollection( - location=triage_setup.test_dir, filenames=fn, keywords=['filter']) + location=triage_setup.test_dir, filenames=fn, keywords=["filter"] + ) assert img_collection.files == fn img_collection.refresh() assert img_collection.files == fn - fn = 'filter_no_object_bias.fit' + fn = "filter_no_object_bias.fit" img_collection = ImageFileCollection( - location=triage_setup.test_dir, filenames=fn, keywords=['filter']) + location=triage_setup.test_dir, filenames=fn, keywords=["filter"] + ) assert img_collection.files == [fn] def test_keywords_deleter(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(triage_setup.test_dir, keywords="*") assert ic.keywords != [] del ic.keywords @@ -180,27 +189,31 @@ def test_keywords_deleter(self, triage_setup): def test_files_with_compressed(self, triage_setup): collection = ImageFileCollection(location=triage_setup.test_dir) - assert len(collection._fits_files_in_directory( - compressed=True)) == triage_setup.n_test['files'] + assert ( + len(collection._fits_files_in_directory(compressed=True)) + == triage_setup.n_test["files"] + ) def test_files_with_no_compressed(self, triage_setup): collection = ImageFileCollection(location=triage_setup.test_dir) - n_files_found = len( - collection._fits_files_in_directory(compressed=False)) - n_uncompressed = (triage_setup.n_test['files'] - - triage_setup.n_test['compressed']) + n_files_found = len(collection._fits_files_in_directory(compressed=False)) + n_uncompressed = ( + triage_setup.n_test["files"] - triage_setup.n_test["compressed"] + ) assert n_files_found == n_uncompressed def test_generator_full_path(self, triage_setup): collection = ImageFileCollection( - location=triage_setup.test_dir, keywords=['imagetyp']) + location=triage_setup.test_dir, keywords=["imagetyp"] + ) for path, file_name in zip(collection._paths(), collection.files): assert path == os.path.join(triage_setup.test_dir, file_name) def test_hdus(self, triage_setup): collection = ImageFileCollection( - location=triage_setup.test_dir, keywords=['imagetyp']) + location=triage_setup.test_dir, keywords=["imagetyp"] + ) n_hdus = 0 for hdu in collection.hdus(): @@ -209,41 +222,46 @@ def test_hdus(self, triage_setup): # pre-astropy 1.1 unsigned data was changed to float32 and BZERO # removed. In 1.1 and later, BZERO stays but the data type is # unsigned int. 
- assert (('BZERO' not in hdu.header) or - (data.dtype is np.dtype(np.uint16))) + assert ("BZERO" not in hdu.header) or (data.dtype is np.dtype(np.uint16)) n_hdus += 1 - assert n_hdus == triage_setup.n_test['files'] + assert n_hdus == triage_setup.n_test["files"] def test_hdus_masking(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp', 'exposure']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp", "exposure"] + ) old_data = np.array(collection.summary) - for hdu in collection.hdus(imagetyp='bias'): + for hdu in collection.hdus(imagetyp="bias"): pass new_data = np.array(collection.summary) assert (new_data == old_data).all() - @pytest.mark.parametrize('extension', ['TESTEXT', 1, ('TESTEXT', 1)]) + @pytest.mark.parametrize("extension", ["TESTEXT", 1, ("TESTEXT", 1)]) def test_multiple_extensions(self, triage_setup, extension): ext1 = fits.PrimaryHDU() ext1.data = np.arange(1, 5) # It is important than the name used for this test extension # NOT be MASK or UNCERT because both are treated in a special # way by the FITS reader. - test_ext_name = 'TESTEXT' + test_ext_name = "TESTEXT" ext2 = fits.ImageHDU(name=test_ext_name) ext2.data = np.arange(6, 10) hdulist = fits.hdu.hdulist.HDUList([ext1, ext2]) - hdulist.writeto(os.path.join(triage_setup.test_dir, - 'multi-extension.fits')) + hdulist.writeto(os.path.join(triage_setup.test_dir, "multi-extension.fits")) ic2 = ImageFileCollection( - triage_setup.test_dir, keywords='*', - filenames=['multi-extension.fits'], ext=extension) + triage_setup.test_dir, + keywords="*", + filenames=["multi-extension.fits"], + ext=extension, + ) ic1 = ImageFileCollection( triage_setup.test_dir, - keywords='*', filenames=['multi-extension.fits'], ext=0) + keywords="*", + filenames=["multi-extension.fits"], + ext=0, + ) assert ic1.ext == 0 assert ic2.ext == extension @@ -258,11 +276,10 @@ def test_multiple_extensions(self, triage_setup, extension): assert list1 == list2 - ccd_kwargs = {'unit': 'adu'} - for data, hdr, hdu, ccd in zip(ic2.data(), - ic2.headers(), - ic2.hdus(), - ic2.ccds(ccd_kwargs)): + ccd_kwargs = {"unit": "adu"} + for data, hdr, hdu, ccd in zip( + ic2.data(), ic2.headers(), ic2.hdus(), ic2.ccds(ccd_kwargs) + ): np.testing.assert_array_equal(data, ext2.data) assert hdr == ext2.header # Now compare that the generators each give the same stuff @@ -272,166 +289,185 @@ def test_multiple_extensions(self, triage_setup, extension): assert hdr == ccd.meta def test_headers(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) n_headers = 0 for header in collection.headers(): assert isinstance(header, fits.Header) - assert ('bzero' in header) + assert "bzero" in header n_headers += 1 - assert n_headers == triage_setup.n_test['files'] + assert n_headers == triage_setup.n_test["files"] def test_headers_save_location(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) destination = mkdtemp() for header in collection.headers(save_location=destination): pass - new_collection = ImageFileCollection(location=destination, - keywords=['imagetyp']) - basenames = lambda paths: set( - [os.path.basename(file) for file in paths]) - - assert 
(len(basenames(collection._paths()) - - basenames(new_collection._paths())) == 0) + new_collection = ImageFileCollection( + location=destination, keywords=["imagetyp"] + ) + basenames = lambda paths: set([os.path.basename(file) for file in paths]) + + assert ( + len(basenames(collection._paths()) - basenames(new_collection._paths())) + == 0 + ) rmtree(destination) def test_headers_with_filter(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) cnt = 0 - for header in collection.headers(imagetyp='light'): - assert header['imagetyp'].lower() == 'light' + for header in collection.headers(imagetyp="light"): + assert header["imagetyp"].lower() == "light" cnt += 1 - assert cnt == triage_setup.n_test['light'] + assert cnt == triage_setup.n_test["light"] def test_headers_with_multiple_filters(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) cnt = 0 - for header in collection.headers(imagetyp='light', - filter='R'): - assert header['imagetyp'].lower() == 'light' - assert header['filter'].lower() == 'r' + for header in collection.headers(imagetyp="light", filter="R"): + assert header["imagetyp"].lower() == "light" + assert header["filter"].lower() == "r" cnt += 1 - assert cnt == (triage_setup.n_test['light'] - - triage_setup.n_test['missing_filter_value']) + assert cnt == ( + triage_setup.n_test["light"] - triage_setup.n_test["missing_filter_value"] + ) def test_headers_with_filter_wildcard(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) cnt = 0 - for header in collection.headers(imagetyp='*'): + for header in collection.headers(imagetyp="*"): cnt += 1 - assert cnt == triage_setup.n_test['files'] + assert cnt == triage_setup.n_test["files"] def test_headers_with_filter_missing_keyword(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) - for header in collection.headers(imagetyp='light', - object=''): - assert header['imagetyp'].lower() == 'light' + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) + for header in collection.headers(imagetyp="light", object=""): + assert header["imagetyp"].lower() == "light" with pytest.raises(KeyError): - header['object'] + header["object"] def test_generator_headers_save_with_name(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) - for header in collection.headers(save_with_name='_new'): + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) + for header in collection.headers(save_with_name="_new"): assert isinstance(header, fits.Header) - new_collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) - assert (len(new_collection._paths()) == - 2 * (triage_setup.n_test['files']) - - triage_setup.n_test['compressed']) - [os.remove(fil) for fil in iglob(triage_setup.test_dir + '/*_new*')] + new_collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) + assert ( + len(new_collection._paths()) + == 2 * 
(triage_setup.n_test["files"]) - triage_setup.n_test["compressed"] + ) + [os.remove(fil) for fil in iglob(triage_setup.test_dir + "/*_new*")] def test_generator_data(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) for img in collection.data(): assert isinstance(img, np.ndarray) def test_generator_ccds_without_unit(self, triage_setup): collection = ImageFileCollection( - location=triage_setup.test_dir, keywords=['imagetyp']) + location=triage_setup.test_dir, keywords=["imagetyp"] + ) with pytest.raises(ValueError): _ = next(collection.ccds()) def test_generator_ccds(self, triage_setup): collection = ImageFileCollection( - location=triage_setup.test_dir, keywords=['imagetyp']) - ccd_kwargs = {'unit': 'adu'} + location=triage_setup.test_dir, keywords=["imagetyp"] + ) + ccd_kwargs = {"unit": "adu"} for ccd in collection.ccds(ccd_kwargs=ccd_kwargs): assert isinstance(ccd, CCDData) def test_consecutive_fiilters(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp', 'filter', - 'object']) - no_files_match = collection.files_filtered(object='fdsafs') - assert(len(no_files_match) == 0) - some_files_should_match = collection.files_filtered(object=None, - imagetyp='light') - assert(len(some_files_should_match) == - triage_setup.n_test['light']) - - def test_filter_does_not_not_permanently_change_file_mask(self, - triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp", "filter", "object"] + ) + no_files_match = collection.files_filtered(object="fdsafs") + assert len(no_files_match) == 0 + some_files_should_match = collection.files_filtered( + object=None, imagetyp="light" + ) + assert len(some_files_should_match) == triage_setup.n_test["light"] + + def test_filter_does_not_not_permanently_change_file_mask(self, triage_setup): + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) # Ensure all files are originally unmasked - assert not collection.summary['file'].mask.any() + assert not collection.summary["file"].mask.any() # Generate list that will match NO files - collection.files_filtered(imagetyp='foisajfoisaj') + collection.files_filtered(imagetyp="foisajfoisaj") # If the code works, this should have no permanent effect - assert not collection.summary['file'].mask.any() - - @pytest.mark.parametrize("new_keywords,collection_keys", [ - (['imagetyp', 'object'], ['imagetyp', 'filter']), - (['imagetyp'], ['imagetyp', 'filter'])]) - def test_keyword_setting(self, new_keywords, collection_keys, - triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=collection_keys) + assert not collection.summary["file"].mask.any() + + @pytest.mark.parametrize( + "new_keywords,collection_keys", + [ + (["imagetyp", "object"], ["imagetyp", "filter"]), + (["imagetyp"], ["imagetyp", "filter"]), + ], + ) + def test_keyword_setting(self, new_keywords, collection_keys, triage_setup): + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=collection_keys + ) tbl_orig = collection.summary collection.keywords = new_keywords tbl_new = collection.summary if set(new_keywords).issubset(collection_keys): # Should just delete columns without rebuilding table - assert(tbl_orig is 
tbl_new) + assert tbl_orig is tbl_new else: # We need new keywords so must rebuild - assert(tbl_orig is not tbl_new) + assert tbl_orig is not tbl_new for key in new_keywords: - assert(key in tbl_new.keys()) - assert (tbl_orig['file'] == tbl_new['file']).all() - assert (tbl_orig['imagetyp'] == tbl_new['imagetyp']).all() - assert 'filter' not in tbl_new.keys() - assert 'object' not in tbl_orig.keys() + assert key in tbl_new.keys() + assert (tbl_orig["file"] == tbl_new["file"]).all() + assert (tbl_orig["imagetyp"] == tbl_new["imagetyp"]).all() + assert "filter" not in tbl_new.keys() + assert "object" not in tbl_orig.keys() def test_keyword_setting_to_empty_list(self, triage_setup): ic = ImageFileCollection(triage_setup.test_dir) ic.keywords = [] - assert ['file'] == ic.keywords + assert ["file"] == ic.keywords def test_header_and_filename(self, triage_setup): - collection = ImageFileCollection(location=triage_setup.test_dir, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=triage_setup.test_dir, keywords=["imagetyp"] + ) for header, fname in collection.headers(return_fname=True): - assert (fname in collection.summary['file']) - assert (isinstance(header, fits.Header)) + assert fname in collection.summary["file"] + assert isinstance(header, fits.Header) def test_dir_with_no_fits_files(self, tmpdir): empty_dir = tmpdir.mkdtemp() - some_file = empty_dir.join('some_file.txt') - some_file.dump('words') + some_file = empty_dir.join("some_file.txt") + some_file.dump("words") with pytest.warns(Warning) as w: - collection = ImageFileCollection(location=empty_dir.strpath, - keywords=['imagetyp']) + collection = ImageFileCollection( + location=empty_dir.strpath, keywords=["imagetyp"] + ) assert len(w) == 1 assert str(w[0].message) == "no FITS files in the collection." assert collection.summary is None @@ -443,13 +479,13 @@ def test_dir_with_no_keys(self, tmpdir): # This test should fail if the FITS files in the directory # are actually read. bad_dir = tmpdir.mkdtemp() - not_really_fits = bad_dir.join('not_fits.fit') - not_really_fits.dump('I am not really a FITS file') + not_really_fits = bad_dir.join("not_fits.fit") + not_really_fits.dump("I am not really a FITS file") # Make sure an error will be generated if the FITS file is read with pytest.raises(IOError): fits.getheader(not_really_fits.strpath) - log = tmpdir.join('tmp.log') + log = tmpdir.join("tmp.log") self._setup_logger(log.strpath) _ = ImageFileCollection(location=bad_dir.strpath, keywords=[]) @@ -459,7 +495,7 @@ def test_dir_with_no_keys(self, tmpdir): # ImageFileCollection will suppress the IOError but log a warning # so check that the log has no warnings in it. - assert (len(warnings) == 0) + assert len(warnings) == 0 def test_fits_summary_when_keywords_are_not_subset(self, triage_setup): """ @@ -467,102 +503,101 @@ def test_fits_summary_when_keywords_are_not_subset(self, triage_setup): passed to the ImageFileCollection and to files_filtered but the latter is not a subset of the former. 
""" - ic = ImageFileCollection(triage_setup.test_dir, - keywords=['imagetyp', 'exposure']) + ic = ImageFileCollection( + triage_setup.test_dir, keywords=["imagetyp", "exposure"] + ) n_files = len(ic.files) - files_missing_this_key = ic.files_filtered(imagetyp='*', - monkeys=None) - assert(n_files > 0) - assert(n_files == len(files_missing_this_key)) + files_missing_this_key = ic.files_filtered(imagetyp="*", monkeys=None) + assert n_files > 0 + assert n_files == len(files_missing_this_key) def test_duplicate_keywords_in_setting(self, triage_setup): - keywords_in = ['imagetyp', 'a', 'a'] - ic = ImageFileCollection(triage_setup.test_dir, - keywords=keywords_in) + keywords_in = ["imagetyp", "a", "a"] + ic = ImageFileCollection(triage_setup.test_dir, keywords=keywords_in) for key in set(keywords_in): - assert (key in ic.keywords) + assert key in ic.keywords # One keyword gets added: file assert len(ic.keywords) < len(keywords_in) + 1 def test_keyword_includes_file(self, triage_setup): - keywords_in = ['file', 'imagetyp'] - ic = ImageFileCollection(triage_setup.test_dir, - keywords=keywords_in) - assert 'file' in ic.keywords - file_keywords = [key for key in ic.keywords if key == 'file'] + keywords_in = ["file", "imagetyp"] + ic = ImageFileCollection(triage_setup.test_dir, keywords=keywords_in) + assert "file" in ic.keywords + file_keywords = [key for key in ic.keywords if key == "file"] assert len(file_keywords) == 1 def test_setting_keywords_to_none(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["imagetyp"]) ic.keywords = None assert ic.summary == [] def test_getting_value_for_keyword(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["imagetyp"]) # Does it fail if the keyword is not in the summary? with pytest.raises(ValueError): - ic.values('filter') + ic.values("filter") # If I ask for unique values do I get them? - values = ic.values('imagetyp', unique=True) + values = ic.values("imagetyp", unique=True) - assert values == list(set(ic.summary['imagetyp'])) - assert len(values) < len(ic.summary['imagetyp']) + assert values == list(set(ic.summary["imagetyp"])) + assert len(values) < len(ic.summary["imagetyp"]) # Does the list of non-unique values match the raw column? - values = ic.values('imagetyp', unique=False) - assert values == list(ic.summary['imagetyp']) + values = ic.values("imagetyp", unique=False) + assert values == list(ic.summary["imagetyp"]) # Does unique actually default to false? - values2 = ic.values('imagetyp') + values2 = ic.values("imagetyp") assert values == values2 def test_collection_when_one_file_not_fits(self, triage_setup): - not_fits = 'foo.fit' + not_fits = "foo.fit" path_bad = os.path.join(triage_setup.test_dir, not_fits) # Create an empty file... - with open(path_bad, 'w'): + with open(path_bad, "w"): pass - ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp']) - assert not_fits not in ic.summary['file'] + ic = ImageFileCollection(triage_setup.test_dir, keywords=["imagetyp"]) + assert not_fits not in ic.summary["file"] os.remove(path_bad) def test_data_type_mismatch_in_fits_keyword_values(self, triage_setup): # If one keyword has an unexpected type, do we notice? 
img = np.uint16(np.arange(100)) bad_filter = fits.PrimaryHDU(img) - bad_filter.header['imagetyp'] = 'LIGHT' - bad_filter.header['filter'] = 15.0 - path_bad = os.path.join(triage_setup.test_dir, 'bad_filter.fit') + bad_filter.header["imagetyp"] = "LIGHT" + bad_filter.header["filter"] = 15.0 + path_bad = os.path.join(triage_setup.test_dir, "bad_filter.fit") bad_filter.writeto(path_bad) - ic = ImageFileCollection(triage_setup.test_dir, keywords=['filter']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["filter"]) # dtype is object when there is a mix of types - assert ic.summary['filter'].dtype == np.dtype('O') + assert ic.summary["filter"].dtype == np.dtype("O") os.remove(path_bad) def test_filter_by_numerical_value(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["naxis"]) should_be_zero = ic.files_filtered(naxis=2) assert len(should_be_zero) == 0 should_not_be_zero = ic.files_filtered(naxis=1) - assert len(should_not_be_zero) == triage_setup.n_test['files'] + assert len(should_not_be_zero) == triage_setup.n_test["files"] def test_files_filtered_with_full_path(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["naxis"]) files = ic.files_filtered(naxis=1, include_path=True) for f in files: assert f.startswith(triage_setup.test_dir) def test_unknown_generator_type_raises_error(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["naxis"]) with pytest.raises(ValueError): - for foo in ic._generator('not a real generator'): + for foo in ic._generator("not a real generator"): pass - def test_setting_write_location_to_bad_dest_raises_error(self, tmpdir, - triage_setup): + def test_setting_write_location_to_bad_dest_raises_error( + self, tmpdir, triage_setup + ): new_tmp = tmpdir.mkdtemp() - bad_directory = new_tmp.join('foo') + bad_directory = new_tmp.join("foo") - ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["naxis"]) with pytest.raises(IOError): for hdr in ic.headers(save_location=bad_directory.strpath): pass @@ -578,38 +613,37 @@ def test_initialization_with_no_keywords(self, triage_setup): assert not execs def check_all_keywords_in_collection(self, image_collection): - lower_case_columns = [c.lower() for c in - image_collection.summary.colnames] + lower_case_columns = [c.lower() for c in image_collection.summary.colnames] for h in image_collection.headers(): for k in h: assert k.lower() in lower_case_columns def test_tabulate_all_keywords(self, triage_setup): - ic = ImageFileCollection(location=triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(location=triage_setup.test_dir, keywords="*") self.check_all_keywords_in_collection(ic) def test_summary_table_is_always_masked(self, triage_setup): # First, try grabbing all of the keywords - ic = ImageFileCollection(location=triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(location=triage_setup.test_dir, keywords="*") assert ic.summary.masked # Now, try keywords that every file will have - ic.keywords = ['bitpix'] + ic.keywords = ["bitpix"] assert ic.summary.masked # What about keywords that include some that will surely be missing? 
- ic.keywords = ['bitpix', 'dsafui'] + ic.keywords = ["bitpix", "dsafui"] assert ic.summary.masked def test_case_of_keywords_respected(self, triage_setup): - keywords_in = ['BitPix', 'instrume', 'NAXIS'] - ic = ImageFileCollection(location=triage_setup.test_dir, - keywords=keywords_in) + keywords_in = ["BitPix", "instrume", "NAXIS"] + ic = ImageFileCollection(location=triage_setup.test_dir, keywords=keywords_in) for key in keywords_in: assert key in ic.summary.colnames def test_grabbing_all_keywords_and_specific_keywords(self, triage_setup): - keyword_not_in_headers = 'OIdn89!@' - ic = ImageFileCollection(triage_setup.test_dir, - keywords=['*', keyword_not_in_headers]) + keyword_not_in_headers = "OIdn89!@" + ic = ImageFileCollection( + triage_setup.test_dir, keywords=["*", keyword_not_in_headers] + ) assert keyword_not_in_headers in ic.summary.colnames self.check_all_keywords_in_collection(ic) @@ -618,16 +652,17 @@ def test_grabbing_all_keywords_excludes_empty_key(self, triage_setup): # that case is handled correctly. blank_keyword = fits.PrimaryHDU() blank_keyword.data = np.zeros((100, 100)) - blank_keyword.header[''] = 'blank' + blank_keyword.header[""] = "blank" - blank_keyword.writeto(os.path.join(triage_setup.test_dir, - 'blank.fits')) + blank_keyword.writeto(os.path.join(triage_setup.test_dir, "blank.fits")) - ic = ImageFileCollection(triage_setup.test_dir, keywords='*') - assert 'col0' not in ic.summary.colnames + ic = ImageFileCollection(triage_setup.test_dir, keywords="*") + assert "col0" not in ic.summary.colnames - @pytest.mark.skipif("os.environ.get('APPVEYOR') or os.sys.platform == 'win32'", - reason="fails on Windows because of file permissions.") + @pytest.mark.skipif( + "os.environ.get('APPVEYOR') or os.sys.platform == 'win32'", + reason="fails on Windows because of file permissions.", + ) def test_header_with_long_history_roundtrips_to_disk(self, triage_setup): # I tried combing several history comments into one table entry with # '\n'.join(history), which resulted in a table that couldn't @@ -635,28 +670,29 @@ def test_header_with_long_history_roundtrips_to_disk(self, triage_setup): # interpreted as...a new line! This test is a check against future # foolishness. 
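The masked-summary guarantee checked above is what makes mixed keyword sets workable: the summary is always a masked Table, so a keyword absent from some files shows up as masked entries instead of raising. Sketch (hypothetical location; dsafui is the deliberately missing keyword from the test):

from ccdproc import ImageFileCollection

ic = ImageFileCollection("/data/night1", keywords=["bitpix", "dsafui"])  # hypothetical path

assert ic.summary.masked          # always a masked table
print(ic.summary["dsafui"].mask)  # True wherever the keyword is absent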
from astropy.table import Table + img = np.uint16(np.arange(100)) long_history = fits.PrimaryHDU(img) - long_history.header['imagetyp'] = 'BIAS' - long_history.header['history'] = 'Something happened' - long_history.header['history'] = 'Then something else happened' - long_history.header['history'] = 'And then something odd happened' - path_history = os.path.join(triage_setup.test_dir, 'long_history.fit') + long_history.header["imagetyp"] = "BIAS" + long_history.header["history"] = "Something happened" + long_history.header["history"] = "Then something else happened" + long_history.header["history"] = "And then something odd happened" + path_history = os.path.join(triage_setup.test_dir, "long_history.fit") long_history.writeto(path_history) - ic = ImageFileCollection(triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(triage_setup.test_dir, keywords="*") with NamedTemporaryFile() as test_table: - ic.summary.write(test_table.name, format='ascii.csv', - overwrite=True) - table_disk = Table.read(test_table.name, format='ascii.csv') + ic.summary.write(test_table.name, format="ascii.csv", overwrite=True) + table_disk = Table.read(test_table.name, format="ascii.csv") assert len(table_disk) == len(ic.summary) - @pytest.mark.skipif("os.environ.get('APPVEYOR') or os.sys.platform == 'win32'", - reason="fails on Windows because file " - "overwriting fails") + @pytest.mark.skipif( + "os.environ.get('APPVEYOR') or os.sys.platform == 'win32'", + reason="fails on Windows because file " "overwriting fails", + ) def test_refresh_method_sees_added_keywords(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(triage_setup.test_dir, keywords="*") # Add a keyword I know isn't already in the header to each file. - not_in_header = 'BARKARK' + not_in_header = "BARKARK" for h in ic.headers(overwrite=True): h[not_in_header] = True @@ -668,36 +704,36 @@ def test_refresh_method_sees_added_keywords(self, triage_setup): assert not_in_header.lower() in ic.summary.colnames def test_refresh_method_sees_added_files(self, triage_setup): - ic = ImageFileCollection(triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(triage_setup.test_dir, keywords="*") # Compressed files don't get copied. Not sure why... 
- original_len = len(ic.summary) - triage_setup.n_test['compressed'] + original_len = len(ic.summary) - triage_setup.n_test["compressed"] # Generate additional files in this directory for h in ic.headers(save_with_name="_foo"): pass ic.refresh() - new_len = len(ic.summary) - triage_setup.n_test['compressed'] + new_len = len(ic.summary) - triage_setup.n_test["compressed"] assert new_len == 2 * original_len def test_keyword_order_is_preserved(self, triage_setup): - keywords = ['imagetyp', 'exposure', 'filter'] + keywords = ["imagetyp", "exposure", "filter"] ic = ImageFileCollection(triage_setup.test_dir, keywords=keywords) - assert ic.keywords == ['file'] + keywords + assert ic.keywords == ["file"] + keywords def test_sorting(self, triage_setup): collection = ImageFileCollection( - location=triage_setup.test_dir, - keywords=['imagetyp', 'filter', 'object']) + location=triage_setup.test_dir, keywords=["imagetyp", "filter", "object"] + ) all_elements = [] for hdu, fname in collection.hdus(return_fname=True): all_elements.append((str(hdu.header), fname)) # Now sort - collection.sort(keys=['imagetyp', 'object']) + collection.sort(keys=["imagetyp", "object"]) # and check it's all still right for hdu, fname in collection.hdus(return_fname=True): - assert((str(hdu.header), fname) in all_elements) + assert (str(hdu.header), fname) in all_elements for i in range(len(collection.summary)): - assert(collection.summary['file'][i] == collection.files[i]) + assert collection.summary["file"][i] == collection.files[i] def test_sorting_without_key_fails(self, triage_setup): ic = ImageFileCollection(location=triage_setup.test_dir) @@ -709,19 +745,19 @@ def test_duplicate_keywords(self, triage_setup): # fail. hdu = fits.PrimaryHDU() hdu.data = np.zeros((5, 5)) - hdu.header['stupid'] = 'fun' - hdu.header.append(('stupid', 'nofun')) + hdu.header["stupid"] = "fun" + hdu.header.append(("stupid", "nofun")) - hdu.writeto(os.path.join(triage_setup.test_dir, 'duplicated.fits')) + hdu.writeto(os.path.join(triage_setup.test_dir, "duplicated.fits")) with pytest.warns(UserWarning) as w: - ic = ImageFileCollection(triage_setup.test_dir, keywords='*') + ic = ImageFileCollection(triage_setup.test_dir, keywords="*") assert len(w) == 1 - assert 'stupid' in str(w[0].message) + assert "stupid" in str(w[0].message) - assert 'stupid' in ic.summary.colnames - assert 'fun' in ic.summary['stupid'] - assert 'nofun' not in ic.summary['stupid'] + assert "stupid" in ic.summary.colnames + assert "fun" in ic.summary["stupid"] + assert "nofun" not in ic.summary["stupid"] def test_ccds_generator_in_different_directory(self, triage_setup, tmpdir): """ @@ -740,7 +776,7 @@ def test_ccds_generator_in_different_directory(self, triage_setup, tmpdir): assert not os.path.samefile(os.getcwd(), coll.location) # This generated an IOError before the issue was fixed. 
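On the sorting test above: sort reorders the summary table and the files list together, so generator iteration follows the new order and summary['file'][i] keeps matching files[i] afterwards. Sketch (hypothetical location):

from ccdproc import ImageFileCollection

ic = ImageFileCollection("/data/night1", keywords=["imagetyp", "object"])  # hypothetical path

ic.sort(keys=["imagetyp", "object"])  # reorders the collection in place
print(ic.files[:3])                   # now in sorted order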
- for _ in coll.ccds(ccd_kwargs={'unit': 'adu'}): + for _ in coll.ccds(ccd_kwargs={"unit": "adu"}): pass def test_ccds_generator_does_not_support_overwrite(self, triage_setup): @@ -763,42 +799,42 @@ def test_glob_matching(self, triage_setup): # includes / excludes one = fits.PrimaryHDU() one.data = np.zeros((5, 5)) - one.header[''] = 'whatever' + one.header[""] = "whatever" - one.writeto(os.path.join(triage_setup.test_dir, 'SPAM_stuff.fits')) - one.writeto(os.path.join(triage_setup.test_dir, 'SPAM_other_stuff.fits')) + one.writeto(os.path.join(triage_setup.test_dir, "SPAM_stuff.fits")) + one.writeto(os.path.join(triage_setup.test_dir, "SPAM_other_stuff.fits")) - coll = ImageFileCollection(triage_setup.test_dir, glob_include='SPAM*') + coll = ImageFileCollection(triage_setup.test_dir, glob_include="SPAM*") assert len(coll.files) == 2 - coll = ImageFileCollection(triage_setup.test_dir, glob_include='SPAM*', - glob_exclude='*other*') + coll = ImageFileCollection( + triage_setup.test_dir, glob_include="SPAM*", glob_exclude="*other*" + ) assert len(coll.files) == 1 # The glob attributes are readonly, so setting them raises an Exception. with pytest.raises(AttributeError): - coll.glob_exclude = '*stuff*' + coll.glob_exclude = "*stuff*" with pytest.raises(AttributeError): - coll.glob_include = '*stuff*' + coll.glob_include = "*stuff*" def test_that_test_files_have_expected_properties(self, triage_setup): - expected_name = \ - get_pkg_data_filename('data/expected_ifc_file_properties.csv') + expected_name = get_pkg_data_filename("data/expected_ifc_file_properties.csv") expected = Table.read(expected_name) # Make the comparison more reliable by sorting - expected.sort('file') + expected.sort("file") ic = ImageFileCollection(triage_setup.test_dir) actual = ic.summary # Write the actual IFC summary out to disk to turn bool into strings of # "True" and "False", and any other non-essential differences between # the tables. - tmp_file = 'actual.csv' + tmp_file = "actual.csv" actual.write(tmp_file) actual = Table.read(tmp_file) # Make the comparison more reliable by sorting - actual.sort('file') + actual.sort("file") assert len(ic.summary) == len(expected) for column in expected.colnames: @@ -838,38 +874,39 @@ def test_image_collection_with_no_location(self, triage_setup): # Move up a level to make sure we are not accidentally # pulling in files from the current working directory, # which includes everythin in source. - os.chdir('..') + os.chdir("..") ic = ImageFileCollection(filenames=file_paths) assert len(ic.summary) == len(file_paths) - expected_name = \ - get_pkg_data_filename('data/expected_ifc_file_properties.csv') + expected_name = get_pkg_data_filename( + "data/expected_ifc_file_properties.csv" + ) expected = Table.read(expected_name) # Make the comparison more reliable by sorting - expected.sort('file') + expected.sort("file") actual = ic.summary # Write the actual IFC summary out to disk to turn bool into strings # of"True" and "False", and any other non-essential differences # between the tables. - tmp_file = 'actual.csv' + tmp_file = "actual.csv" actual.write(tmp_file) actual = Table.read(tmp_file) # Make the comparison more reliable by sorting...but the actual # in this case includes paths, so we really want to sort by the # base name of the file. 
- bases = np.array([Path(f).name for f in actual['file']]) + bases = np.array([Path(f).name for f in actual["file"]]) sort_order = np.argsort(bases) actual = actual[sort_order] bases = bases[sort_order] - assert all(Path(f).exists() for f in actual['file']) + assert all(Path(f).exists() for f in actual["file"]) for column in expected.colnames: - if column == 'file': + if column == "file": assert np.all(bases == expected[column]) else: assert np.all(actual[column] == expected[column]) @@ -877,28 +914,28 @@ def test_image_collection_with_no_location(self, triage_setup): # Set comparisons don't care about order :) # Check several of the ways we can get file names from the # collection and ensure all of them include the path. - assert set(file_paths) == set(ic.summary['file']) + assert set(file_paths) == set(ic.summary["file"]) assert set(file_paths) == set(ic.files) assert set(file_paths) == set(ic.files_filtered(include_path=True)) # Spot check a couple of dtypes as a test for # https://github.com/astropy/ccdproc/issues/662 - assert ic.summary['extend'].dtype == 'bool' + assert ic.summary["extend"].dtype == "bool" # Of course, default dtypes on Windows are different. So instead # of comparing to something sensible like int64, compare to the # default int dtype. - assert ic.summary['naxis1'].dtype == np.array([5]).dtype + assert ic.summary["naxis1"].dtype == np.array([5]).dtype # and the default float dtype - assert ic.summary['exposure'].dtype == np.array([5.0]).dtype + assert ic.summary["exposure"].dtype == np.array([5.0]).dtype - expected_heads = (actual['imagetyp'] == 'LIGHT').sum() + expected_heads = (actual["imagetyp"] == "LIGHT").sum() n_heads = 0 # Try one of the generators - for h in ic.headers(imagetyp='light'): - assert h['imagetyp'].lower() == 'light' + for h in ic.headers(imagetyp="light"): + assert h["imagetyp"].lower() == "light" n_heads += 1 assert n_heads == expected_heads @@ -919,40 +956,43 @@ def test_force_detect_fits_files_finds_fits_files(self, triage_setup): path = Path(triage_setup.test_dir) for idx, p in enumerate(path.iterdir()): - new_name = 'no_extension{}'.format(idx) + new_name = "no_extension{}".format(idx) new_path = path / new_name new_path.write_bytes(p.read_bytes()) - ic = ImageFileCollection(location=str(path), - find_fits_by_reading=True) + ic = ImageFileCollection(location=str(path), find_fits_by_reading=True) # Compressed files won't be automagically detected by reading the # first few bytes. - expected_number = (2 * triage_setup.n_test['files'] - - triage_setup.n_test['compressed']) + expected_number = ( + 2 * triage_setup.n_test["files"] - triage_setup.n_test["compressed"] + ) assert len(ic.summary) == expected_number - n_bias = (ic.summary['imagetyp'] == 'BIAS').sum() - assert n_bias == 2 * triage_setup.n_test['bias'] + n_bias = (ic.summary["imagetyp"] == "BIAS").sum() + assert n_bias == 2 * triage_setup.n_test["bias"] # Only one file in the original set of test files has exposure time # 15, so there should be two now. 
         assert len(ic.files_filtered(exposure=15.0)) == 2
 
         # Try one of the generators
-        expected_heads = (2 * triage_setup.n_test['light'] -
-                          triage_setup.n_test['compressed'])
+        expected_heads = (
+            2 * triage_setup.n_test["light"] - triage_setup.n_test["compressed"]
+        )
         n_heads = 0
 
-        for h in ic.headers(imagetyp='light'):
-            assert h['imagetyp'].lower() == 'light'
+        for h in ic.headers(imagetyp="light"):
+            assert h["imagetyp"].lower() == "light"
             n_heads += 1
 
         assert n_heads == expected_heads
 
-    @pytest.mark.filterwarnings("ignore:The following header keyword is invalid:UserWarning")
+    @pytest.mark.filterwarnings(
+        "ignore:The following header keyword is invalid:UserWarning"
+    )
     def test_less_strict_verify_option(self, triage_setup):
         # Tests for feature request
         #
@@ -972,10 +1012,8 @@ def test_less_strict_verify_option(self, triage_setup):
         testfits = fits.PrimaryHDU(data=np.ones((10, 10)), header=testh)
 
         path = Path(triage_setup.test_dir)
-        bad_fits_name = 'test_warnA.fits'
-        testfits.writeto(path / bad_fits_name,
-                         output_verify='warn',
-                         overwrite=True)
+        bad_fits_name = "test_warnA.fits"
+        testfits.writeto(path / bad_fits_name, output_verify="warn", overwrite=True)
 
         ic = ImageFileCollection(location=str(path))
 
@@ -983,14 +1021,16 @@
 
         # Turns out this sample header is so messed up that TESTVERI does not
         # end up as a keyword.
-        assert 'TESTVERI' not in ic.summary.colnames
+        assert "TESTVERI" not in ic.summary.colnames
 
         # This does end up as a key somehow *shrug*.
-        assert '17/02/13' in ic.summary.colnames
+        assert "17/02/13" in ic.summary.colnames
 
         # Try making the summary as in the original bug report
-        with pytest.warns(AstropyUserWarning, match='The following header keyword is invalid'):
-            ic = ImageFileCollection(location=str(path), glob_include='*warnA*')
+        with pytest.warns(
+            AstropyUserWarning, match="The following header keyword is invalid"
+        ):
+            ic = ImageFileCollection(location=str(path), glob_include="*warnA*")
 
     def test_type_of_empty_collection(self, triage_setup):
         # Test for implementation of the suggestion in
@@ -1002,9 +1042,9 @@ def test_type_of_empty_collection(self, triage_setup):
         # with no keys and no files returns None.
 
         # Make a dummy keyword that we then delete.
- ic = ImageFileCollection(triage_setup.test_dir, keywords=['fafa']) + ic = ImageFileCollection(triage_setup.test_dir, keywords=["fafa"]) ic.keywords = [] - assert set(ic.summary.colnames) == set(['file']) + assert set(ic.summary.colnames) == set(["file"]) # Remove all of the fits files path = Path(triage_setup.test_dir) @@ -1012,7 +1052,7 @@ def test_type_of_empty_collection(self, triage_setup): p.unlink() # Now the summary should be none - with pytest.warns(AstropyUserWarning, match='no FITS files in the collection'): + with pytest.warns(AstropyUserWarning, match="no FITS files in the collection"): ic = ImageFileCollection(triage_setup.test_dir) assert ic.summary is None assert ic.keywords == [] @@ -1023,39 +1063,35 @@ def test_regex_match_for_search(self, triage_setup): ic = ImageFileCollection(triage_setup.test_dir) - files = ic.files_filtered(regex_match=True, imagetyp='b.*s') - assert len(files) == triage_setup.n_test['bias'] + files = ic.files_filtered(regex_match=True, imagetyp="b.*s") + assert len(files) == triage_setup.n_test["bias"] # This should return all of the files in the test set - all_files = ic.files_filtered(regex_match=True, imagetyp='bias|light') - assert len(all_files) == triage_setup.n_test['files'] + all_files = ic.files_filtered(regex_match=True, imagetyp="bias|light") + assert len(all_files) == triage_setup.n_test["files"] # Add a column with more interesting content and see whether we # match that. - ic.summary['match_me'] = [ - 'hello', - 'goodbye', - 'bye', - 'byte', - 'good bye hello', - 'dog' + ic.summary["match_me"] = [ + "hello", + "goodbye", + "bye", + "byte", + "good bye hello", + "dog", ] - hello_anywhere = ic.files_filtered(regex_match=True, - match_me='hello') + hello_anywhere = ic.files_filtered(regex_match=True, match_me="hello") assert len(hello_anywhere) == 2 - hello_start = ic.files_filtered(regex_match=True, - match_me='^hello') + hello_start = ic.files_filtered(regex_match=True, match_me="^hello") assert len(hello_start) == 1 # Is it really a case-insensitive match? - hello_start = ic.files_filtered(regex_match=True, - match_me='^HeLlo') + hello_start = ic.files_filtered(regex_match=True, match_me="^HeLlo") assert len(hello_start) == 1 - any_bye = ic.files_filtered(regex_match=True, - match_me='by.*e') + any_bye = ic.files_filtered(regex_match=True, match_me="by.*e") assert len(any_bye) == 4 def test_generator_with_regex(self, triage_setup): @@ -1063,11 +1099,11 @@ def test_generator_with_regex(self, triage_setup): n_light = 0 - for h in ic.headers(regex_match=True, imagetyp='li.*t'): - assert h['imagetyp'].lower() == 'light' + for h in ic.headers(regex_match=True, imagetyp="li.*t"): + assert h["imagetyp"].lower() == "light" n_light += 1 - assert n_light == triage_setup.n_test['light'] + assert n_light == triage_setup.n_test["light"] def test_make_collection_by_filtering(self, triage_setup): # Test for implementation of feature at @@ -1078,16 +1114,16 @@ def test_make_collection_by_filtering(self, triage_setup): # an existing ImageFileCollection. 
         ic = ImageFileCollection(location=triage_setup.test_dir)
 
-        new_ic = ic.filter(imagetyp='light')
-        assert len(new_ic.summary) == triage_setup.n_test['light']
+        new_ic = ic.filter(imagetyp="light")
+        assert len(new_ic.summary) == triage_setup.n_test["light"]
 
         for header in new_ic.headers():
-            assert header['imagetyp'].lower() == 'light'
+            assert header["imagetyp"].lower() == "light"
 
     def test_filtered_collection_with_no_files(self, triage_setup):
         ifc = ImageFileCollection(triage_setup.test_dir)
 
         with pytest.warns(AstropyUserWarning, match="no FITS files"):
-            _ = ifc.filter(object='really fake object')
+            _ = ifc.filter(object="really fake object")
 
     def test_filter_on_regex_escape_characters(self, triage_setup):
         # Test for implementation of bugfix at
@@ -1100,16 +1136,17 @@ def test_filter_on_regex_escape_characters(self, triage_setup):
 
         # For a few different special characters, make test files with FILTER
         # keyword containing these
-        special_kwds = ['CO+', 'GG420 (1)', 'V|R|I', 'O[III]', 'NaD^2']
+        special_kwds = ["CO+", "GG420 (1)", "V|R|I", "O[III]", "NaD^2"]
         for i, kw in enumerate(special_kwds, 1):
             hdu = fits.PrimaryHDU()
             hdu.data = np.zeros((5, 5))
-            hdu.header['REGEX_FL'] = kw
-            hdu.writeto(os.path.join(triage_setup.test_dir,
-                                     'regex_special_{:d}.fits'.format(i)))
+            hdu.header["REGEX_FL"] = kw
+            hdu.writeto(
+                os.path.join(triage_setup.test_dir, "regex_special_{:d}.fits".format(i))
+            )
 
         ic = ImageFileCollection(triage_setup.test_dir)
         for kw in special_kwds:
             new_ic = ic.filter(regex_fl=kw)
             assert len(new_ic.files) == 1
-            assert kw in new_ic.summary['regex_fl']
+            assert kw in new_ic.summary["regex_fl"]
diff --git a/ccdproc/tests/test_keyword.py b/ccdproc/tests/test_keyword.py
index 3d25e68d..3ecab814 100644
--- a/ccdproc/tests/test_keyword.py
+++ b/ccdproc/tests/test_keyword.py
@@ -8,16 +8,16 @@
 
 
 def test_keyword_init():
-    key_name = 'some_key'
+    key_name = "some_key"
     key = Keyword(key_name, unit=u.second)
     assert key.name == key_name
     assert key.unit == u.second
 
 
 def test_keyword_properties_read_only():
-    key = Keyword('observer')
+    key = Keyword("observer")
     with pytest.raises(AttributeError):
-        key.name = 'error'
+        key.name = "error"
     with pytest.raises(AttributeError):
         key.unit = u.hour
 
@@ -30,17 +30,20 @@
 # True if the expected result is key.value == numerical_value * key.unit
 # Name of an error if an error is expected
 # A string if the expected value is a string
-@pytest.mark.parametrize('value,unit,expected', [
-    (numerical_value, unit, True),
-    (numerical_value, None, ValueError),
-    (numerical_value * unit, None, True),
-    (numerical_value * unit, unit, True),
-    (numerical_value * unit, u.km, True),
-    ('some string', None, 'some string'),
-    ('no strings with unit', unit, ValueError)
-    ])
+@pytest.mark.parametrize(
+    "value,unit,expected",
+    [
+        (numerical_value, unit, True),
+        (numerical_value, None, ValueError),
+        (numerical_value * unit, None, True),
+        (numerical_value * unit, unit, True),
+        (numerical_value * unit, u.km, True),
+        ("some string", None, "some string"),
+        ("no strings with unit", unit, ValueError),
+    ],
+)
 def test_value_setting(value, unit, expected):
-    name = 'exposure'
+    name = "exposure"
     # Setting at initialization time with
     try:
         expected_is_error = issubclass(expected, Exception)
@@ -58,7 +61,7 @@
 
 def test_keyword_value_from_header():
-    name = 'exposure'
+    name = "exposure"
     numerical_value = 30
     unit = u.second
     h = fits.Header()
diff --git a/ccdproc/tests/test_memory_use.py b/ccdproc/tests/test_memory_use.py
index 0ecc950f..1e50108b 100644
--- a/ccdproc/tests/test_memory_use.py
+++ b/ccdproc/tests/test_memory_use.py
@@ -6,13 +6,17 @@
 import pytest
 
 try:
-    from ccdproc.tests.run_for_memory_profile import run_memory_profile, generate_fits_files, TMPPATH
+    from ccdproc.tests.run_for_memory_profile import (
+        run_memory_profile,
+        generate_fits_files,
+        TMPPATH,
+    )
 except ImportError:
     memory_profile_present = False
 else:
     memory_profile_present = True
 
-image_size = 2000   # Square image, so 2000 x 2000
+image_size = 2000  # Square image, so 2000 x 2000
 num_files = 10
 
@@ -23,16 +27,15 @@ def setup_module():
 
 def teardown_module():
     if memory_profile_present:
-        for fil in TMPPATH.glob('*.fit'):
+        for fil in TMPPATH.glob("*.fit"):
             fil.unlink()
 
-@pytest.mark.skipif(not platform.startswith('linux'),
-                    reason='memory tests only work on linux')
-@pytest.mark.skipif(not memory_profile_present,
-                    reason='memory_profiler not installed')
-@pytest.mark.parametrize('combine_method',
-                         ['average', 'sum', 'median'])
+@pytest.mark.skipif(
+    not platform.startswith("linux"), reason="memory tests only work on linux"
+)
+@pytest.mark.skipif(not memory_profile_present, reason="memory_profiler not installed")
+@pytest.mark.parametrize("combine_method", ["average", "sum", "median"])
 def test_memory_use_in_combine(combine_method):
     # This is essentially a regression test for
     # https://github.com/astropy/ccdproc/issues/638
@@ -40,9 +43,13 @@ def test_memory_use_in_combine(combine_method):
     sampling_interval = 0.01  # sec
     memory_limit = 500000000  # bytes, roughly 0.5GB
 
-    mem_use, _ = run_memory_profile(num_files, sampling_interval,
-                                    size=image_size, memory_limit=memory_limit,
-                                    combine_method=combine_method)
+    mem_use, _ = run_memory_profile(
+        num_files,
+        sampling_interval,
+        size=image_size,
+        memory_limit=memory_limit,
+        combine_method=combine_method,
+    )
 
     mem_use = np.array(mem_use)
     # We do not expect memory use to be strictly less than memory_limit
diff --git a/ccdproc/tests/test_rebin.py b/ccdproc/tests/test_rebin.py
index 59c150a5..1cc1d5f3 100644
--- a/ccdproc/tests/test_rebin.py
+++ b/ccdproc/tests/test_rebin.py
@@ -55,9 +55,7 @@ def test_rebin_smaller():
 
 
 # test rebinning with ccddata object
-@pytest.mark.parametrize('mask_data, uncertainty', [
-    (False, False),
-    (True, True)])
+@pytest.mark.parametrize("mask_data, uncertainty", [(False, False), (True, True)])
 def test_rebin_ccddata(mask_data, uncertainty):
     ccd_data = ccd_data_func(data_size=10)
     if mask_data:
diff --git a/ccdproc/tests/test_wrapped_external_funcs.py b/ccdproc/tests/test_wrapped_external_funcs.py
index 33c50b70..c2a081aa 100644
--- a/ccdproc/tests/test_wrapped_external_funcs.py
+++ b/ccdproc/tests/test_wrapped_external_funcs.py
@@ -10,45 +10,62 @@
 
 
 def test_medianfilter_correct():
-    ccd = CCDData([[2, 6, 6, 1, 7, 2, 4, 5, 9, 1],
-                   [10, 10, 9, 0, 2, 10, 8, 3, 9, 7],
-                   [2, 4, 0, 4, 4, 10, 0, 5, 6, 5],
-                   [7, 10, 8, 7, 7, 0, 5, 3, 5, 9],
-                   [9, 6, 3, 8, 6, 9, 2, 8, 10, 10],
-                   [6, 5, 1, 7, 8, 0, 8, 2, 9, 3],
-                   [0, 6, 0, 6, 3, 10, 8, 9, 7, 8],
-                   [5, 8, 3, 2, 3, 0, 2, 0, 3, 5],
-                   [9, 6, 3, 7, 1, 0, 5, 4, 8, 3],
-                   [5, 6, 9, 9, 0, 4, 9, 1, 7, 8]], unit='adu')
+    ccd = CCDData(
+        [
+            [2, 6, 6, 1, 7, 2, 4, 5, 9, 1],
+            [10, 10, 9, 0, 2, 10, 8, 3, 9, 7],
+            [2, 4, 0, 4, 4, 10, 0, 5, 6, 5],
+            [7, 10, 8, 7, 7, 0, 5, 3, 5, 9],
+            [9, 6, 3, 8, 6, 9, 2, 8, 10, 10],
+            [6, 5, 1, 7, 8, 0, 8, 2, 9, 3],
+            [0, 6, 0, 6, 3, 10, 8, 9, 7, 8],
+            [5, 8, 3, 2, 3, 0, 2, 0, 3, 5],
+            [9, 6, 3, 7, 1, 0, 5, 4, 8, 3],
+            [5, 6, 9, 9, 0, 4, 9, 1, 7, 8],
+        ],
+        unit="adu",
+    )
     result = core.median_filter(ccd, 3)
     assert isinstance(result, CCDData)
-    assert np.all(result.data == [[6, 6, 6, 6, 2, 4, 4, 5, 5, 7],
-                                  [4, 6, 4, 4, 4, 4, 5, 5, 5, 6],
-                                  [7, 8, 7, 4, 4, 5, 5, 5, 5, 7],
-                                  [7, 6, 6, 6, 7, 5, 5, 5, 6, 9],
-                                  [7, 6, 7, 7, 7, 6, 3, 5, 8, 9],
-                                  [6, 5, 6, 6, 7, 8, 8, 8, 8, 8],
-                                  [5, 5, 5, 3, 3, 3, 2, 7, 5, 5],
-                                  [6, 5, 6, 3, 3, 3, 4, 5, 5, 5],
-                                  [6, 6, 6, 3, 2, 2, 2, 4, 4, 5],
-                                  [6, 6, 7, 7, 4, 4, 4, 7, 7, 8]])
-    assert result.unit == 'adu'
-    assert all(getattr(result, attr) is None
-               for attr in ['mask', 'uncertainty', 'wcs', 'flags'])
+    assert np.all(
+        result.data
+        == [
+            [6, 6, 6, 6, 2, 4, 4, 5, 5, 7],
+            [4, 6, 4, 4, 4, 4, 5, 5, 5, 6],
+            [7, 8, 7, 4, 4, 5, 5, 5, 5, 7],
+            [7, 6, 6, 6, 7, 5, 5, 5, 6, 9],
+            [7, 6, 7, 7, 7, 6, 3, 5, 8, 9],
+            [6, 5, 6, 6, 7, 8, 8, 8, 8, 8],
+            [5, 5, 5, 3, 3, 3, 2, 7, 5, 5],
+            [6, 5, 6, 3, 3, 3, 4, 5, 5, 5],
+            [6, 6, 6, 3, 2, 2, 2, 4, 4, 5],
+            [6, 6, 7, 7, 4, 4, 4, 7, 7, 8],
+        ]
+    )
+    assert result.unit == "adu"
+    assert all(
+        getattr(result, attr) is None
+        for attr in ["mask", "uncertainty", "wcs", "flags"]
+    )
     # The following test could be deleted if log_to_metadata is also applied.
     assert not result.meta
 
 
 def test_medianfilter_unusued():
-    ccd = CCDData(np.ones((3, 3)), unit='adu',
-                  mask=np.ones((3, 3)),
-                  uncertainty=StdDevUncertainty(np.ones((3, 3))),
-                  flags=np.ones((3, 3)))
+    ccd = CCDData(
+        np.ones((3, 3)),
+        unit="adu",
+        mask=np.ones((3, 3)),
+        uncertainty=StdDevUncertainty(np.ones((3, 3))),
+        flags=np.ones((3, 3)),
+    )
     result = core.median_filter(ccd, 3)
     assert isinstance(result, CCDData)
-    assert result.unit == 'adu'
-    assert all(getattr(result, attr) is None
-               for attr in ['mask', 'uncertainty', 'wcs', 'flags'])
+    assert result.unit == "adu"
+    assert all(
+        getattr(result, attr) is None
+        for attr in ["mask", "uncertainty", "wcs", "flags"]
+    )
    # The following test could be deleted if log_to_metadata is also applied.
     assert not result.meta
diff --git a/ccdproc/utils/sample_directory.py b/ccdproc/utils/sample_directory.py
index b5e5fc2e..c8219f53 100644
--- a/ccdproc/utils/sample_directory.py
+++ b/ccdproc/utils/sample_directory.py
@@ -7,7 +7,7 @@
 from astropy.io import fits
 
 
-def _make_file_for_testing(file_name='', **kwd):
+def _make_file_for_testing(file_name="", **kwd):
     img = np.uint16(np.arange(100))
 
     hdu = fits.PrimaryHDU(img)
@@ -52,11 +52,11 @@ def directory_for_testing():
           no_filter_no_object_light.fit <---- this one has no filter
     """
     n_test = {
-        'files': 6,
-        'missing_filter_value': 1,
-        'bias': 1,
-        'compressed': 2,
-        'light': 5
+        "files": 6,
+        "missing_filter_value": 1,
+        "bias": 1,
+        "compressed": 2,
+        "light": 5,
     }
 
     test_dir = mkdtemp()
@@ -65,34 +65,34 @@ def directory_for_testing():
     original_dir = os.getcwd()
     os.chdir(test_dir)
 
-    _make_file_for_testing(file_name='no_filter_no_object_bias.fit',
-                           imagetyp='BIAS',
-                           EXPOSURE=0.0)
+    _make_file_for_testing(
+        file_name="no_filter_no_object_bias.fit", imagetyp="BIAS", EXPOSURE=0.0
+    )
 
-    _make_file_for_testing(file_name='no_filter_no_object_light.fit',
-                           imagetyp='LIGHT',
-                           EXPOSURE=1.0)
+    _make_file_for_testing(
+        file_name="no_filter_no_object_light.fit", imagetyp="LIGHT", EXPOSURE=1.0
+    )
 
-    _make_file_for_testing(file_name='filter_no_object_light.fit',
-                           imagetyp='LIGHT',
-                           EXPOSURE=1.0,
-                           filter='R')
+    _make_file_for_testing(
+        file_name="filter_no_object_light.fit",
+        imagetyp="LIGHT",
+        EXPOSURE=1.0,
+        filter="R",
+    )
 
-    _make_file_for_testing(file_name='filter_object_light.fit',
-                           imagetyp='LIGHT',
-                           EXPOSURE=1.0,
-                           filter='R')
+    _make_file_for_testing(
+        file_name="filter_object_light.fit", imagetyp="LIGHT", EXPOSURE=1.0, filter="R"
+    )
 
-    with open('filter_object_light.fit', 'rb') as f_in:
-        with gzip.open('filter_object_light.fit.gz', 'wb') as f_out:
+    with open("filter_object_light.fit", "rb") as f_in:
+        with gzip.open("filter_object_light.fit.gz", "wb") as f_out:
             f_out.write(f_in.read())
 
     # filter_object.writeto('filter_object_RA_keyword_light.fit')
-    _make_file_for_testing(file_name='test.fits.fz',
-                           imagetyp='LIGHT',
-                           EXPOSURE=15.0,
-                           filter='R')
+    _make_file_for_testing(
+        file_name="test.fits.fz", imagetyp="LIGHT", EXPOSURE=15.0, filter="R"
+    )
 
     os.chdir(original_dir)
 
diff --git a/ccdproc/utils/slices.py b/ccdproc/utils/slices.py
index 5d9eee29..5e378a89 100644
--- a/ccdproc/utils/slices.py
+++ b/ccdproc/utils/slices.py
@@ -59,28 +59,27 @@ def slice_from_string(string, fits_convention=False):
     array([[0, 1, 2],
            [5, 6, 7]])
     """
-    no_space = string.replace(' ', '')
+    no_space = string.replace(" ", "")
 
     if not no_space:
         return ()
 
-    if not (no_space.startswith('[') and no_space.endswith(']')):
-        raise ValueError('Slice string must be enclosed in square brackets.')
+    if not (no_space.startswith("[") and no_space.endswith("]")):
+        raise ValueError("Slice string must be enclosed in square brackets.")
 
-    no_space = no_space.strip('[]')
+    no_space = no_space.strip("[]")
 
     if fits_convention:
         # Special cases first
         # Flip dimension, with step
-        no_space = no_space.replace('-*:', '::-')
+        no_space = no_space.replace("-*:", "::-")
         # Flip dimension
-        no_space = no_space.replace('-*', '::-1')
+        no_space = no_space.replace("-*", "::-1")
         # Normal wildcard
-        no_space = no_space.replace('*', ':')
-    string_slices = no_space.split(',')
+        no_space = no_space.replace("*", ":")
+    string_slices = no_space.split(",")
     slices = []
     for string_slice in string_slices:
-        slice_args = [int(arg) if arg else None
-                      for arg in string_slice.split(':')]
+        slice_args = [int(arg) if arg else None for arg in string_slice.split(":")]
         a_slice = slice(*slice_args)
         slices.append(a_slice)
@@ -114,12 +113,15 @@ def _defitsify_slice(slices):
         if a_slice.stop is not None and a_slice.stop < 0:
             raise ValueError("Negative final index not allowed for FITS slice")
         new_slice = slice(new_start, a_slice.stop, a_slice.step)
-        if (a_slice.start is not None and a_slice.stop is not None and
-                a_slice.start > a_slice.stop):
+        if (
+            a_slice.start is not None
+            and a_slice.stop is not None
+            and a_slice.start > a_slice.stop
+        ):
             # FITS uses a positive step index when dimensions are inverted
             new_step = -1 if a_slice.step is None else -a_slice.step
             # Special case to prevent -1 as slice stop value
-            new_stop = None if a_slice.stop == 1 else a_slice.stop-2
+            new_stop = None if a_slice.stop == 1 else a_slice.stop - 2
             new_slice = slice(new_start, new_stop, new_step)
 
         python_slice.append(new_slice)
diff --git a/ccdproc/utils/tests/test_slices.py b/ccdproc/utils/tests/test_slices.py
index f96df679..e60ae5f6 100644
--- a/ccdproc/utils/tests/test_slices.py
+++ b/ccdproc/utils/tests/test_slices.py
@@ -8,45 +8,44 @@
 
 
 # none of these are properly enclosed in brackets; is an error raised?
-@pytest.mark.parametrize('arg',
-                         ['1:2', '[1:2', '1:2]'])
+@pytest.mark.parametrize("arg", ["1:2", "[1:2", "1:2]"])
 def test_slice_from_string_needs_enclosing_brackets(arg):
     with pytest.raises(ValueError):
         slice_from_string(arg)
 
 
-@pytest.mark.parametrize('start,stop,step', [
-    (None, None, -1),
-    (5, 10, None),
-    (None, 25, None),
-    (2, 30, 3),
-    (30, None, -2),
-    (None, None, None)
-    ])
+@pytest.mark.parametrize(
+    "start,stop,step",
+    [
+        (None, None, -1),
+        (5, 10, None),
+        (None, 25, None),
+        (2, 30, 3),
+        (30, None, -2),
+        (None, None, None),
+    ],
+)
 def test_slice_from_string_1d(start, stop, step):
     an_array = np.zeros([100])
-    stringify = lambda n: str(n) if n else ''
+    stringify = lambda n: str(n) if n else ""
     start_str = stringify(start)
     stop_str = stringify(stop)
     step_str = stringify(step)
     if step_str:
-        slice_str = ':'.join([start_str, stop_str, step_str])
+        slice_str = ":".join([start_str, stop_str, step_str])
     else:
-        slice_str = ':'.join([start_str, stop_str])
-    sli = slice_from_string('[' + slice_str + ']')
+        slice_str = ":".join([start_str, stop_str])
+    sli = slice_from_string("[" + slice_str + "]")
     expected = an_array[slice(start, stop, step)]
-    np.testing.assert_array_equal(expected,
-                                  an_array[sli])
+    np.testing.assert_array_equal(expected, an_array[sli])
 
 
-@pytest.mark.parametrize('arg',
-                         [' [ 1: 45]', '[ 1 :4 5]', ' [1:45] '])
+@pytest.mark.parametrize("arg", [" [ 1: 45]", "[ 1 :4 5]", " [1:45] "])
 def test_slice_from_string_spaces(arg):
     an_array = np.zeros([100])
-    np.testing.assert_array_equal(an_array[1:45],
-                                  an_array[slice_from_string(arg)])
+    np.testing.assert_array_equal(an_array[1:45], an_array[slice_from_string(arg)])
 
 
 def test_slice_from_string_2d():
     an_array = np.zeros([100])
@@ -54,42 +53,36 @@ def test_slice_from_string_2d():
     # manually writing a few cases here rather than parametrizing because the
     # latter seems not worth the trouble.
- sli = slice_from_string('[:-1:2, :]') - np.testing.assert_array_equal(an_array[:-1:2, :], - an_array[sli]) + sli = slice_from_string("[:-1:2, :]") + np.testing.assert_array_equal(an_array[:-1:2, :], an_array[sli]) - sli = slice_from_string('[:, 15:90]') - np.testing.assert_array_equal(an_array[:, 15:90], - an_array[sli]) + sli = slice_from_string("[:, 15:90]") + np.testing.assert_array_equal(an_array[:, 15:90], an_array[sli]) - sli = slice_from_string('[10:80:5, 15:90:-1]') - np.testing.assert_array_equal(an_array[10:80:5, 15:90:-1], - an_array[sli]) + sli = slice_from_string("[10:80:5, 15:90:-1]") + np.testing.assert_array_equal(an_array[10:80:5, 15:90:-1], an_array[sli]) def test_slice_from_string_fits_style(): - sli = slice_from_string('[1:5, :]', fits_convention=True) + sli = slice_from_string("[1:5, :]", fits_convention=True) # order is reversed, so is the *first* slice one that includes everything? - assert (sli[0].start is None and - sli[0].stop is None and - sli[0].step is None) + assert sli[0].start is None and sli[0].stop is None and sli[0].step is None # In the second slice, has the first index been reduced by 1 and the # second index left unchanged? - assert (sli[1].start == 0 and - sli[1].stop == 5) - sli = slice_from_string('[1:10:2, 4:5:2]', fits_convention=True) + assert sli[1].start == 0 and sli[1].stop == 5 + sli = slice_from_string("[1:10:2, 4:5:2]", fits_convention=True) assert sli[0] == slice(3, 5, 2) assert sli[1] == slice(0, 10, 2) def test_slice_from_string_fits_inverted(): - sli = slice_from_string('[20:10:2, 10:5, 5:4]', fits_convention=True) + sli = slice_from_string("[20:10:2, 10:5, 5:4]", fits_convention=True) assert sli[0] == slice(4, 2, -1) assert sli[1] == slice(9, 3, -1) assert sli[2] == slice(19, 8, -2) # Handle a bunch of special cases for inverted slices, when the # stop index is 1 or 2 - sli = slice_from_string('[20:1:4, 21:1:4, 22:2:4, 2:1]', fits_convention=True) + sli = slice_from_string("[20:1:4, 21:1:4, 22:2:4, 2:1]", fits_convention=True) assert sli[0] == slice(1, None, -1) assert sli[1] == slice(21, 0, -4) assert sli[2] == slice(20, None, -4) @@ -97,25 +90,25 @@ def test_slice_from_string_fits_inverted(): def test_slice_from_string_empty(): - assert len(slice_from_string('')) == 0 + assert len(slice_from_string("")) == 0 def test_slice_from_string_bad_fits_slice(): with pytest.raises(ValueError): # Do I error because 0 is an illegal lower bound? - slice_from_string('[0:10, 1:5]', fits_convention=True) + slice_from_string("[0:10, 1:5]", fits_convention=True) with pytest.raises(ValueError): # Same as above, but switched order - slice_from_string('[1:5, 0:10]', fits_convention=True) + slice_from_string("[1:5, 0:10]", fits_convention=True) with pytest.raises(ValueError): # Do I error if an ending index is negative? 
- slice_from_string('[1:10, 10:-1]', fits_convention=True) + slice_from_string("[1:10, 10:-1]", fits_convention=True) def test_slice_from_string_fits_wildcard(): - sli = slice_from_string('[*,-*]', fits_convention=True) + sli = slice_from_string("[*,-*]", fits_convention=True) assert sli[0] == slice(None, None, -1) assert sli[1] == slice(None, None, None) - sli = slice_from_string('[*:2,-*:2]', fits_convention=True) + sli = slice_from_string("[*:2,-*:2]", fits_convention=True) assert sli[0] == slice(None, None, -2) assert sli[1] == slice(None, None, 2) From 66e744a62f950b23a08a99941f5cc0d2db296340 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sun, 11 Aug 2024 18:38:23 -0500 Subject: [PATCH 3/3] Ignore black commit in blame --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..b6212d93 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Ignore black formatting changes +f76a29df2b570e9f42bf2e14c15bbb9223bfa14f
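
A note on the new ignore file: git blame does not read .git-blame-ignore-revs
automatically (GitHub's blame view does). Assuming git 2.23 or later and a
checkout at the repository root, a typical way to enable it is:

    # One-time, per clone: have git blame skip the revisions listed in the file
    git config blame.ignoreRevsFile .git-blame-ignore-revs

    # Or opt in for a single run, e.g. when blaming a reformatted file
    git blame --ignore-revs-file .git-blame-ignore-revs ccdproc/combiner.py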