From c357f755015f441cd780953b94425abcfb02de5d Mon Sep 17 00:00:00 2001 From: Maria Date: Wed, 20 Dec 2023 08:31:32 -0500 Subject: [PATCH 1/3] JP-3248 MIRI Subarrays 390 Hz EMI bug fix 3 (#8151) --- CHANGES.rst | 5 ++++- jwst/emicorr/emicorr.py | 16 ++++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index f123d836fc..c038316425 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,10 @@ 1.13.2 (unreleased) =================== -- +emicorr +------- + +- Fix another bug with subarray=Full. [#8151] 1.13.1 (2023-12-19) =================== diff --git a/jwst/emicorr/emicorr.py b/jwst/emicorr/emicorr.py index 4ed62f78fc..34df58af97 100644 --- a/jwst/emicorr/emicorr.py +++ b/jwst/emicorr/emicorr.py @@ -245,6 +245,11 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile, # Initialize the output model as a copy of the input output_model = input_model.copy() + nints, ngroups, ny, nx = np.shape(output_model.data) + if nints_to_phase is None: + nints_to_phase = nints + elif nints_to_phase > nints: + nints_to_phase = nints # create the dictionary to store the frequencies and corresponding phase amplitudes if save_intermediate_results and save_onthefly_reffile is not None: @@ -260,9 +265,6 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile, # Read image data and set up some variables orig_data = output_model.data data = orig_data.copy() - nints, ngroups, ny, nx = np.shape(data) - if nints_to_phase is None: - nints_to_phase = nints # Correspondance of array order in IDL # sz[0] = 4 in idl @@ -618,10 +620,10 @@ def get_subarcase(subarray_cases, subarray, readpatt, detector): # search and return the specific values for the configuration if isinstance(subarray_cases, dict): for subname in subarray_cases: - if subarray not in subname: + if subarray == 'FULL': + subarray = subarray + '_' + readpatt + if subarray != subname: continue - if subname == 'FULL': - subname = subname + '_' + readpatt rowclocks = subarray_cases[subname]["rowclocks"] frameclocks = subarray_cases[subname]["frameclocks"] if readpatt == "SLOW": @@ -639,6 +641,8 @@ def get_subarcase(subarray_cases, subarray, readpatt, detector): log.debug('Found subarray case {}!'.format(subname)) for item, val in mdl_dict.items(): if subname in item: + if 'FULL' in item and readpatt not in item: + continue if "rowclocks" in item: rowclocks = val elif "frameclocks" in item: From 8ddb61c4eb83a3b9ced0480deffa33fed548482f Mon Sep 17 00:00:00 2001 From: Maria Date: Thu, 21 Dec 2023 13:28:24 -0500 Subject: [PATCH 2/3] reducing running time for EMICORR step (#8152) --- CHANGES.rst | 1 + jwst/emicorr/emicorr.py | 121 ++++++++++++++++++++-------------------- 2 files changed, 63 insertions(+), 59 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c038316425..c24cbe5d66 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -5,6 +5,7 @@ emicorr ------- - Fix another bug with subarray=Full. [#8151] +- Speeding up the code and fixing case of subarray not in ref file. 
[#8152] 1.13.1 (2023-12-19) =================== diff --git a/jwst/emicorr/emicorr.py b/jwst/emicorr/emicorr.py index 34df58af97..896e867506 100644 --- a/jwst/emicorr/emicorr.py +++ b/jwst/emicorr/emicorr.py @@ -72,6 +72,14 @@ "MIRIFULONG" : ["Hz390", "Hz10_slow_MIRIFULONG"], "MIRIFUSHORT" : ["Hz390", "Hz10_slow_MIRIFUSHORT"]}}}, + "MASK1065": { + "rowclocks": 82, + "frameclocks": 23968, + "freqs": {"FAST": ["Hz390", "Hz10"], + "SLOW": {"MIRIMAGE" : ["Hz390", "Hz10_slow_MIRIMAGE"], + "MIRIFULONG" : ["Hz390", "Hz10_slow_MIRIFULONG"], + "MIRIFUSHORT" : ["Hz390", "Hz10_slow_MIRIFUSHORT"]}}}, + # 390Hz already in-phase for these, but may need corr for other # frequencies (e.g. 10Hz heater noise) @@ -224,16 +232,18 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile, log.info('Using reference file to get subarray case.') subname, rowclocks, frameclocks, freqs2correct = get_subarcase(emicorr_model, subarray, readpatt, detector) reference_wave_list = [] - for fnme in freqs2correct: - freq, ref_wave = get_frequency_info(emicorr_model, fnme) - freqs_numbers.append(freq) - reference_wave_list.append(ref_wave) + if freqs2correct is not None: + for fnme in freqs2correct: + freq, ref_wave = get_frequency_info(emicorr_model, fnme) + freqs_numbers.append(freq) + reference_wave_list.append(ref_wave) else: log.info('Using default subarray case corrections.') subname, rowclocks, frameclocks, freqs2correct = get_subarcase(default_subarray_cases, subarray, readpatt, detector) - for fnme in freqs2correct: - freq = get_frequency_info(default_emi_freqs, fnme) - freqs_numbers.append(freq) + if freqs2correct is not None: + for fnme in freqs2correct: + freq = get_frequency_info(default_emi_freqs, fnme) + freqs_numbers.append(freq) log.info('With configuration: Subarray={}, Read_pattern={}, Detector={}'.format(subarray, readpatt, detector)) if rowclocks is None or len(freqs_numbers) == 0: @@ -275,7 +285,35 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile, nx4 = int(nx/4) dd_all = np.ones((nints, ngroups, ny, nx4)) - log.info('Subtracting self-superbias from each group of each integration') + log.info('Subtracting self-superbias from each group of each integration and') + + # Calculate times of all pixels in the input integration, then use that to calculate + # phase of all pixels. Times here is in integer numbers of 10us pixels starting from + # the first data pixel in the input image. Times can be a very large integer, so use + # a big datatype. Phaseall (below) is just 0-1.0. + + # A safer option is to calculate times_per_integration and calculate the phase at each + # int separately. That way times array will have a smaller number of elements at each + # int, with less risk of datatype overflow. Still, use the largest datatype available + # for the time_this_int array. + + times_this_int = np.zeros((ngroups, ny, nx4), dtype='ulonglong') + phaseall = np.zeros((nints, ngroups, ny, nx4)) + + # non-roi rowclocks between subarray frames (this will be 0 for fullframe) + extra_rowclocks = (1024. - ny) * (4 + 3.) + # e.g. ((1./390.625) / 10e-6) = 256.0 pix and ((1./218.52055) / 10e-6) = 457.62287 pix + period_in_pixels = (1./frequency) / 10.0e-6 + + start_time, ref_pix_sample = 0, 3 + + # Need colstop for phase calculation in case of last refpixel in a row. Technically, + # this number comes from the subarray definition (see subarray_cases dict above), but + # calculate it from the input image header here just in case the subarray definitions + # are not available to this routine. 
+ colstop = int( xsize/4 + xstart - 1 ) + log.info('doing phase calculation per integration') + for ninti in range(nints_to_phase): log.debug(' Working on integration: {}'.format(ninti+1)) @@ -310,34 +348,6 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile, # This is the quad-averaged, cleaned, input image data for the exposure dd_all[ninti, ngroupi, ...] = dd - np.median(dd) - # Calculate times of all pixels in the input integration, then use that to calculate - # phase of all pixels. Times here is in integer numbers of 10us pixels starting from - # the first data pixel in the input image. Times can be a very large integer, so use - # a big datatype. Phaseall (below) is just 0-1.0. - - # A safer option is to calculate times_per_integration and calculate the phase at each - # int separately. That way times array will have a smaller number of elements at each - # int, with less risk of datatype overflow. Still, use the largest datatype available - # for the time_this_int array. - - times_this_int = np.zeros((ngroups, ny, nx4), dtype='ulonglong') - phaseall = np.zeros((nints, ngroups, ny, nx4)) - - # non-roi rowclocks between subarray frames (this will be 0 for fullframe) - extra_rowclocks = (1024. - ny) * (4 + 3.) - # e.g. ((1./390.625) / 10e-6) = 256.0 pix and ((1./218.52055) / 10e-6) = 457.62287 pix - period_in_pixels = (1./frequency) / 10.0e-6 - - start_time, ref_pix_sample = 0, 3 - - # Need colstop for phase calculation in case of last refpixel in a row. Technically, - # this number comes from the subarray definition (see subarray_cases dict above), but - # calculate it from the input image header here just in case the subarray definitions - # are not available to this routine. - colstop = int( xsize/4 + xstart - 1 ) - log.info('Phase calculation per integration') - for l in range(nints_to_phase): - log.debug(' Working on integration: {}'.format(l+1)) for k in range(ngroups): # frames for j in range(ny): # rows # nsamples= 1 for fast, 9 for slow (from metadata) @@ -366,7 +376,7 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile, # number of 10us from the first data pixel in this integration, so to # convert to phase, divide by the waveform *period* in float pixels phase_this_int = times_this_int / period_in_pixels - phaseall[l, ...] = phase_this_int - phase_this_int.astype('ulonglong') + phaseall[ninti, ...] 
= phase_this_int - phase_this_int.astype('ulonglong') # add a frame time to account for the extra frame reset between MIRI integrations start_time += frameclocks @@ -436,8 +446,8 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile, # and optionally amplitude scaled) # shift and resample reference_wave at pa's phase # u[0] is the phase shift of reference_wave *to* pa - u = np.where(cc >= max(cc)) - lut_reference = rebin(np.roll(reference_wave, u[0]), [period_in_pixels]) + u = np.argmax(cc) + lut_reference = rebin(np.roll(reference_wave, u), [period_in_pixels]) # Scale reference wave amplitude to match the pa amplitude from this dataset by # fitting a line rather than taking a mean ratio so that any DC offset drops out @@ -554,23 +564,17 @@ def minmed(data, minval=False, avgval=False, maxval=False): ngroups, ny, nx = np.shape(data) medimg = np.zeros((ny, nx)) # use a mask to ignore nans for calculations - masked_data = np.ma.array(data, mask=np.isnan(data)) - - for i in range(nx): - for j in range(ny): - vec = masked_data[:, j, i] - u = np.where(vec != 0) - n = vec[u].size - if n > 0: - if n <= 2 or minval: - medimg[j, i] = np.ma.min(vec[u]) - if maxval: - medimg[j, i] = np.ma.max(vec[u]) - if not minval and not maxval and not avgval: - medimg[j, i] = np.ma.median(vec[u]) - if avgval: - dmean , _, _, _ = iter_stat_sig_clip(vec[u]) - medimg[j, i] = dmean + vec = np.ma.array(data, mask=np.isnan(data)) + n = vec.size + if n > 0: + if n <= 2 or minval: + medimg = np.ma.min(vec, axis=0) + if maxval: + medimg = np.ma.max(vec, axis=0) + if not minval and not maxval and not avgval: + medimg = np.ma.median(vec, axis=0) + if avgval: + medimg = np.ma.mean(vec, axis=0) return medimg @@ -734,9 +738,8 @@ def iter_stat_sig_clip(data, sigrej=3.0, maxiter=10): # Compute the mean + standard deviation of the entire data array, # these values will be returned if there are fewer than 2 good points. 
dmask = np.ones(ngood, dtype='b') + 1 - dmean = sum(data * dmask) / ngood + dmean = np.sum(data * dmask) / ngood dsigma = np.sqrt(sum((data - dmean)**2) / (ngood - 1)) - dsigma = dsigma iiter = 1 # Iteratively compute the mean + stdev, updating the sigma-rejection thresholds @@ -752,7 +755,7 @@ def iter_stat_sig_clip(data, sigrej=3.0, maxiter=10): ngood = sum(dmask) if ngood >= 2: - dmean = sum(data*dmask) / ngood + dmean = np.sum(data*dmask) / ngood dsigma = np.sqrt( sum((data - dmean)**2 * dmask) / (ngood - 1) ) dsigma = dsigma From 009c8f78ac7770934597fd6660aaf74c75ffab67 Mon Sep 17 00:00:00 2001 From: Zach Burnett Date: Thu, 21 Dec 2023 13:31:18 -0500 Subject: [PATCH 3/3] update metadata for `1.13.2` (#8155) --- CHANGES.rst | 7 +++- CITATION.cff | 4 +- README.md | 1 + requirements-sdp.txt | 95 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 104 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c24cbe5d66..f28fb06a79 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,9 @@ -1.13.2 (unreleased) +1.13.3 (unreleased) +=================== + +- + +1.13.2 (2023-12-21) =================== emicorr diff --git a/CITATION.cff b/CITATION.cff index caeba32db4..72e7aca8b4 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -75,7 +75,7 @@ authors: given-names: "Maria" orcid: "https://orcid.org/0000-0003-2314-3453" title: "JWST Calibration Pipeline" -version: 1.13.1 +version: 1.13.2 doi: 10.5281/zenodo.7038885 -date-released: 2023-12-19 +date-released: 2023-12-21 url: "https://github.com/spacetelescope/jwst" diff --git a/README.md b/README.md index 9ff0642ab3..9bf73aff07 100644 --- a/README.md +++ b/README.md @@ -210,6 +210,7 @@ the specified context and less than the context for the next release. | jwst tag | DMS build | SDP_VER | CRDS_CONTEXT | Released | Ops Install | Notes | |---------------------|-----------|----------|--------------|------------|-------------|-----------------------------------------------| +| 1.13.2 | B10.1rc3 | 2023.4.0 | 1181 | 2023-12-21 | | Third release candidate for B10.1 | | 1.13.1 | B10.1rc2 | 2023.4.0 | 1181 | 2023-12-19 | | Second release candidate for B10.1 | | 1.13.0 | B10.1rc1 | 2023.4.0 | 1179 | 2023-12-15 | | First release candidate for B10.1 | | 1.12.5 | B10.0.1 | 2023.3.1 | 1166 | 2023-10-19 | 2023-12-05 | Patch release B10.0.1 | diff --git a/requirements-sdp.txt b/requirements-sdp.txt index a110a4b011..924db0b8ae 100644 --- a/requirements-sdp.txt +++ b/requirements-sdp.txt @@ -14,3 +14,98 @@ # conda activate sdp # pip install -e .[test,sdp] # pip freeze | grep -v jwst >> requirements-sdp.txt +alabaster==0.7.13 +asdf==3.0.1 +asdf-astropy==0.5.0 +asdf-coordinates-schemas==0.2.0 +asdf-standard==1.0.3 +asdf-transform-schemas==0.4.0 +asdf-unit-schemas==0.1.0 +asdf-wcs-schemas==0.3.0 +astropy==6.0.0 +astropy-iers-data==0.2023.12.18.0.30.18 +attrs==23.1.0 +Babel==2.14.0 +BayesicFitting==3.2.0 +certifi==2023.11.17 +charset-normalizer==3.3.2 +ci-watson==0.6.2 +colorama==0.4.6 +contourpy==1.2.0 +coverage==7.3.4 +crds==11.17.14 +cycler==0.12.1 +docutils==0.20.1 +drizzle==1.14.4 +et-xmlfile==1.1.0 +filelock==3.13.1 +fonttools==4.47.0 +future==0.18.3 +gwcs==0.20.0 +idna==3.6 +imageio==2.33.1 +imagesize==1.4.1 +importlib-metadata==7.0.0 +iniconfig==2.0.0 +Jinja2==3.1.2 +jmespath==1.0.1 +jplephem==2.21 +jsonschema==4.20.0 +jsonschema-specifications==2023.11.2 +kiwisolver==1.4.5 +lazy_loader==0.3 +lxml==4.9.4 +MarkupSafe==2.1.3 +matplotlib==3.8.2 +networkx==3.2.1 +numpy==1.26.2 +numpydoc==1.6.0 +opencv-python-headless==4.8.1.78 
+openpyxl==3.1.2 +packaging==23.2 +Parsley==1.3 +photutils==1.10.0 +Pillow==10.1.0 +pluggy==1.3.0 +poppy==1.1.1 +psutil==5.9.7 +pyerfa==2.0.1.1 +Pygments==2.17.2 +pyparsing==3.1.1 +pysiaf==0.21.0 +pytest==7.4.3 +pytest-cov==4.1.0 +pytest-doctestplus==1.1.0 +python-dateutil==2.8.2 +PyYAML==6.0.1 +readchar==4.0.5 +referencing==0.32.0 +requests==2.31.0 +requests-mock==1.11.0 +rpds-py==0.15.2 +ruff==0.1.8 +scikit-image==0.22.0 +scipy==1.11.4 +semantic-version==2.10.0 +six==1.16.0 +snowballstemmer==2.2.0 +spherical-geometry==1.3.1 +Sphinx==7.2.6 +sphinxcontrib-applehelp==1.0.7 +sphinxcontrib-devhelp==1.0.5 +sphinxcontrib-htmlhelp==2.0.4 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.6 +sphinxcontrib-serializinghtml==1.1.9 +stcal==1.5.2 +stdatamodels==1.9.0 +stpipe==0.5.1 +stsci.image==2.3.5 +stsci.imagestats==1.8.0 +stsci.stimage==0.2.6 +tabulate==0.9.0 +tifffile==2023.12.9 +tweakwcs==0.8.5 +urllib3==2.1.0 +wiimatch==0.3.2 +zipp==3.17.0
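A note on the emicorr changes in patch 2/3: the phase calculation hoisted out of the integration loop works in units of 10 us detector pixels, converting the EMI waveform period to pixels and reducing each pixel time to its fractional phase. A minimal standalone sketch of that arithmetic, assuming only NumPy (the function name and sample times below are illustrative, not taken from the patch):

    import numpy as np

    def pixel_phase(times_in_pixels, frequency):
        # One waveform period expressed in 10 us pixel units,
        # e.g. (1 / 390.625 Hz) / 10e-6 s = 256.0 pixels.
        period_in_pixels = (1.0 / frequency) / 10.0e-6

        # Divide the (large, unsigned-integer) pixel times by the period and
        # keep only the fractional part, as the patch does with
        # phase_this_int - phase_this_int.astype('ulonglong').
        phase = times_in_pixels / period_in_pixels
        return phase - phase.astype('ulonglong')

    # The 390.625 Hz wave repeats every 256 pixels, so a pixel time of 384
    # lands halfway through a period.
    times = np.array([0, 128, 256, 384], dtype='ulonglong')
    print(pixel_phase(times, 390.625))   # -> 0.0, 0.5, 0.0, 0.5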
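Patch 2/3 also replaces the cross-correlation peak lookup np.where(cc >= max(cc)) with np.argmax(cc), so the reference waveform is rolled by a plain integer index before being resampled onto the waveform period. A small sketch of that shift with made-up values (the rebin helper from emicorr.py is omitted here):

    import numpy as np

    cc = np.array([0.1, 0.9, 0.4, 0.2])          # illustrative cross-correlation values
    reference_wave = np.arange(8, dtype=float)   # illustrative reference waveform

    u = np.argmax(cc)                  # 1 -- a single integer, not an index array
    shifted = np.roll(reference_wave, u)
    print(u, shifted)                  # 1 [7. 0. 1. 2. 3. 4. 5. 6.]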
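The rewritten minmed() in the same patch drops the per-pixel Python loops in favour of masked-array reductions along the group axis. The idea, reduced to a sketch (this version early-returns instead of overwriting medimg in sequence, so it is a simplification of the patched function, not a drop-in replacement):

    import numpy as np

    def minmed_sketch(data, minval=False, avgval=False, maxval=False):
        # data has shape (ngroups, ny, nx); NaNs are ignored through the mask
        vec = np.ma.array(data, mask=np.isnan(data))
        if minval or data.shape[0] <= 2:
            return np.ma.min(vec, axis=0)
        if maxval:
            return np.ma.max(vec, axis=0)
        if avgval:
            return np.ma.mean(vec, axis=0)
        return np.ma.median(vec, axis=0)

    # A 3-group cube with one NaN sample; the NaN is excluded from the median.
    cube = np.array([[[1.0, np.nan]], [[2.0, 4.0]], [[3.0, 6.0]]])
    print(minmed_sketch(cube))         # [[2.0 5.0]]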
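Finally, iter_stat_sig_clip() now calls np.sum() instead of Python's built-in sum() when computing the clipped mean. Both give the same value on a 1-D array; the difference is that np.sum reduces in compiled code rather than iterating element by element in Python, which matters on long pixel vectors. A quick check with made-up data:

    import numpy as np

    data = np.random.default_rng(0).normal(size=100_000)
    dmask = np.ones(data.size)                  # all points initially "good"
    ngood = int(np.sum(dmask))

    dmean_fast = np.sum(data * dmask) / ngood   # vectorized reduction
    dmean_slow = sum(data * dmask) / ngood      # same value, Python-level loop
    assert np.isclose(dmean_fast, dmean_slow)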