
Commit

Merge remote-tracking branch 'upstream/master' into release/1.13.x
zacharyburnett committed Dec 21, 2023
2 parents 9565dd5 + 009c8f7 commit 4ca56b5
Showing 5 changed files with 87 additions and 70 deletions.
11 changes: 10 additions & 1 deletion CHANGES.rst
@@ -1,8 +1,17 @@
1.13.2 (unreleased)
1.13.3 (unreleased)
===================

-

1.13.2 (2023-12-21)
===================

emicorr
-------

- Fix another bug with subarray=Full. [#8151]
- Speeding up the code and fixing case of subarray not in ref file. [#8152]

1.13.1 (2023-12-19)
===================

4 changes: 2 additions & 2 deletions CITATION.cff
@@ -75,7 +75,7 @@ authors:
given-names: "Maria"
orcid: "https://orcid.org/0000-0003-2314-3453"
title: "JWST Calibration Pipeline"
version: 1.13.1
version: 1.13.2
doi: 10.5281/zenodo.7038885
date-released: 2023-12-19
date-released: 2023-12-21
url: "https://github.com/spacetelescope/jwst"
1 change: 1 addition & 0 deletions README.md
@@ -210,6 +210,7 @@ the specified context and less than the context for the next release.

| jwst tag | DMS build | SDP_VER | CRDS_CONTEXT | Released | Ops Install | Notes |
|---------------------|-----------|----------|--------------|------------|-------------|-----------------------------------------------|
| 1.13.2 | B10.1rc3 | 2023.4.0 | 1181 | 2023-12-21 | | Third release candidate for B10.1 |
| 1.13.1 | B10.1rc2 | 2023.4.0 | 1181 | 2023-12-19 | | Second release candidate for B10.1 |
| 1.13.0 | B10.1rc1 | 2023.4.0 | 1179 | 2023-12-15 | | First release candidate for B10.1 |
| 1.12.5 | B10.0.1 | 2023.3.1 | 1166 | 2023-10-19 | 2023-12-05 | Patch release B10.0.1 |
137 changes: 72 additions & 65 deletions jwst/emicorr/emicorr.py
@@ -72,6 +72,14 @@
"MIRIFULONG" : ["Hz390", "Hz10_slow_MIRIFULONG"],
"MIRIFUSHORT" : ["Hz390", "Hz10_slow_MIRIFUSHORT"]}}},

"MASK1065": {
"rowclocks": 82,
"frameclocks": 23968,
"freqs": {"FAST": ["Hz390", "Hz10"],
"SLOW": {"MIRIMAGE" : ["Hz390", "Hz10_slow_MIRIMAGE"],
"MIRIFULONG" : ["Hz390", "Hz10_slow_MIRIFULONG"],
"MIRIFUSHORT" : ["Hz390", "Hz10_slow_MIRIFUSHORT"]}}},

# 390Hz already in-phase for these, but may need corr for other
# frequencies (e.g. 10Hz heater noise)

@@ -224,16 +232,18 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile,
log.info('Using reference file to get subarray case.')
subname, rowclocks, frameclocks, freqs2correct = get_subarcase(emicorr_model, subarray, readpatt, detector)
reference_wave_list = []
for fnme in freqs2correct:
freq, ref_wave = get_frequency_info(emicorr_model, fnme)
freqs_numbers.append(freq)
reference_wave_list.append(ref_wave)
if freqs2correct is not None:
for fnme in freqs2correct:
freq, ref_wave = get_frequency_info(emicorr_model, fnme)
freqs_numbers.append(freq)
reference_wave_list.append(ref_wave)
else:
log.info('Using default subarray case corrections.')
subname, rowclocks, frameclocks, freqs2correct = get_subarcase(default_subarray_cases, subarray, readpatt, detector)
for fnme in freqs2correct:
freq = get_frequency_info(default_emi_freqs, fnme)
freqs_numbers.append(freq)
if freqs2correct is not None:
for fnme in freqs2correct:
freq = get_frequency_info(default_emi_freqs, fnme)
freqs_numbers.append(freq)

log.info('With configuration: Subarray={}, Read_pattern={}, Detector={}'.format(subarray, readpatt, detector))
if rowclocks is None or len(freqs_numbers) == 0:
@@ -245,6 +255,11 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile,

# Initialize the output model as a copy of the input
output_model = input_model.copy()
nints, ngroups, ny, nx = np.shape(output_model.data)
if nints_to_phase is None:
nints_to_phase = nints
elif nints_to_phase > nints:
nints_to_phase = nints

# create the dictionary to store the frequencies and corresponding phase amplitudes
if save_intermediate_results and save_onthefly_reffile is not None:
@@ -260,9 +275,6 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile,
# Read image data and set up some variables
orig_data = output_model.data
data = orig_data.copy()
nints, ngroups, ny, nx = np.shape(data)
if nints_to_phase is None:
nints_to_phase = nints

# Correspondance of array order in IDL
# sz[0] = 4 in idl
@@ -273,7 +285,35 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile,
nx4 = int(nx/4)

dd_all = np.ones((nints, ngroups, ny, nx4))
log.info('Subtracting self-superbias from each group of each integration')
log.info('Subtracting self-superbias from each group of each integration and')

# Calculate times of all pixels in the input integration, then use that to calculate
# phase of all pixels. Times here is in integer numbers of 10us pixels starting from
# the first data pixel in the input image. Times can be a very large integer, so use
# a big datatype. Phaseall (below) is just 0-1.0.

# A safer option is to calculate times_per_integration and calculate the phase at each
# int separately. That way times array will have a smaller number of elements at each
# int, with less risk of datatype overflow. Still, use the largest datatype available
# for the time_this_int array.

times_this_int = np.zeros((ngroups, ny, nx4), dtype='ulonglong')
phaseall = np.zeros((nints, ngroups, ny, nx4))

# non-roi rowclocks between subarray frames (this will be 0 for fullframe)
extra_rowclocks = (1024. - ny) * (4 + 3.)
# e.g. ((1./390.625) / 10e-6) = 256.0 pix and ((1./218.52055) / 10e-6) = 457.62287 pix
period_in_pixels = (1./frequency) / 10.0e-6

start_time, ref_pix_sample = 0, 3

# Need colstop for phase calculation in case of last refpixel in a row. Technically,
# this number comes from the subarray definition (see subarray_cases dict above), but
# calculate it from the input image header here just in case the subarray definitions
# are not available to this routine.
colstop = int( xsize/4 + xstart - 1 )
log.info('doing phase calculation per integration')

for ninti in range(nints_to_phase):
log.debug(' Working on integration: {}'.format(ninti+1))

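For context on the phase bookkeeping above: the EMI period is converted from Hz into a count of 10 µs pixel times, and the phase of each sample is the fractional part of its pixel-time count divided by that period. A minimal standalone sketch of that arithmetic, assuming the 390.625 Hz ("Hz390") value quoted in the comments (illustration only, not the pipeline code):

```python
import numpy as np

frequency = 390.625                                # Hz, the "Hz390" EMI line
period_in_pixels = (1.0 / frequency) / 10.0e-6     # period in 10 us pixel times -> 256.0

# Times are integer counts of 10 us pixels since the first data pixel; a wide
# unsigned integer type avoids overflow for long exposures.
times = np.array([0, 100, 256, 1000], dtype='ulonglong')

# Phase is the fractional part of time / period, always in [0, 1).
phase = times / period_in_pixels
phase = phase - phase.astype('ulonglong')

print(period_in_pixels)   # 256.0
print(phase)              # [0.  0.390625  0.  0.90625]
```
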
@@ -308,34 +348,6 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile,
# This is the quad-averaged, cleaned, input image data for the exposure
dd_all[ninti, ngroupi, ...] = dd - np.median(dd)

# Calculate times of all pixels in the input integration, then use that to calculate
# phase of all pixels. Times here is in integer numbers of 10us pixels starting from
# the first data pixel in the input image. Times can be a very large integer, so use
# a big datatype. Phaseall (below) is just 0-1.0.

# A safer option is to calculate times_per_integration and calculate the phase at each
# int separately. That way times array will have a smaller number of elements at each
# int, with less risk of datatype overflow. Still, use the largest datatype available
# for the time_this_int array.

times_this_int = np.zeros((ngroups, ny, nx4), dtype='ulonglong')
phaseall = np.zeros((nints, ngroups, ny, nx4))

# non-roi rowclocks between subarray frames (this will be 0 for fullframe)
extra_rowclocks = (1024. - ny) * (4 + 3.)
# e.g. ((1./390.625) / 10e-6) = 256.0 pix and ((1./218.52055) / 10e-6) = 457.62287 pix
period_in_pixels = (1./frequency) / 10.0e-6

start_time, ref_pix_sample = 0, 3

# Need colstop for phase calculation in case of last refpixel in a row. Technically,
# this number comes from the subarray definition (see subarray_cases dict above), but
# calculate it from the input image header here just in case the subarray definitions
# are not available to this routine.
colstop = int( xsize/4 + xstart - 1 )
log.info('Phase calculation per integration')
for l in range(nints_to_phase):
log.debug(' Working on integration: {}'.format(l+1))
for k in range(ngroups): # frames
for j in range(ny): # rows
# nsamples= 1 for fast, 9 for slow (from metadata)
@@ -364,7 +376,7 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile,
# number of 10us from the first data pixel in this integration, so to
# convert to phase, divide by the waveform *period* in float pixels
phase_this_int = times_this_int / period_in_pixels
phaseall[l, ...] = phase_this_int - phase_this_int.astype('ulonglong')
phaseall[ninti, ...] = phase_this_int - phase_this_int.astype('ulonglong')

# add a frame time to account for the extra frame reset between MIRI integrations
start_time += frameclocks
@@ -434,8 +446,8 @@ def apply_emicorr(input_model, emicorr_model, save_onthefly_reffile,
# and optionally amplitude scaled)
# shift and resample reference_wave at pa's phase
# u[0] is the phase shift of reference_wave *to* pa
u = np.where(cc >= max(cc))
lut_reference = rebin(np.roll(reference_wave, u[0]), [period_in_pixels])
u = np.argmax(cc)
lut_reference = rebin(np.roll(reference_wave, u), [period_in_pixels])

# Scale reference wave amplitude to match the pa amplitude from this dataset by
# fitting a line rather than taking a mean ratio so that any DC offset drops out
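The np.where(cc >= max(cc)) to np.argmax(cc) change above swaps a tuple-of-index-arrays result for a plain integer index ahead of the np.roll shift. A toy comparison (standalone illustration; cc, reference_wave, and rebin are the pipeline's own objects and are not reproduced here):

```python
import numpy as np

cc = np.array([0.1, 0.7, 0.3, 0.2])     # toy cross-correlation values
reference_wave = np.arange(8)           # toy reference waveform

u_old = np.where(cc >= max(cc))         # tuple of index arrays: (array([1]),)
u_new = np.argmax(cc)                   # plain integer: 1

# np.roll shifts the reference wave by that amount to line up its phase.
print(np.roll(reference_wave, u_new))   # [7 0 1 2 3 4 5 6]
```
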
@@ -552,23 +564,17 @@ def minmed(data, minval=False, avgval=False, maxval=False):
ngroups, ny, nx = np.shape(data)
medimg = np.zeros((ny, nx))
# use a mask to ignore nans for calculations
masked_data = np.ma.array(data, mask=np.isnan(data))

for i in range(nx):
for j in range(ny):
vec = masked_data[:, j, i]
u = np.where(vec != 0)
n = vec[u].size
if n > 0:
if n <= 2 or minval:
medimg[j, i] = np.ma.min(vec[u])
if maxval:
medimg[j, i] = np.ma.max(vec[u])
if not minval and not maxval and not avgval:
medimg[j, i] = np.ma.median(vec[u])
if avgval:
dmean , _, _, _ = iter_stat_sig_clip(vec[u])
medimg[j, i] = dmean
vec = np.ma.array(data, mask=np.isnan(data))
n = vec.size
if n > 0:
if n <= 2 or minval:
medimg = np.ma.min(vec, axis=0)
if maxval:
medimg = np.ma.max(vec, axis=0)
if not minval and not maxval and not avgval:
medimg = np.ma.median(vec, axis=0)
if avgval:
medimg = np.ma.mean(vec, axis=0)
return medimg


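The rewritten minmed path above replaces per-pixel Python loops with masked-array reductions along the group axis. A self-contained sketch of the same idea (illustration only; it mirrors the NaN masking and the median/min branches, not the full set of minval/maxval/avgval options):

```python
import numpy as np

# Fake ramp: 5 groups of a 4x4 image, with a couple of NaNs sprinkled in.
rng = np.random.default_rng(0)
data = rng.normal(size=(5, 4, 4))
data[0, 0, 0] = np.nan
data[3, 2, 1] = np.nan

# Mask NaNs once, then reduce along the group axis in one vectorized call
# instead of looping over every (row, column) pixel.
masked = np.ma.array(data, mask=np.isnan(data))
medimg = np.ma.median(masked, axis=0)   # per-pixel median over groups, NaNs ignored
minimg = np.ma.min(masked, axis=0)      # per-pixel minimum, as used when minval=True

print(medimg.shape)                     # (4, 4)
```
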
@@ -618,10 +624,10 @@ def get_subarcase(subarray_cases, subarray, readpatt, detector):
# search and return the specific values for the configuration
if isinstance(subarray_cases, dict):
for subname in subarray_cases:
if subarray not in subname:
if subarray == 'FULL':
subarray = subarray + '_' + readpatt
if subarray != subname:
continue
if subname == 'FULL':
subname = subname + '_' + readpatt
rowclocks = subarray_cases[subname]["rowclocks"]
frameclocks = subarray_cases[subname]["frameclocks"]
if readpatt == "SLOW":
@@ -639,6 +645,8 @@
log.debug('Found subarray case {}!'.format(subname))
for item, val in mdl_dict.items():
if subname in item:
if 'FULL' in item and readpatt not in item:
continue
if "rowclocks" in item:
rowclocks = val
elif "frameclocks" in item:
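The matching above builds the dictionary key for full-frame data by appending the read pattern, so FULL data selects a FULL_FAST or FULL_SLOW entry instead of the first key that merely contains the substring. A minimal sketch of that lookup (the keys and clock values here are illustrative placeholders, not the real reference-file contents):

```python
# Hypothetical subset of the subarray-case dictionary; the real values live in
# the emicorr reference file or in default_subarray_cases.
subarray_cases = {
    "FULL_FAST": {"rowclocks": 271, "frameclocks": 277504},
    "FULL_SLOW": {"rowclocks": 2333, "frameclocks": 2388992},
    "MASK1550":  {"rowclocks": 82,  "frameclocks": 23968},
}

def pick_case(subarray, readpatt):
    # Full-frame entries are split by read pattern, so qualify the name first.
    if subarray == "FULL":
        subarray = subarray + "_" + readpatt
    for subname in subarray_cases:
        if subarray != subname:
            continue
        return subname, subarray_cases[subname]["rowclocks"]
    return None, None

print(pick_case("FULL", "SLOW"))      # ('FULL_SLOW', 2333)
print(pick_case("MASK1550", "FAST"))  # ('MASK1550', 82)
```
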
@@ -730,9 +738,8 @@ def iter_stat_sig_clip(data, sigrej=3.0, maxiter=10):
# Compute the mean + standard deviation of the entire data array,
# these values will be returned if there are fewer than 2 good points.
dmask = np.ones(ngood, dtype='b') + 1
dmean = sum(data * dmask) / ngood
dmean = np.sum(data * dmask) / ngood
dsigma = np.sqrt(sum((data - dmean)**2) / (ngood - 1))
dsigma = dsigma
iiter = 1

# Iteratively compute the mean + stdev, updating the sigma-rejection thresholds
@@ -748,7 +755,7 @@
ngood = sum(dmask)

if ngood >= 2:
dmean = sum(data*dmask) / ngood
dmean = np.sum(data*dmask) / ngood
dsigma = np.sqrt( sum((data - dmean)**2 * dmask) / (ngood - 1) )
dsigma = dsigma

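For reference on the sum to np.sum change above: Python's built-in sum iterates over the leading axis of an array element by element, while np.sum reduces every axis to a scalar by default and is much faster on large arrays. A tiny standalone comparison (not the pipeline code):

```python
import numpy as np

data = np.arange(6.0).reshape(2, 3)   # 2-D array, values 0..5
dmask = np.ones_like(data)

# Built-in sum walks axis 0 and returns a 1-D array here, so
# sum(data * dmask) / ngood is not a single mean value for 2-D input.
print(sum(data * dmask))              # [3. 5. 7.]

# np.sum collapses every axis by default and returns one scalar.
print(np.sum(data * dmask))           # 15.0
print(np.sum(data * dmask) / data.size)  # 2.5
```
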
4 changes: 2 additions & 2 deletions requirements-sdp.txt
@@ -32,7 +32,7 @@ charset-normalizer==3.3.2
ci-watson==0.6.2
colorama==0.4.6
contourpy==1.2.0
coverage==7.3.3
coverage==7.3.4
crds==11.17.14
cycler==0.12.1
docutils==0.20.1
@@ -54,7 +54,7 @@ jsonschema==4.20.0
jsonschema-specifications==2023.11.2
kiwisolver==1.4.5
lazy_loader==0.3
lxml==4.9.3
lxml==4.9.4
MarkupSafe==2.1.3
matplotlib==3.8.2
networkx==3.2.1
