Skip to content

Commit

Permalink
Improve AzimuthIntervals and other small fixes. (#756)
Browse files Browse the repository at this point in the history
- Modify the AzimuthIntervals operator to more robustly
  exclude false "throw" intervals.  Instead we first
  detect the stable pointing periods and then find the
  exact turnaround sample between those (and raise an
  exception if there is more than one).  This does mean
  that the beginning of the first throw and the end of
  the last throw are truncated to the stable scan boundary.

- In the mapmaker, keep the noise weighted map if
  `keep_final_products` is True.

- In the mapmaker, if the binner is configured to
  compute and save full detector pointing, use that
  option when initially computing the pixel distribution.
  This avoids computing the pointing twice.

- When scanning from a map in the template solver, if
  we already have full detector pointing, then run that
  over all detectors.

- Fix typo in mapmaker from recent refactor.
  • Loading branch information
tskisner authored May 10, 2024
1 parent 43649d9 commit f7dd931
Show file tree
Hide file tree
Showing 3 changed files with 75 additions and 39 deletions.
94 changes: 61 additions & 33 deletions src/toast/ops/azimuth_intervals.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,13 +149,6 @@ def _exec(self, data, detectors=None, **kwargs):
# Smooth with moving window
wscan_vel = uniform_filter1d(scan_vel, size=window, mode="nearest")

# When the velocity changes sign, we have a turnaround
vel_switch = np.where(wscan_vel[:-1] * wscan_vel[1:] < 0)[0] + 1
throw_times = [
(stamps[x[0]], stamps[x[1]])
for x in zip(vel_switch[:-1], vel_switch[1:])
]

# The peak to peak range of the scan velocity
vel_range = np.amax(wscan_vel) - np.amin(wscan_vel)

Expand All @@ -166,13 +159,17 @@ def _exec(self, data, detectors=None, **kwargs):
mode="nearest",
)

# Peak to peak acceleration range
accel_range = np.amax(scan_accel) - np.amin(scan_accel)

# When the acceleration is zero to some tolerance, we are
# scanning.
# scanning. However, we also need to only consider times where
# the velocity is non-zero.
stable = (np.absolute(scan_accel) < 0.1 * accel_range) * np.ones(
len(scan_accel), dtype=np.int8
)
stable *= (np.absolute(wscan_vel) > 0.1 * vel_range)

begin_stable = np.where(stable[1:] - stable[:-1] == 1)[0]
end_stable = np.where(stable[:-1] - stable[1:] == 1)[0]
if begin_stable[0] > end_stable[0]:
Expand All @@ -191,7 +188,6 @@ def _exec(self, data, detectors=None, **kwargs):
# stable periods.
if self.cut_short:
stable_spans = np.array([(x[1] - x[0]) for x in stable_times])
throw_spans = np.array([(x[1] - x[0]) for x in throw_times])
try:
# First try short limit as time
stable_bad = stable_spans < self.short_limit.to_value(u.s)
Expand All @@ -208,34 +204,54 @@ def _exec(self, data, detectors=None, **kwargs):
stable_times = [
x for (x, y) in zip(stable_times, stable_bad) if not y
]
try:
# First try short limit as time
throw_bad = throw_spans < self.short_limit.to_value(u.s)
except:
# Try short limit as fraction
median_throw = np.median(throw_spans)
throw_bad = throw_spans < self.short_limit * median_throw
throw_times = [x for (x, y) in zip(throw_times, throw_bad) if not y]

# The "throw" intervals extend from one turnaround to the next.
# We start the first throw at the beginning of the first stable scan
# and then find the sample between stable scans where the turnaround
# happens. This reduces false detections of turnarounds before or
# after the stable scanning within the observation.

begin_throw = [begin_stable[0]]
end_throw = list()
for start_turn, end_turn in zip(end_stable[:-1], begin_stable[1:]):
vel_switch = np.where(
wscan_vel[start_turn:end_turn-1] *
wscan_vel[start_turn+1:end_turn] < 0
)[0]
if len(vel_switch) > 1:
msg = "Multiple turnarounds between end of stable scan at"
msg = f" sample {start_turn} and next start at {end_turn}"
raise RuntimeError(msg)
end_throw.append(start_turn + vel_switch[0])
begin_throw.append(end_throw[-1] + 1)
end_throw.append(end_stable[-1])
begin_throw = np.array(begin_throw)
end_throw = np.array(end_throw)

throw_times = [
(stamps[x[0]], stamps[x[1] - 1])
for x in zip(begin_throw, end_throw)
]

# Split scans into left and right-going intervals
stable_leftright_times = []
stable_rightleft_times = []
for start, stop in stable_times:
# Check the velocity at the middle of the scan
i = np.argwhere(np.abs(stamps - 0.5 * (start + stop)))[0]
if wscan_vel[i] > 0:
stable_leftright_times.append((start, stop))
elif wscan_vel[i] < 0:
stable_rightleft_times.append((start, stop))
throw_leftright_times = []
throw_rightleft_times = []
for start, stop in throw_times:
# Check the velocity at the middle of the throw
i = np.argwhere(np.abs(stamps - 0.5 * (start + stop)))[0]
if wscan_vel[i] > 0:
throw_leftright_times.append((start, stop))
elif wscan_vel[i] < 0:
throw_rightleft_times.append((start, stop))

for iscan, (first, last) in enumerate(zip(begin_stable, end_stable)):
# Check the velocity at the middle of the scan
mid = first + (last - first) // 2
if wscan_vel[mid] > 0:
stable_leftright_times.append(stable_times[iscan])
throw_leftright_times.append(throw_times[iscan])
elif wscan_vel[mid] < 0:
stable_rightleft_times.append(stable_times[iscan])
throw_rightleft_times.append(throw_times[iscan])
else:
msg = "Velocity is zero in the middle of scan"
msg += f" samples {first} ... {last}"
raise RuntimeError(msg)

if self.debug_root is not None:
set_matplotlib_backend()
Expand All @@ -244,16 +260,18 @@ def _exec(self, data, detectors=None, **kwargs):

# Dump some plots
out_file = f"{self.debug_root}_{obs.comm_row_rank}.pdf"
if len(vel_switch) >= 5:
if len(end_throw) >= 5:
# Plot a few scans
n_plot = vel_switch[4]
n_plot = end_throw[4]
else:
# Plot it all
n_plot = obs.n_local_samples

swplot = vel_switch[vel_switch <= n_plot]
bstable = begin_stable[begin_stable <= n_plot]
estable = end_stable[end_stable <= n_plot]
bthrow = begin_throw[begin_throw <= n_plot]
ethrow = end_throw[end_throw <= n_plot]

fig = plt.figure(dpi=100, figsize=(8, 16))

Expand All @@ -263,11 +281,17 @@ def _exec(self, data, detectors=None, **kwargs):
obs.shared[self.azimuth].data[:n_plot],
"-",
)
ax.set_xlabel("Samples")
ax.set_ylabel("Azimuth")

ax = fig.add_subplot(4, 1, 2)
ax.plot(np.arange(n_plot), stable[:n_plot], "-")
ax.vlines(bstable, ymin=-1, ymax=2, color="green")
ax.vlines(estable, ymin=-1, ymax=2, color="red")
ax.vlines(bthrow, ymin=-2, ymax=1, color="cyan")
ax.vlines(ethrow, ymin=-2, ymax=1, color="purple")
ax.set_xlabel("Samples")
ax.set_ylabel("Stable Scan / Throw")

ax = fig.add_subplot(4, 1, 3)
ax.plot(np.arange(n_plot), scan_vel[:n_plot], "-")
Expand All @@ -277,9 +301,13 @@ def _exec(self, data, detectors=None, **kwargs):
ymin=np.amin(scan_vel),
ymax=np.amax(scan_vel),
)
ax.set_xlabel("Samples")
ax.set_ylabel("Scan Velocity")

ax = fig.add_subplot(4, 1, 4)
ax.plot(np.arange(n_plot), scan_accel[:n_plot], "-")
ax.set_xlabel("Samples")
ax.set_ylabel("Scan Acceleration")

plt.savefig(out_file)
plt.close()
Expand Down
3 changes: 2 additions & 1 deletion src/toast/ops/mapmaker.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,7 @@ def _prepare_binning(self):
pixel_pointing=map_binning.pixel_pointing,
shared_flags=map_binning.shared_flags,
shared_flag_mask=map_binning.shared_flag_mask,
save_pointing=map_binning.full_pointing,
)
pix_dist.apply(self._data)
self._log.info_rank(
Expand Down Expand Up @@ -559,7 +560,7 @@ def _bin_cleaned_signal(self, map_binning, out_cleaned):
map_binning.det_data = self.det_data
else:
map_binning.det_data = out_cleaned
if self.write_noiseweighted_map:
if self.write_noiseweighted_map or self.keep_final_products:
map_binning.noiseweighted = self.noiseweighted_map_name
map_binning.binned = self.map_name

Expand Down
17 changes: 12 additions & 5 deletions src/toast/ops/mapmaker_templates.py
Original file line number Diff line number Diff line change
Expand Up @@ -693,9 +693,16 @@ def _prepare_pixels(self):
pixels=solve_pixels.pixels,
view=self._solve_view,
)
scan_pipe = Pipeline(
detector_sets=["SINGLE"], operators=[solve_pixels, self._scanner]
)
if self.binning.full_pointing:
# We are caching the pointing anyway- run with all detectors
scan_pipe = Pipeline(
detector_sets=["ALL"], operators=[solve_pixels, self._scanner]
)
else:
# Pipeline over detectors
scan_pipe = Pipeline(
detector_sets=["SINGLE"], operators=[solve_pixels, self._scanner]
)

return solve_pixels, solve_weights, scan_pipe

Expand Down Expand Up @@ -767,7 +774,7 @@ def _prepare_flagging_ob(self, ob):
return

@function_timer
def _prepare_flagging(self, solve_pixels):
def _prepare_flagging(self, scan_pipe):
"""Flagging. We create a new set of data flags for the solver that includes:
- one bit for a bitwise OR of all detector / shared flags
- one bit for any pixel mask, projected to TOD
Expand Down Expand Up @@ -1102,7 +1109,7 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs):

self._timer.start()

self._prepare_flagging(solve_pixels)
self._prepare_flagging(scan_pipe)

self._get_pixel_covariance(solve_pixels, solve_weights)
self._write_del(self.solver_hits_name)
Expand Down

0 comments on commit f7dd931

Please sign in to comment.