Format source and prepare for tag (#758)
* Run format_source with latest versions of tools

* Bump pre-release version
tskisner authored May 13, 2024
1 parent b1540cb commit ff60a67
Showing 16 changed files with 80 additions and 75 deletions.
54 changes: 27 additions & 27 deletions src/libtoast/src/toast_atm_sim.cpp
@@ -694,9 +694,9 @@ cholmod_sparse * toast::atm_sim_sqrt_sparse_covariance(
timer.start();

if (atm_verbose()) {
-        o.str("");
-        o << rank << " : Analyzing sparse covariance ... ";
-        logger.verbose(o.str().c_str());
+        o.str("");
+        o << rank << " : Analyzing sparse covariance ... ";
+        logger.verbose(o.str().c_str());
}

cholmod_factor * factorization;
@@ -710,21 +710,21 @@ cholmod_sparse * toast::atm_sim_sqrt_sparse_covariance(
throw std::runtime_error("cholmod_analyze failed.");
}

-    if (atm_verbose()) {
-        o.str("");
-        o << rank << " : Factorizing sparse covariance, "
-          << "nzmax = " << cov->nzmax
-          << ", (" << cov->nzmax * 8 / pow(2, 20) << "MB)";
-        logger.verbose(o.str().c_str());
-    }
+    if (atm_verbose()) {
+        o.str("");
+        o << rank << " : Factorizing sparse covariance, "
+          << "nzmax = " << cov->nzmax
+          << ", (" << cov->nzmax * 8 / pow(2, 20) << "MB)";
+        logger.verbose(o.str().c_str());
+    }

cholmod_factorize(cov, factorization, chol.chcommon);

if (chol.chcommon->status != CHOLMOD_OK) {
-        o.str("");
-        o << rank << " : Factorize covariance failed, itry=" << itry;
+        o.str("");
+        o << rank << " : Factorize covariance failed, itry=" << itry;
cholmod_free_factor(&factorization, chol.chcommon);
-        logger.debug(o.str().c_str());
+        logger.debug(o.str().c_str());

if (itry < ntry - 1) {
// Extract band diagonal of the matrix and try
@@ -780,11 +780,11 @@ cholmod_sparse * toast::atm_sim_sqrt_sparse_covariance(
timer.stop();

if (atm_verbose()) {
-        o.str("");
-        o << rank
-          << " : Cholesky decomposition done in " << timer.seconds()
-          << " s. N = " << nelem << std::endl;
-        logger.verbose(o.str().c_str());
+        o.str("");
+        o << rank
+          << " : Cholesky decomposition done in " << timer.seconds()
+          << " s. N = " << nelem << std::endl;
+        logger.verbose(o.str().c_str());
}

// Report memory usage (only counting the non-zero elements, no
@@ -797,10 +797,10 @@ cholmod_sparse * toast::atm_sim_sqrt_sparse_covariance(
/ pow(2.0, 20.0);

if (atm_verbose()) {
-        o.str("");
-        o << rank << " : Allocated " << tot_mem
-          << " MB for the sparse factorization.";
-        logger.verbose(o.str().c_str());
+        o.str("");
+        o << rank << " : Allocated " << tot_mem
+          << " MB for the sparse factorization.";
+        logger.verbose(o.str().c_str());
}

cholmod_sparse * sqrt_cov = cholmod_factor_to_sparse(factorization, chol.chcommon);
@@ -821,11 +821,11 @@ cholmod_sparse * toast::atm_sim_sqrt_sparse_covariance(
double max_mem = (nelem * nelem * sizeof(double)) / pow(2.0, 20.0);

if (atm_verbose()) {
-        o.str("");
-        o << rank << " : Allocated " << tot_mem
-          << " MB for the sparse sqrt covariance matrix. "
-          << "Compression: " << tot_mem / max_mem;
-        logger.verbose(o.str().c_str());
+        o.str("");
+        o << rank << " : Allocated " << tot_mem
+          << " MB for the sparse sqrt covariance matrix. "
+          << "Compression: " << tot_mem / max_mem;
+        logger.verbose(o.str().c_str());
}

return sqrt_cov;
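For orientation, a minimal Python sketch of the same analyze/factorize/extract flow, using scikit-sparse's CHOLMOD bindings (an assumption for illustration; the C++ above calls the CHOLMOD C API directly via cholmod_analyze, cholmod_factorize, and cholmod_factor_to_sparse). The memory figures mirror the log messages above: non-zero count times 8 bytes, divided by 2^20 to get MB.

import numpy as np
import scipy.sparse as sp
from sksparse.cholmod import cholesky

n = 1000
# Synthetic SPD band matrix standing in for the sparse covariance.
cov = sp.diags(
    [np.full(n, 2.0), np.full(n - 1, -0.5), np.full(n - 1, -0.5)],
    offsets=[0, 1, -1],
    format="csc",
)

factor = cholesky(cov)  # fill-reducing analysis + numeric factorization
sqrt_cov = factor.L()   # sparse lower-triangular factor: L L^T = P cov P^T

# Memory accounting as in the log messages: non-zeros x 8 bytes, in MB.
tot_mem = sqrt_cov.nnz * 8 / 2**20
max_mem = n * n * 8 / 2**20
print(f"{tot_mem:.3f} MB, compression {tot_mem / max_mem:.4f}")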
2 changes: 1 addition & 1 deletion src/toast/RELEASE
@@ -1 +1 @@
-3.0.0a23
+3.0.0a24
2 changes: 1 addition & 1 deletion src/toast/atm.py
@@ -558,7 +558,7 @@ def _get_slice(self, ind_start, ind_stop, verbose=False):
f"({ix2 - ix_start} {self._xstep} m layers) m out of "
f"{self._nx * self._xstep} m indices {ind_start} -- {ind_stop} "
f"({ind_stop - ind_start}) out of {self._nelem}"
-                )
+            )

return (ind_start, ind_stop)

2 changes: 1 addition & 1 deletion src/toast/observation.py
@@ -97,7 +97,7 @@ def set_default_values(values=None):
#
# Noise
#
"noise_model" : "noise_model",
"noise_model": "noise_model",
}

# composite masks for convenience
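The whitespace change above (no space before the colon in a dict literal) is characteristic of black's normalization. A minimal sketch, assuming black is the Python formatter behind format_source (the commit message does not name the tools):

import black

src = 'defaults = {"noise_model" : "noise_model"}\n'
print(black.format_str(src, mode=black.Mode()), end="")
# defaults = {"noise_model": "noise_model"}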
6 changes: 3 additions & 3 deletions src/toast/ops/__init__.py
@@ -14,7 +14,6 @@
from .delete import Delete
from .demodulation import Demodulate, StokesWeightsDemod
from .elevation_noise import ElevationNoise
-from .signal_diff_noise_model import SignalDiffNoiseModel
from .filterbin import FilterBin, combine_observation_matrix
from .flag_intervals import FlagIntervals
from .flag_sso import FlagSSO
@@ -52,8 +51,7 @@
from .scan_healpix import ScanHealpixMap, ScanHealpixMask
from .scan_map import ScanMap, ScanMask, ScanScale
from .scan_wcs import ScanWCSMap, ScanWCSMask
-from .simple_deglitch import SimpleDeglitch
-from .simple_jumpcorrect import SimpleJumpCorrect
+from .signal_diff_noise_model import SignalDiffNoiseModel
from .sim_cosmic_rays import InjectCosmicRays
from .sim_crosstalk import CrossTalk, MitigateCrossTalk
from .sim_gaindrifts import GainDrifter
@@ -63,6 +61,8 @@
from .sim_tod_atm import SimAtmosphere
from .sim_tod_dipole import SimDipole
from .sim_tod_noise import SimNoise
+from .simple_deglitch import SimpleDeglitch
+from .simple_jumpcorrect import SimpleJumpCorrect
from .sss import SimScanSynchronousSignal
from .statistics import Statistics
from .stokes_weights import StokesWeights
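The import moves above are consistent with plain alphabetical sorting: signal_diff_noise_model sorts before sim_* ("g" before "m"), and sim_* sorts before simple_* because "_" orders before letters in ASCII. A minimal sketch, assuming isort is the sorter (not confirmed by this page):

import isort

src = (
    "from .simple_deglitch import SimpleDeglitch\n"
    "from .simple_jumpcorrect import SimpleJumpCorrect\n"
    "from .signal_diff_noise_model import SignalDiffNoiseModel\n"
)
print(isort.code(src), end="")
# from .signal_diff_noise_model import SignalDiffNoiseModel
# from .simple_deglitch import SimpleDeglitch
# from .simple_jumpcorrect import SimpleJumpCorrect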
7 changes: 4 additions & 3 deletions src/toast/ops/azimuth_intervals.py
@@ -168,7 +168,7 @@ def _exec(self, data, detectors=None, **kwargs):
stable = (np.absolute(scan_accel) < 0.1 * accel_range) * np.ones(
len(scan_accel), dtype=np.int8
)
-            stable *= (np.absolute(wscan_vel) > 0.1 * vel_range)
+            stable *= np.absolute(wscan_vel) > 0.1 * vel_range

begin_stable = np.where(stable[1:] - stable[:-1] == 1)[0]
end_stable = np.where(stable[:-1] - stable[1:] == 1)[0]
@@ -215,8 +215,9 @@ def _exec(self, data, detectors=None, **kwargs):
end_throw = list()
for start_turn, end_turn in zip(end_stable[:-1], begin_stable[1:]):
vel_switch = np.where(
-                    wscan_vel[start_turn:end_turn-1] *
-                    wscan_vel[start_turn+1:end_turn] < 0
+                    wscan_vel[start_turn : end_turn - 1]
+                    * wscan_vel[start_turn + 1 : end_turn]
+                    < 0
)[0]
if len(vel_switch) > 1:
msg = "Multiple turnarounds between end of stable scan at"
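The logic in these two hunks is easier to see end to end: flag samples as stable scanning where acceleration is small but velocity is not, difference the flag to find span edges, then locate each turnaround where adjacent velocity samples change sign. A self-contained sketch on a synthetic scan (all inputs here are illustrative stand-ins, not the operator's):

import numpy as np

t = np.linspace(0.0, 120.0, 12000)
az = np.cos(2.0 * np.pi * t / 30.0)  # stand-in for a back-and-forth azimuth scan
vel = np.gradient(az, t)
accel = np.gradient(vel, t)
vel_range = np.ptp(vel)
accel_range = np.ptp(accel)

# Stable scanning: small acceleration but non-negligible velocity.
stable = (np.absolute(accel) < 0.1 * accel_range).astype(np.int8)
stable *= np.absolute(vel) > 0.1 * vel_range

# Rising and falling edges of the stable flag, as in the first hunk.
begin_stable = np.where(stable[1:] - stable[:-1] == 1)[0]
end_stable = np.where(stable[:-1] - stable[1:] == 1)[0]

# Between consecutive stable spans, the turnaround is where adjacent
# velocity samples have a negative product (a sign change).
turnarounds = []
for start_turn, end_turn in zip(end_stable[:-1], begin_stable[1:]):
    vel_switch = np.where(
        vel[start_turn : end_turn - 1] * vel[start_turn + 1 : end_turn] < 0
    )[0]
    if len(vel_switch) == 1:
        turnarounds.append(start_turn + vel_switch[0])
print(len(begin_stable), len(end_stable), turnarounds[:3])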
10 changes: 2 additions & 8 deletions src/toast/ops/noise_estimation.py
@@ -235,14 +235,8 @@ def _redistribute(self, obs):
log = Logger.get()
timer = Timer()
timer.start()
-        if (
-            (
-                len(self.pairs) > 0 or
-                (not self.nocross)
-            ) and (
-                obs.comm_col is not None and
-                obs.comm_col.size > 1
-            )
+        if (len(self.pairs) > 0 or (not self.nocross)) and (
+            obs.comm_col is not None and obs.comm_col.size > 1
):
self.redistribute = True
# Redistribute the data so each process has all detectors
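Restated, the reflowed condition says: redistribute when cross-spectra are actually wanted (explicit detector pairs, or nocross disabled) and there is a column communicator spanning more than one process. A standalone restatement of the predicate (names taken from the hunk, not a toast API):

def needs_redistribution(pairs, nocross, comm_col):
    want_cross = len(pairs) > 0 or not nocross
    have_column = comm_col is not None and comm_col.size > 1
    return want_cross and have_column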
4 changes: 2 additions & 2 deletions src/toast/ops/pixels_wcs.py
@@ -4,18 +4,18 @@

import warnings

+import astropy.io.fits as af
import numpy as np
import traitlets
from astropy import units as u
from astropy.wcs import WCS
-import astropy.io.fits as af

from .. import qarray as qa
from ..instrument_coords import quat_to_xieta
from ..mpi import MPI
from ..observation import default_values as defaults
from ..pixels import PixelDistribution
-from ..pointing_utils import scan_range_lonlat, center_offset_lonlat
+from ..pointing_utils import center_offset_lonlat, scan_range_lonlat
from ..timing import function_timer
from ..traits import Bool, Instance, Int, Tuple, Unicode, trait_docs
from ..utils import Environment, Logger
17 changes: 13 additions & 4 deletions src/toast/ops/polyfilter/polyfilter.py
@@ -8,9 +8,9 @@
from time import time

import numpy as np
+import scipy.optimize
import traitlets
from astropy import units as u
-import scipy.optimize

from ... import qarray as qa
from ..._libtoast import subtract_mean, sum_detectors
@@ -806,10 +806,11 @@ def _re_redistribute(self, data, obs, timer, log, temp_ob):
def _plot_coeff(self, ob, coeffs, comm, value):
# Make a plot of the coupling coefficients
import matplotlib.pyplot as plt
+
ndet = len(coeffs)
lon = np.zeros(ndet)
lat = np.zeros(ndet)
-        yrot = qa.rotation([0, 1, 0], np.pi/2)
+        yrot = qa.rotation([0, 1, 0], np.pi / 2)
for idet in range(ndet):
name = ob.local_detectors[idet]
quat = ob.telescope.focalplane[name]["quat"]
@@ -839,8 +840,16 @@ def _plot_coeff(self, ob, coeffs, comm, value):
fig = plt.figure(figsize=[12, 8])
ax = fig.add_subplot(1, 1, 1)
ax.set_title(f"obs = {ob.name}, key = {value}")
-        amp = .15 # Need a smarter amplitude...
-        p = ax.scatter(lon, lat, c=coeffs, vmin=1-amp, vmax=1+amp, edgecolors="k", cmap="bwr")
+        amp = 0.15  # Need a smarter amplitude...
+        p = ax.scatter(
+            lon,
+            lat,
+            c=coeffs,
+            vmin=1 - amp,
+            vmax=1 + amp,
+            edgecolors="k",
+            cmap="bwr",
+        )
fig.colorbar(p)
fig.savefig(f"coeffs_{ob.name}_{value}.png")
plt.close()
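The reflowed scatter call centers a diverging colormap on a coupling coefficient of 1, so over- and under-coupled detectors read as red and blue. A self-contained sketch with random focal-plane positions standing in for the detector quaternions (illustrative only):

import matplotlib

matplotlib.use("Agg")  # headless backend, as for batch runs
import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(1)
ndet = 64
lon = rng.uniform(-1.0, 1.0, ndet)
lat = rng.uniform(-1.0, 1.0, ndet)
coeffs = 1.0 + 0.05 * rng.standard_normal(ndet)

amp = 0.15  # color half-width; the source comment wants this smarter
fig = plt.figure(figsize=[12, 8])
ax = fig.add_subplot(1, 1, 1)
p = ax.scatter(
    lon, lat, c=coeffs, vmin=1 - amp, vmax=1 + amp, edgecolors="k", cmap="bwr"
)
fig.colorbar(p)
fig.savefig("coeffs_example.png")
plt.close(fig)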
2 changes: 1 addition & 1 deletion src/toast/ops/sim_tod_atm.py
@@ -204,7 +204,7 @@ class SimAtmosphere(Operator):
1e-3,
help="Correlation limit is used to measure the correlation length of the "
"simulation. Elements further than correlation length apart have their "
"covariance set to zero."
"covariance set to zero.",
)

@traitlets.validate("det_mask")
2 changes: 1 addition & 1 deletion src/toast/ops/sim_tod_atm_generate.py
@@ -158,7 +158,7 @@ class GenerateAtmosphere(Operator):
1e-3,
help="Correlation limit is used to measure the correlation length of the "
"simulation. Elements further than correlation length apart have their "
"covariance set to zero."
"covariance set to zero.",
)

@traitlets.validate("shared_flag_mask")
11 changes: 6 additions & 5 deletions src/toast/ops/simple_deglitch.py
@@ -23,8 +23,7 @@

@trait_docs
class SimpleDeglitch(Operator):
"""An operator that flags extreme detector samples.
"""
"""An operator that flags extreme detector samples."""

# Class traits

@@ -181,8 +180,10 @@ def _exec(self, data, detectors=None, **kwargs):
rms = np.nanstd(sig_view)
nglitch = 0
while True:
-                    if np.isnan(rms) or \
-                       np.sum(np.isfinite(sig_view)) < self.nsample_min:
+                    if (
+                        np.isnan(rms)
+                        or np.sum(np.isfinite(sig_view)) < self.nsample_min
+                    ):
# flag the entire view. Not enough statistics
sig_view[:] = np.nan
break
@@ -191,7 +192,7 @@
sig_view_test = sig_view.copy()
istart = max(0, i - self.glitch_radius)
istop = min(nsample, i + self.glitch_radius + 1)
-                    sig_view_test[istart : istop] = np.nan
+                    sig_view_test[istart:istop] = np.nan
rms_test = np.nanstd(sig_view_test)
if np.abs(sig_view[i]) < self.glitch_limit * rms_test:
# Not significant enough
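The condition rewrapped above guards an iterative clipping loop: repeatedly take the largest remaining sample, recompute the RMS with that sample's neighborhood excluded, and either stop (the peak is consistent with noise) or flag the neighborhood and continue. A self-contained sketch of that loop (thresholds here are illustrative, not the operator's defaults):

import numpy as np

rng = np.random.default_rng(2)
sig = rng.standard_normal(5000)
sig[1234] += 20.0  # inject one glitch
glitch_limit, glitch_radius, nsample_min = 5.0, 10, 100

nsample = len(sig)
while True:
    if np.isnan(np.nanstd(sig)) or np.sum(np.isfinite(sig)) < nsample_min:
        sig[:] = np.nan  # not enough statistics; flag the whole view
        break
    i = np.nanargmax(np.abs(sig))
    istart = max(0, i - glitch_radius)
    istop = min(nsample, i + glitch_radius + 1)
    sig_test = sig.copy()
    sig_test[istart:istop] = np.nan  # RMS without the candidate glitch
    rms_test = np.nanstd(sig_test)
    if np.abs(sig[i]) < glitch_limit * rms_test:
        break  # remaining peaks are consistent with noise
    sig[istart:istop] = np.nan  # flag the glitch neighborhood
print(np.sum(~np.isfinite(sig)))  # 21: the glitch plus 10 samples either side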
27 changes: 13 additions & 14 deletions src/toast/ops/simple_jumpcorrect.py
@@ -23,8 +23,7 @@

@trait_docs
class SimpleJumpCorrect(Operator):
"""An operator that identifies and corrects jumps in the data
"""
"""An operator that identifies and corrects jumps in the data"""

# Class traits

@@ -144,8 +143,8 @@ def _get_stepfilter(self, m):
Return the time domain matched filter kernel of length m.
"""
h = np.zeros(m)
-        h[:m // 2] = 1
-        h[m // 2:] = -1
+        h[: m // 2] = 1
+        h[m // 2 :] = -1
# This turns the interpretation of the peak amplitude directly
# into the step amplitude
h /= m // 2
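The normalization noted in that comment can be checked directly: dividing the two-sided ±1 kernel by m // 2 makes the matched-filter peak read out the step amplitude itself. A minimal sketch (scipy's fftconvolve, as used later in this module):

import numpy as np
from scipy.signal import fftconvolve

def stepfilter(m):
    h = np.zeros(m)
    h[: m // 2] = 1.0
    h[m // 2 :] = -1.0
    return h / (m // 2)

sig = np.zeros(1000)
sig[600:] += 2.5  # a jump of amplitude 2.5
filt = fftconvolve(sig, stepfilter(100), mode="same")
print(filt.max(), np.argmax(filt))  # ~2.5 at sample ~600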
@@ -195,8 +194,9 @@ def _find_peaks(self, toi, flag, flag_out, lim=3.0, tol=1e4, sigma_in=None):
sigma = self._get_sigma(mytoi, flag_out, tol)

# Excessive flagging is a sign of false detection
-            if significance > 5 or (float(np.sum(flag[istart:istop]))
-                                    / (istop - istart) < .5):
+            if significance > 5 or (
+                float(np.sum(flag[istart:istop])) / (istop - istart) < 0.5
+            ):
peaks.append((imax, significance, amplitude))

npeak = np.sum(np.abs(mytoi) > sigma * lim)
@@ -215,13 +215,13 @@ def _get_sigma(self, toi, flag, tol):
ind = slice(start, stop)
x = toi[ind][full_flag[ind] == 0]
if len(x) != 0:
-                rms = np.sqrt(np.mean(x.data ** 2))
+                rms = np.sqrt(np.mean(x.data**2))
sigmas.append(rms)

if len(sigmas) != 0:
sigma = np.median(sigmas)
else:
-            sigma = 0.
+            sigma = 0.0
return sigma

def _remove_jumps(self, signal, flag, peaks, tol):
Expand All @@ -234,7 +234,7 @@ def _remove_jumps(self, signal, flag, peaks, tol):
flag_out = flag.copy()
for peak, _, amplitude in peaks:
corrected_signal[peak:] -= amplitude
-            flag_out[peak - int(tol):peak + int(tol)] = True
+            flag_out[peak - int(tol) : peak + int(tol)] = True
return corrected_signal, flag_out

@function_timer
@@ -268,25 +268,24 @@ def _exec(self, data, detectors=None, **kwargs):
sig_view = sig[ind].copy()
bad_view = bad[ind]
bad_view_out = bad_view.copy()
-                sig_filtered = fftconvolve(
-                    sig_view, stepfilter, mode="same"
-                )
+                sig_filtered = fftconvolve(sig_view, stepfilter, mode="same")
peaks = self._find_peaks(
sig_filtered,
bad_view,
bad_view_out,
lim=self.jump_limit,
tol=self.filterlen // 2,
)

njump = len(peaks)
if njump == 0:
continue
if njump > 10:
raise RuntimeError(f"Found {njump} jumps!")

corrected_signal, flag_out = self._remove_jumps(
-                sig_view, bad_view, peaks, self.jump_radius)
+                sig_view, bad_view, peaks, self.jump_radius
+            )
sig[ind] = corrected_signal
det_flags[ind][flag_out] |= self.jump_mask

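Putting the pieces of _exec together on synthetic data: convolve with the step kernel, take the strongest filter peak as the jump location and amplitude, subtract that amplitude from the peak onward, and flag a window around it. A sketch under illustrative settings (not the operator's defaults):

import numpy as np
from scipy.signal import fftconvolve

rng = np.random.default_rng(3)
nsample, filterlen, amp = 4000, 256, 6.0
sig = rng.standard_normal(nsample)
sig[2500:] += amp  # inject one jump

h = np.zeros(filterlen)  # the same kernel as _get_stepfilter above
h[: filterlen // 2] = 1.0
h[filterlen // 2 :] = -1.0
h /= filterlen // 2

filt = fftconvolve(sig, h, mode="same")
peak = int(np.argmax(np.abs(filt)))
amplitude = filt[peak]

corrected = sig.copy()
corrected[peak:] -= amplitude  # remove the jump from the peak onward
flagged = np.zeros(nsample, dtype=bool)
radius = filterlen // 2
flagged[max(0, peak - radius) : peak + radius] = True
print(peak, round(float(amplitude), 2))  # ~2500, ~6.0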
