From eceae71bf6e06ec347977f320773ed995c9b34ca Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Tue, 19 Mar 2024 11:03:18 -0400 Subject: [PATCH 1/8] Modernize --- .pre-commit-config.yaml | 2 +- cheta/cache.py | 6 +-- cheta/converters.py | 2 - cheta/derived/orbit.py | 2 +- cheta/get_telem.py | 5 +-- cheta/plot.py | 1 - cheta/remote_access.py | 1 - cheta/tests/test_data_source.py | 1 - cheta/utils.py | 4 +- dev_utils/find_colname_diffs.py | 9 ++-- dev_utils/find_tdb_diffs.py | 21 ++++++---- dev_utils/validate_tdb_updates.py | 45 +++++++++++--------- make_units_sci.py | 3 +- ruff-base.toml | 58 +++++++++++++++++++++++++ ruff.toml | 70 +++++-------------------------- 15 files changed, 119 insertions(+), 111 deletions(-) create mode 100644 ruff-base.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f055986a..d1ba4259 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.1.5 + rev: v0.3.3 hooks: # Run the linter. - id: ruff diff --git a/cheta/cache.py b/cheta/cache.py index 55e393ce..79715b75 100644 --- a/cheta/cache.py +++ b/cheta/cache.py @@ -3,11 +3,9 @@ import collections import functools from heapq import nsmallest +from itertools import filterfalse from operator import itemgetter -import six -from six.moves import filterfalse - class Counter(dict): "Mapping where default values are zero" @@ -131,7 +129,7 @@ def wrapper(*args, **kwds): # purge least frequently used cache entry if len(cache) > maxsize: for key, _ in nsmallest( - maxsize // 10, six.iteritems(use_count), key=itemgetter(1) + maxsize // 10, use_count.items(), key=itemgetter(1) ): del cache[key], use_count[key] diff --git a/cheta/converters.py b/cheta/converters.py index 33beaedc..2308c4be 100644 --- a/cheta/converters.py +++ b/cheta/converters.py @@ -1,5 +1,4 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import absolute_import, division, print_function import logging import sys @@ -10,7 +9,6 @@ import Ska.Numpy import Ska.tdb from Chandra.Time import DateTime -from six.moves import zip from . import units diff --git a/cheta/derived/orbit.py b/cheta/derived/orbit.py index 8d8cf73c..820eb2c0 100644 --- a/cheta/derived/orbit.py +++ b/cheta/derived/orbit.py @@ -1,5 +1,4 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import absolute_import, division, print_function """ Orbital elements based on the position and velocity of Chandra at each 5 minute predictive @@ -32,6 +31,7 @@ The relevant equations were taken from http://www.castor2.ca/05_OD/01_Gauss/14_Kepler/index.html. """ + import numpy as np from Chandra.Time import DateTime diff --git a/cheta/get_telem.py b/cheta/get_telem.py index f74f22bb..9d9ad6f0 100755 --- a/cheta/get_telem.py +++ b/cheta/get_telem.py @@ -20,7 +20,6 @@ Arguments ========= """ -from __future__ import absolute_import, division, print_function import argparse import ast @@ -29,9 +28,7 @@ from itertools import count import numpy as np -import six from Chandra.Time import DateTime -from six.moves import zip from . 
import fetch, utils @@ -188,7 +185,7 @@ def get_telem( start = stop - 30 if start is None else DateTime(start) stat = None if sampling == "full" else sampling filter_bad = interpolate_dt is None - if isinstance(msids, six.string_types): + if isinstance(msids, str): msids = [msids] logger.info( diff --git a/cheta/plot.py b/cheta/plot.py index bd3465cf..bd6c6229 100644 --- a/cheta/plot.py +++ b/cheta/plot.py @@ -1,5 +1,4 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import absolute_import, division, print_function import matplotlib.pyplot as plt import numpy as np diff --git a/cheta/remote_access.py b/cheta/remote_access.py index 5e7ede87..674eb46e 100644 --- a/cheta/remote_access.py +++ b/cheta/remote_access.py @@ -5,7 +5,6 @@ NOTE: see test_remote_access.py for useful information about doing functional testing of this code. """ -from __future__ import absolute_import, division, print_function import getpass import os diff --git a/cheta/tests/test_data_source.py b/cheta/tests/test_data_source.py index 981a3f0e..08c3b03d 100644 --- a/cheta/tests/test_data_source.py +++ b/cheta/tests/test_data_source.py @@ -1,5 +1,4 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import absolute_import, division, print_function import numpy as np import pytest diff --git a/cheta/utils.py b/cheta/utils.py index cc687a37..7ff7a7c9 100644 --- a/cheta/utils.py +++ b/cheta/utils.py @@ -2,13 +2,13 @@ """ Utilities for the engineering archive. """ + import functools import re from contextlib import contextmanager import astropy.units as u import numpy as np -import six from astropy.table import Table from Chandra.Time import DateTime from cxotime import CxoTime, CxoTimeLike @@ -61,7 +61,7 @@ def get_fetch_size(msids, start, stop, stat=None, interpolate_dt=None, fast=True from . 
import fetch # Allow for a single MSID input and make all values lower-case - if isinstance(msids, six.string_types): + if isinstance(msids, str): msids = [msids] msids = [msid.lower() for msid in msids] diff --git a/dev_utils/find_colname_diffs.py b/dev_utils/find_colname_diffs.py index b0786486..8d1a0fdb 100644 --- a/dev_utils/find_colname_diffs.py +++ b/dev_utils/find_colname_diffs.py @@ -1,17 +1,16 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import print_function import os import pickle -root = '/proj/sot/ska/data/eng_archive/data' +root = "/proj/sot/ska/data/eng_archive/data" content_dirs = os.listdir(root) for content_dir in content_dirs: - f1 = os.path.join(root, content_dir, 'colnames.pickle') - f2 = os.path.join(root, content_dir, 'colnames_all.pickle') + f1 = os.path.join(root, content_dir, "colnames.pickle") + f2 = os.path.join(root, content_dir, "colnames_all.pickle") if os.path.exists(f1) and os.path.exists(f2): colnames = pickle.load(open(f1)) colnames_all = pickle.load(open(f2)) - diff = colnames_all - colnames - set(['QUALITY']) + diff = colnames_all - colnames - set(["QUALITY"]) if diff: print(content_dir) print(diff) diff --git a/dev_utils/find_tdb_diffs.py b/dev_utils/find_tdb_diffs.py index 1fd64480..5927011b 100644 --- a/dev_utils/find_tdb_diffs.py +++ b/dev_utils/find_tdb_diffs.py @@ -1,26 +1,29 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import print_function import Ska.tdb msid_xref = {} -for line in open('/proj/sot/ska/ops/TDB/p014/msids.xref'): +for line in open("/proj/sot/ska/ops/TDB/p014/msids.xref"): vals = line.split() msid_xref[vals[0]] = vals[1] versions = (4, 6, 7, 8, 9, 10, 11, 12, 13, 14) for v1, v2 in zip(versions[:-1], versions[1:]): Ska.tdb.set_tdb_version(v1) - m1 = set(Ska.tdb.msids.Tmsrment['MSID']) + m1 = set(Ska.tdb.msids.Tmsrment["MSID"]) Ska.tdb.set_tdb_version(v2) - m2 = set(Ska.tdb.msids.Tmsrment['MSID']) - print('****** {} vs {} *******'.format(v1, v2)) + m2 = set(Ska.tdb.msids.Tmsrment["MSID"]) + print("****** {} vs {} *******".format(v1, v2)) if m1 - m2: - print('** REMOVED **') + print("** REMOVED **") for msid in sorted(m1 - m2): - print('{:15s}'.format(msid)) + print("{:15s}".format(msid)) if m2 - m1: - print('** ADDED **') + print("** ADDED **") for msid in sorted(m2 - m1): - print('{:15s} {:15s} {:s}'.format(msid, msid_xref[msid], Ska.tdb.msids[msid].technical_name)) + print( + "{:15s} {:15s} {:s}".format( + msid, msid_xref[msid], Ska.tdb.msids[msid].technical_name + ) + ) print() diff --git a/dev_utils/validate_tdb_updates.py b/dev_utils/validate_tdb_updates.py index ac8ed8df..654290fd 100644 --- a/dev_utils/validate_tdb_updates.py +++ b/dev_utils/validate_tdb_updates.py @@ -1,33 +1,41 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import print_function -import pickle import os +import pickle + import Ska.tdb -from cheta import fetch_eng as fetch from matplotlib import pyplot as plt -for content in os.listdir('data'): - new_colnames = pickle.load(open(os.path.join('data', content, 'colnames.pickle'))) - cur_colnames = pickle.load(open(os.path.join( - '/proj/sot/ska/data/eng_archive', 'data', content, 'colnames.pickle'))) - print('New {}'.format(content)) +from cheta import fetch_eng as fetch + +for content in os.listdir("data"): + new_colnames = pickle.load(open(os.path.join("data", content, "colnames.pickle"))) + cur_colnames = pickle.load( + open( + os.path.join( + "/proj/sot/ska/data/eng_archive", "data", content, 
"colnames.pickle" + ) + ) + ) + print("New {}".format(content)) new = new_colnames - cur_colnames - print(', '.join(sorted(new))) + print(", ".join(sorted(new))) lost = cur_colnames - new_colnames if lost: - print('LOST: ', lost) + print("LOST: ", lost) # Plot representative new vals -d1 = '2016:001' -d2 = '2016:002' +d1 = "2016:001" +d2 = "2016:002" -msids = set(['1AHIRADF']) -msids.update(['POLAEV2BT', 'POLINE07T', 'POM2THV1T']) -msids.update(['OHRTHR35_WIDE', '1OLORADF', '1OHIRADF', '2OLORADF', '2OHIRADF', 'OOBTHR30_WIDE']) -msids.update(['AOACIIRS', 'AOACISPX', 'AOACIDPX', 'AOACIMSS']) -msids.update(['4OAVOBAT_WIDE', '4OAVHRMT_WIDE']) -msids.update(['TFSSHDT1', 'TFSSHDT2']) +msids = set(["1AHIRADF"]) +msids.update(["POLAEV2BT", "POLINE07T", "POM2THV1T"]) +msids.update( + ["OHRTHR35_WIDE", "1OLORADF", "1OHIRADF", "2OLORADF", "2OHIRADF", "OOBTHR30_WIDE"] +) +msids.update(["AOACIIRS", "AOACISPX", "AOACIDPX", "AOACIMSS"]) +msids.update(["4OAVOBAT_WIDE", "4OAVHRMT_WIDE"]) +msids.update(["TFSSHDT1", "TFSSHDT2"]) for msid in msids: m = Ska.tdb.msids[msid] @@ -36,4 +44,3 @@ plt.figure() dat.plot() plt.title(msid) - diff --git a/make_units_sci.py b/make_units_sci.py index 012f94de..75e7c971 100644 --- a/make_units_sci.py +++ b/make_units_sci.py @@ -3,7 +3,8 @@ Make a unit_system consistent with usual CXC Science units where all temperatures are in degC instead of Kelvins. Otherwise leave the CXC units untouched. """ -import cPickle as pickle + +import pickle units_cxc = pickle.load(open("units_cxc.pkl")) units_sci = dict( diff --git a/ruff-base.toml b/ruff-base.toml new file mode 100644 index 00000000..3ac26177 --- /dev/null +++ b/ruff-base.toml @@ -0,0 +1,58 @@ +# Copied originally from pandas. This config requires ruff >= 0.2. +target-version = "py310" + +# fix = true +lint.unfixable = [] + +lint.select = [ + "I", # isort + "F", # pyflakes + "E", "W", # pycodestyle + "YTT", # flake8-2020 + "B", # flake8-bugbear + "Q", # flake8-quotes + "T10", # flake8-debugger + "INT", # flake8-gettext + "PLC", "PLE", "PLR", "PLW", # pylint + "PIE", # misc lints + "PYI", # flake8-pyi + "TID", # tidy imports + "ISC", # implicit string concatenation + "TCH", # type-checking imports + "C4", # comprehensions + "PGH" # pygrep-hooks +] + +# Some additional rules that are useful +lint.extend-select = [ +"UP009", # UTF-8 encoding declaration is unnecessary +"SIM118", # Use `key in dict` instead of `key in dict.keys()` +"D205", # One blank line required between summary line and description +"ARG001", # Unused function argument +"RSE102", # Unnecessary parentheses on raised exception +"PERF401", # Use a list comprehension to create a transformed list +] + +lint.ignore = [ + "ISC001", # Disable this for compatibility with ruff format + "E402", # module level import not at top of file + "E731", # do not assign a lambda expression, use a def + "PLR2004", # Magic number + "B028", # No explicit `stacklevel` keyword argument found + "PLR0913", # Too many arguments to function call +] + +extend-exclude = [ + "docs", +] + +[lint.pycodestyle] +max-line-length = 100 # E501 reports lines that exceed the length of 100. 
+ +[lint.extend-per-file-ignores] +"__init__.py" = ["E402", "F401", "F403"] +# For tests: +# - D205: Don't worry about test docstrings +# - ARG001: Unused function argument false positives for some fixtures +# - E501: Line-too-long +"**/tests/test_*.py" = ["D205", "ARG001", "E501"] \ No newline at end of file diff --git a/ruff.toml b/ruff.toml index 6cbb39b1..33719d22 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,54 +1,19 @@ -# Copied originally from pandas -target-version = "py310" +extend = "ruff-base.toml" -# fix = true -unfixable = [] - -select = [ - "I", # isort - "F", # pyflakes - "E", "W", # pycodestyle - "YTT", # flake8-2020 - "B", # flake8-bugbear - "Q", # flake8-quotes - "T10", # flake8-debugger - "INT", # flake8-gettext - "PLC", "PLE", "PLR", "PLW", # pylint - "PIE", # misc lints - "PYI", # flake8-pyi - "TID", # tidy imports - "ISC", # implicit string concatenation - "TCH", # type-checking imports - "C4", # comprehensions - "PGH" # pygrep-hooks -] - -# Some additional rules that are useful -extend-select = [ -"UP009", # UTF-8 encoding declaration is unnecessary -"SIM118", # Use `key in dict` instead of `key in dict.keys()` -"D205", # One blank line required between summary line and description -"ARG001", # Unused function argument -"RSE102", # Unnecessary parentheses on raised exception -"PERF401", # Use a list comprehension to create a transformed list +# These are files to exclude for this project. +extend-exclude = [ + # "**/*.ipynb", # commonly not ruff-compliant ] -ignore = [ - "ISC001", # Disable this for compatibility with ruff format - "B028", # No explicit `stacklevel` keyword argument found +# These are rules that commonly cause many ruff warnings. Code will be improved by +# incrementally fixing code to adhere to these rules, but for practical purposes they +# can be ignored by uncommenting each one. You can also add to this list as needed. +lint.extend-ignore = [ "B905", # `zip()` without an explicit `strict=` parameter - "E402", # module level import not at top of file - "E731", # do not assign a lambda expression, use a def "PLC1901", # compare-to-empty-string "PLR0911", # Too many returns "PLR0912", # Too many branches - "PLR0913", # Too many arguments to function call "PLR0915", # Too many statements - "PLR2004", # Magic number -] - -# TODO : fix these and stop ignoring. This is an old package with a lot of poor style. -extend-ignore = [ "PGH004", # Use specific rule codes when using `noqa` "D205", # 1 blank line required between summary line and description "ARG001", # Unused function argument @@ -58,30 +23,15 @@ extend-ignore = [ "C405", # Unnecessary `list` literal (rewrite as a `set` literal) "C408", # Unnecessary `dict` call (rewrite as a literal) "C416", # Unnecessary `dict` comprehension (rewrite using `dict()`) - "PGH002", # warn is deprecated in favor of warning + "G010", # warn is deprecated in favor of warning "TID252", # Relative imports from parent modules are banned (DO NOT FIX: namespace) "PLW0603", # Using the global statement to update `password` is discouraged "SIM118", # Use `key in dict` instead of `key in dict.keys()` "B007", # Loop control variable `time` not used within loop body "E721", # Do not compare types, use `isinstance()` - "PGH001", # No builtin `eval()` allowed + "S307", # No builtin `eval()` allowed "B904", # Within an `except` clause, raise exceptions with "B007", # Loop control variable `i` not used within loop body "PLW0602", # Using global for .. 
but no assignment is done "PLR5501", # Use `elif` instead of `else` then `if`, to reduce indentation ] - -extend-exclude = [ - "docs", - "dev_utils", -] - -[pycodestyle] -max-line-length = 100 # E501 reports lines that exceed the length of 100. - -[lint.extend-per-file-ignores] -"__init__.py" = ["E402", "F401", "F403"] -# For tests: -# - D205: Don't worry about test docstrings -# - ARG001: Unused function argument false positives for some fixtures -"**/tests/test_*.py" = ["D205", "ARG001"] From b1b2307738381fa18e02dabf7459f131a3b0056a Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Tue, 19 Mar 2024 11:07:10 -0400 Subject: [PATCH 2/8] Fix formatting --- cheta/derived/mups_valve.py | 126 +++++++++++++++--------------- cheta/fetch.py | 1 + cheta/file_defs.py | 1 + cheta/tests/test_remote_access.py | 1 + cheta/units.py | 1 + cheta/update_server_sync.py | 1 - 6 files changed, 67 insertions(+), 64 deletions(-) diff --git a/cheta/derived/mups_valve.py b/cheta/derived/mups_valve.py index 2c7b659a..8bcc458b 100644 --- a/cheta/derived/mups_valve.py +++ b/cheta/derived/mups_valve.py @@ -1,67 +1,67 @@ """ - Fetch clean MUPS valve temperature telemetry - - This makes use of the temperature correction code provided by Scott Blanchard (to correct - for a resistor dropout in the thermistor data) and xija thermal model developed by - Matt Dahmer. - - The basic cleaning algorithm is very simple: - - - Fetch raw telemetry from the cheta archive. - - Compute a xija thermal model prediction for the same timespan (actually starting a few - days in advance to burn out uncertainty in the pseudo-node value). - - Accept either raw telemetry or corrected data which are within a tolerance (5 degF) of - the model. - - In the gaps where the model diverges from both raw and corrected temperatures, - "repropagate" the model starting from the last accepted temperature value. - This effectively takes out much of the systematic model error, which can be up to - 15 degF after a transition to a new attitude. - - In some cases this allows recovery of additional data, while in others the data are - not recoverable: either due to partial disconnect of the parallel resistor or full - disconnects where the output voltage exceeds 5.12 V. - - The output of this function is a `fetch.Msid` object with some bonus attributes, - documented below. In particular the output cleaned data are labeled so one knows - exactly where each data point came from. - - The function is fast, and one can get 5 years of cleaned telemetry in a few seconds - on a modern laptop with SSD drive. - - This cleaning technique recovers on average about 90% of data for PM2THV1T. Since - 2015, about 60% of telemetry is good (no dropout) while 30% is in a recoverable - fully-dropped state (and 10% is not recoverable). - - ``` - def fetch_clean_msid(msid, start, stop=None, dt_thresh=5.0, median=7, model_spec=None, - version=None): - Fetch a cleaned version of telemetry for ``msid``. - - If not supplied the model spec will come from - ``xija.get_model_spec.get_xija_model_spec(msid, version=version)`` - (which uses ``$SKA/data/chandra_models/chandra_models/xija/mups_valve/{msid}_spec.json``). 
- - This function returns a `fetch.Msid` object like a normal fetch but with extra attributes: - - - vals: cleaned telemetry (either original or corrected telemetry, or xija model prediction) - - source: label for each vals data point - - 0: unrecoverable, so use xija model value - - 1: original telemetry - - 2: corrected telemetry - - vals_raw: raw (uncleaned) telemetry - - vals_nan: cleaned telem but with np.nan at points where data are unrecoverable (this is - for plotting) - - vals_corr: telemetry with the MUPS correction applied - - vals_model: xija model prediction - - :param start: start time - :param stop: stop time (default=NOW) - :param dt_thresh: tolerance for matching model to data in degF (default=5 degF) - :param median: length of median filter (default=7, use 0 to disable) - :param model_spec: file name or URL containing relevant xija model spec - :param version: version of chandra_models repo (tag, branch, or commit) - - :returns: fetch.Msid object - ``` +Fetch clean MUPS valve temperature telemetry + +This makes use of the temperature correction code provided by Scott Blanchard (to correct +for a resistor dropout in the thermistor data) and xija thermal model developed by +Matt Dahmer. + +The basic cleaning algorithm is very simple: + +- Fetch raw telemetry from the cheta archive. +- Compute a xija thermal model prediction for the same timespan (actually starting a few + days in advance to burn out uncertainty in the pseudo-node value). +- Accept either raw telemetry or corrected data which are within a tolerance (5 degF) of + the model. +- In the gaps where the model diverges from both raw and corrected temperatures, + "repropagate" the model starting from the last accepted temperature value. + This effectively takes out much of the systematic model error, which can be up to + 15 degF after a transition to a new attitude. +- In some cases this allows recovery of additional data, while in others the data are + not recoverable: either due to partial disconnect of the parallel resistor or full + disconnects where the output voltage exceeds 5.12 V. + +The output of this function is a `fetch.Msid` object with some bonus attributes, +documented below. In particular the output cleaned data are labeled so one knows +exactly where each data point came from. + +The function is fast, and one can get 5 years of cleaned telemetry in a few seconds +on a modern laptop with SSD drive. + +This cleaning technique recovers on average about 90% of data for PM2THV1T. Since +2015, about 60% of telemetry is good (no dropout) while 30% is in a recoverable +fully-dropped state (and 10% is not recoverable). + +``` +def fetch_clean_msid(msid, start, stop=None, dt_thresh=5.0, median=7, model_spec=None, + version=None): + Fetch a cleaned version of telemetry for ``msid``. + + If not supplied the model spec will come from + ``xija.get_model_spec.get_xija_model_spec(msid, version=version)`` + (which uses ``$SKA/data/chandra_models/chandra_models/xija/mups_valve/{msid}_spec.json``). 
+ + This function returns a `fetch.Msid` object like a normal fetch but with extra attributes: + + - vals: cleaned telemetry (either original or corrected telemetry, or xija model prediction) + - source: label for each vals data point + - 0: unrecoverable, so use xija model value + - 1: original telemetry + - 2: corrected telemetry + - vals_raw: raw (uncleaned) telemetry + - vals_nan: cleaned telem but with np.nan at points where data are unrecoverable (this is + for plotting) + - vals_corr: telemetry with the MUPS correction applied + - vals_model: xija model prediction + + :param start: start time + :param stop: stop time (default=NOW) + :param dt_thresh: tolerance for matching model to data in degF (default=5 degF) + :param median: length of median filter (default=7, use 0 to disable) + :param model_spec: file name or URL containing relevant xija model spec + :param version: version of chandra_models repo (tag, branch, or commit) + + :returns: fetch.Msid object +``` """ import os diff --git a/cheta/fetch.py b/cheta/fetch.py index db8e61ba..7088f994 100644 --- a/cheta/fetch.py +++ b/cheta/fetch.py @@ -3,6 +3,7 @@ """ Fetch values from the Ska engineering telemetry archive. """ + import collections import contextlib import fnmatch diff --git a/cheta/file_defs.py b/cheta/file_defs.py index 54e61e76..9e46f442 100644 --- a/cheta/file_defs.py +++ b/cheta/file_defs.py @@ -11,6 +11,7 @@ Arch files are the CXC archive files containing a short interval of telemetry for all MSIDs in the same content-type group (e.g. ACIS2ENG). """ + import os SKA = os.environ.get("SKA") or "/proj/sot/ska" diff --git a/cheta/tests/test_remote_access.py b/cheta/tests/test_remote_access.py index dfe5419a..0a366d4d 100644 --- a/cheta/tests/test_remote_access.py +++ b/cheta/tests/test_remote_access.py @@ -70,6 +70,7 @@ installation root directory (`python -c 'import sys; print(sys.prefix)'`) as `ska_remote_access.json`. """ + import os import shutil from pathlib import Path diff --git a/cheta/units.py b/cheta/units.py index dc956e05..2de722c6 100644 --- a/cheta/units.py +++ b/cheta/units.py @@ -37,6 +37,7 @@ ('VDC', 'V'), ('W', 'W')} """ + import logging import os import pickle diff --git a/cheta/update_server_sync.py b/cheta/update_server_sync.py index 49e74990..048366f9 100644 --- a/cheta/update_server_sync.py +++ b/cheta/update_server_sync.py @@ -29,7 +29,6 @@ sync repository to capture newly-available data since the last bundle. """ - import argparse import gzip import pickle From 8d7c3bbb74515def35323062e49bde1e8f86fb6a Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Tue, 19 Mar 2024 11:32:33 -0400 Subject: [PATCH 3/8] Exclude dev_utils --- ruff.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/ruff.toml b/ruff.toml index 33719d22..c9e92aff 100644 --- a/ruff.toml +++ b/ruff.toml @@ -3,6 +3,7 @@ extend = "ruff-base.toml" # These are files to exclude for this project. extend-exclude = [ # "**/*.ipynb", # commonly not ruff-compliant + "dev_utils", ] # These are rules that commonly cause many ruff warnings. 
Code will be improved by From 8ce7b7b6a4eec9174a1a15e40599b1bf848b1c92 Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Tue, 19 Mar 2024 11:35:26 -0400 Subject: [PATCH 4/8] Update code comment --- cheta/cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cheta/cache.py b/cheta/cache.py index 79715b75..ae30dcba 100644 --- a/cheta/cache.py +++ b/cheta/cache.py @@ -14,7 +14,7 @@ def __missing__(self, key): return 0 -# TODO: replace with std_library version of this in Py3.6 (issue #173) +# Note: this is not equivalent to functools.lru_cache, see #173. def lru_cache(maxsize=30): From 27c658c03be61679ae2c9deb3e66f7cbc420efa3 Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Sat, 23 Mar 2024 07:31:27 -0400 Subject: [PATCH 5/8] Use shape attribute to determine ndarray-like attribute --- cheta/fetch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cheta/fetch.py b/cheta/fetch.py index 7088f994..551de9cf 100644 --- a/cheta/fetch.py +++ b/cheta/fetch.py @@ -719,7 +719,7 @@ def _get_comp_data(self, comp_cls): self.colnames = [ attr for attr, val in attrs.items() - if (isinstance(val, np.ndarray) and len(val) == len(attrs["times"])) + if (hasattr(val, "shape") and len(val) == len(attrs["times"])) ] # Apply attributes to self From 2fc6807f337ef4f37e4744525798cc75585ff492 Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Sat, 23 Mar 2024 07:31:56 -0400 Subject: [PATCH 6/8] Filter out Quat normalization warning --- cheta/derived/comps.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cheta/derived/comps.py b/cheta/derived/comps.py index a1e8579e..332276ea 100644 --- a/cheta/derived/comps.py +++ b/cheta/derived/comps.py @@ -22,6 +22,7 @@ import functools import re +import warnings import astropy.table as tbl import numpy as np @@ -428,7 +429,11 @@ def get_msid_attrs(self, tstart: float, tstop: float, msid: str, msid_args: tupl q4 = np.sqrt((1.0 - q1**2 - q2**2 - q3**2).clip(0.0)) q = np.array([q1, q2, q3, q4]).transpose() - quat = Quat(q=normalize(q)) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", message="Normalizing quaternion with zero norm" + ) + quat = Quat(q=normalize(q)) bads = np.zeros_like(q1, dtype=bool) for msid in msids: bads |= dat[msid].bads From 927be989da0f4044f7b0c2cb41a05243e2c0e622 Mon Sep 17 00:00:00 2001 From: Tom Aldcroft Date: Sat, 23 Mar 2024 10:12:18 -0400 Subject: [PATCH 7/8] Avoid user-facing resource warning for unclosed file pickle.load(open(file, "rb")) does not close the file --- cheta/fetch.py | 3 ++- cheta/units.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cheta/fetch.py b/cheta/fetch.py index 551de9cf..1333a088 100644 --- a/cheta/fetch.py +++ b/cheta/fetch.py @@ -295,7 +295,8 @@ def load_msid_names(all_msid_names_files): all_colnames = dict() for k, msid_names_file in all_msid_names_files.items(): try: - all_colnames[k] = pickle.load(open(os.path.join(*msid_names_file), "rb")) + with open(os.path.join(*msid_names_file), "rb") as fh: + all_colnames[k] = pickle.load(fh) except IOError: pass return all_colnames diff --git a/cheta/units.py b/cheta/units.py index 2de722c6..4b7137dd 100644 --- a/cheta/units.py +++ b/cheta/units.py @@ -61,7 +61,8 @@ def emit(self, record): units = {} units["system"] = "cxc" -units["cxc"] = pickle.load(open(os.path.join(module_dir, "units_cxc.pkl"), "rb")) +with open(os.path.join(module_dir, "units_cxc.pkl"), "rb") as fh: + units["cxc"] = pickle.load(fh) # Equivalent unit descriptors used in 'eng' and 'cxc' units @@ -222,7 
+223,8 @@ def load_units(unit_system):
 
     if unit_system not in units:
         filename = os.path.join(module_dir, "units_{0}.pkl".format(unit_system))
-        units[unit_system] = pickle.load(open(filename, "rb"))
+        with open(filename, "rb") as fh:
+            units[unit_system] = pickle.load(fh)
 
 
 def set_units(unit_system):

From 33660ad6828420e3bbf3dbef97522e6d4aeefe20 Mon Sep 17 00:00:00 2001
From: Tom Aldcroft
Date: Sat, 23 Mar 2024 10:13:08 -0400
Subject: [PATCH 8/8] Add a test for computed quat bad vals issue

---
 cheta/tests/test_comps.py | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/cheta/tests/test_comps.py b/cheta/tests/test_comps.py
index c9109648..e28aa7f3 100644
--- a/cheta/tests/test_comps.py
+++ b/cheta/tests/test_comps.py
@@ -2,6 +2,8 @@
 
 """Test that computed MSIDs work as expected."""
 
+import warnings
+
 import astropy.units as u
 import numpy as np
 import pytest
@@ -269,6 +271,28 @@ def test_quat_comp(msid, maude, offset):
         assert isinstance(datq.vals, Quat)
 
 
+def test_quat_comp_bad_times():
+    """Test bad time data on 2024:064. All four quat components are zero and bad.
+
+    The bad sample times are ['2024:064:09:27:02.652' '2024:064:09:27:03.677'].
+    """
+    start = "2024:064:09:26:00"
+    stop = "2024:064:09:28:00"
+    # Assert no warnings despite quaternions with zero norm. The zero-norm samples
+    # are marked bad.
+    with warnings.catch_warnings():
+        warnings.simplefilter("error")  # Assert no warnings
+        dat = fetch_eng.MSID("quat_aoattqt", start, stop)
+
+    assert np.count_nonzero(dat.bads) == 2
+    assert len(dat.vals) == len(dat.times)
+
+    dat2 = fetch_eng.Msid("quat_aoattqt", start, stop)
+    assert dat2.bads is None  # After Msid filtering
+    assert len(dat2.vals) == len(dat2.times)
+    assert len(dat2.vals) == len(dat.vals) - 2
+
+
 def test_pitch_comp():
     """Test pitch_comp during a time with NPNT, NMAN, NSUN and Safe Sun"""
     start = "2022:293"
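
A note on the cheta/cache.py hunk in PATCH 1: the decorator keeps a per-key use count and evicts the *least frequently used* entries, which is why PATCH 4 replaces the old TODO with a comment that this is not equivalent to `functools.lru_cache` (LRU evicts by recency, not frequency). A minimal sketch of the eviction step, using hypothetical cache contents:

```python
from heapq import nsmallest
from operator import itemgetter

# Hypothetical cache state: key -> cached value, key -> number of times used
cache = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
use_count = {"a": 12, "b": 1, "c": 7, "d": 2, "e": 30}
maxsize = 30

# Purge the least frequently used ~10% of entries, as in cheta/cache.py
for key, _ in nsmallest(maxsize // 10, use_count.items(), key=itemgetter(1)):
    del cache[key], use_count[key]

assert "b" not in cache and "d" not in cache  # lowest use counts evicted
```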
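
PATCH 5 swaps an `isinstance(val, np.ndarray)` check for `hasattr(val, "shape")` so that ndarray-like objects (such as a `Quat` holding an array of quaternions) are also picked up as column attributes. A sketch of the effect, using a stand-in class since the real `attrs` dict comes from the computed-MSID machinery:

```python
import numpy as np


class QuatLike:
    """Stand-in for an ndarray-like object (e.g. Quat) that is not an ndarray."""

    def __init__(self, q):
        self.q = np.asarray(q)

    @property
    def shape(self):
        return self.q.shape[:-1]

    def __len__(self):
        return len(self.q)


attrs = {
    "times": np.arange(5.0),
    "vals": np.ones(5),
    "quat": QuatLike(np.zeros((5, 4))),
    "unit": "degC",  # no .shape attribute, so excluded either way
}

# The old isinstance(val, np.ndarray) test would drop "quat"; duck-typing keeps it.
colnames = [
    attr
    for attr, val in attrs.items()
    if hasattr(val, "shape") and len(val) == len(attrs["times"])
]
assert colnames == ["times", "vals", "quat"]
```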
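
PATCH 6 and PATCH 8 together illustrate a testing pattern: suppress a specific, expected warning at the point where it is known to be benign (`filterwarnings("ignore", message=...)` in comps.py), then promote any remaining warnings to errors in the test (`simplefilter("error")`) so a new warning fails loudly. A minimal sketch of the pattern; `normalize_rows` and its warning message are illustrative, not the cheta or Quaternion API:

```python
import warnings

import numpy as np


def normalize_rows(q):
    # Zero-norm rows yield NaN here and are flagged bad downstream, so the
    # divide warning is expected and filtered at the source (cf. comps.py).
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="invalid value encountered")
        return q / np.linalg.norm(q, axis=-1, keepdims=True)


def test_normalize_rows_quiet():
    with warnings.catch_warnings():
        warnings.simplefilter("error")  # any escaping warning -> test failure
        out = normalize_rows(np.array([[0.0, 0.0, 0.0, 1.0], [0.0] * 4]))
    assert np.allclose(out[0], [0, 0, 0, 1])
    assert np.isnan(out[1]).all()  # zero-norm row, to be marked bad by caller


test_normalize_rows_quiet()
```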