From 2cf524bbdd7548a4444ec6d8f7cb04bd2d4af911 Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Thu, 24 Oct 2024 00:00:15 -0700 Subject: [PATCH 01/15] Add Jenkinsfile. --- Jenkinsfile | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 Jenkinsfile diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..56f1b5f --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,13 @@ +@Library('JenkinsShared')_ +DevelopPipeline( + name: "donut_viz", + module_name: "lsst.donut.viz", + idl_names: [], + build_all_idl: false, + extra_packages: [], + kickoff_jobs: [], + slack_build_channel: "aos-builds", + has_doc_site: false, + require_git_lfs: false, + require_scons: true +) From 76b7e1767d7bf450cf8f194c7ee598e686c204a7 Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Tue, 29 Oct 2024 13:08:44 -0700 Subject: [PATCH 02/15] Implement pipeline run for tests. --- pipelines/scienceDirectDetectPipeline.yaml | 46 ++++++++++++++ tests/test_aggregate_visit_science_sensors.py | 60 +++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 pipelines/scienceDirectDetectPipeline.yaml create mode 100644 tests/test_aggregate_visit_science_sensors.py diff --git a/pipelines/scienceDirectDetectPipeline.yaml b/pipelines/scienceDirectDetectPipeline.yaml new file mode 100644 index 0000000..4c25cdd --- /dev/null +++ b/pipelines/scienceDirectDetectPipeline.yaml @@ -0,0 +1,46 @@ +# This yaml file is used to define the tasks and configuration of +# a Gen 3 pipeline used for testing in ts_wep. +description: wep basic processing test pipeline +# Here we specify the corresponding instrument for the data we +# will be using. +instrument: lsst.obs.lsst.LsstCam +# Then we can specify each task in our pipeline by a name +# and then specify the class name corresponding to that task +tasks: + isr: + class: lsst.ip.isr.isrTask.IsrTask + # Below we specify the configuration settings we want to use + # when running the task in this pipeline. Since our data doesn't + # include bias or flats we only want to use doApplyGains and + # doOverscan in our isr task. 
+ config: + connections.outputExposure: "postISRCCD" + doBias: False + doVariance: False + doLinearize: False + doCrosstalk: False + doDefect: False + doNanMasking: False + doInterpolate: False + doBrighterFatter: False + doDark: False + doFlat: False + doApplyGains: True + doFringe: False + doOverscan: True + python: OverscanCorrectionTask.ConfigClass.fitType = 'MEDIAN' + generateDonutDirectDetectTask: + class: lsst.ts.wep.task.generateDonutDirectDetectTask.GenerateDonutDirectDetectTask + config: + donutSelector.useCustomMagLimit: True + cutOutDonutsScienceSensorTask: + class: lsst.ts.wep.task.CutOutDonutsScienceSensorTask + calcZernikesTask: + class: lsst.ts.wep.task.calcZernikesTask.CalcZernikesTask + config: + estimateZernikes.maxNollIndex: 28 + estimateZernikes.saveHistory: False + estimateZernikes.maskKwargs: {'doMaskBlends': False} + aggregateZernikeTablesTask: + class: lsst.donut.viz.AggregateZernikeTablesTask + \ No newline at end of file diff --git a/tests/test_aggregate_visit_science_sensors.py b/tests/test_aggregate_visit_science_sensors.py new file mode 100644 index 0000000..e60b4ce --- /dev/null +++ b/tests/test_aggregate_visit_science_sensors.py @@ -0,0 +1,60 @@ +import os + +from lsst.daf.butler import Butler +from lsst.donut.viz import AggregateZernikeTablesTask, AggregateZernikeTablesTaskConfig +from lsst.ts.wep.utils import ( + getModulePath, + runProgram, + writeCleanUpRepoCmd, + writePipetaskCmd, +) +from lsst.utils.tests import TestCase + + +class TestAggregateZernikeTablesTask(TestCase): + @classmethod + def setUpClass(cls): + wep_module_dir = getModulePath() + cls.test_data_dir = os.path.join(wep_module_dir, "tests", "testData") + cls.test_repo_dir = os.path.join(cls.test_data_dir, "gen3TestRepo") + + cls.butler = Butler(cls.test_repo_dir) + cls.test_run_name = "test_run_1" + registry = cls.butler.registry + collections_list = list(registry.queryCollections()) + if cls.test_run_name in collections_list: + clean_up_cmd = writeCleanUpRepoCmd(cls.test_repo_dir, cls.test_run_name) + runProgram(clean_up_cmd) + + collections = "refcats/gen2,LSSTCam/calib,LSSTCam/raw/all" + instrument = "lsst.obs.lsst.LsstCam" + cls.camera_name = "LSSTCam" + test_pipeline = os.path.join( + os.getenv("DONUT_VIZ_DIR"), + "pipelines", + "scienceDirectDetectPipeline.yaml", + ) + + pipe_cmd = writePipetaskCmd( + cls.test_repo_dir, + cls.test_run_name, + instrument, + collections, + pipelineYaml=test_pipeline, + ) + # Make sure we are using the right exposure+detector combinations + pipe_cmd += ' -d "exposure IN (4021123106001, 4021123106002) AND ' + pipe_cmd += 'detector NOT IN (191, 192, 195, 196, 199, 200, 203, 204)"' + runProgram(pipe_cmd) + + @classmethod + def tearDownClass(cls): + clean_up_cmd = writeCleanUpRepoCmd(cls.test_repo_dir, cls.test_run_name) + runProgram(clean_up_cmd) + + def setUp(self): + self.config = AggregateZernikeTablesTaskConfig() + self.task = AggregateZernikeTablesTask(config=self.config) + + def testValidateConfigs(self): + self.assertEqual(1, 1) From 3d68495986f03c8c7851c59c6ea4cafc754ae21b Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Tue, 29 Oct 2024 14:10:11 -0700 Subject: [PATCH 03/15] Add aggregateZernikes tests. 
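
The tests below read the aggregated Zernike tables back out of the test repository that the pipeline run in setUpClass populates. As a rough, stand-alone sketch of the same inspection outside the test harness (the repository path is only a stand-in for the ts_wep test repo that getModulePath() resolves; the dataset type names, run collection, and butler calls mirror the test code below):

    from lsst.daf.butler import Butler

    # Stand-in path; the tests build this from the ts_wep package location instead.
    butler = Butler("tests/testData/gen3TestRepo")

    # One aggregated-average table is expected for the visit pair in test_run_1.
    (avg_ref,) = butler.query_datasets("aggregateZernikesAvg", collections="test_run_1")
    avg_table = butler.get(avg_ref)
    print(avg_table["detector"])  # one row per detector, e.g. R22_S10 and R22_S11

    # The raw table keeps one row per donut pair rather than one per detector.
    (raw_ref,) = butler.query_datasets("aggregateZernikesRaw", collections="test_run_1")
    raw_table = butler.get(raw_ref)
    print(len(raw_table))
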
--- tests/test_aggregate_visit_science_sensors.py | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/tests/test_aggregate_visit_science_sensors.py b/tests/test_aggregate_visit_science_sensors.py index e60b4ce..b5d3fca 100644 --- a/tests/test_aggregate_visit_science_sensors.py +++ b/tests/test_aggregate_visit_science_sensors.py @@ -1,7 +1,6 @@ import os from lsst.daf.butler import Butler -from lsst.donut.viz import AggregateZernikeTablesTask, AggregateZernikeTablesTaskConfig from lsst.ts.wep.utils import ( getModulePath, runProgram, @@ -52,9 +51,22 @@ def tearDownClass(cls): clean_up_cmd = writeCleanUpRepoCmd(cls.test_repo_dir, cls.test_run_name) runProgram(clean_up_cmd) - def setUp(self): - self.config = AggregateZernikeTablesTaskConfig() - self.task = AggregateZernikeTablesTask(config=self.config) + def testZernikesAvg(self): + average_dataset_list = list( + self.butler.query_datasets("aggregateZernikesAvg", collections="test_run_1") + ) + self.assertEqual(len(average_dataset_list), 1) + agg_zern_avg = self.butler.get(average_dataset_list[0]) + self.assertEqual(len(agg_zern_avg), 2) + self.assertCountEqual(agg_zern_avg["detector"], ["R22_S10", "R22_S11"]) - def testValidateConfigs(self): - self.assertEqual(1, 1) + def testZernikesRaw(self): + raw_dataset_list = list( + self.butler.query_datasets("aggregateZernikesRaw", collections="test_run_1") + ) + self.assertEqual(len(raw_dataset_list), 1) + agg_zern_raw = self.butler.get(raw_dataset_list[0]) + self.assertEqual(len(agg_zern_raw), 6) + self.assertCountEqual( + agg_zern_raw["detector"], ["R22_S10"] * 3 + ["R22_S11"] * 3 + ) From c80d08e2c3c3ecd9a6b30f09ccce6d54040473bb Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Tue, 29 Oct 2024 14:53:18 -0700 Subject: [PATCH 04/15] Add donut table test. 
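
The aggregated donut table gathers the intra- and extra-focal donuts that fed the Zernike estimates, so the new test checks both the per-donut columns and the nested per-exposure metadata. A minimal sketch of that layout, under the same assumptions as above (stand-in repo path, run collection test_run_1):

    from lsst.daf.butler import Butler

    butler = Butler("tests/testData/gen3TestRepo")  # stand-in path
    (ref,) = butler.query_datasets("aggregateDonutTable", collections="test_run_1")
    donuts = butler.get(ref)

    # Per-donut columns include the detector name and the focusZ of the
    # exposure each donut came from.
    print(donuts["detector", "focusZ"])

    # Visit-level metadata is stored once per defocal side plus an averaged entry.
    print(sorted(donuts.meta))          # ['average', 'extra', 'intra']
    print(donuts.meta["intra"]["mjd"])
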
--- pipelines/scienceDirectDetectPipeline.yaml | 9 ++-- tests/test_aggregate_visit_science_sensors.py | 45 +++++++++++++++++-- 2 files changed, 46 insertions(+), 8 deletions(-) diff --git a/pipelines/scienceDirectDetectPipeline.yaml b/pipelines/scienceDirectDetectPipeline.yaml index 4c25cdd..e678906 100644 --- a/pipelines/scienceDirectDetectPipeline.yaml +++ b/pipelines/scienceDirectDetectPipeline.yaml @@ -29,10 +29,10 @@ tasks: doFringe: False doOverscan: True python: OverscanCorrectionTask.ConfigClass.fitType = 'MEDIAN' - generateDonutDirectDetectTask: - class: lsst.ts.wep.task.generateDonutDirectDetectTask.GenerateDonutDirectDetectTask + generateDonutCatalogWcsTask: + class: lsst.ts.wep.task.generateDonutCatalogWcsTask.GenerateDonutCatalogWcsTask config: - donutSelector.useCustomMagLimit: True + donutSelector.unblendedSeparation: 1 cutOutDonutsScienceSensorTask: class: lsst.ts.wep.task.CutOutDonutsScienceSensorTask calcZernikesTask: @@ -43,4 +43,5 @@ tasks: estimateZernikes.maskKwargs: {'doMaskBlends': False} aggregateZernikeTablesTask: class: lsst.donut.viz.AggregateZernikeTablesTask - \ No newline at end of file + aggregateDonutTablesTask: + class: lsst.donut.viz.AggregateDonutTablesTask diff --git a/tests/test_aggregate_visit_science_sensors.py b/tests/test_aggregate_visit_science_sensors.py index b5d3fca..ee3ee4a 100644 --- a/tests/test_aggregate_visit_science_sensors.py +++ b/tests/test_aggregate_visit_science_sensors.py @@ -16,6 +16,17 @@ def setUpClass(cls): wep_module_dir = getModulePath() cls.test_data_dir = os.path.join(wep_module_dir, "tests", "testData") cls.test_repo_dir = os.path.join(cls.test_data_dir, "gen3TestRepo") + cls.meta_keys = [ + "alt", + "az", + "dec", + "mjd", + "parallacticAngle", + "ra", + "rotAngle", + "rotTelPos", + "visit", + ] cls.butler = Butler(cls.test_repo_dir) cls.test_run_name = "test_run_1" @@ -51,18 +62,23 @@ def tearDownClass(cls): clean_up_cmd = writeCleanUpRepoCmd(cls.test_repo_dir, cls.test_run_name) runProgram(clean_up_cmd) - def testZernikesAvg(self): + def testAggregateZernikesAvg(self): average_dataset_list = list( - self.butler.query_datasets("aggregateZernikesAvg", collections="test_run_1") + self.butler.query_datasets( + "aggregateZernikesAvg", collections=self.test_run_name + ) ) self.assertEqual(len(average_dataset_list), 1) agg_zern_avg = self.butler.get(average_dataset_list[0]) self.assertEqual(len(agg_zern_avg), 2) self.assertCountEqual(agg_zern_avg["detector"], ["R22_S10", "R22_S11"]) + self.assertCountEqual(agg_zern_avg.meta.keys(), self.meta_keys) - def testZernikesRaw(self): + def testAggregateZernikesRaw(self): raw_dataset_list = list( - self.butler.query_datasets("aggregateZernikesRaw", collections="test_run_1") + self.butler.query_datasets( + "aggregateZernikesRaw", collections=self.test_run_name + ) ) self.assertEqual(len(raw_dataset_list), 1) agg_zern_raw = self.butler.get(raw_dataset_list[0]) @@ -70,3 +86,24 @@ def testZernikesRaw(self): self.assertCountEqual( agg_zern_raw["detector"], ["R22_S10"] * 3 + ["R22_S11"] * 3 ) + self.assertCountEqual(agg_zern_raw.meta.keys(), self.meta_keys) + + def testAggregateDonuts(self): + donut_table_list = list( + self.butler.query_datasets( + "aggregateDonutTable", collections=self.test_run_name + ) + ) + self.assertEqual(len(donut_table_list), 1) + agg_donut_table = self.butler.get(donut_table_list[0]) + self.assertEqual(len(agg_donut_table), 12) + self.assertCountEqual( + agg_donut_table["detector"], ["R22_S10"] * 6 + ["R22_S11"] * 6 + ) + 
self.assertCountEqual(agg_donut_table["focusZ"], [1.5] * 6 + [-1.5] * 6)
+        self.assertCountEqual(
+            agg_donut_table.meta.keys(), ["extra", "intra", "average"]
+        )
+        donut_meta_keys = self.meta_keys + ["focusZ"]
+        for key in ["extra", "intra"]:
+            self.assertCountEqual(agg_donut_table.meta[key].keys(), donut_meta_keys)

From bd77c3e6bc81c396be0c98c48e1ce33627019970 Mon Sep 17 00:00:00 2001
From: "J. Bryce Kalmbach"
Date: Tue, 29 Oct 2024 16:09:17 -0700
Subject: [PATCH 05/15] Add donut stamps aggregation test.

---
 pipelines/scienceDirectDetectPipeline.yaml    |  2 ++
 tests/test_aggregate_visit_science_sensors.py | 27 +++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/pipelines/scienceDirectDetectPipeline.yaml b/pipelines/scienceDirectDetectPipeline.yaml
index e678906..c4d5a30 100644
--- a/pipelines/scienceDirectDetectPipeline.yaml
+++ b/pipelines/scienceDirectDetectPipeline.yaml
@@ -45,3 +45,5 @@ tasks:
     class: lsst.donut.viz.AggregateZernikeTablesTask
   aggregateDonutTablesTask:
     class: lsst.donut.viz.AggregateDonutTablesTask
+  aggregateDonutStampsTask:
+    class: lsst.donut.viz.AggregateDonutStampsTask
diff --git a/tests/test_aggregate_visit_science_sensors.py b/tests/test_aggregate_visit_science_sensors.py
index ee3ee4a..586b3c3 100644
--- a/tests/test_aggregate_visit_science_sensors.py
+++ b/tests/test_aggregate_visit_science_sensors.py
@@ -107,3 +107,30 @@ def testAggregateDonuts(self):
         donut_meta_keys = self.meta_keys + ["focusZ"]
         for key in ["extra", "intra"]:
             self.assertCountEqual(agg_donut_table.meta[key].keys(), donut_meta_keys)
+        donut_meta_keys.remove("focusZ")
+        donut_meta_keys.remove("visit")
+        self.assertCountEqual(agg_donut_table.meta["average"], donut_meta_keys)
+
+    def testAggregateDonutStamps(self):
+        intra_dataset_list = list(
+            self.butler.query_datasets(
+                "donutStampsIntraVisit", collections=self.test_run_name
+            )
+        )
+        extra_dataset_list = list(
+            self.butler.query_datasets(
+                "donutStampsExtraVisit", collections=self.test_run_name
+            )
+        )
+        self.assertEqual(len(intra_dataset_list), 1)
+        self.assertEqual(len(extra_dataset_list), 1)
+        intra_donuts = self.butler.get(intra_dataset_list[0])
+        extra_donuts = self.butler.get(extra_dataset_list[0])
+        self.assertEqual(len(intra_donuts), 2)
+        self.assertEqual(len(extra_donuts), 2)
+        intra_meta = intra_donuts.metadata.toDict()
+        extra_meta = extra_donuts.metadata.toDict()
+        self.assertCountEqual(intra_meta["DET_NAME"], ["R22_S10", "R22_S11"])
+        self.assertCountEqual(intra_meta["DFC_TYPE"], ["intra"] * 2)
+        self.assertCountEqual(extra_meta["DET_NAME"], ["R22_S10", "R22_S11"])
+        self.assertCountEqual(extra_meta["DFC_TYPE"], ["extra"] * 2)

From d12766c2eb76dcdd02ca03255e00f1b033e151b8 Mon Sep 17 00:00:00 2001
From: "J. Bryce Kalmbach"
Date: Tue, 29 Oct 2024 16:13:16 -0700
Subject: [PATCH 06/15] Add ts_wep package as a needed install for tests.

---
 Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 56f1b5f..8c06f2c 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -4,7 +4,7 @@ DevelopPipeline(
     module_name: "lsst.donut.viz",
     idl_names: [],
     build_all_idl: false,
-    extra_packages: [],
+    extra_packages: ['ts_wep'],
     kickoff_jobs: [],
     slack_build_channel: "aos-builds",
     has_doc_site: false,

From 0d11458ea6f28fa5d7b6bef37fb748948a7ee41d Mon Sep 17 00:00:00 2001
From: "J. Bryce Kalmbach"
Date: Wed, 30 Oct 2024 09:36:15 -0700
Subject: [PATCH 07/15] Add AOSVisitTables tests.
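
The AOS visit tables are built from the aggregates tested above, so the new tests mainly check that the joined tables stay consistent with their inputs. A rough interactive version of the same consistency check, under the assumptions already noted (stand-in repo path, run collection test_run_1; the dataset type names and butler calls follow the test code below):

    import numpy as np

    from lsst.daf.butler import Butler

    butler = Butler("tests/testData/gen3TestRepo")  # stand-in path
    (ref,) = butler.query_datasets("aggregateAOSVisitTableRaw", collections="test_run_1")
    aos_raw = butler.get(ref)
    zk_raw = butler.get(
        "aggregateZernikesRaw", dataId=ref.dataId, collections="test_run_1"
    )

    # The raw AOS table carries one row per donut pair, so its Zernike column
    # should simply mirror the aggregated raw Zernike table.
    np.testing.assert_array_equal(zk_raw["zk_CCS"], aos_raw["zk_CCS"])
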
--- pipelines/scienceDirectDetectPipeline.yaml | 2 + tests/test_aggregate_visit_science_sensors.py | 60 ++++++++++++++++++- 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/pipelines/scienceDirectDetectPipeline.yaml b/pipelines/scienceDirectDetectPipeline.yaml index c4d5a30..e64516b 100644 --- a/pipelines/scienceDirectDetectPipeline.yaml +++ b/pipelines/scienceDirectDetectPipeline.yaml @@ -47,3 +47,5 @@ tasks: class: lsst.donut.viz.AggregateDonutTablesTask aggregateDonutStampsTask: class: lsst.donut.viz.AggregateDonutStampsTask + aggregateAOSVisitTableTask: + class: lsst.donut.viz.AggregateAOSVisitTableTask diff --git a/tests/test_aggregate_visit_science_sensors.py b/tests/test_aggregate_visit_science_sensors.py index 586b3c3..038aa8f 100644 --- a/tests/test_aggregate_visit_science_sensors.py +++ b/tests/test_aggregate_visit_science_sensors.py @@ -1,5 +1,6 @@ import os +import numpy as np from lsst.daf.butler import Butler from lsst.ts.wep.utils import ( getModulePath, @@ -10,7 +11,7 @@ from lsst.utils.tests import TestCase -class TestAggregateZernikeTablesTask(TestCase): +class TestAggregateTasks(TestCase): @classmethod def setUpClass(cls): wep_module_dir = getModulePath() @@ -134,3 +135,60 @@ def testAggregateDonutStamps(self): self.assertCountEqual(intra_meta["DFC_TYPE"], ["intra"] * 2) self.assertCountEqual(extra_meta["DET_NAME"], ["R22_S10", "R22_S11"]) self.assertCountEqual(extra_meta["DFC_TYPE"], ["extra"] * 2) + + def testAggregateAOSVisitTableRaw(self): + raw_visit_table_list = list( + self.butler.query_datasets( + "aggregateAOSVisitTableRaw", collections=self.test_run_name + ) + ) + self.assertEqual(len(raw_visit_table_list), 1) + raw_visit_table = self.butler.get(raw_visit_table_list[0]) + self.assertCountEqual(raw_visit_table.meta.keys(), self.meta_keys) + raw_zern_table = self.butler.get( + "aggregateZernikesRaw", + dataId=raw_visit_table_list[0].dataId, + collections=self.test_run_name, + ) + self.assertEqual(len(raw_zern_table), len(raw_visit_table)) + np.testing.assert_array_equal( + raw_zern_table["zk_CCS"], raw_visit_table["zk_CCS"] + ) + donut_table = self.butler.get( + "aggregateDonutTable", + dataId=raw_visit_table_list[0].dataId, + collections=self.test_run_name, + ) + self.assertEqual(len(donut_table) / 2, len(raw_visit_table)) + np.testing.assert_array_equal( + donut_table["coord_ra"][donut_table["focusZ"] == -1.5].value, + raw_visit_table["coord_ra_intra"], + ) + + def testAggregateAOSVisitTableAvg(self): + avg_visit_table_list = list( + self.butler.query_datasets( + "aggregateAOSVisitTableAvg", collections=self.test_run_name + ) + ) + self.assertEqual(len(avg_visit_table_list), 1) + avg_visit_table = self.butler.get(avg_visit_table_list[0]) + self.assertCountEqual(avg_visit_table.meta.keys(), self.meta_keys) + avg_zern_table = self.butler.get( + "aggregateZernikesAvg", + dataId=avg_visit_table_list[0].dataId, + collections=self.test_run_name, + ) + self.assertEqual(len(avg_zern_table), len(avg_visit_table)) + np.testing.assert_array_equal( + avg_zern_table["zk_CCS"], avg_visit_table["zk_CCS"] + ) + donut_table = self.butler.get( + "aggregateDonutTable", + dataId=avg_visit_table_list[0].dataId, + collections=self.test_run_name, + ) + np.testing.assert_array_equal( + np.mean(donut_table["thx_CCS"][donut_table["detector"] == "R22_S11"]), + avg_visit_table["thx_CCS"][avg_visit_table["detector"] == "R22_S11"], + ) From f3ef672a004932dba886d27831eb4535535236de Mon Sep 17 00:00:00 2001 From: "J. 
Bryce Kalmbach" Date: Wed, 30 Oct 2024 09:48:29 -0700 Subject: [PATCH 08/15] Fix Jenkinsfile. --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 8c06f2c..735ef73 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4,7 +4,7 @@ DevelopPipeline( module_name: "lsst.donut.viz", idl_names: [], build_all_idl: false, - extra_packages: ['ts_wep'], + extra_packages: ["lsst-ts/ts_wep"], kickoff_jobs: [], slack_build_channel: "aos-builds", has_doc_site: false, From 1367d16d91629d12469454cda58f5d9087e98789 Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Wed, 30 Oct 2024 09:58:26 -0700 Subject: [PATCH 09/15] Finish pipeline tests for science sensors. --- pipelines/scienceDirectDetectPipeline.yaml | 8 ++++ ...est_donut_viz_pipeline_science_sensors.py} | 37 ++++++++++++++++++- 2 files changed, 44 insertions(+), 1 deletion(-) rename tests/{test_aggregate_visit_science_sensors.py => test_donut_viz_pipeline_science_sensors.py} (86%) diff --git a/pipelines/scienceDirectDetectPipeline.yaml b/pipelines/scienceDirectDetectPipeline.yaml index e64516b..e91b0cc 100644 --- a/pipelines/scienceDirectDetectPipeline.yaml +++ b/pipelines/scienceDirectDetectPipeline.yaml @@ -49,3 +49,11 @@ tasks: class: lsst.donut.viz.AggregateDonutStampsTask aggregateAOSVisitTableTask: class: lsst.donut.viz.AggregateAOSVisitTableTask + plotAOSTask: + class: lsst.donut.viz.PlotAOSTask + config: + doRubinTVUpload: false + plotDonutTask: + class: lsst.donut.viz.PlotDonutTask + config: + doRubinTVUpload: false diff --git a/tests/test_aggregate_visit_science_sensors.py b/tests/test_donut_viz_pipeline_science_sensors.py similarity index 86% rename from tests/test_aggregate_visit_science_sensors.py rename to tests/test_donut_viz_pipeline_science_sensors.py index 038aa8f..aff90bf 100644 --- a/tests/test_aggregate_visit_science_sensors.py +++ b/tests/test_donut_viz_pipeline_science_sensors.py @@ -11,7 +11,7 @@ from lsst.utils.tests import TestCase -class TestAggregateTasks(TestCase): +class TestDonutVizPipeline(TestCase): @classmethod def setUpClass(cls): wep_module_dir = getModulePath() @@ -192,3 +192,38 @@ def testAggregateAOSVisitTableAvg(self): np.mean(donut_table["thx_CCS"][donut_table["detector"] == "R22_S11"]), avg_visit_table["thx_CCS"][avg_visit_table["detector"] == "R22_S11"], ) + + def testPlotAOSTasks(self): + # Test that plots exist in butler + measured_dataset_list = list( + self.butler.query_datasets( + "measuredZernikePyramid", collections=self.test_run_name + ) + ) + self.assertEqual(len(measured_dataset_list), 1) + + intrinsic_dataset_list = list( + self.butler.query_datasets( + "intrinsicZernikePyramid", collections=self.test_run_name + ) + ) + self.assertEqual(len(intrinsic_dataset_list), 1) + + raw_dataset_list = list( + self.butler.query_datasets( + "rawZernikePyramid", collections=self.test_run_name + ) + ) + self.assertEqual(len(raw_dataset_list), 1) + + def testDonutPlotTask(self): + # Test that plots exist in butler + intra_dataset_list = list( + self.butler.query_datasets("donutPlotIntra", collections=self.test_run_name) + ) + self.assertEqual(len(intra_dataset_list), 1) + + extra_dataset_list = list( + self.butler.query_datasets("donutPlotExtra", collections=self.test_run_name) + ) + self.assertEqual(len(extra_dataset_list), 1) From cbe08e2c67575e80e8fd371fb66d1cc4d7c07ec6 Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Wed, 30 Oct 2024 12:40:43 -0700 Subject: [PATCH 10/15] Update ups. Fix test data name. 
--- tests/test_donut_viz_pipeline_science_sensors.py | 6 +++--- ups/donut_viz.table | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/test_donut_viz_pipeline_science_sensors.py b/tests/test_donut_viz_pipeline_science_sensors.py index aff90bf..5101200 100644 --- a/tests/test_donut_viz_pipeline_science_sensors.py +++ b/tests/test_donut_viz_pipeline_science_sensors.py @@ -209,12 +209,12 @@ def testPlotAOSTasks(self): ) self.assertEqual(len(intrinsic_dataset_list), 1) - raw_dataset_list = list( + residual_dataset_list = list( self.butler.query_datasets( - "rawZernikePyramid", collections=self.test_run_name + "residualZernikePyramid", collections=self.test_run_name ) ) - self.assertEqual(len(raw_dataset_list), 1) + self.assertEqual(len(residual_dataset_list), 1) def testDonutPlotTask(self): # Test that plots exist in butler diff --git a/ups/donut_viz.table b/ups/donut_viz.table index c0142d8..be501d4 100644 --- a/ups/donut_viz.table +++ b/ups/donut_viz.table @@ -5,6 +5,7 @@ setupRequired(afw) setupRequired(daf_butler) setupRequired(geom) +setupRequired(ts_wep) # The following is boilerplate for all packages. # See https://dmtn-001.lsst.io for details on LSST_LIBRARY_PATH. From 1521f639006b42dfba8fbe1d41289ab477e064c3 Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Wed, 30 Oct 2024 12:47:29 -0700 Subject: [PATCH 11/15] Add git_lfs required to Jenkinsfile. --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 735ef73..279f4a0 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -8,6 +8,6 @@ DevelopPipeline( kickoff_jobs: [], slack_build_channel: "aos-builds", has_doc_site: false, - require_git_lfs: false, + require_git_lfs: true, require_scons: true ) From fd846e9e48826a4718964df3345b74581e7b50ef Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Wed, 30 Oct 2024 12:57:48 -0700 Subject: [PATCH 12/15] Add changelog. --- .github/workflows/changelog.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/workflows/changelog.yaml diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml new file mode 100644 index 0000000..96d8245 --- /dev/null +++ b/.github/workflows/changelog.yaml @@ -0,0 +1,19 @@ +name: ChangelogUpdated +on: + pull_request: + types: [assigned, opened, synchronize, reopened, labeled, unlabeled] + branches: + - develop +jobs: + build: + name: Check Actions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Changelog check + uses: Zomzog/changelog-checker@v1.2.0 + with: + fileName: doc/versionHistory.rst + checkNotification: Simple + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 04a95ccf1b4350a29dc9793eef8e206c8d845d7c Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Wed, 30 Oct 2024 15:48:47 -0700 Subject: [PATCH 13/15] Make sure aggregateDonutTables pick up both intra and extrafocal donut tables. --- doc/versionHistory.rst | 10 +++ python/lsst/donut/viz/aggregate_visit.py | 74 ++++++++++--------- ...test_donut_viz_pipeline_science_sensors.py | 8 +- 3 files changed, 53 insertions(+), 39 deletions(-) diff --git a/doc/versionHistory.rst b/doc/versionHistory.rst index f5b933c..19decac 100644 --- a/doc/versionHistory.rst +++ b/doc/versionHistory.rst @@ -4,6 +4,16 @@ Version History ################## +.. _lsst.ts.donut.viz-1.1.0: + +------------- +1.1.0 +------------- + +* Add tests for full donut_viz pipeline. +* Add changelog github action. +* Add Jenkinsfile. + .. 
_lsst.ts.donut.viz-1.0.0: ------------- diff --git a/python/lsst/donut/viz/aggregate_visit.py b/python/lsst/donut/viz/aggregate_visit.py index c74c1ee..bad62d1 100644 --- a/python/lsst/donut/viz/aggregate_visit.py +++ b/python/lsst/donut/viz/aggregate_visit.py @@ -241,11 +241,12 @@ def runQuantum( for ref in inputRefs.qualityTables } - # Find common (visit, detector) pairs - keys = set(donutRefDict) & set(qualityRefDict) + # Find common (visit, detector) extra-focal pairs + # DonutQualityTables only saved under extra-focal ids + extra_keys = set(donutRefDict) & set(qualityRefDict) # Raise error if there's no matches - if len(keys) == 0: + if len(extra_keys) == 0: raise RuntimeError( "No (visit, detector) matches found between " "the donut and quality tables" @@ -258,13 +259,9 @@ def runQuantum( tables = [] # Iterate over the common (visit, detector) pairs - for visit, detector in keys: + for visit, detector in extra_keys: # Determine if intra or extra - if visit == pair.intra: - intra = True - elif visit == pair.extra: - intra = False - else: + if visit not in (pair.intra, pair.extra): # This visit isn't in this pair # so we will skip for now continue @@ -274,35 +271,42 @@ def runQuantum( tform = det.getTransform(PIXELS, FIELD_ANGLE) # Load the donut catalog table, and the donut quality table - donutTable = butlerQC.get(donutRefDict[(visit, detector)]) - qualityTable = butlerQC.get(qualityRefDict[(visit, detector)]) + intraDonutTable = butlerQC.get(donutRefDict[(pair.intra, detector)]) + extraDonutTable = butlerQC.get(donutRefDict[(pair.extra, detector)]) + qualityTable = butlerQC.get(qualityRefDict[(pair.extra, detector)]) # Get rows of quality table for this exposure - if intra: - qualityTable = qualityTable[qualityTable["DEFOCAL_TYPE"] == "intra"] - else: - qualityTable = qualityTable[qualityTable["DEFOCAL_TYPE"] == "extra"] - - # Select donuts used in Zernike estimation - table = donutTable[qualityTable["FINAL_SELECT"]] - - # Add focusZ to donut table - table["focusZ"] = ( - intraVisitInfo.focusZ if intra else extraVisitInfo.focusZ - ) - - # Add field angle in CCS to the table - pts = tform.applyForward( - [ - Point2D(x, y) - for x, y in zip(table["centroid_x"], table["centroid_y"]) - ] - ) - table["thx_CCS"] = [pt.y for pt in pts] # Transpose from DVCS to CCS - table["thy_CCS"] = [pt.x for pt in pts] - table["detector"] = det.getName() + intraQualityTable = qualityTable[ + qualityTable["DEFOCAL_TYPE"] == "intra" + ] + extraQualityTable = qualityTable[ + qualityTable["DEFOCAL_TYPE"] == "extra" + ] + + for donutTable, qualityTable in zip( + [intraDonutTable, extraDonutTable], + [intraQualityTable, extraQualityTable], + ): + # Select donuts used in Zernike estimation + table = donutTable[qualityTable["FINAL_SELECT"]] + + # Add focusZ to donut table + table["focusZ"] = table.meta["visit_info"]["focus_z"] + + # Add field angle in CCS to the table + pts = tform.applyForward( + [ + Point2D(x, y) + for x, y in zip(table["centroid_x"], table["centroid_y"]) + ] + ) + table["thx_CCS"] = [ + pt.y for pt in pts + ] # Transpose from DVCS to CCS + table["thy_CCS"] = [pt.x for pt in pts] + table["detector"] = det.getName() - tables.append(table) + tables.append(table) # Don't attempt to stack metadata for table in tables: diff --git a/tests/test_donut_viz_pipeline_science_sensors.py b/tests/test_donut_viz_pipeline_science_sensors.py index 5101200..9f24635 100644 --- a/tests/test_donut_viz_pipeline_science_sensors.py +++ b/tests/test_donut_viz_pipeline_science_sensors.py @@ -101,7 +101,7 @@ def 
testAggregateDonuts(self): self.assertCountEqual( agg_donut_table["detector"], ["R22_S10"] * 6 + ["R22_S11"] * 6 ) - self.assertCountEqual(agg_donut_table["focusZ"], [1.5] * 6 + [-1.5] * 6) + self.assertCountEqual(agg_donut_table["focusZ"].value, [1.5] * 6 + [-1.5] * 6) self.assertCountEqual( agg_donut_table.meta.keys(), ["extra", "intra", "average"] ) @@ -160,9 +160,9 @@ def testAggregateAOSVisitTableRaw(self): collections=self.test_run_name, ) self.assertEqual(len(donut_table) / 2, len(raw_visit_table)) - np.testing.assert_array_equal( - donut_table["coord_ra"][donut_table["focusZ"] == -1.5].value, - raw_visit_table["coord_ra_intra"], + self.assertCountEqual( + donut_table["coord_ra"][donut_table["focusZ"].value == -1.5].value, + raw_visit_table["coord_ra_intra"].value, ) def testAggregateAOSVisitTableAvg(self): From c2fcf56eb1690f835e0e9f292cf028b5462f4901 Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Wed, 30 Oct 2024 16:09:09 -0700 Subject: [PATCH 14/15] Update changelog branch. --- .github/workflows/changelog.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml index 96d8245..5aa1b2f 100644 --- a/.github/workflows/changelog.yaml +++ b/.github/workflows/changelog.yaml @@ -3,7 +3,7 @@ on: pull_request: types: [assigned, opened, synchronize, reopened, labeled, unlabeled] branches: - - develop + - main jobs: build: name: Check Actions From 77fc28139df04969c09beade503dd33b8a6caf0a Mon Sep 17 00:00:00 2001 From: "J. Bryce Kalmbach" Date: Thu, 31 Oct 2024 13:00:15 -0700 Subject: [PATCH 15/15] Update ISR task in pipeline. Clarify code in aggregate_visit. --- pipelines/scienceDirectDetectPipeline.yaml | 31 ++++++++++------------ python/lsst/donut/viz/aggregate_visit.py | 7 +++-- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/pipelines/scienceDirectDetectPipeline.yaml b/pipelines/scienceDirectDetectPipeline.yaml index e91b0cc..26208ac 100644 --- a/pipelines/scienceDirectDetectPipeline.yaml +++ b/pipelines/scienceDirectDetectPipeline.yaml @@ -8,27 +8,24 @@ instrument: lsst.obs.lsst.LsstCam # and then specify the class name corresponding to that task tasks: isr: - class: lsst.ip.isr.isrTask.IsrTask - # Below we specify the configuration settings we want to use - # when running the task in this pipeline. Since our data doesn't - # include bias or flats we only want to use doApplyGains and - # doOverscan in our isr task. + class: lsst.ip.isr.IsrTaskLSST config: - connections.outputExposure: "postISRCCD" + # Although we don't have to apply the amp offset corrections, we do want + # to compute them for analyzeAmpOffsetMetadata to report on as metrics. 
+ doAmpOffset: true + ampOffset.doApplyAmpOffset: false + # Turn off slow steps in ISR + doBrighterFatter: false + doCrosstalk: false + # Turn off flats, biases, darks because we don't have these in test repo doBias: False - doVariance: False - doLinearize: False - doCrosstalk: False - doDefect: False - doNanMasking: False - doInterpolate: False - doBrighterFatter: False doDark: False doFlat: False - doApplyGains: True - doFringe: False - doOverscan: True - python: OverscanCorrectionTask.ConfigClass.fitType = 'MEDIAN' + doBootstrap: True + doApplyGains: False + doLinearize: False + doDeferredCharge: False + doDefect: False generateDonutCatalogWcsTask: class: lsst.ts.wep.task.generateDonutCatalogWcsTask.GenerateDonutCatalogWcsTask config: diff --git a/python/lsst/donut/viz/aggregate_visit.py b/python/lsst/donut/viz/aggregate_visit.py index bad62d1..8148409 100644 --- a/python/lsst/donut/viz/aggregate_visit.py +++ b/python/lsst/donut/viz/aggregate_visit.py @@ -260,10 +260,9 @@ def runQuantum( # Iterate over the common (visit, detector) pairs for visit, detector in extra_keys: - # Determine if intra or extra - if visit not in (pair.intra, pair.extra): - # This visit isn't in this pair - # so we will skip for now + # Check if this extra-focal visit is in this pair. + if visit != pair.extra: + # This visit isn't in this pair so we will skip for now continue # Get pixels -> field angle transform for this detector