 )


+from lsst.pex.config import ListField
 from lsst.pipe.base import connectionTypes as ct

 from ..interfaces import AnalysisBaseConfig, AnalysisBaseConnections, AnalysisPipelineTask


 class MetricAnalysisConnections(
     AnalysisBaseConnections,
-    dimensions=("skymap",),
-    defaultTemplates={"metricBundleName": "objectTableCore_metrics"},
+    dimensions=(),
+    defaultTemplates={"metricTableName": ""},
 ):
+
     data = ct.Input(
-        doc="A summary table of all metrics by tract.",
-        name="{metricBundleName}Table",
+        doc="A table containing metrics.",
+        name="{metricTableName}",
         storageClass="ArrowAstropy",
-        dimensions=("skymap",),
         deferLoad=True,
+        dimensions=(),
     )

+    def __init__(self, *, config=None):
+
+        self.dimensions.update(frozenset(sorted(config.outputDataDimensions)))
+        super().__init__(config=config)
+        self.data = ct.Input(
+            doc=self.data.doc,
+            name=self.data.name,
+            storageClass=self.data.storageClass,
+            deferLoad=self.data.deferLoad,
+            dimensions=frozenset(sorted(config.inputDataDimensions)),
+        )
+

-class MetricAnalysisConfig(AnalysisBaseConfig, pipelineConnections=MetricAnalysisConnections):
-    pass
+class MetricAnalysisConfig(
+    AnalysisBaseConfig,
+    pipelineConnections=MetricAnalysisConnections,
+):
+    inputDataDimensions = ListField[str](
+        doc="Dimensions of the input data table.",
+        default=(),
+        optional=False,
+    )
+    outputDataDimensions = ListField[str](
+        doc="Dimensions of the outputs.",
+        default=(),
+        optional=False,
+    )


 class MetricAnalysisTask(AnalysisPipelineTask):
-    """Turn metric bundles which are per tract into a
-    summary metric table.
+    """Take a metric table and run an analysis tool on the
+    data it contains. This could include creating a plot of
+    the metrics and/or calculating summary values of those
+    metrics, such as means, medians, etc. The analysis is
+    defined within the analysis tool.
     """

     ConfigClass = MetricAnalysisConfig
     _DefaultName = "metricAnalysis"
+
+    def runQuantum(self, butlerQC, inputRefs, outputRefs):
+        # Docstring inherited
+
+        inputs = butlerQC.get(inputRefs)
+        dataId = butlerQC.quantum.dataId
+        plotInfo = self.parsePlotInfo(inputs, dataId)
+
+        data = self.loadData(inputs.pop("data"))
+
+        # TODO: "bands" kwarg is a workaround for DM-47941.
+        outputs = self.run(
+            data=data,
+            plotInfo=plotInfo,
+            bands=dataId["band"],
+            band=dataId["band"],
+            **inputs,
+        )
+
+        butlerQC.put(outputs, outputRefs)
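
For context, a minimal sketch of how the reconfigured task might be wired up. The import path, dataset name, and dimension lists below are illustrative assumptions, not part of this change; they only show that the metric table name and the input/output dimensions are now chosen entirely through configuration.

    # Hypothetical config override (import path, dataset name, and
    # dimensions are assumptions, not taken from this change).
    from lsst.analysis.tools.tasks import MetricAnalysisTask

    config = MetricAnalysisTask.ConfigClass()
    # Resolve the "{metricTableName}" connection template to a concrete dataset.
    config.connections.metricTableName = "objectTableCore_metricsTable"
    # Dimensions of the input metric table and of the task's outputs.
    config.inputDataDimensions = ["skymap", "tract", "band"]
    config.outputDataDimensions = ["skymap", "band"]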