@@ -13,7 +13,7 @@
 
 from .archive_diffs import arch_diffs
 from .constants import (METRICS, SENSOR_NAME_MAP,
-                        SENSORS, INCIDENCE_BASE, GEO_RES)
+                        SENSORS, INCIDENCE_BASE)
 from .pull import pull_nchs_mortality_data
 
 
@@ -72,51 +72,54 @@ def run_module(params: Dict[str, Any]):
     stats = []
     df_pull = pull_nchs_mortality_data(token, test_file)
     for metric in METRICS:
-        if metric == 'percent_of_expected_deaths':
-            logger.info("Generating signal and exporting to CSV",
-                        metric = metric)
-            df = df_pull.copy()
-            df["val"] = df[metric]
-            df["se"] = np.nan
-            df["sample_size"] = np.nan
-            df = add_nancodes(df)
-            # df = df[~df["val"].isnull()]
-            sensor_name = "_".join([SENSOR_NAME_MAP[metric]])
-            dates = create_export_csv(
-                df,
-                geo_res=GEO_RES,
-                export_dir=daily_export_dir,
-                start_date=datetime.strptime(export_start_date, "%Y-%m-%d"),
-                sensor=sensor_name,
-                weekly_dates=True
-            )
-            if len(dates) > 0:
-                stats.append((max(dates), len(dates)))
-        else:
-            for sensor in SENSORS:
+        for geo in ["state", "nation"]:
+            if metric == 'percent_of_expected_deaths':
                 logger.info("Generating signal and exporting to CSV",
-                            metric = metric,
-                            sensor = sensor)
+                            metric=metric, geo_level=geo)
                 df = df_pull.copy()
-                if sensor == "num":
-                    df["val"] = df[metric]
+                if geo == "nation":
+                    df = df[df["geo_id"] == "us"]
                 else:
-                    df["val"] = df[metric] / df["population"] * INCIDENCE_BASE
+                    df = df[df["geo_id"] != "us"]
+                df["val"] = df[metric]
                 df["se"] = np.nan
                 df["sample_size"] = np.nan
                 df = add_nancodes(df)
-                # df = df[~df["val"].isnull()]
-                sensor_name = "_".join([SENSOR_NAME_MAP[metric], sensor])
                 dates = create_export_csv(
                     df,
-                    geo_res=GEO_RES,
+                    geo_res=geo,
                     export_dir=daily_export_dir,
                     start_date=datetime.strptime(export_start_date, "%Y-%m-%d"),
-                    sensor=sensor_name,
+                    sensor=SENSOR_NAME_MAP[metric],
                     weekly_dates=True
                 )
-                if len(dates) > 0:
-                    stats.append((max(dates), len(dates)))
+            else:
+                for sensor in SENSORS:
+                    logger.info("Generating signal and exporting to CSV",
+                                metric=metric, sensor=sensor, geo_level=geo)
+                    df = df_pull.copy()
+                    if geo == "nation":
+                        df = df[df["geo_id"] == "us"]
+                    else:
+                        df = df[df["geo_id"] != "us"]
+                    if sensor == "num":
+                        df["val"] = df[metric]
+                    else:
+                        df["val"] = df[metric] / df["population"] * INCIDENCE_BASE
+                    df["se"] = np.nan
+                    df["sample_size"] = np.nan
+                    df = add_nancodes(df)
+                    sensor_name = "_".join([SENSOR_NAME_MAP[metric], sensor])
+                    dates = create_export_csv(
+                        df,
+                        geo_res=geo,
+                        export_dir=daily_export_dir,
+                        start_date=datetime.strptime(export_start_date, "%Y-%m-%d"),
+                        sensor=sensor_name,
+                        weekly_dates=True
+                    )
+            if len(dates) > 0:
+                stats.append((max(dates), len(dates)))
 
     # Weekly run of archive utility on Monday
     # - Does not upload to S3, that is handled by daily run of archive utility
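
Below is a minimal, standalone sketch of the control flow this hunk introduces: each metric is now exported once per geo level, with the national row (geo_id == "us") split out from the state rows, and the "prop" sensor rescaled to incidence per INCIDENCE_BASE population. The toy DataFrame, its column names, and the INCIDENCE_BASE value (100000 here) are illustrative assumptions, not data from the indicator.

    import pandas as pd

    INCIDENCE_BASE = 100000  # assumed value for illustration; defined in .constants

    # Hypothetical pull result: state rows plus one national "us" row.
    df_pull = pd.DataFrame({
        "geo_id": ["ca", "tx", "us"],
        "population": [39_500_000, 29_500_000, 331_000_000],
        "covid_deaths": [120, 95, 1500],
    })

    for geo in ["state", "nation"]:
        # Same split as the diff: the national aggregate is the "us" row.
        if geo == "nation":
            df = df_pull[df_pull["geo_id"] == "us"].copy()
        else:
            df = df_pull[df_pull["geo_id"] != "us"].copy()
        for sensor in ["num", "prop"]:
            if sensor == "num":
                df["val"] = df["covid_deaths"]  # raw counts
            else:
                # counts rescaled to incidence per INCIDENCE_BASE people
                df["val"] = (df["covid_deaths"]
                             / df["population"] * INCIDENCE_BASE)
            print(geo, sensor, df[["geo_id", "val"]].to_dict("records"))

Filtering before computing "val" means the national proportion is computed against the national population rather than summed state rates, which is why the geo split happens first in both branches of the diff.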
|
|