# modular_analysis_pipeline_fixed.yaml
# Modular Analysis Pipeline with Sub-Pipelines (Fixed Version)
# Demonstrates using sub-pipelines for modular workflow design
---
id: modular_analysis
name: Modular Analysis Pipeline
description: Main pipeline that orchestrates multiple analysis sub-pipelines
version: "2.0.0"

# User-tunable inputs. Each parameter declares a type and a default;
# callers may override any of them at launch time.
parameters:
  # Relative path (under output_path) of the CSV dataset to analyze.
  dataset:
    type: string
    default: "input/dataset.csv"
  # Which analysis sub-pipelines to run; each entry gates one step below.
  analysis_types:
    type: array
    default: ["statistical", "sentiment", "trend"]
  # File extension / format for the compiled report.
  output_format:
    type: string
    default: "pdf"
steps:
  # Read the raw dataset from disk.
  - id: load_data
    tool: filesystem
    action: read
    parameters:
      path: "{{ output_path }}/{{ parameters.dataset }}"

  # Clean + normalize the data via an inline sub-pipeline definition
  # (the block scalar below is parsed as YAML by the pipeline executor).
  - id: data_preprocessing
    tool: pipeline-executor
    parameters:
      pipeline: |
        id: data_preprocessing_sub
        name: Data Preprocessing Sub-Pipeline
        steps:
          - id: clean_data
            tool: data-processing
            action: clean
            parameters:
              data: "{{ inputs.raw_data }}"
              remove_duplicates: true
              handle_missing: "forward_fill"
          - id: normalize_data
            tool: data-processing
            action: transform
            parameters:
              data: "{{ clean_data.result }}"
              operation:
                type: "normalize"
                method: "min-max"
        outputs:
          processed_data: "{{ normalize_data.result }}"
      inputs:
        raw_data: "{{ load_data.result.content }}"
      inherit_context: true
      wait_for_completion: true
    dependencies:
      - load_data

  # --- Optional analyses: each runs an external sub-pipeline file and is
  # --- gated on membership in parameters.analysis_types via `condition`.
  - id: statistical_analysis
    tool: pipeline-executor
    parameters:
      pipeline: "examples/sub_pipelines/statistical_analysis.yaml"
      inputs:
        data: "{{ data_preprocessing.outputs.processed_data }}"
        confidence_level: 0.95
      # Map sub-pipeline output names to names visible in this pipeline.
      output_mapping:
        statistics: "statistical_results"
        summary: "statistical_summary"
      inherit_context: true
    dependencies:
      - data_preprocessing
    condition: "'statistical' in {{ parameters.analysis_types }}"

  - id: sentiment_analysis
    tool: pipeline-executor
    parameters:
      pipeline: "examples/sub_pipelines/sentiment_analysis.yaml"
      inputs:
        data: "{{ data_preprocessing.outputs.processed_data }}"
        text_column: "comments"
      output_mapping:
        sentiment_scores: "sentiment_results"
        sentiment_summary: "sentiment_summary"
      inherit_context: true
    dependencies:
      - data_preprocessing
    condition: "'sentiment' in {{ parameters.analysis_types }}"

  - id: trend_analysis
    tool: pipeline-executor
    parameters:
      pipeline: "examples/sub_pipelines/trend_analysis.yaml"
      inputs:
        data: "{{ data_preprocessing.outputs.processed_data }}"
        time_column: "timestamp"
        value_columns: ["sales", "revenue"]
      output_mapping:
        trends: "trend_results"
        forecasts: "trend_forecasts"
      inherit_context: true
    dependencies:
      - data_preprocessing
    condition: "'trend' in {{ parameters.analysis_types }}"

  # Merge whatever analyses actually ran; skipped analyses contribute {}
  # via the `default({})` filter so the merge never fails on missing keys.
  - id: combine_results
    tool: data-processing
    action: merge
    parameters:
      datasets:
        - name: "statistical"
          data: "{{ statistical_analysis.outputs.statistical_results | default({}) }}"
        - name: "sentiment"
          data: "{{ sentiment_analysis.outputs.sentiment_results | default({}) }}"
        - name: "trend"
          data: "{{ trend_analysis.outputs.trend_results | default({}) }}"
      merge_strategy: "combine"
    dependencies:
      - statistical_analysis
      - sentiment_analysis
      - trend_analysis

  # Chart generation from the preprocessed data.
  - id: generate_visualizations
    tool: visualization
    action: create_charts
    parameters:
      data: "{{ data_preprocessing.outputs.processed_data }}"
      chart_types: ["auto"]
      output_dir: "{{ output_path }}/charts"
      title: "Analysis Results"
      theme: "seaborn"
    dependencies:
      - combine_results

  - id: create_dashboard
    tool: visualization
    action: create_dashboard
    parameters:
      charts: "{{ generate_visualizations.charts }}"
      layout: "grid"
      title: "Analysis Dashboard"
      output_dir: "{{ output_path }}"
    dependencies:
      - generate_visualizations

  # Assemble the report; per-section `condition` mirrors the step gating
  # above so skipped analyses render their `default(...)` placeholder.
  - id: compile_report
    tool: report-generator
    action: generate
    parameters:
      title: "Comprehensive Analysis Report"
      sections:
        - title: "Executive Summary"
          content: <AUTO>Summarize key findings from all analyses</AUTO>
        - title: "Data Overview"
          content: |
            Dataset: {{ parameters.dataset }}
            Preprocessing steps applied: cleaning, normalization
            Analysis types performed: {{ parameters.analysis_types | join(', ') }}
        - title: "Statistical Analysis"
          content: "{{ statistical_analysis.outputs.statistical_summary | default('Not performed') }}"
          condition: "'statistical' in {{ parameters.analysis_types }}"
        - title: "Sentiment Analysis"
          content: "{{ sentiment_analysis.outputs.sentiment_summary | default('Not performed') }}"
          condition: "'sentiment' in {{ parameters.analysis_types }}"
        - title: "Trend Analysis"
          content: |
            ## Identified Trends
            {{ trend_analysis.outputs.trend_results | json }}
            ## Forecasts
            {{ trend_analysis.outputs.trend_forecasts | json }}
          condition: "'trend' in {{ parameters.analysis_types }}"
        - title: "Visualizations"
          content: |
            Dashboard available at: {{ create_dashboard.url }}
            Generated charts: {{ generate_visualizations.charts | length }} files
      include_visualizations: true
      visualization_files: "{{ generate_visualizations.charts }}"
    dependencies:
      - create_dashboard

  # Render the compiled report to the requested output format.
  - id: export_report
    tool: pdf-compiler
    action: compile
    parameters:
      content: "{{ compile_report.report }}"
      output_path: "{{ output_path }}/analysis_report.{{ parameters.output_format }}"
      format: "{{ parameters.output_format }}"
      include_toc: true
      include_timestamp: true
    dependencies:
      - compile_report

  # Save pipeline results
  - id: save_results
    tool: filesystem
    action: write
    parameters:
      # ':' is invalid in some filesystems' names, so sanitize the timestamp.
      path: "{{ output_path }}/results_{{ execution.timestamp | replace(':', '-') }}.md"
      content: |
        # Modular Analysis Pipeline Results
        **Date:** {{ execution.timestamp }}
        **Pipeline ID:** modular_analysis
        ## Execution Summary
        Pipeline completed successfully.
        ### Analysis Types Performed
        {{ parameters.analysis_types | join(', ') }}
        ### Key Results
        - Statistical Analysis: {{ 'Completed' if 'statistical' in parameters.analysis_types else 'Skipped' }}
        - Sentiment Analysis: {{ 'Completed' if 'sentiment' in parameters.analysis_types else 'Skipped' }}
        - Trend Analysis: {{ 'Completed' if 'trend' in parameters.analysis_types else 'Skipped' }}
        ### Output Files
        - Report: {{ output_path }}/analysis_report.{{ parameters.output_format }}
        - Dashboard: {{ create_dashboard.url | default('Not generated') }}
        - Charts: {{ generate_visualizations.charts | length }} files generated
        ---
        *Generated by Modular Analysis Pipeline*
    dependencies:
      - export_report
# Values this pipeline exposes to its caller after a successful run.
outputs:
  report_path: "{{ output_path }}/analysis_report.{{ parameters.output_format }}"
  dashboard_url: "{{ create_dashboard.url }}"
  analysis_summary: "{{ compile_report.summary }}"
  charts_generated: "{{ generate_visualizations.charts }}"