# examples/workflows/workflow_to_workflow_example.yaml
# Logging configuration: console output at INFO, full DEBUG detail in the file.
log:
  stdout_log_level: INFO
  log_file_level: DEBUG
  log_file: workflow_to_workflow_example.log

# Pulls in shared configuration. Presumably defines the anchors referenced
# below (*broker_connection, *planning_model, *default_session_service,
# *default_artifact_service) -- confirm against ../shared_config.yaml.
# NOTE: !include is a loader extension, not standard YAML.
!include ../shared_config.yaml
apps:
  # ============================================================================
  # WORKFLOW-TO-WORKFLOW INVOCATION EXAMPLE
  # ============================================================================
  #
  # This example demonstrates the `workflow` node type that allows workflows
  # to invoke other workflows as sub-workflows.
  #
  # Architecture:
  # -------------
  #   DataAnalysisPipeline (Parent Workflow)
  #       |
  #       +-- DataValidator (Agent) - Validates incoming data
  #       |
  #       +-- StatisticalAnalysisWorkflow (Sub-Workflow)
  #       |       |
  #       |       +-- DataNormalizer (Agent)
  #       |       +-- StatisticsCalculator (Agent)
  #       |
  #       +-- ReportGenerationWorkflow (Sub-Workflow)
  #       |       |
  #       |       +-- InsightExtractor (Agent)
  #       |       +-- ReportFormatter (Agent)
  #       |
  #       +-- SummaryGenerator (Agent) - Creates final summary
  #
  # This demonstrates:
  # - Parent workflow invoking child workflows
  # - Data flow between workflows via input/output mappings
  # - Sequential sub-workflow invocations
  # - max_call_depth configuration for recursion prevention
  #
  # ============================================================================


  # ============================================================================
  # SUPPORTING AGENTS
  # ============================================================================
 47    # ----------------------------------------------------------------------------
 48    # AGENT: Data Validator
 49    # Validates incoming data structure and quality
 50    # ----------------------------------------------------------------------------
 51    - name: data_validator_app
 52      app_base_path: .
 53      app_module: solace_agent_mesh.agent.sac.app
 54      broker:
 55        <<: *broker_connection
 56  
 57      app_config:
 58        namespace: ${NAMESPACE}
 59        agent_name: "DataValidator"
 60        model: *planning_model
 61        model_provider: 
 62          - "planning"
 63  
 64        instruction: |
 65          You validate incoming data for analysis.
 66          1. Read 'dataset_name' and 'data_points' from input
 67          2. Check if data_points is a non-empty array
 68          3. Validate each data point has 'value' field
 69          4. Create a JSON artifact with: {"valid": true/false, "dataset_name": <name>, "record_count": <count>, "validation_notes": <notes>}
 70          5. End with: «result:artifact=<artifact_name> status=success»
 71  
 72          IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions.
 73  
 74        input_schema:
 75          type: object
 76          properties:
 77            dataset_name: {type: string}
 78            data_points: {type: array, items: {type: object}}
 79          required: [dataset_name, data_points]
 80  
 81        output_schema:
 82          type: object
 83          properties:
 84            valid: {type: boolean}
 85            dataset_name: {type: string}
 86            record_count: {type: integer}
 87            validation_notes: {type: string}
 88          required: [valid, dataset_name, record_count]
 89  
 90        tools:
 91          - tool_type: builtin-group
 92            group_name: "artifact_management"
 93  
 94        session_service:
 95          <<: *default_session_service
 96        artifact_service:
 97          <<: *default_artifact_service
 98  
 99        agent_card:
100          description: "Validates data structure and quality"
101          skills: [{id: "validate_data", name: "Validate Data", description: "Validates datasets", tags: ["validation"]}]
102        agent_card_publishing: { interval_seconds: 10 }
103        agent_discovery: { enabled: false }
104  
105    # ----------------------------------------------------------------------------
106    # AGENT: Data Normalizer
107    # Normalizes data values for statistical analysis
108    # ----------------------------------------------------------------------------
109    - name: data_normalizer_app
110      app_base_path: .
111      app_module: solace_agent_mesh.agent.sac.app
112      broker:
113        <<: *broker_connection
114  
115      app_config:
116        namespace: ${NAMESPACE}
117        agent_name: "DataNormalizer"
118        model: *planning_model
119        model_provider: 
120          - "planning"
121  
122        instruction: |
123          You normalize data for statistical analysis.
124          1. Read 'data_points' from input
125          2. Calculate min and max values from the data
126          3. Create a JSON artifact with: {"normalized": true, "min_value": <min>, "max_value": <max>, "normalized_data": <array of normalized values 0-1>}
127          4. End with: «result:artifact=<artifact_name> status=success»
128  
129          IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions.
130  
131        input_schema:
132          type: object
133          properties:
134            data_points: {type: array, items: {type: object}}
135          required: [data_points]
136  
137        output_schema:
138          type: object
139          properties:
140            normalized: {type: boolean}
141            min_value: {type: number}
142            max_value: {type: number}
143            normalized_data: {type: array, items: {type: number}}
144          required: [normalized, min_value, max_value]
145  
146        tools:
147          - tool_type: builtin-group
148            group_name: "artifact_management"
149  
150        session_service:
151          <<: *default_session_service
152        artifact_service:
153          <<: *default_artifact_service
154  
155        agent_card:
156          description: "Normalizes data values"
157          skills: [{id: "normalize_data", name: "Normalize Data", description: "Normalizes datasets", tags: ["normalization"]}]
158        agent_card_publishing: { interval_seconds: 10 }
159        agent_discovery: { enabled: false }
160  
161    # ----------------------------------------------------------------------------
162    # AGENT: Statistics Calculator
163    # Calculates statistical measures
164    # ----------------------------------------------------------------------------
165    - name: statistics_calculator_app
166      app_base_path: .
167      app_module: solace_agent_mesh.agent.sac.app
168      broker:
169        <<: *broker_connection
170  
171      app_config:
172        namespace: ${NAMESPACE}
173        agent_name: "StatisticsCalculator"
174        model: *planning_model
175        model_provider: 
176          - "planning"
177  
178        instruction: |
179          You calculate statistical measures from normalized data.
180          1. Read 'normalized_data', 'min_value', 'max_value' from input
181          2. Calculate mean, median, and standard deviation
182          3. Create a JSON artifact with: {"mean": <mean>, "median": <median>, "std_dev": <std>, "data_range": <max-min>, "sample_size": <count>}
183          4. End with: «result:artifact=<artifact_name> status=success»
184  
185          IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions.
186  
187        input_schema:
188          type: object
189          properties:
190            normalized_data: {type: array, items: {type: number}}
191            min_value: {type: number}
192            max_value: {type: number}
193          required: [normalized_data]
194  
195        output_schema:
196          type: object
197          properties:
198            mean: {type: number}
199            median: {type: number}
200            std_dev: {type: number}
201            data_range: {type: number}
202            sample_size: {type: integer}
203          required: [mean, median, std_dev, sample_size]
204  
205        tools:
206          - tool_type: builtin-group
207            group_name: "artifact_management"
208  
209        session_service:
210          <<: *default_session_service
211        artifact_service:
212          <<: *default_artifact_service
213  
214        agent_card:
215          description: "Calculates statistical measures"
216          skills: [{id: "calculate_stats", name: "Calculate Statistics", description: "Computes statistics", tags: ["statistics"]}]
217        agent_card_publishing: { interval_seconds: 10 }
218        agent_discovery: { enabled: false }
219  
220    # ----------------------------------------------------------------------------
221    # AGENT: Insight Extractor
222    # Extracts insights from statistical results
223    # ----------------------------------------------------------------------------
224    - name: insight_extractor_app
225      app_base_path: .
226      app_module: solace_agent_mesh.agent.sac.app
227      broker:
228        <<: *broker_connection
229  
230      app_config:
231        namespace: ${NAMESPACE}
232        agent_name: "InsightExtractor"
233        model: *planning_model
234        model_provider: 
235          - "planning"
236  
237        instruction: |
238          You extract business insights from statistical analysis.
239          1. Read 'statistics' (mean, median, std_dev, etc.) from input
240          2. Identify key insights based on the statistics
241          3. Create a JSON artifact with: {"insights": [<list of insight strings>], "trend": "stable/increasing/decreasing", "anomaly_detected": true/false, "confidence_level": "high/medium/low"}
242          4. End with: «result:artifact=<artifact_name> status=success»
243  
244          IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions.
245  
246        input_schema:
247          type: object
248          properties:
249            statistics:
250              type: object
251              properties:
252                mean: {type: number}
253                median: {type: number}
254                std_dev: {type: number}
255                data_range: {type: number}
256                sample_size: {type: integer}
257          required: [statistics]
258  
259        output_schema:
260          type: object
261          properties:
262            insights: {type: array, items: {type: string}}
263            trend: {type: string}
264            anomaly_detected: {type: boolean}
265            confidence_level: {type: string}
266          required: [insights, trend, confidence_level]
267  
268        tools:
269          - tool_type: builtin-group
270            group_name: "artifact_management"
271  
272        session_service:
273          <<: *default_session_service
274        artifact_service:
275          <<: *default_artifact_service
276  
277        agent_card:
278          description: "Extracts insights from statistics"
279          skills: [{id: "extract_insights", name: "Extract Insights", description: "Identifies insights", tags: ["insights"]}]
280        agent_card_publishing: { interval_seconds: 10 }
281        agent_discovery: { enabled: false }
282  
283    # ----------------------------------------------------------------------------
284    # AGENT: Report Formatter
285    # Formats insights into a structured report
286    # ----------------------------------------------------------------------------
287    - name: report_formatter_app
288      app_base_path: .
289      app_module: solace_agent_mesh.agent.sac.app
290      broker:
291        <<: *broker_connection
292  
293      app_config:
294        namespace: ${NAMESPACE}
295        agent_name: "ReportFormatter"
296        model: *planning_model
297        model_provider: 
298          - "planning"
299  
300        instruction: |
301          You format insights into a structured report.
302          1. Read 'dataset_name', 'insights', 'trend', 'confidence_level' from input
303          2. Create a formatted report structure
304          3. Create a JSON artifact with: {"report_title": "Analysis Report: <dataset_name>", "sections": [{title: "Key Insights", content: <insights>}, {title: "Trend Analysis", content: <trend>}], "generated_at": <timestamp>, "confidence": <confidence_level>}
305          4. End with: «result:artifact=<artifact_name> status=success»
306  
307          IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions.
308  
309        input_schema:
310          type: object
311          properties:
312            dataset_name: {type: string}
313            insights: {type: array, items: {type: string}}
314            trend: {type: string}
315            confidence_level: {type: string}
316          required: [dataset_name, insights, trend]
317  
318        output_schema:
319          type: object
320          properties:
321            report_title: {type: string}
322            sections: {type: array, items: {type: object}}
323            generated_at: {type: string}
324            confidence: {type: string}
325          required: [report_title, sections]
326  
327        tools:
328          - tool_type: builtin-group
329            group_name: "artifact_management"
330  
331        session_service:
332          <<: *default_session_service
333        artifact_service:
334          <<: *default_artifact_service
335  
336        agent_card:
337          description: "Formats analysis reports"
338          skills: [{id: "format_report", name: "Format Report", description: "Creates formatted reports", tags: ["reporting"]}]
339        agent_card_publishing: { interval_seconds: 10 }
340        agent_discovery: { enabled: false }
341  
342    # ----------------------------------------------------------------------------
343    # AGENT: Summary Generator
344    # Creates final executive summary
345    # ----------------------------------------------------------------------------
346    - name: summary_generator_app
347      app_base_path: .
348      app_module: solace_agent_mesh.agent.sac.app
349      broker:
350        <<: *broker_connection
351  
352      app_config:
353        namespace: ${NAMESPACE}
354        agent_name: "SummaryGenerator"
355        model: *planning_model
356        model_provider: 
357          - "planning"
358  
359        instruction: |
360          You create executive summaries of analysis pipelines.
361          1. Read 'dataset_name', 'statistics', 'report' from input
362          2. Create a concise executive summary
363          3. Create a JSON artifact with: {"executive_summary": <2-3 sentence summary>, "dataset_analyzed": <dataset_name>, "key_finding": <most important insight>, "recommendation": <action recommendation>, "pipeline_status": "completed"}
364          4. End with: «result:artifact=<artifact_name> status=success»
365  
366          IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions.
367  
368        input_schema:
369          type: object
370          properties:
371            dataset_name: {type: string}
372            statistics:
373              type: object
374              properties:
375                mean: {type: number}
376                median: {type: number}
377                std_dev: {type: number}
378            report:
379              type: object
380              properties:
381                report_title: {type: string}
382                sections: {type: array}
383          required: [dataset_name]
384  
385        output_schema:
386          type: object
387          properties:
388            executive_summary: {type: string}
389            dataset_analyzed: {type: string}
390            key_finding: {type: string}
391            recommendation: {type: string}
392            pipeline_status: {type: string}
393          required: [executive_summary, dataset_analyzed, pipeline_status]
394  
395        tools:
396          - tool_type: builtin-group
397            group_name: "artifact_management"
398  
399        session_service:
400          <<: *default_session_service
401        artifact_service:
402          <<: *default_artifact_service
403  
404        agent_card:
405          description: "Generates executive summaries"
406          skills: [{id: "generate_summary", name: "Generate Summary", description: "Creates summaries", tags: ["summary"]}]
407        agent_card_publishing: { interval_seconds: 10 }
408        agent_discovery: { enabled: false }
409  
410  
  # ============================================================================
  # SUB-WORKFLOW: Statistical Analysis
  # A reusable workflow for performing statistical analysis on data
  # ============================================================================
  - name: statistical_analysis_workflow
    app_base_path: .
    # Workflow apps use the workflow module (vs. agent.sac.app for agents).
    app_module: solace_agent_mesh.workflow.app
    broker:
      <<: *broker_connection

    app_config:
      namespace: ${NAMESPACE}
      name: "StatisticalAnalysisWorkflow"
      display_name: "Statistical Analysis Workflow"

      workflow:
        version: "1.0.0"
        description: |
          A reusable sub-workflow that performs statistical analysis on data.

          This workflow:
          1. Normalizes the input data
          2. Calculates statistical measures (mean, median, std_dev)

          Designed to be invoked by parent workflows that need statistical analysis.

        # max_call_depth prevents infinite recursion if workflows call each other
        max_call_depth: 5

        # Input contract for callers (the parent passes data_points through).
        input_schema:
          type: object
          properties:
            data_points:
              type: array
              description: "Array of data points to analyze"
              items:
                type: object
                properties:
                  value: {type: number}
          required: [data_points]

        # Output contract; values are produced via output_mapping below.
        output_schema:
          type: object
          properties:
            mean: {type: number}
            median: {type: number}
            std_dev: {type: number}
            data_range: {type: number}
            sample_size: {type: integer}
            normalized_data: {type: array, items: {type: number}}
          required: [mean, median, std_dev, sample_size]

        nodes:
          # Step 1: Normalize the data
          - id: normalize
            type: agent
            agent_name: "DataNormalizer"
            instruction: |
              Normalize the incoming data points for statistical analysis.
              This is the first step in the statistical analysis sub-workflow.
            input:
              data_points: "{{workflow.input.data_points}}"

          # Step 2: Calculate statistics on normalized data
          # depends_on makes this run after 'normalize' and lets it reference
          # {{normalize.output.*}} below.
          - id: calculate_stats
            type: agent
            agent_name: "StatisticsCalculator"
            depends_on: [normalize]
            instruction: |
              Calculate statistical measures from the normalized data.
              Use the normalization results to compute mean, median, and standard deviation.
            input:
              normalized_data: "{{normalize.output.normalized_data}}"
              min_value: "{{normalize.output.min_value}}"
              max_value: "{{normalize.output.max_value}}"

        # Lifts node outputs into the workflow's own result (matches output_schema).
        output_mapping:
          mean: "{{calculate_stats.output.mean}}"
          median: "{{calculate_stats.output.median}}"
          std_dev: "{{calculate_stats.output.std_dev}}"
          data_range: "{{calculate_stats.output.data_range}}"
          sample_size: "{{calculate_stats.output.sample_size}}"
          normalized_data: "{{normalize.output.normalized_data}}"

        skills:
          - id: "statistical_analysis"
            name: "Statistical Analysis"
            description: "Performs statistical analysis on data"
            tags: ["statistics", "analysis", "sub-workflow"]

      session_service:
        <<: *default_session_service
      artifact_service:
        <<: *default_artifact_service

      agent_card_publishing: { interval_seconds: 10 }
      agent_discovery: { enabled: false }
  # ============================================================================
  # SUB-WORKFLOW: Report Generation
  # A reusable workflow for generating analysis reports
  # ============================================================================
  - name: report_generation_workflow
    app_base_path: .
    # Workflow apps use the workflow module (vs. agent.sac.app for agents).
    app_module: solace_agent_mesh.workflow.app
    broker:
      <<: *broker_connection

    app_config:
      namespace: ${NAMESPACE}
      name: "ReportGenerationWorkflow"
      display_name: "Report Generation Workflow"

      workflow:
        version: "1.0.0"
        description: |
          A reusable sub-workflow that generates analysis reports.

          This workflow:
          1. Extracts insights from statistical results
          2. Formats the insights into a structured report

          Designed to be invoked by parent workflows that need report generation.

        # max_call_depth prevents infinite recursion if workflows call each other
        max_call_depth: 5

        # Input contract: the parent supplies the dataset name plus the
        # statistics object produced by StatisticalAnalysisWorkflow.
        input_schema:
          type: object
          properties:
            dataset_name:
              type: string
              description: "Name of the dataset being analyzed"
            statistics:
              type: object
              description: "Statistical results to generate report from"
              properties:
                mean: {type: number}
                median: {type: number}
                std_dev: {type: number}
                data_range: {type: number}
                sample_size: {type: integer}
          required: [dataset_name, statistics]

        # Output contract; values are produced via output_mapping below.
        output_schema:
          type: object
          properties:
            report_title: {type: string}
            sections: {type: array, items: {type: object}}
            generated_at: {type: string}
            confidence: {type: string}
            insights: {type: array, items: {type: string}}
            trend: {type: string}
          required: [report_title, sections]

        nodes:
          # Step 1: Extract insights from statistics
          - id: extract_insights
            type: agent
            agent_name: "InsightExtractor"
            instruction: |
              Extract business insights from the statistical analysis results.
              Identify trends, anomalies, and key findings.
            input:
              statistics: "{{workflow.input.statistics}}"

          # Step 2: Format into a structured report
          # Runs after extract_insights and consumes its outputs.
          - id: format_report
            type: agent
            agent_name: "ReportFormatter"
            depends_on: [extract_insights]
            instruction: |
              Format the extracted insights into a professional analysis report.
              Include sections for key insights and trend analysis.
            input:
              dataset_name: "{{workflow.input.dataset_name}}"
              insights: "{{extract_insights.output.insights}}"
              trend: "{{extract_insights.output.trend}}"
              confidence_level: "{{extract_insights.output.confidence_level}}"

        # Lifts node outputs into the workflow's own result (matches output_schema).
        output_mapping:
          report_title: "{{format_report.output.report_title}}"
          sections: "{{format_report.output.sections}}"
          generated_at: "{{format_report.output.generated_at}}"
          confidence: "{{format_report.output.confidence}}"
          insights: "{{extract_insights.output.insights}}"
          trend: "{{extract_insights.output.trend}}"

        skills:
          - id: "report_generation"
            name: "Report Generation"
            description: "Generates analysis reports from statistics"
            tags: ["reporting", "insights", "sub-workflow"]

      session_service:
        <<: *default_session_service
      artifact_service:
        <<: *default_artifact_service

      agent_card_publishing: { interval_seconds: 10 }
      agent_discovery: { enabled: false }
  # ============================================================================
  # PARENT WORKFLOW: Data Analysis Pipeline
  # Orchestrates the complete analysis by invoking sub-workflows
  # ============================================================================
  - name: data_analysis_pipeline
    app_base_path: .
    # Workflow apps use the workflow module (vs. agent.sac.app for agents).
    app_module: solace_agent_mesh.workflow.app
    broker:
      <<: *broker_connection

    app_config:
      namespace: ${NAMESPACE}
      name: "DataAnalysisPipeline"
      display_name: "Data Analysis Pipeline"

      workflow:
        version: "1.0.0"
        description: |
          A parent workflow that demonstrates workflow-to-workflow invocation.

          This workflow orchestrates a complete data analysis pipeline by:
          1. Validating incoming data (agent node)
          2. Performing statistical analysis (workflow node - invokes StatisticalAnalysisWorkflow)
          3. Generating a report (workflow node - invokes ReportGenerationWorkflow)
          4. Creating an executive summary (agent node)

          This demonstrates:
          - The `workflow` node type for invoking sub-workflows
          - Data flow between parent and child workflows via input/output mappings
          - Sequential sub-workflow invocations
          - Mixing agent nodes and workflow nodes in the same parent workflow

        # max_call_depth limits recursion depth to prevent infinite loops
        # Default is 10, but we set it explicitly here for demonstration
        max_call_depth: 10

        # Input contract for pipeline callers.
        input_schema:
          type: object
          properties:
            dataset_name:
              type: string
              description: "Name of the dataset to analyze"
            data_points:
              type: array
              description: "Data points to analyze"
              items:
                type: object
                properties:
                  value:
                    type: number
                    description: "Numeric value for analysis"
                required: [value]
          required: [dataset_name, data_points]

        # Output contract; values are produced via output_mapping below.
        output_schema:
          type: object
          properties:
            executive_summary: {type: string}
            dataset_analyzed: {type: string}
            key_finding: {type: string}
            recommendation: {type: string}
            pipeline_status: {type: string}
          required: [executive_summary, dataset_analyzed, pipeline_status]

        nodes:
          # ====================================================================
          # STEP 1: AGENT NODE - Validate the incoming data
          # NOTE(review): validate_data's output is never referenced by later
          # nodes; the depends_on below only enforces ordering.
          # ====================================================================
          - id: validate_data
            type: agent
            agent_name: "DataValidator"
            instruction: |
              Validate the incoming dataset before analysis.
              Check data structure and quality.
            input:
              dataset_name: "{{workflow.input.dataset_name}}"
              data_points: "{{workflow.input.data_points}}"

          # ====================================================================
          # STEP 2: WORKFLOW NODE - Invoke Statistical Analysis sub-workflow
          # This demonstrates the workflow node type!
          # ====================================================================
          - id: run_statistical_analysis
            type: workflow
            workflow_name: "StatisticalAnalysisWorkflow"
            depends_on: [validate_data]
            # Pass data to the sub-workflow via input mapping
            input:
              data_points: "{{workflow.input.data_points}}"
            # Optional: Override schemas for this specific invocation
            input_schema_override:
              type: object
              properties:
                data_points:
                  type: array
                  description: "Validated data points for statistical analysis"
                  items:
                    type: object
                    properties:
                      value:
                        type: number
                    required: [value]
              required: [data_points]

          # ====================================================================
          # STEP 3: WORKFLOW NODE - Invoke Report Generation sub-workflow
          # Another workflow invocation, using output from the previous workflow
          # ====================================================================
          - id: run_report_generation
            type: workflow
            workflow_name: "ReportGenerationWorkflow"
            depends_on: [run_statistical_analysis]
            instruction: |
              Generate a comprehensive analysis report based on the statistical results.
              This sub-workflow will extract insights and format them into a report.
            # Pass the statistical results to the report generation workflow
            # (shape matches ReportGenerationWorkflow's input_schema).
            input:
              dataset_name: "{{workflow.input.dataset_name}}"
              statistics:
                mean: "{{run_statistical_analysis.output.mean}}"
                median: "{{run_statistical_analysis.output.median}}"
                std_dev: "{{run_statistical_analysis.output.std_dev}}"
                data_range: "{{run_statistical_analysis.output.data_range}}"
                sample_size: "{{run_statistical_analysis.output.sample_size}}"

          # ====================================================================
          # STEP 4: AGENT NODE - Generate executive summary
          # NOTE(review): depends_on names only run_report_generation; the
          # run_statistical_analysis references below rely on the transitive
          # ordering through step 3.
          # ====================================================================
          - id: generate_summary
            type: agent
            agent_name: "SummaryGenerator"
            depends_on: [run_report_generation]
            instruction: |
              Create an executive summary combining the statistical analysis
              and report generation results. This is the final step in the pipeline.
            input:
              dataset_name: "{{workflow.input.dataset_name}}"
              statistics:
                mean: "{{run_statistical_analysis.output.mean}}"
                median: "{{run_statistical_analysis.output.median}}"
                std_dev: "{{run_statistical_analysis.output.std_dev}}"
              report:
                report_title: "{{run_report_generation.output.report_title}}"
                sections: "{{run_report_generation.output.sections}}"

        # The pipeline's final result comes entirely from the summary node.
        output_mapping:
          executive_summary: "{{generate_summary.output.executive_summary}}"
          dataset_analyzed: "{{generate_summary.output.dataset_analyzed}}"
          key_finding: "{{generate_summary.output.key_finding}}"
          recommendation: "{{generate_summary.output.recommendation}}"
          pipeline_status: "{{generate_summary.output.pipeline_status}}"

        skills:
          - id: "analyze_data"
            name: "Analyze Data"
            description: "Runs complete data analysis pipeline with statistical analysis and report generation"
            tags: ["analysis", "pipeline", "workflow-orchestration"]

      session_service:
        <<: *default_session_service
      artifact_service:
        <<: *default_artifact_service

      agent_card_publishing: { interval_seconds: 10 }
      agent_discovery: { enabled: false }