re-uploading work
This commit is contained in:
710
docs/examples/complete-workflow.yaml
Normal file
710
docs/examples/complete-workflow.yaml
Normal file
@@ -0,0 +1,710 @@
|
||||
# Complete Workflow Example
|
||||
# This workflow demonstrates all features of the Attune workflow system
|
||||
|
||||
ref: examples.complete_deployment_workflow
|
||||
label: "Complete Application Deployment Workflow"
|
||||
description: |
|
||||
A comprehensive example workflow that demonstrates:
|
||||
- Sequential and parallel execution
|
||||
- Conditional branching
|
||||
- Iteration and batching
|
||||
- Variable scoping and templating
|
||||
- Error handling and retry
|
||||
- Human-in-the-loop (inquiries)
|
||||
- Subworkflow invocation
|
||||
version: "1.0.0"
|
||||
|
||||
# Input parameters accepted by this workflow
|
||||
parameters:
|
||||
app_name:
|
||||
type: string
|
||||
required: true
|
||||
description: "Name of the application to deploy"
|
||||
|
||||
version:
|
||||
type: string
|
||||
required: true
|
||||
description: "Version to deploy"
|
||||
|
||||
environment:
|
||||
type: string
|
||||
enum: [dev, staging, production]
|
||||
default: dev
|
||||
description: "Target environment"
|
||||
|
||||
regions:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
default: ["us-east-1"]
|
||||
description: "AWS regions to deploy to"
|
||||
|
||||
enable_canary:
|
||||
type: boolean
|
||||
default: false
|
||||
description: "Enable canary deployment"
|
||||
|
||||
rollback_on_failure:
|
||||
type: boolean
|
||||
default: true
|
||||
description: "Automatically rollback on failure"
|
||||
|
||||
# Output schema - what this workflow produces
|
||||
output:
|
||||
type: object
|
||||
properties:
|
||||
deployment_id:
|
||||
type: string
|
||||
status:
|
||||
type: string
|
||||
enum: [success, failed, rolled_back]
|
||||
deployed_version:
|
||||
type: string
|
||||
deployment_urls:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
duration_seconds:
|
||||
type: integer
|
||||
|
||||
# Workflow-scoped variables
|
||||
vars:
|
||||
deployment_id: null
|
||||
start_time: null
|
||||
health_check_urls: []
|
||||
failed_regions: []
|
||||
successful_regions: []
|
||||
canary_passed: false
|
||||
approval_granted: false
|
||||
rollback_initiated: false
|
||||
|
||||
# Task execution graph
|
||||
tasks:
|
||||
# ============================================================================
|
||||
# PHASE 1: VALIDATION & PREPARATION
|
||||
# ============================================================================
|
||||
|
||||
- name: start_workflow
|
||||
action: core.noop
|
||||
input: {}
|
||||
publish:
|
||||
- start_time: "{{ system.timestamp }}"
|
||||
on_success: validate_inputs
|
||||
|
||||
- name: validate_inputs
|
||||
action: validation.validate_deployment_params
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
version: "{{ parameters.version }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
regions: "{{ parameters.regions }}"
|
||||
retry:
|
||||
count: 2
|
||||
delay: 5
|
||||
on_success: check_environment_type
|
||||
on_failure: notify_validation_failed
|
||||
|
||||
# Conditional: Production deployments require approval
|
||||
- name: check_environment_type
|
||||
action: core.noop
|
||||
input: {}
|
||||
decision:
|
||||
- when: "{{ parameters.environment == 'production' }}"
|
||||
next: require_production_approval
|
||||
- default: create_deployment_record
|
||||
|
||||
# Human-in-the-loop: Require approval for production
|
||||
- name: require_production_approval
|
||||
action: core.inquiry
|
||||
input:
|
||||
prompt: |
|
||||
Approve production deployment?
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Regions: {{ parameters.regions | join(', ') }}
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
approved:
|
||||
type: boolean
|
||||
approver_notes:
|
||||
type: string
|
||||
assigned_to: "deployment-approvers"
|
||||
timeout: 3600 # 1 hour timeout
|
||||
publish:
|
||||
- approval_granted: "{{ task.require_production_approval.result.approved }}"
|
||||
decision:
|
||||
- when: "{{ vars.approval_granted == true }}"
|
||||
next: create_deployment_record
|
||||
- default: cancel_deployment
|
||||
on_timeout: deployment_approval_timeout
|
||||
|
||||
# ============================================================================
|
||||
# PHASE 2: DEPLOYMENT PREPARATION
|
||||
# ============================================================================
|
||||
|
||||
- name: create_deployment_record
|
||||
action: deployments.create_record
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
version: "{{ parameters.version }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
regions: "{{ parameters.regions }}"
|
||||
initiated_by: "{{ system.identity.login }}"
|
||||
publish:
|
||||
- deployment_id: "{{ task.create_deployment_record.result.id }}"
|
||||
on_success: parallel_pre_deployment_checks
|
||||
on_failure: notify_deployment_failed
|
||||
|
||||
# Parallel execution: Run multiple checks concurrently
|
||||
- name: parallel_pre_deployment_checks
|
||||
type: parallel
|
||||
tasks:
|
||||
- name: check_docker_registry
|
||||
action: docker.check_registry_health
|
||||
input:
|
||||
registry: "{{ pack.config.docker_registry }}"
|
||||
|
||||
- name: check_kubernetes_clusters
|
||||
action: kubernetes.check_clusters
|
||||
input:
|
||||
regions: "{{ parameters.regions }}"
|
||||
|
||||
- name: check_database_migrations
|
||||
action: database.check_pending_migrations
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
|
||||
- name: verify_secrets
|
||||
action: secrets.verify_availability
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
on_success: build_and_test
|
||||
on_failure: cleanup_failed_deployment
|
||||
|
||||
# ============================================================================
|
||||
# PHASE 3: BUILD & TEST
|
||||
# ============================================================================
|
||||
|
||||
- name: build_and_test
|
||||
action: ci.build_and_test
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
version: "{{ parameters.version }}"
|
||||
registry: "{{ pack.config.docker_registry }}"
|
||||
run_tests: true
|
||||
timeout: 1800 # 30 minutes
|
||||
retry:
|
||||
count: 2
|
||||
delay: 60
|
||||
backoff: exponential
|
||||
on_success: push_container_image
|
||||
on_failure: notify_build_failed
|
||||
on_timeout: notify_build_timeout
|
||||
|
||||
- name: push_container_image
|
||||
action: docker.push_image
|
||||
input:
|
||||
image: "{{ task.build_and_test.result.image_name }}"
|
||||
tag: "{{ parameters.version }}"
|
||||
registry: "{{ pack.config.docker_registry }}"
|
||||
on_success: check_canary_enabled
|
||||
on_failure: notify_push_failed
|
||||
|
||||
# ============================================================================
|
||||
# PHASE 4: CANARY DEPLOYMENT (CONDITIONAL)
|
||||
# ============================================================================
|
||||
|
||||
- name: check_canary_enabled
|
||||
action: core.noop
|
||||
input: {}
|
||||
decision:
|
||||
- when: "{{ parameters.enable_canary == true }}"
|
||||
next: deploy_canary
|
||||
- default: deploy_to_all_regions
|
||||
|
||||
- name: deploy_canary
|
||||
action: kubernetes.deploy_canary
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
image: "{{ task.build_and_test.result.image_uri }}"
|
||||
region: "{{ parameters.regions | first }}"
|
||||
traffic_percentage: 10
|
||||
on_success: monitor_canary
|
||||
on_failure: handle_canary_failure
|
||||
|
||||
- name: monitor_canary
|
||||
action: monitoring.analyze_canary
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
duration: 300 # 5 minutes
|
||||
error_threshold: 0.01
|
||||
publish:
|
||||
- canary_passed: "{{ task.monitor_canary.result.success }}"
|
||||
decision:
|
||||
- when: "{{ vars.canary_passed == true }}"
|
||||
next: promote_canary
|
||||
- default: rollback_canary
|
||||
|
||||
- name: promote_canary
|
||||
action: kubernetes.promote_canary
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
region: "{{ parameters.regions | first }}"
|
||||
on_success: deploy_to_remaining_regions
|
||||
on_failure: rollback_canary
|
||||
|
||||
- name: rollback_canary
|
||||
action: kubernetes.rollback_canary
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
region: "{{ parameters.regions | first }}"
|
||||
publish:
|
||||
- rollback_initiated: true
|
||||
on_complete: notify_canary_failed
|
||||
|
||||
# ============================================================================
|
||||
# PHASE 5: MULTI-REGION DEPLOYMENT (WITH ITERATION)
|
||||
# ============================================================================
|
||||
|
||||
- name: deploy_to_all_regions
|
||||
action: kubernetes.deploy
|
||||
with_items: "{{ parameters.regions }}"
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
image: "{{ task.build_and_test.result.image_uri }}"
|
||||
region: "{{ item }}"
|
||||
replicas: 3
|
||||
environment: "{{ parameters.environment }}"
|
||||
retry:
|
||||
count: 2
|
||||
delay: 30
|
||||
publish:
|
||||
- health_check_urls: "{{ task.deploy_to_all_regions.results | map(attribute='health_url') | list }}"
|
||||
on_success: parallel_health_checks
|
||||
on_failure: handle_deployment_failures
|
||||
|
||||
- name: deploy_to_remaining_regions
|
||||
action: kubernetes.deploy
|
||||
# Filter out first region (already has canary)
|
||||
with_items: "{{ parameters.regions[1:] }}"
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
image: "{{ task.build_and_test.result.image_uri }}"
|
||||
region: "{{ item }}"
|
||||
replicas: 3
|
||||
environment: "{{ parameters.environment }}"
|
||||
on_success: parallel_health_checks
|
||||
on_failure: handle_deployment_failures
|
||||
|
||||
# ============================================================================
|
||||
# PHASE 6: VERIFICATION
|
||||
# ============================================================================
|
||||
|
||||
- name: parallel_health_checks
|
||||
action: http.get
|
||||
with_items: "{{ vars.health_check_urls }}"
|
||||
batch_size: 5 # Check 5 URLs at a time
|
||||
input:
|
||||
url: "{{ item }}"
|
||||
expected_status: 200
|
||||
timeout: 30
|
||||
retry:
|
||||
count: 10
|
||||
delay: 10
|
||||
backoff: linear
|
||||
on_success: run_smoke_tests
|
||||
on_failure: handle_health_check_failures
|
||||
|
||||
- name: run_smoke_tests
|
||||
action: testing.run_smoke_tests
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
base_urls: "{{ vars.health_check_urls }}"
|
||||
timeout: 600
|
||||
on_success: verify_metrics
|
||||
on_failure: handle_smoke_test_failures
|
||||
|
||||
- name: verify_metrics
|
||||
action: monitoring.verify_deployment_metrics
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
thresholds:
|
||||
error_rate: 0.01
|
||||
latency_p99: 1000
|
||||
cpu_usage: 80
|
||||
on_success: update_load_balancer
|
||||
on_failure: handle_metrics_failures
|
||||
|
||||
# ============================================================================
|
||||
# PHASE 7: TRAFFIC ROUTING
|
||||
# ============================================================================
|
||||
|
||||
- name: update_load_balancer
|
||||
action: aws.update_load_balancer
|
||||
with_items: "{{ parameters.regions }}"
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
region: "{{ item }}"
|
||||
target_version: "{{ parameters.version }}"
|
||||
on_success: finalize_deployment
|
||||
on_failure: handle_lb_update_failure
|
||||
|
||||
# ============================================================================
|
||||
# PHASE 8: FINALIZATION
|
||||
# ============================================================================
|
||||
|
||||
- name: finalize_deployment
|
||||
action: deployments.update_status
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
status: "success"
|
||||
metadata:
|
||||
version: "{{ parameters.version }}"
|
||||
regions: "{{ parameters.regions }}"
|
||||
duration: "{{ system.timestamp - vars.start_time }}"
|
||||
publish:
|
||||
- successful_regions: "{{ parameters.regions }}"
|
||||
on_success: post_deployment_tasks
|
||||
|
||||
- name: post_deployment_tasks
|
||||
type: parallel
|
||||
tasks:
|
||||
- name: update_service_catalog
|
||||
action: catalog.update_service_version
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
version: "{{ parameters.version }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
|
||||
- name: trigger_integration_tests
|
||||
action: testing.trigger_integration_suite
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
environment: "{{ parameters.environment }}"
|
||||
|
||||
- name: update_documentation
|
||||
action: docs.update_deployment_history
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
version: "{{ parameters.version }}"
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
on_complete: notify_deployment_success
|
||||
|
||||
# ============================================================================
|
||||
# FAILURE HANDLERS
|
||||
# ============================================================================
|
||||
|
||||
- name: handle_deployment_failures
|
||||
action: deployments.analyze_failures
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
failed_tasks: "{{ task.deploy_to_all_regions.failed_items }}"
|
||||
publish:
|
||||
- failed_regions: "{{ task.handle_deployment_failures.result.failed_regions }}"
|
||||
decision:
|
||||
- when: "{{ parameters.rollback_on_failure == true }}"
|
||||
next: rollback_all_regions
|
||||
- default: notify_partial_deployment
|
||||
|
||||
- name: handle_health_check_failures
|
||||
action: diagnostics.analyze_health_failures
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
failed_urls: "{{ task.parallel_health_checks.failed_items }}"
|
||||
decision:
|
||||
- when: "{{ parameters.rollback_on_failure == true }}"
|
||||
next: rollback_all_regions
|
||||
- default: notify_health_check_failures
|
||||
|
||||
- name: handle_smoke_test_failures
|
||||
action: testing.capture_smoke_test_results
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
results: "{{ task.run_smoke_tests.result }}"
|
||||
decision:
|
||||
- when: "{{ parameters.rollback_on_failure == true }}"
|
||||
next: rollback_all_regions
|
||||
- default: notify_smoke_test_failures
|
||||
|
||||
- name: handle_metrics_failures
|
||||
action: monitoring.capture_metric_violations
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
violations: "{{ task.verify_metrics.result.violations }}"
|
||||
decision:
|
||||
- when: "{{ parameters.rollback_on_failure == true }}"
|
||||
next: rollback_all_regions
|
||||
- default: notify_metrics_failures
|
||||
|
||||
- name: handle_lb_update_failure
|
||||
action: aws.diagnose_lb_failure
|
||||
input:
|
||||
regions: "{{ parameters.regions }}"
|
||||
error: "{{ task.update_load_balancer.error }}"
|
||||
on_complete: rollback_all_regions
|
||||
|
||||
# ============================================================================
|
||||
# ROLLBACK PROCEDURES
|
||||
# ============================================================================
|
||||
|
||||
- name: rollback_all_regions
|
||||
action: kubernetes.rollback_deployment
|
||||
with_items: "{{ parameters.regions }}"
|
||||
input:
|
||||
app_name: "{{ parameters.app_name }}"
|
||||
region: "{{ item }}"
|
||||
target_version: "previous"
|
||||
publish:
|
||||
- rollback_initiated: true
|
||||
on_success: update_deployment_rolled_back
|
||||
on_failure: notify_rollback_failed
|
||||
|
||||
- name: update_deployment_rolled_back
|
||||
action: deployments.update_status
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
status: "rolled_back"
|
||||
metadata:
|
||||
reason: "deployment_failure"
|
||||
failed_regions: "{{ vars.failed_regions }}"
|
||||
on_complete: notify_deployment_rolled_back
|
||||
|
||||
# ============================================================================
|
||||
# CANCELLATION HANDLERS
|
||||
# ============================================================================
|
||||
|
||||
- name: cancel_deployment
|
||||
action: deployments.update_status
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
status: "cancelled"
|
||||
metadata:
|
||||
reason: "approval_denied"
|
||||
on_complete: notify_deployment_cancelled
|
||||
|
||||
- name: deployment_approval_timeout
|
||||
action: deployments.update_status
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
status: "cancelled"
|
||||
metadata:
|
||||
reason: "approval_timeout"
|
||||
on_complete: notify_approval_timeout
|
||||
|
||||
- name: cleanup_failed_deployment
|
||||
action: deployments.cleanup_resources
|
||||
input:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
reason: "pre_deployment_checks_failed"
|
||||
on_complete: notify_deployment_failed
|
||||
|
||||
# ============================================================================
|
||||
# NOTIFICATION TASKS
|
||||
# ============================================================================
|
||||
|
||||
- name: notify_deployment_success
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
✅ Deployment Successful
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Environment: {{ parameters.environment }}
|
||||
Regions: {{ vars.successful_regions | join(', ') }}
|
||||
Duration: {{ system.timestamp - vars.start_time }}s
|
||||
Deployment ID: {{ vars.deployment_id }}
|
||||
metadata:
|
||||
severity: "info"
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
|
||||
- name: notify_deployment_failed
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email", "pagerduty"]
|
||||
message: |
|
||||
❌ Deployment Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Environment: {{ parameters.environment }}
|
||||
Failed Regions: {{ vars.failed_regions | join(', ') }}
|
||||
Deployment ID: {{ vars.deployment_id }}
|
||||
metadata:
|
||||
severity: "error"
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
|
||||
- name: notify_deployment_rolled_back
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email", "pagerduty"]
|
||||
message: |
|
||||
⚠️ Deployment Rolled Back
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Environment: {{ parameters.environment }}
|
||||
Rollback completed for all regions
|
||||
Deployment ID: {{ vars.deployment_id }}
|
||||
metadata:
|
||||
severity: "warning"
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
|
||||
- name: notify_deployment_cancelled
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
🚫 Deployment Cancelled
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Environment: {{ parameters.environment }}
|
||||
Reason: Approval denied
|
||||
metadata:
|
||||
severity: "info"
|
||||
|
||||
- name: notify_approval_timeout
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
⏱️ Deployment Approval Timeout
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Environment: {{ parameters.environment }}
|
||||
No approval received within 1 hour
|
||||
metadata:
|
||||
severity: "warning"
|
||||
|
||||
- name: notify_validation_failed
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack"]
|
||||
message: |
|
||||
⚠️ Deployment Validation Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Error: {{ task.validate_inputs.error.message }}
|
||||
metadata:
|
||||
severity: "warning"
|
||||
|
||||
- name: notify_build_failed
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
🔨 Build Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Error: {{ task.build_and_test.error.message }}
|
||||
metadata:
|
||||
severity: "error"
|
||||
|
||||
- name: notify_build_timeout
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
⏱️ Build Timeout
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Build exceeded 30 minute timeout
|
||||
metadata:
|
||||
severity: "error"
|
||||
|
||||
- name: notify_push_failed
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack"]
|
||||
message: |
|
||||
📦 Container Push Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Error: {{ task.push_container_image.error.message }}
|
||||
metadata:
|
||||
severity: "error"
|
||||
|
||||
- name: notify_canary_failed
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
🐦 Canary Deployment Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Canary has been rolled back
|
||||
Metrics: {{ task.monitor_canary.result }}
|
||||
metadata:
|
||||
severity: "warning"
|
||||
|
||||
- name: notify_partial_deployment
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email", "pagerduty"]
|
||||
message: |
|
||||
⚠️ Partial Deployment
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Successful Regions: {{ vars.successful_regions | join(', ') }}
|
||||
Failed Regions: {{ vars.failed_regions | join(', ') }}
|
||||
metadata:
|
||||
severity: "error"
|
||||
|
||||
- name: notify_health_check_failures
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
💔 Health Check Failures
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Failed URLs: {{ task.parallel_health_checks.failed_items | length }}
|
||||
metadata:
|
||||
severity: "error"
|
||||
|
||||
- name: notify_smoke_test_failures
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
🧪 Smoke Tests Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Results: {{ task.run_smoke_tests.result.summary }}
|
||||
metadata:
|
||||
severity: "error"
|
||||
|
||||
- name: notify_metrics_failures
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email"]
|
||||
message: |
|
||||
📊 Metrics Verification Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Violations: {{ task.verify_metrics.result.violations | length }}
|
||||
metadata:
|
||||
severity: "error"
|
||||
|
||||
- name: notify_rollback_failed
|
||||
action: notifications.send_multi_channel
|
||||
input:
|
||||
channels: ["slack", "email", "pagerduty"]
|
||||
message: |
|
||||
🚨 CRITICAL: Rollback Failed
|
||||
Application: {{ parameters.app_name }}
|
||||
Version: {{ parameters.version }}
|
||||
Manual intervention required!
|
||||
metadata:
|
||||
severity: "critical"
|
||||
|
||||
# Workflow output mapping
|
||||
output_map:
|
||||
deployment_id: "{{ vars.deployment_id }}"
|
||||
status: "{{ 'rolled_back' if vars.rollback_initiated else 'success' }}"
|
||||
deployed_version: "{{ parameters.version }}"
|
||||
deployment_urls: "{{ vars.health_check_urls }}"
|
||||
duration_seconds: "{{ system.timestamp - vars.start_time }}"
|
||||
86
docs/examples/pack-test-demo.sh
Executable file
86
docs/examples/pack-test-demo.sh
Executable file
@@ -0,0 +1,86 @@
|
||||
#!/bin/bash
|
||||
# Pack Testing Framework Demo
|
||||
#
|
||||
# This script demonstrates the pack testing capabilities in Attune
|
||||
|
||||
set -e
|
||||
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo " Attune Pack Testing Framework Demo"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo ""
|
||||
|
||||
# Navigate to project root
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
# Build the CLI if needed
|
||||
if [ ! -f "./target/debug/attune" ]; then
|
||||
echo "🔨 Building Attune CLI..."
|
||||
cargo build --package attune-cli
|
||||
echo ""
|
||||
fi
|
||||
|
||||
ATTUNE_CLI="./target/debug/attune"
|
||||
|
||||
echo "📦 Testing Core Pack"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo ""
|
||||
|
||||
# Basic test execution
|
||||
echo "1️⃣ Basic Test Execution"
|
||||
echo " Command: attune pack test packs/core"
|
||||
echo ""
|
||||
$ATTUNE_CLI pack test packs/core
|
||||
echo ""
|
||||
|
||||
# JSON output
|
||||
echo ""
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "2️⃣ JSON Output (for scripting)"
|
||||
echo " Command: attune pack test packs/core --output json"
|
||||
echo ""
|
||||
RUST_LOG=error $ATTUNE_CLI pack test packs/core --output json | jq '{
|
||||
pack: .packRef,
|
||||
version: .packVersion,
|
||||
totalTests: .totalTests,
|
||||
passed: .passed,
|
||||
failed: .failed,
|
||||
passRate: (.passRate * 100 | tostring + "%"),
|
||||
duration: (.durationMs / 1000 | tostring + "s"),
|
||||
suites: .testSuites | map({
|
||||
name: .name,
|
||||
type: .runnerType,
|
||||
passed: .passed,
|
||||
total: .total
|
||||
})
|
||||
}'
|
||||
echo ""
|
||||
|
||||
# Verbose output
|
||||
echo ""
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "3️⃣ Verbose Output (shows test cases)"
|
||||
echo " Command: attune pack test packs/core --verbose"
|
||||
echo ""
|
||||
$ATTUNE_CLI pack test packs/core --verbose
|
||||
echo ""
|
||||
|
||||
echo ""
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "✅ Demo Complete!"
|
||||
echo ""
|
||||
echo "Available Commands:"
|
||||
echo " attune pack test <pack> # Test a pack"
|
||||
echo " attune pack test <pack> --verbose # Show test case details"
|
||||
echo " attune pack test <pack> --detailed # Show stdout/stderr"
|
||||
echo " attune pack test <pack> --output json # JSON output"
|
||||
echo " attune pack test <pack> --output yaml # YAML output"
|
||||
echo ""
|
||||
echo "Test Configuration:"
|
||||
echo " Pack tests are configured in pack.yaml under the 'testing' section"
|
||||
echo " See packs/core/pack.yaml for an example"
|
||||
echo ""
|
||||
echo "Documentation:"
|
||||
echo " docs/pack-testing-framework.md - Full design document"
|
||||
echo " packs/core/tests/README.md - Core pack test documentation"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
300
docs/examples/registry-index.json
Normal file
300
docs/examples/registry-index.json
Normal file
@@ -0,0 +1,300 @@
|
||||
{
|
||||
"registry_name": "Example Attune Pack Registry",
|
||||
"registry_url": "https://registry.example.com",
|
||||
"version": "1.0",
|
||||
"last_updated": "2024-01-20T12:00:00Z",
|
||||
"packs": [
|
||||
{
|
||||
"ref": "slack",
|
||||
"label": "Slack Integration",
|
||||
"description": "Send messages, upload files, and monitor Slack channels for events",
|
||||
"version": "2.1.0",
|
||||
"author": "Attune Community",
|
||||
"email": "community@attune.io",
|
||||
"homepage": "https://github.com/attune-io/pack-slack",
|
||||
"repository": "https://github.com/attune-io/pack-slack",
|
||||
"license": "Apache-2.0",
|
||||
"keywords": ["slack", "messaging", "notifications", "chat"],
|
||||
"runtime_deps": ["python3"],
|
||||
"install_sources": [
|
||||
{
|
||||
"type": "git",
|
||||
"url": "https://github.com/attune-io/pack-slack.git",
|
||||
"ref": "v2.1.0",
|
||||
"checksum": "sha256:abc123def456789012345678901234567890123456789012345678901234abcd"
|
||||
},
|
||||
{
|
||||
"type": "archive",
|
||||
"url": "https://github.com/attune-io/pack-slack/archive/refs/tags/v2.1.0.zip",
|
||||
"checksum": "sha256:def456abc789012345678901234567890123456789012345678901234567890ab"
|
||||
}
|
||||
],
|
||||
"contents": {
|
||||
"actions": [
|
||||
{
|
||||
"name": "send_message",
|
||||
"description": "Send a message to a Slack channel"
|
||||
},
|
||||
{
|
||||
"name": "upload_file",
|
||||
"description": "Upload a file to a Slack channel"
|
||||
},
|
||||
{
|
||||
"name": "create_channel",
|
||||
"description": "Create a new Slack channel"
|
||||
}
|
||||
],
|
||||
"sensors": [
|
||||
{
|
||||
"name": "message_sensor",
|
||||
"description": "Monitor Slack channels for new messages"
|
||||
}
|
||||
],
|
||||
"triggers": [
|
||||
{
|
||||
"name": "message_received",
|
||||
"description": "Fires when a message is received in a monitored channel"
|
||||
},
|
||||
{
|
||||
"name": "reaction_added",
|
||||
"description": "Fires when a reaction is added to a message"
|
||||
}
|
||||
],
|
||||
"rules": [],
|
||||
"workflows": []
|
||||
},
|
||||
"dependencies": {
|
||||
"attune_version": ">=0.1.0",
|
||||
"python_version": ">=3.9",
|
||||
"packs": []
|
||||
},
|
||||
"meta": {
|
||||
"downloads": 1543,
|
||||
"stars": 87,
|
||||
"tested_attune_versions": ["0.1.0", "0.2.0"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"ref": "aws",
|
||||
"label": "AWS Integration",
|
||||
"description": "Interact with AWS services including EC2, S3, Lambda, and more",
|
||||
"version": "1.5.2",
|
||||
"author": "Attune Community",
|
||||
"email": "community@attune.io",
|
||||
"homepage": "https://github.com/attune-io/pack-aws",
|
||||
"repository": "https://github.com/attune-io/pack-aws",
|
||||
"license": "Apache-2.0",
|
||||
"keywords": ["aws", "cloud", "ec2", "s3", "lambda"],
|
||||
"runtime_deps": ["python3"],
|
||||
"install_sources": [
|
||||
{
|
||||
"type": "git",
|
||||
"url": "https://github.com/attune-io/pack-aws.git",
|
||||
"ref": "v1.5.2",
|
||||
"checksum": "sha256:123abc456def789012345678901234567890123456789012345678901234cdef"
|
||||
},
|
||||
{
|
||||
"type": "archive",
|
||||
"url": "https://github.com/attune-io/pack-aws/archive/refs/tags/v1.5.2.tar.gz",
|
||||
"checksum": "sha256:456def123abc789012345678901234567890123456789012345678901234fedc"
|
||||
}
|
||||
],
|
||||
"contents": {
|
||||
"actions": [
|
||||
{
|
||||
"name": "ec2_start_instance",
|
||||
"description": "Start an EC2 instance"
|
||||
},
|
||||
{
|
||||
"name": "ec2_stop_instance",
|
||||
"description": "Stop an EC2 instance"
|
||||
},
|
||||
{
|
||||
"name": "s3_upload",
|
||||
"description": "Upload a file to S3"
|
||||
},
|
||||
{
|
||||
"name": "s3_download",
|
||||
"description": "Download a file from S3"
|
||||
},
|
||||
{
|
||||
"name": "lambda_invoke",
|
||||
"description": "Invoke a Lambda function"
|
||||
}
|
||||
],
|
||||
"sensors": [
|
||||
{
|
||||
"name": "cloudwatch_alarm_sensor",
|
||||
"description": "Monitor CloudWatch alarms"
|
||||
}
|
||||
],
|
||||
"triggers": [
|
||||
{
|
||||
"name": "cloudwatch_alarm",
|
||||
"description": "Fires when a CloudWatch alarm changes state"
|
||||
}
|
||||
],
|
||||
"rules": [],
|
||||
"workflows": []
|
||||
},
|
||||
"dependencies": {
|
||||
"attune_version": ">=0.1.0",
|
||||
"python_version": ">=3.9",
|
||||
"packs": []
|
||||
},
|
||||
"meta": {
|
||||
"downloads": 2341,
|
||||
"stars": 124,
|
||||
"tested_attune_versions": ["0.1.0", "0.2.0"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"ref": "github",
|
||||
"label": "GitHub Integration",
|
||||
"description": "Automate GitHub workflows, manage issues, pull requests, and repositories",
|
||||
"version": "1.3.0",
|
||||
"author": "Attune Community",
|
||||
"email": "community@attune.io",
|
||||
"homepage": "https://github.com/attune-io/pack-github",
|
||||
"repository": "https://github.com/attune-io/pack-github",
|
||||
"license": "MIT",
|
||||
"keywords": ["github", "git", "ci", "vcs", "version-control"],
|
||||
"runtime_deps": ["python3"],
|
||||
"install_sources": [
|
||||
{
|
||||
"type": "git",
|
||||
"url": "https://github.com/attune-io/pack-github.git",
|
||||
"ref": "v1.3.0",
|
||||
"checksum": "sha256:789abc123def456012345678901234567890123456789012345678901234bcde"
|
||||
},
|
||||
{
|
||||
"type": "archive",
|
||||
"url": "https://github.com/attune-io/pack-github/releases/download/v1.3.0/pack-github-1.3.0.zip",
|
||||
"checksum": "sha256:abc789def123456012345678901234567890123456789012345678901234edcb"
|
||||
}
|
||||
],
|
||||
"contents": {
|
||||
"actions": [
|
||||
{
|
||||
"name": "create_issue",
|
||||
"description": "Create a new issue in a repository"
|
||||
},
|
||||
{
|
||||
"name": "create_pr",
|
||||
"description": "Create a new pull request"
|
||||
},
|
||||
{
|
||||
"name": "merge_pr",
|
||||
"description": "Merge a pull request"
|
||||
},
|
||||
{
|
||||
"name": "add_comment",
|
||||
"description": "Add a comment to an issue or PR"
|
||||
}
|
||||
],
|
||||
"sensors": [
|
||||
{
|
||||
"name": "webhook_sensor",
|
||||
"description": "Receive GitHub webhook events"
|
||||
}
|
||||
],
|
||||
"triggers": [
|
||||
{
|
||||
"name": "push",
|
||||
"description": "Fires when code is pushed to a repository"
|
||||
},
|
||||
{
|
||||
"name": "pull_request",
|
||||
"description": "Fires when a pull request is opened, closed, or updated"
|
||||
},
|
||||
{
|
||||
"name": "issue",
|
||||
"description": "Fires when an issue is created or updated"
|
||||
}
|
||||
],
|
||||
"rules": [],
|
||||
"workflows": []
|
||||
},
|
||||
"dependencies": {
|
||||
"attune_version": ">=0.1.0",
|
||||
"python_version": ">=3.8",
|
||||
"packs": []
|
||||
},
|
||||
"meta": {
|
||||
"downloads": 1876,
|
||||
"stars": 98,
|
||||
"tested_attune_versions": ["0.1.0", "0.2.0"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"ref": "monitoring",
|
||||
"label": "Monitoring Pack",
|
||||
"description": "Monitor system metrics, check service health, and send alerts",
|
||||
"version": "1.0.0",
|
||||
"author": "Attune Community",
|
||||
"email": "community@attune.io",
|
||||
"homepage": "https://github.com/attune-io/pack-monitoring",
|
||||
"repository": "https://github.com/attune-io/pack-monitoring",
|
||||
"license": "Apache-2.0",
|
||||
"keywords": ["monitoring", "alerting", "metrics", "health-check"],
|
||||
"runtime_deps": ["python3", "shell"],
|
||||
"install_sources": [
|
||||
{
|
||||
"type": "git",
|
||||
"url": "https://github.com/attune-io/pack-monitoring.git",
|
||||
"ref": "v1.0.0",
|
||||
"checksum": "sha256:def123abc456789012345678901234567890123456789012345678901234fade"
|
||||
},
|
||||
{
|
||||
"type": "archive",
|
||||
"url": "https://github.com/attune-io/pack-monitoring/archive/refs/tags/v1.0.0.tar.gz",
|
||||
"checksum": "sha256:123def456abc789012345678901234567890123456789012345678901234defa"
|
||||
}
|
||||
],
|
||||
"contents": {
|
||||
"actions": [
|
||||
{
|
||||
"name": "check_http",
|
||||
"description": "Check if an HTTP endpoint is responding"
|
||||
},
|
||||
{
|
||||
"name": "check_tcp_port",
|
||||
"description": "Check if a TCP port is open"
|
||||
},
|
||||
{
|
||||
"name": "get_cpu_usage",
|
||||
"description": "Get current CPU usage"
|
||||
},
|
||||
{
|
||||
"name": "get_memory_usage",
|
||||
"description": "Get current memory usage"
|
||||
}
|
||||
],
|
||||
"sensors": [
|
||||
{
|
||||
"name": "metric_threshold_sensor",
|
||||
"description": "Monitor metrics and fire when thresholds are crossed"
|
||||
}
|
||||
],
|
||||
"triggers": [
|
||||
{
|
||||
"name": "metric_threshold",
|
||||
"description": "Fires when a metric crosses a threshold"
|
||||
}
|
||||
],
|
||||
"rules": [],
|
||||
"workflows": []
|
||||
},
|
||||
"dependencies": {
|
||||
"attune_version": ">=0.1.0",
|
||||
"python_version": ">=3.9",
|
||||
"packs": []
|
||||
},
|
||||
"meta": {
|
||||
"downloads": 892,
|
||||
"stars": 56,
|
||||
"tested_attune_versions": ["0.1.0", "0.2.0"]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
641
docs/examples/rule-parameter-examples.md
Normal file
641
docs/examples/rule-parameter-examples.md
Normal file
@@ -0,0 +1,641 @@
|
||||
# Rule Parameter Mapping Examples
|
||||
|
||||
This document provides practical, copy-paste ready examples of rule parameter mapping.
|
||||
|
||||
---
|
||||
|
||||
## Example 1: Static Parameters Only
|
||||
|
||||
**Use Case:** Simple echo with fixed message (included in seed data as `core.rule.timer_10s_echo`)
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "core.rule.timer_10s_echo",
|
||||
"pack_ref": "core",
|
||||
"trigger_ref": "core.intervaltimer",
|
||||
"action_ref": "core.echo",
|
||||
"action_params": {
|
||||
"message": "hello, world"
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
- The rule references the generic `core.intervaltimer` trigger type
|
||||
- A sensor (`core.timer_10s_sensor`) is configured with `{"unit": "seconds", "interval": 10}` to fire this trigger every 10 seconds
|
||||
- When the sensor fires the trigger, the rule evaluates and executes the `core.echo` action with the message "hello, world"
|
||||
|
||||
**Result:** Every 10 seconds, the timer sensor fires and the echo action receives the message "hello, world" to print to stdout.
|
||||
|
||||
**When to use:** Fixed notifications, health checks, scheduled tasks with constant parameters.
|
||||
|
||||
**Note:** This example is included in the core pack seed data (`scripts/seed_core_pack.sql`) and serves as a basic demonstration of rule functionality. The seed script creates both the sensor instance and the rule.
|
||||
|
||||
---
|
||||
|
||||
## Example 2: Dynamic from Trigger Payload
|
||||
|
||||
**Use Case:** Alert with error details from event
|
||||
|
||||
**Event Payload:**
|
||||
```json
|
||||
{
|
||||
"service": "payment-api",
|
||||
"error": "Database connection timeout",
|
||||
"severity": "critical",
|
||||
"timestamp": "2026-01-17T15:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "alerts.error_notification",
|
||||
"pack_ref": "alerts",
|
||||
"trigger_ref": "core.error_event",
|
||||
"action_ref": "slack.post_message",
|
||||
"action_params": {
|
||||
"channel": "#incidents",
|
||||
"message": "🚨 Error in {{ trigger.payload.service }}: {{ trigger.payload.error }}",
|
||||
"severity": "{{ trigger.payload.severity }}",
|
||||
"timestamp": "{{ trigger.payload.timestamp }}"
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Resolved Parameters:**
|
||||
```json
|
||||
{
|
||||
"channel": "#incidents",
|
||||
"message": "🚨 Error in payment-api: Database connection timeout",
|
||||
"severity": "critical",
|
||||
"timestamp": "2026-01-17T15:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** Alerts, notifications, any scenario where event data drives the action.
|
||||
|
||||
---
|
||||
|
||||
## Example 3: Dynamic from Pack Config
|
||||
|
||||
**Use Case:** API integration with credentials from config
|
||||
|
||||
**Pack Config:**
|
||||
```json
|
||||
{
|
||||
"ref": "slack",
|
||||
"config": {
|
||||
"api_token": "xoxb-1234567890-abcdefghijk",
|
||||
"default_channel": "#general",
|
||||
"bot_name": "Attune Bot"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "slack.auto_notify",
|
||||
"pack_ref": "slack",
|
||||
"trigger_ref": "core.notification_event",
|
||||
"action_ref": "slack.post_message",
|
||||
"action_params": {
|
||||
"token": "{{ pack.config.api_token }}",
|
||||
"channel": "{{ pack.config.default_channel }}",
|
||||
"username": "{{ pack.config.bot_name }}",
|
||||
"message": "Notification triggered"
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Resolved Parameters:**
|
||||
```json
|
||||
{
|
||||
"token": "xoxb-1234567890-abcdefghijk",
|
||||
"channel": "#general",
|
||||
"username": "Attune Bot",
|
||||
"message": "Notification triggered"
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** API integrations, any action requiring credentials or configuration.
|
||||
|
||||
---
|
||||
|
||||
## Example 4: Mixed Static and Dynamic
|
||||
|
||||
**Use Case:** GitHub issue creation with mixed parameters
|
||||
|
||||
**Event Payload:**
|
||||
```json
|
||||
{
|
||||
"error_message": "Memory leak detected",
|
||||
"severity": "high",
|
||||
"service": "worker-pool",
|
||||
"stack_trace": "Error at line 42..."
|
||||
}
|
||||
```
|
||||
|
||||
**Pack Config:**
|
||||
```json
|
||||
{
|
||||
"ref": "github",
|
||||
"config": {
|
||||
"token": "ghp_xxxxxxxxxxxx",
|
||||
"repo_owner": "myorg",
|
||||
"repo_name": "myrepo"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "github.create_issue_on_error",
|
||||
"pack_ref": "github",
|
||||
"trigger_ref": "core.error_event",
|
||||
"action_ref": "github.create_issue",
|
||||
"action_params": {
|
||||
"token": "{{ pack.config.token }}",
|
||||
"repo": "{{ pack.config.repo_owner }}/{{ pack.config.repo_name }}",
|
||||
"title": "[{{ trigger.payload.severity }}] {{ trigger.payload.service }}: {{ trigger.payload.error_message }}",
|
||||
"body": "Error Details:\n\nService: {{ trigger.payload.service }}\nSeverity: {{ trigger.payload.severity }}\n\nStack Trace:\n{{ trigger.payload.stack_trace }}",
|
||||
"labels": ["bug", "automated"],
|
||||
"assignees": ["oncall"]
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Resolved Parameters:**
|
||||
```json
|
||||
{
|
||||
"token": "ghp_xxxxxxxxxxxx",
|
||||
"repo": "myorg/myrepo",
|
||||
"title": "[high] worker-pool: Memory leak detected",
|
||||
"body": "Error Details:\n\nService: worker-pool\nSeverity: high\n\nStack Trace:\nError at line 42...",
|
||||
"labels": ["bug", "automated"],
|
||||
"assignees": ["oncall"]
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** Complex integrations requiring both configuration and event data.
|
||||
|
||||
---
|
||||
|
||||
## Example 5: Nested Object Access
|
||||
|
||||
**Use Case:** Extract deeply nested values
|
||||
|
||||
**Event Payload:**
|
||||
```json
|
||||
{
|
||||
"user": {
|
||||
"id": 12345,
|
||||
"profile": {
|
||||
"name": "Alice Smith",
|
||||
"email": "alice@example.com",
|
||||
"department": "Engineering"
|
||||
}
|
||||
},
|
||||
"action": "login",
|
||||
"metadata": {
|
||||
"ip": "192.168.1.100",
|
||||
"user_agent": "Mozilla/5.0..."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "audit.log_user_action",
|
||||
"pack_ref": "audit",
|
||||
"trigger_ref": "core.user_event",
|
||||
"action_ref": "audit.log",
|
||||
"action_params": {
|
||||
"user_id": "{{ trigger.payload.user.id }}",
|
||||
"user_name": "{{ trigger.payload.user.profile.name }}",
|
||||
"user_email": "{{ trigger.payload.user.profile.email }}",
|
||||
"department": "{{ trigger.payload.user.profile.department }}",
|
||||
"action": "{{ trigger.payload.action }}",
|
||||
"ip_address": "{{ trigger.payload.metadata.ip }}"
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Resolved Parameters:**
|
||||
```json
|
||||
{
|
||||
"user_id": 12345,
|
||||
"user_name": "Alice Smith",
|
||||
"user_email": "alice@example.com",
|
||||
"department": "Engineering",
|
||||
"action": "login",
|
||||
"ip_address": "192.168.1.100"
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** Complex event structures, deeply nested data.
|
||||
|
||||
---
|
||||
|
||||
## Example 6: Array Access
|
||||
|
||||
**Use Case:** Extract specific array elements
|
||||
|
||||
**Event Payload:**
|
||||
```json
|
||||
{
|
||||
"errors": [
|
||||
"Connection timeout",
|
||||
"Retry failed",
|
||||
"Circuit breaker open"
|
||||
],
|
||||
"tags": ["production", "critical", "api-gateway"]
|
||||
}
|
||||
```
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "alerts.first_error",
|
||||
"pack_ref": "alerts",
|
||||
"trigger_ref": "core.error_event",
|
||||
"action_ref": "slack.post_message",
|
||||
"action_params": {
|
||||
"channel": "#alerts",
|
||||
"message": "Primary error: {{ trigger.payload.errors.0 }}",
|
||||
"secondary_error": "{{ trigger.payload.errors.1 }}",
|
||||
"environment": "{{ trigger.payload.tags.0 }}",
|
||||
"severity": "{{ trigger.payload.tags.1 }}"
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Resolved Parameters:**
|
||||
```json
|
||||
{
|
||||
"channel": "#alerts",
|
||||
"message": "Primary error: Connection timeout",
|
||||
"secondary_error": "Retry failed",
|
||||
"environment": "production",
|
||||
"severity": "critical"
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** Event payloads with arrays where you need specific elements.
|
||||
|
||||
---
|
||||
|
||||
## Example 7: System Variables
|
||||
|
||||
**Use Case:** Include system-provided metadata
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "monitoring.heartbeat",
|
||||
"pack_ref": "monitoring",
|
||||
"trigger_ref": "core.timer_5m",
|
||||
"action_ref": "http.post",
|
||||
"action_params": {
|
||||
"url": "{{ pack.config.monitoring_url }}",
|
||||
"body": {
|
||||
"source": "attune",
|
||||
"timestamp": "{{ system.timestamp }}",
|
||||
"rule_id": "{{ system.rule.id }}",
|
||||
"rule_ref": "{{ system.rule.ref }}",
|
||||
"event_id": "{{ system.event.id }}",
|
||||
"status": "healthy"
|
||||
}
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Resolved Parameters (example):**
|
||||
```json
|
||||
{
|
||||
"url": "https://monitoring.example.com/heartbeat",
|
||||
"body": {
|
||||
"source": "attune",
|
||||
"timestamp": "2026-01-17T15:30:00Z",
|
||||
"rule_id": 42,
|
||||
"rule_ref": "monitoring.heartbeat",
|
||||
"event_id": 123,
|
||||
"status": "healthy"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** Audit trails, logging, debugging, tracking execution context.
|
||||
|
||||
---
|
||||
|
||||
## Example 8: PagerDuty Integration
|
||||
|
||||
**Use Case:** Trigger PagerDuty incident from metrics
|
||||
|
||||
**Event Payload:**
|
||||
```json
|
||||
{
|
||||
"metric_name": "cpu_usage",
|
||||
"current_value": 95.3,
|
||||
"threshold": 80,
|
||||
"host": "web-server-01",
|
||||
"duration_seconds": 300
|
||||
}
|
||||
```
|
||||
|
||||
**Pack Config:**
|
||||
```json
|
||||
{
|
||||
"ref": "pagerduty",
|
||||
"config": {
|
||||
"routing_key": "R123ABC456DEF789",
|
||||
"default_severity": "error"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rule:**
|
||||
```json
|
||||
{
|
||||
"ref": "pagerduty.critical_metric",
|
||||
"pack_ref": "pagerduty",
|
||||
"trigger_ref": "metrics.threshold_exceeded",
|
||||
"action_ref": "pagerduty.trigger_incident",
|
||||
"action_params": {
|
||||
"routing_key": "{{ pack.config.routing_key }}",
|
||||
"event_action": "trigger",
|
||||
"payload": {
|
||||
"summary": "{{ trigger.payload.metric_name }} exceeded threshold on {{ trigger.payload.host }}",
|
||||
"severity": "critical",
|
||||
"source": "{{ trigger.payload.host }}",
|
||||
"custom_details": {
|
||||
"metric": "{{ trigger.payload.metric_name }}",
|
||||
"current_value": "{{ trigger.payload.current_value }}",
|
||||
"threshold": "{{ trigger.payload.threshold }}",
|
||||
"duration": "{{ trigger.payload.duration_seconds }}s"
|
||||
}
|
||||
},
|
||||
"dedup_key": "{{ trigger.payload.host }}_{{ trigger.payload.metric_name }}"
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Resolved Parameters:**
|
||||
```json
|
||||
{
|
||||
"routing_key": "R123ABC456DEF789",
|
||||
"event_action": "trigger",
|
||||
"payload": {
|
||||
"summary": "cpu_usage exceeded threshold on web-server-01",
|
||||
"severity": "critical",
|
||||
"source": "web-server-01",
|
||||
"custom_details": {
|
||||
"metric": "cpu_usage",
|
||||
"current_value": 95.3,
|
||||
"threshold": 80,
|
||||
"duration": "300s"
|
||||
}
|
||||
},
|
||||
"dedup_key": "web-server-01_cpu_usage"
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** Incident management, alerting, on-call notifications.
|
||||
|
||||
---
|
||||
|
||||
## Example 9: Webhook to Multiple Services
|
||||
|
||||
**Use Case:** Fan-out webhook data to multiple channels
|
||||
|
||||
**Event Payload:**
|
||||
```json
|
||||
{
|
||||
"event_type": "deployment",
|
||||
"service": "api-gateway",
|
||||
"version": "v2.3.1",
|
||||
"environment": "production",
|
||||
"deployed_by": "alice@example.com",
|
||||
"timestamp": "2026-01-17T15:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Pack Config:**
|
||||
```json
|
||||
{
|
||||
"ref": "notifications",
|
||||
"config": {
|
||||
"slack_channel": "#deployments",
|
||||
"slack_token": "xoxb-...",
|
||||
"teams_webhook": "https://outlook.office.com/webhook/...",
|
||||
"email_recipients": ["team@example.com"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rule (Slack):**
|
||||
```json
|
||||
{
|
||||
"ref": "notifications.deployment_slack",
|
||||
"pack_ref": "notifications",
|
||||
"trigger_ref": "webhooks.deployment",
|
||||
"action_ref": "slack.post_message",
|
||||
"action_params": {
|
||||
"token": "{{ pack.config.slack_token }}",
|
||||
"channel": "{{ pack.config.slack_channel }}",
|
||||
"message": "✅ Deployment Complete",
|
||||
"attachments": [
|
||||
{
|
||||
"color": "good",
|
||||
"fields": [
|
||||
{
|
||||
"title": "Service",
|
||||
"value": "{{ trigger.payload.service }}",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": "Version",
|
||||
"value": "{{ trigger.payload.version }}",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": "Environment",
|
||||
"value": "{{ trigger.payload.environment }}",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": "Deployed By",
|
||||
"value": "{{ trigger.payload.deployed_by }}",
|
||||
"short": true
|
||||
}
|
||||
],
|
||||
"footer": "Attune Automation",
|
||||
"ts": "{{ trigger.payload.timestamp }}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**When to use:** Multi-channel notifications, deployment tracking, audit trails.
|
||||
|
||||
---
|
||||
|
||||
## Example 10: Conditional Channels (Future with Filters)
|
||||
|
||||
**Use Case:** Route to different channels based on severity
|
||||
|
||||
**Event Payload:**
|
||||
```json
|
||||
{
|
||||
"severity": "critical",
|
||||
"message": "Database unreachable"
|
||||
}
|
||||
```
|
||||
|
||||
**Rule (Future Enhancement with Filters):**
|
||||
```json
|
||||
{
|
||||
"ref": "alerts.smart_routing",
|
||||
"pack_ref": "alerts",
|
||||
"trigger_ref": "core.alert_event",
|
||||
"action_ref": "slack.post_message",
|
||||
"action_params": {
|
||||
"channel": "{{ trigger.payload.severity | default: 'info' | map: {'critical': '#incidents', 'high': '#alerts', 'medium': '#monitoring', 'low': '#logs'} }}",
|
||||
"message": "{{ trigger.payload.message }}",
|
||||
"color": "{{ trigger.payload.severity | map: {'critical': 'danger', 'high': 'warning', 'medium': 'good', 'low': '#cccccc'} }}"
|
||||
},
|
||||
"enabled": true
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** This uses advanced filter syntax not yet implemented (Phase 2).
|
||||
|
||||
---
|
||||
|
||||
## Testing Your Rules
|
||||
|
||||
### 1. Create a Test Event
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/api/v1/events \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"trigger_ref": "core.error_event",
|
||||
"payload": {
|
||||
"service": "test-service",
|
||||
"error": "Test error message",
|
||||
"severity": "info"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
### 2. Check Enforcement
|
||||
|
||||
```bash
|
||||
curl -X GET http://localhost:8080/api/v1/enforcements?limit=1 \
|
||||
-H "Authorization: Bearer $TOKEN"
|
||||
```
|
||||
|
||||
Look at the `config` field to verify parameters were resolved correctly.
|
||||
|
||||
### 3. Check Execution
|
||||
|
||||
```bash
|
||||
curl -X GET http://localhost:8080/api/v1/executions?limit=1 \
|
||||
-H "Authorization: Bearer $TOKEN"
|
||||
```
|
||||
|
||||
The `config` field should contain the same resolved parameters.
|
||||
|
||||
---
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### API Authentication
|
||||
```json
|
||||
{
|
||||
"action_params": {
|
||||
"url": "{{ pack.config.api_url }}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{ pack.config.api_token }}",
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Context
|
||||
```json
|
||||
{
|
||||
"action_params": {
|
||||
"summary": "Error: {{ trigger.payload.message }}",
|
||||
"details": {
|
||||
"service": "{{ trigger.payload.service }}",
|
||||
"host": "{{ trigger.payload.host }}",
|
||||
"timestamp": "{{ trigger.payload.timestamp }}",
|
||||
"stack_trace": "{{ trigger.payload.stack_trace }}"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### User Information
|
||||
```json
|
||||
{
|
||||
"action_params": {
|
||||
"user": {
|
||||
"id": "{{ trigger.payload.user.id }}",
|
||||
"name": "{{ trigger.payload.user.name }}",
|
||||
"email": "{{ trigger.payload.user.email }}"
|
||||
},
|
||||
"action": "{{ trigger.payload.action_type }}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Tips and Best Practices
|
||||
|
||||
1. **Use pack config for secrets** - Never hardcode API keys in rules
|
||||
2. **Provide context** - Include relevant fields from event payload
|
||||
3. **Keep templates simple** - Deeply nested access can be fragile
|
||||
4. **Test with sample events** - Verify your templates work before production
|
||||
5. **Use descriptive field names** - Make it clear what each parameter is for
|
||||
6. **Document your rules** - Use clear labels and descriptions
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Rule Parameter Mapping Guide](../rule-parameter-mapping.md) - Complete reference
|
||||
- [Rule Management API](../api-rules.md) - API documentation
|
||||
- [Pack Management](../api-packs.md) - Pack configuration
|
||||
|
||||
---
|
||||
|
||||
## Need Help?
|
||||
|
||||
If your templates aren't resolving correctly:
|
||||
|
||||
1. Check the event payload structure in the database
|
||||
2. Verify the pack config exists and has the expected fields
|
||||
3. Review sensor service logs for template resolution warnings
|
||||
4. Test with a simple template first, then add complexity
|
||||
5. Ensure field names match exactly (case-sensitive)
|
||||
68
docs/examples/simple-workflow.yaml
Normal file
68
docs/examples/simple-workflow.yaml
Normal file
@@ -0,0 +1,68 @@
|
||||
# Simple Workflow Example
|
||||
# This is a minimal example to demonstrate basic workflow concepts
|
||||
|
||||
ref: examples.simple_sequential_workflow
|
||||
label: "Simple Sequential Workflow"
|
||||
description: "A basic workflow that runs three tasks in sequence"
|
||||
version: "1.0.0"
|
||||
|
||||
# Input parameters
|
||||
parameters:
|
||||
message:
|
||||
type: string
|
||||
required: true
|
||||
description: "Message to process"
|
||||
|
||||
uppercase:
|
||||
type: boolean
|
||||
default: false
|
||||
description: "Convert message to uppercase"
|
||||
|
||||
# Output schema
|
||||
output:
|
||||
type: object
|
||||
properties:
|
||||
original:
|
||||
type: string
|
||||
processed:
|
||||
type: string
|
||||
final:
|
||||
type: string
|
||||
|
||||
# Workflow variables
|
||||
vars:
|
||||
processed_message: null
|
||||
timestamp: null
|
||||
|
||||
# Task definitions
|
||||
tasks:
|
||||
# Task 1: Log the input
|
||||
- name: log_input
|
||||
action: core.echo
|
||||
input:
|
||||
message: "Starting workflow with: {{ parameters.message }}"
|
||||
publish:
|
||||
- timestamp: "{{ system.timestamp }}"
|
||||
on_success: process_message
|
||||
|
||||
# Task 2: Process the message
|
||||
- name: process_message
|
||||
action: string.transform
|
||||
input:
|
||||
text: "{{ parameters.message }}"
|
||||
uppercase: "{{ parameters.uppercase }}"
|
||||
publish:
|
||||
- processed_message: "{{ task.process_message.result.text }}"
|
||||
on_success: finalize
|
||||
|
||||
# Task 3: Finalize and log result
|
||||
- name: finalize
|
||||
action: core.echo
|
||||
input:
|
||||
message: "Workflow complete. Result: {{ vars.processed_message }}"
|
||||
|
||||
# Map workflow outputs
|
||||
output_map:
|
||||
original: "{{ parameters.message }}"
|
||||
processed: "{{ vars.processed_message }}"
|
||||
final: "{{ task.finalize.result.message }}"
|
||||
211
docs/examples/timer-types-example.yaml
Normal file
211
docs/examples/timer-types-example.yaml
Normal file
@@ -0,0 +1,211 @@
|
||||
# Timer Types Example
# This file demonstrates all three timer types with practical use cases

---
# Example 1: Interval Timer - Health Check
# Use case: Monitor service health every 30 seconds
name: health_check_rule
ref: monitoring.health_check
description: "Periodic health check every 30 seconds"
enabled: true
trigger:
  type: core.intervaltimer
  parameters:
    unit: seconds
    interval: 30
action:
  ref: core.http_request
  parameters:
    url: "https://api.example.com/health"
    method: GET
    timeout: 5  # seconds; keep short so checks never overlap the 30s interval
---
# Example 2: Interval Timer - Backup
# Use case: Run backups every 6 hours
name: database_backup_rule
ref: backup.database
description: "Backup database every 6 hours"
enabled: true
trigger:
  type: core.intervaltimer
  parameters:
    unit: hours
    interval: 6
action:
  ref: backup.run_backup
  parameters:
    backup_type: full
    retention_days: 30
---
# Example 3: Cron Timer - Business Hours Report
# Use case: Generate report weekdays at 5 PM
name: daily_report_rule
ref: reports.daily
description: "Generate daily report at 5 PM on weekdays"
enabled: true
trigger:
  type: core.crontimer
  parameters:
    # Six-field expression: sec min hour day-of-month month day-of-week
    expression: "0 0 17 * * 1-5"  # 5 PM Monday-Friday
    timezone: "America/New_York"
    description: "End of business day"
action:
  ref: reports.generate
  parameters:
    report_type: daily_summary
    recipients:
      - ops@example.com
      - management@example.com
---
# Example 4: Cron Timer - Cleanup Task
# Use case: Clean up old logs daily at midnight
name: log_cleanup_rule
ref: maintenance.cleanup
description: "Clean up old logs daily at midnight"
enabled: true
trigger:
  type: core.crontimer
  parameters:
    expression: "0 0 0 * * *"  # Midnight UTC
    timezone: "UTC"
action:
  ref: core.shell
  parameters:
    # Literal block scalar preserves the newline between the two commands
    cmd: |
      find /var/log/app -type f -mtime +30 -delete
      echo "Cleaned up logs older than 30 days"
---
# Example 5: Cron Timer - Complex Schedule
# Use case: Run intensive task during off-peak hours only
name: data_processing_rule
ref: processing.intensive
description: "Process data only during off-peak hours"
enabled: true
trigger:
  type: core.crontimer
  parameters:
    # Every 2 hours between 10 PM and 6 AM (fires at 22:00, 00:00, 02:00, 04:00)
    expression: "0 0 22,0,2,4 * * *"
    timezone: "UTC"
action:
  ref: processing.run_job
  parameters:
    job_type: intensive_processing
    # Capped below 2 hours so a run finishes before the next trigger fires
    max_duration_minutes: 90
---
# Example 6: DateTime Timer - Scheduled Deployment
# Use case: Deploy to production at specific date/time
name: production_deployment_rule
ref: deployment.production
description: "Deploy version 2.0 to production"
enabled: true
trigger:
  type: core.datetimetimer
  parameters:
    fire_at: "2024-06-15T02:00:00Z"  # "Z" suffix matches the UTC timezone below
    timezone: "UTC"
    description: "Version 2.0 production release"
action:
  ref: deployment.deploy
  parameters:
    environment: production
    version: "2.0.0"
    rollback_on_error: true
---
# Example 7: DateTime Timer - Event Reminder
# Use case: Send reminder before important event
name: meeting_reminder_rule
ref: notifications.reminder
description: "Send reminder 15 minutes before meeting"
enabled: true
trigger:
  type: core.datetimetimer
  parameters:
    # No "Z" suffix: a trailing "Z" would pin the instant to UTC and
    # contradict the America/New_York timezone (13:45Z is 9:45 AM Eastern).
    # Without it, the timestamp is interpreted in the timezone below.
    fire_at: "2024-03-20T13:45:00"  # 1:45 PM Eastern, 15 min before 2 PM meeting
    timezone: "America/New_York"
    description: "Board meeting reminder"
action:
  ref: notifications.send
  parameters:
    recipients:
      - team@example.com
    subject: "Reminder: Board Meeting in 15 minutes"
    message: "The quarterly board meeting starts at 2:00 PM in Conference Room A"
---
# Example 8: DateTime Timer - License Renewal
# Use case: Reminder for upcoming license expiration
name: license_renewal_reminder
ref: admin.license_renewal
description: "Remind about license expiration"
enabled: true
trigger:
  type: core.datetimetimer
  parameters:
    fire_at: "2024-11-15T09:00:00Z"  # 30 days before the Dec 15 expiration
    timezone: "UTC"
    description: "30-day license renewal reminder"
action:
  ref: notifications.send
  parameters:
    recipients:
      - admin@example.com
      - finance@example.com
    subject: "Action Required: Software License Expires in 30 Days"
    message: "Please renew the production software license before Dec 15, 2024"
    priority: high
---
# Example 9: Combined Pattern - Monitoring with Escalation
# Use case: Regular checks with scheduled review
name: monitoring_with_review
ref: monitoring.combined
description: "Regular health checks with weekly review"
# Note: This would actually be two separate rules
# Rule 9a: Interval check
enabled: true
trigger:
  type: core.intervaltimer
  parameters:
    unit: minutes
    interval: 5
action:
  ref: monitoring.check_and_alert
  parameters:
    service: payment_api
    alert_threshold: 3

# Rule 9b would be a separate rule:
# name: monitoring_weekly_review
# trigger:
#   type: core.crontimer
#   parameters:
#     expression: "0 0 9 * * 1"  # Monday 9 AM
# action:
#   ref: reports.weekly_summary
---
# Best Practices Demonstrated:

# Interval Timers:
# - Use for continuous monitoring (health checks, metrics)
# - Use for regular maintenance (backups, cleanup)
# - Simple, predictable schedules

# Cron Timers:
# - Use for business-hour operations
# - Use for complex recurring schedules
# - Use when you need specific times (not just intervals)

# DateTime Timers:
# - Use for one-time events (deployments, migrations)
# - Use for reminders and notifications
# - Use for scheduled cutover events
# - Remember: These fire once and are automatically removed
||||
Reference in New Issue
Block a user