[WIP] workflow builder

This commit is contained in:
2026-02-23 20:45:10 -06:00
parent d629da32fa
commit 53a3fbb6b1
66 changed files with 7887 additions and 1608 deletions

View File

@@ -38,14 +38,14 @@ pub struct CreateActionRequest {
#[schema(example = 1)]
pub runtime: Option<i64>,
/// Parameter schema (JSON Schema) defining expected inputs
/// Parameter schema (StackStorm-style) defining expected inputs with inline required/secret
#[serde(skip_serializing_if = "Option::is_none")]
#[schema(value_type = Object, nullable = true, example = json!({"type": "object", "properties": {"channel": {"type": "string"}, "message": {"type": "string"}}}))]
#[schema(value_type = Object, nullable = true, example = json!({"channel": {"type": "string", "description": "Slack channel", "required": true}, "message": {"type": "string", "description": "Message text", "required": true}}))]
pub param_schema: Option<JsonValue>,
/// Output schema (JSON Schema) defining expected outputs
/// Output schema (flat format) defining expected outputs with inline required/secret
#[serde(skip_serializing_if = "Option::is_none")]
#[schema(value_type = Object, nullable = true, example = json!({"type": "object", "properties": {"message_id": {"type": "string"}}}))]
#[schema(value_type = Object, nullable = true, example = json!({"message_id": {"type": "string", "description": "ID of the sent message", "required": true}}))]
pub out_schema: Option<JsonValue>,
}
@@ -71,7 +71,7 @@ pub struct UpdateActionRequest {
#[schema(example = 1)]
pub runtime: Option<i64>,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,
@@ -115,7 +115,7 @@ pub struct ActionResponse {
#[schema(example = 1)]
pub runtime: Option<i64>,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,

View File

@@ -137,8 +137,8 @@ pub struct CreateInquiryRequest {
#[schema(example = "Approve deployment to production?")]
pub prompt: String,
/// Optional JSON schema for the expected response format
#[schema(value_type = Object, example = json!({"type": "object", "properties": {"approved": {"type": "boolean"}}}))]
/// Optional schema for the expected response format (flat format with inline required/secret)
#[schema(value_type = Object, example = json!({"approved": {"type": "boolean", "description": "Whether the deployment is approved", "required": true}}))]
pub response_schema: Option<JsonSchema>,
/// Optional identity ID to assign this inquiry to

View File

@@ -28,9 +28,9 @@ pub struct CreatePackRequest {
#[schema(example = "1.0.0")]
pub version: String,
/// Configuration schema (JSON Schema)
/// Configuration schema (flat format with inline required/secret per parameter)
#[serde(default = "default_empty_object")]
#[schema(value_type = Object, example = json!({"type": "object", "properties": {"api_token": {"type": "string"}}}))]
#[schema(value_type = Object, example = json!({"api_token": {"type": "string", "description": "API authentication key", "required": true, "secret": true}}))]
pub conf_schema: JsonValue,
/// Pack configuration values
@@ -95,11 +95,6 @@ pub struct InstallPackRequest {
#[schema(example = "main")]
pub ref_spec: Option<String>,
/// Force reinstall if pack already exists
#[serde(default)]
#[schema(example = false)]
pub force: bool,
/// Skip running pack tests during installation
#[serde(default)]
#[schema(example = false)]

View File

@@ -28,14 +28,14 @@ pub struct CreateTriggerRequest {
#[schema(example = "Triggers when a webhook is received")]
pub description: Option<String>,
/// Parameter schema (JSON Schema) defining event payload structure
/// Parameter schema (StackStorm-style) defining trigger configuration with inline required/secret
#[serde(skip_serializing_if = "Option::is_none")]
#[schema(value_type = Object, nullable = true, example = json!({"type": "object", "properties": {"url": {"type": "string"}}}))]
#[schema(value_type = Object, nullable = true, example = json!({"url": {"type": "string", "description": "Webhook URL", "required": true}}))]
pub param_schema: Option<JsonValue>,
/// Output schema (JSON Schema) defining event data structure
/// Output schema (flat format) defining event data structure with inline required/secret
#[serde(skip_serializing_if = "Option::is_none")]
#[schema(value_type = Object, nullable = true, example = json!({"type": "object", "properties": {"payload": {"type": "object"}}}))]
#[schema(value_type = Object, nullable = true, example = json!({"payload": {"type": "object", "description": "Event payload data", "required": true}}))]
pub out_schema: Option<JsonValue>,
/// Whether the trigger is enabled
@@ -56,7 +56,7 @@ pub struct UpdateTriggerRequest {
#[schema(example = "Updated webhook trigger description")]
pub description: Option<String>,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,
@@ -100,7 +100,7 @@ pub struct TriggerResponse {
#[schema(example = true)]
pub enabled: bool,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,
@@ -208,9 +208,9 @@ pub struct CreateSensorRequest {
#[schema(example = "monitoring.cpu_threshold")]
pub trigger_ref: String,
/// Parameter schema (JSON Schema) for sensor configuration
/// Parameter schema (flat format) for sensor configuration
#[serde(skip_serializing_if = "Option::is_none")]
#[schema(value_type = Object, nullable = true, example = json!({"type": "object", "properties": {"threshold": {"type": "number"}}}))]
#[schema(value_type = Object, nullable = true, example = json!({"threshold": {"type": "number", "description": "Alert threshold", "required": true}}))]
pub param_schema: Option<JsonValue>,
/// Configuration values for this sensor instance (conforms to param_schema)
@@ -242,7 +242,7 @@ pub struct UpdateSensorRequest {
#[schema(example = "/sensors/monitoring/cpu_monitor_v2.py")]
pub entrypoint: Option<String>,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,
@@ -302,7 +302,7 @@ pub struct SensorResponse {
#[schema(example = true)]
pub enabled: bool,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,

View File

@@ -6,6 +6,54 @@ use serde_json::Value as JsonValue;
use utoipa::{IntoParams, ToSchema};
use validator::Validate;
/// Request DTO for saving a workflow file to disk and syncing to DB
///
/// Used by both the create (`POST /packs/{pack_ref}/workflow-files`) and
/// update (`PUT /workflows/{ref}/file`) endpoints; the handlers derive the
/// workflow ref as `{pack_ref}.{name}`.
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct SaveWorkflowFileRequest {
    /// Workflow name (becomes filename: {name}.workflow.yaml)
    // NOTE(review): only length-validated here — a name containing '/' or ".."
    // would be joined verbatim into the on-disk path; confirm the write path
    // sanitizes it before use.
    #[validate(length(min = 1, max = 255))]
    #[schema(example = "deploy_app")]
    pub name: String,

    /// Human-readable label
    #[validate(length(min = 1, max = 255))]
    #[schema(example = "Deploy Application")]
    pub label: String,

    /// Workflow description
    #[schema(example = "Deploys an application to the target environment")]
    pub description: Option<String>,

    /// Workflow version (semantic versioning recommended)
    // Free-form string: no semver format check is enforced, only length.
    #[validate(length(min = 1, max = 50))]
    #[schema(example = "1.0.0")]
    pub version: String,

    /// Pack reference this workflow belongs to
    #[validate(length(min = 1, max = 255))]
    #[schema(example = "core")]
    pub pack_ref: String,

    /// The full workflow definition as JSON (will be serialized to YAML on disk)
    #[schema(value_type = Object)]
    pub definition: JsonValue,

    /// Parameter schema (flat format with inline required/secret)
    #[schema(value_type = Object, nullable = true)]
    pub param_schema: Option<JsonValue>,

    /// Output schema (flat format)
    #[schema(value_type = Object, nullable = true)]
    pub out_schema: Option<JsonValue>,

    /// Tags for categorization
    #[schema(example = json!(["deployment", "automation"]))]
    pub tags: Option<Vec<String>>,

    /// Whether the workflow is enabled
    // When omitted, the save handler defaults this to `true`.
    #[schema(example = true)]
    pub enabled: Option<bool>,
}
/// Request DTO for creating a new workflow
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct CreateWorkflowRequest {
@@ -33,12 +81,12 @@ pub struct CreateWorkflowRequest {
#[schema(example = "1.0.0")]
pub version: String,
/// Parameter schema (JSON Schema) defining expected inputs
#[schema(value_type = Object, example = json!({"type": "object", "properties": {"severity": {"type": "string"}, "channel": {"type": "string"}}}))]
/// Parameter schema (StackStorm-style) defining expected inputs with inline required/secret
#[schema(value_type = Object, example = json!({"severity": {"type": "string", "description": "Incident severity", "required": true}, "channel": {"type": "string", "description": "Notification channel"}}))]
pub param_schema: Option<JsonValue>,
/// Output schema (JSON Schema) defining expected outputs
#[schema(value_type = Object, example = json!({"type": "object", "properties": {"incident_id": {"type": "string"}}}))]
/// Output schema (flat format) defining expected outputs with inline required/secret
#[schema(value_type = Object, example = json!({"incident_id": {"type": "string", "description": "Unique incident identifier", "required": true}}))]
pub out_schema: Option<JsonValue>,
/// Workflow definition (complete workflow YAML structure as JSON)
@@ -71,7 +119,7 @@ pub struct UpdateWorkflowRequest {
#[schema(example = "1.1.0")]
pub version: Option<String>,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,
@@ -123,7 +171,7 @@ pub struct WorkflowResponse {
#[schema(example = "1.0.0")]
pub version: String,
/// Parameter schema
/// Parameter schema (StackStorm-style with inline required/secret)
#[schema(value_type = Object, nullable = true)]
pub param_schema: Option<JsonValue>,

View File

@@ -40,7 +40,9 @@ use crate::{
#[derive(Debug, Clone, Serialize, Deserialize, Validate, ToSchema)]
pub struct CreateEventRequest {
/// Trigger reference (e.g., "core.timer", "core.webhook")
/// Also accepts "trigger_type" for compatibility with the sensor interface spec.
#[validate(length(min = 1))]
#[serde(alias = "trigger_type")]
#[schema(example = "core.timer")]
pub trigger_ref: String,

View File

@@ -10,9 +10,13 @@ use axum::{
use std::sync::Arc;
use validator::Validate;
use attune_common::models::OwnerType;
use attune_common::repositories::{
action::ActionRepository,
key::{CreateKeyInput, KeyRepository, UpdateKeyInput},
Create, Delete, List, Update,
pack::PackRepository,
trigger::SensorRepository,
Create, Delete, FindByRef, List, Update,
};
use crate::auth::RequireAuth;
@@ -157,6 +161,78 @@ pub async fn create_key(
)));
}
// Auto-resolve owner IDs from refs when only the ref is provided.
// This makes the API more ergonomic for sensors and other clients that
// know the owner ref but not the numeric database ID.
let mut owner_sensor = request.owner_sensor;
let mut owner_action = request.owner_action;
let mut owner_pack = request.owner_pack;
match request.owner_type {
OwnerType::Sensor => {
if owner_sensor.is_none() {
if let Some(ref sensor_ref) = request.owner_sensor_ref {
if let Some(sensor) =
SensorRepository::find_by_ref(&state.db, sensor_ref).await?
{
tracing::debug!(
"Auto-resolved owner_sensor from ref '{}' to id {}",
sensor_ref,
sensor.id
);
owner_sensor = Some(sensor.id);
} else {
return Err(ApiError::BadRequest(format!(
"Sensor with ref '{}' not found",
sensor_ref
)));
}
}
}
}
OwnerType::Action => {
if owner_action.is_none() {
if let Some(ref action_ref) = request.owner_action_ref {
if let Some(action) =
ActionRepository::find_by_ref(&state.db, action_ref).await?
{
tracing::debug!(
"Auto-resolved owner_action from ref '{}' to id {}",
action_ref,
action.id
);
owner_action = Some(action.id);
} else {
return Err(ApiError::BadRequest(format!(
"Action with ref '{}' not found",
action_ref
)));
}
}
}
}
OwnerType::Pack => {
if owner_pack.is_none() {
if let Some(ref pack_ref) = request.owner_pack_ref {
if let Some(pack) = PackRepository::find_by_ref(&state.db, pack_ref).await? {
tracing::debug!(
"Auto-resolved owner_pack from ref '{}' to id {}",
pack_ref,
pack.id
);
owner_pack = Some(pack.id);
} else {
return Err(ApiError::BadRequest(format!(
"Pack with ref '{}' not found",
pack_ref
)));
}
}
}
}
_ => {}
}
// Encrypt value if requested
let (value, encryption_key_hash) = if request.encrypted {
let encryption_key = state
@@ -190,11 +266,11 @@ pub async fn create_key(
owner_type: request.owner_type,
owner: request.owner,
owner_identity: request.owner_identity,
owner_pack: request.owner_pack,
owner_pack,
owner_pack_ref: request.owner_pack_ref,
owner_action: request.owner_action,
owner_action,
owner_action_ref: request.owner_action_ref,
owner_sensor: request.owner_sensor,
owner_sensor,
owner_sensor_ref: request.owner_sensor_ref,
name: request.name,
encrypted: request.encrypted,

View File

@@ -14,7 +14,10 @@ use validator::Validate;
use attune_common::models::pack_test::PackTestResult;
use attune_common::mq::{MessageEnvelope, MessageType, PackRegisteredPayload};
use attune_common::repositories::{
action::ActionRepository,
pack::{CreatePackInput, UpdatePackInput},
rule::{RestoreRuleInput, RuleRepository},
trigger::TriggerRepository,
Create, Delete, FindById, FindByRef, PackRepository, PackTestRepository, Pagination, Update,
};
use attune_common::workflow::{PackWorkflowService, PackWorkflowServiceConfig};
@@ -545,6 +548,9 @@ async fn register_pack_internal(
.and_then(|v| v.as_str())
.map(|s| s.to_string());
// Ad-hoc rules to restore after pack reinstallation
let mut saved_adhoc_rules: Vec<attune_common::models::rule::Rule> = Vec::new();
// Check if pack already exists
if !force {
if PackRepository::exists_by_ref(&state.db, &pack_ref).await? {
@@ -554,8 +560,20 @@ async fn register_pack_internal(
)));
}
} else {
// Delete existing pack if force is true
// Delete existing pack if force is true, preserving ad-hoc (user-created) rules
if let Some(existing_pack) = PackRepository::find_by_ref(&state.db, &pack_ref).await? {
// Save ad-hoc rules before deletion — CASCADE on pack FK would destroy them
saved_adhoc_rules = RuleRepository::find_adhoc_by_pack(&state.db, existing_pack.id)
.await
.unwrap_or_default();
if !saved_adhoc_rules.is_empty() {
tracing::info!(
"Preserving {} ad-hoc rule(s) during reinstall of pack '{}'",
saved_adhoc_rules.len(),
pack_ref
);
}
PackRepository::delete(&state.db, existing_pack.id).await?;
tracing::info!("Deleted existing pack '{}' for forced reinstall", pack_ref);
}
@@ -671,6 +689,123 @@ async fn register_pack_internal(
}
}
// Restore ad-hoc rules that were saved before pack deletion, and
// re-link any rules from other packs whose action/trigger FKs were
// set to NULL when the old pack's entities were cascade-deleted.
{
// Phase 1: Restore saved ad-hoc rules
if !saved_adhoc_rules.is_empty() {
let mut restored = 0u32;
for saved_rule in &saved_adhoc_rules {
// Resolve action and trigger IDs by ref (they may have been recreated)
let action_id = ActionRepository::find_by_ref(&state.db, &saved_rule.action_ref)
.await
.ok()
.flatten()
.map(|a| a.id);
let trigger_id = TriggerRepository::find_by_ref(&state.db, &saved_rule.trigger_ref)
.await
.ok()
.flatten()
.map(|t| t.id);
let input = RestoreRuleInput {
r#ref: saved_rule.r#ref.clone(),
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: saved_rule.label.clone(),
description: saved_rule.description.clone(),
action: action_id,
action_ref: saved_rule.action_ref.clone(),
trigger: trigger_id,
trigger_ref: saved_rule.trigger_ref.clone(),
conditions: saved_rule.conditions.clone(),
action_params: saved_rule.action_params.clone(),
trigger_params: saved_rule.trigger_params.clone(),
enabled: saved_rule.enabled,
};
match RuleRepository::restore_rule(&state.db, input).await {
Ok(rule) => {
restored += 1;
if rule.action.is_none() || rule.trigger.is_none() {
tracing::warn!(
"Restored ad-hoc rule '{}' with unresolved references \
(action: {}, trigger: {})",
rule.r#ref,
if rule.action.is_some() {
"linked"
} else {
"NULL"
},
if rule.trigger.is_some() {
"linked"
} else {
"NULL"
},
);
}
}
Err(e) => {
tracing::warn!(
"Failed to restore ad-hoc rule '{}': {}",
saved_rule.r#ref,
e
);
}
}
}
tracing::info!(
"Restored {}/{} ad-hoc rule(s) for pack '{}'",
restored,
saved_adhoc_rules.len(),
pack.r#ref
);
}
// Phase 2: Re-link rules from other packs whose action/trigger FKs
// were set to NULL when the old pack's entities were cascade-deleted
let new_actions = ActionRepository::find_by_pack(&state.db, pack.id)
.await
.unwrap_or_default();
let new_triggers = TriggerRepository::find_by_pack(&state.db, pack.id)
.await
.unwrap_or_default();
for action in &new_actions {
match RuleRepository::relink_action_by_ref(&state.db, &action.r#ref, action.id).await {
Ok(count) if count > 0 => {
tracing::info!("Re-linked {} rule(s) to action '{}'", count, action.r#ref);
}
Err(e) => {
tracing::warn!(
"Failed to re-link rules to action '{}': {}",
action.r#ref,
e
);
}
_ => {}
}
}
for trigger in &new_triggers {
match RuleRepository::relink_trigger_by_ref(&state.db, &trigger.r#ref, trigger.id).await
{
Ok(count) if count > 0 => {
tracing::info!("Re-linked {} rule(s) to trigger '{}'", count, trigger.r#ref);
}
Err(e) => {
tracing::warn!(
"Failed to re-link rules to trigger '{}': {}",
trigger.r#ref,
e
);
}
_ => {}
}
}
}
// Set up runtime environments for the pack's actions.
// This creates virtualenvs, installs dependencies, etc. based on each
// runtime's execution_config from the database.
@@ -964,7 +1099,6 @@ async fn register_pack_internal(
responses(
(status = 201, description = "Pack installed successfully", body = ApiResponse<PackInstallResponse>),
(status = 400, description = "Invalid request or tests failed", body = ApiResponse<String>),
(status = 409, description = "Pack already exists", body = ApiResponse<String>),
(status = 501, description = "Not implemented yet", body = ApiResponse<String>),
),
security(("bearer_auth" = []))
@@ -1122,12 +1256,14 @@ pub async fn install_pack(
tracing::info!("Pack moved to permanent storage: {:?}", final_path);
// Register the pack in database (from permanent storage location)
// Register the pack in database (from permanent storage location).
// Remote installs always force-overwrite: if you're pulling from a remote,
// the intent is to get that pack installed regardless of local state.
let pack_id = register_pack_internal(
state.clone(),
user_sub,
final_path.to_string_lossy().to_string(),
request.force,
true, // always force for remote installs
request.skip_tests,
)
.await

View File

@@ -4,9 +4,10 @@ use axum::{
extract::{Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
routing::{get, post, put},
Json, Router,
};
use std::path::PathBuf;
use std::sync::Arc;
use validator::Validate;
@@ -23,8 +24,8 @@ use crate::{
dto::{
common::{PaginatedResponse, PaginationParams},
workflow::{
CreateWorkflowRequest, UpdateWorkflowRequest, WorkflowResponse, WorkflowSearchParams,
WorkflowSummary,
CreateWorkflowRequest, SaveWorkflowFileRequest, UpdateWorkflowRequest,
WorkflowResponse, WorkflowSearchParams, WorkflowSummary,
},
ApiResponse, SuccessResponse,
},
@@ -340,6 +341,202 @@ pub async fn delete_workflow(
Ok((StatusCode::OK, Json(response)))
}
/// Save a workflow file to disk and sync it to the database
///
/// Writes a `{name}.workflow.yaml` file to `{packs_base_dir}/{pack_ref}/actions/workflows/`
/// and creates or updates the corresponding workflow_definition record in the database.
///
/// # Errors
/// - 404 if the pack does not exist
/// - 409 if a workflow with the derived ref already exists
/// - 400 on validation failure, 500 on filesystem failure
#[utoipa::path(
    post,
    path = "/api/v1/packs/{pack_ref}/workflow-files",
    tag = "workflows",
    params(
        ("pack_ref" = String, Path, description = "Pack reference identifier")
    ),
    request_body = SaveWorkflowFileRequest,
    responses(
        (status = 201, description = "Workflow file saved and synced", body = inline(ApiResponse<WorkflowResponse>)),
        (status = 400, description = "Validation error"),
        (status = 404, description = "Pack not found"),
        (status = 409, description = "Workflow with same ref already exists"),
        (status = 500, description = "Failed to write workflow file")
    ),
    security(("bearer_auth" = []))
)]
pub async fn save_workflow_file(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(pack_ref): Path<String>,
    Json(request): Json<SaveWorkflowFileRequest>,
) -> ApiResult<impl IntoResponse> {
    request.validate()?;

    // Verify pack exists
    let pack = PackRepository::find_by_ref(&state.db, &pack_ref)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Pack '{}' not found", pack_ref)))?;

    let workflow_ref = format!("{}.{}", pack_ref, request.name);

    // Reject duplicates before touching the filesystem so we never write a
    // file for a workflow the insert below would refuse.
    if WorkflowDefinitionRepository::find_by_ref(&state.db, &workflow_ref)
        .await?
        .is_some()
    {
        return Err(ApiError::Conflict(format!(
            "Workflow with ref '{}' already exists",
            workflow_ref
        )));
    }

    // Write YAML file to disk.
    // NOTE(review): if the DB create below fails, the file is left orphaned
    // on disk — consider best-effort cleanup on error.
    let packs_base_dir = PathBuf::from(&state.config.packs_base_dir);
    write_workflow_yaml(&packs_base_dir, &pack_ref, &request).await?;

    // Create workflow in database. `request.definition` is already a
    // JsonValue, so it is stored directly — the previous
    // `serde_json::to_value(&request.definition)` round-trip cloned the value
    // through the serializer and carried an error path that cannot fire for
    // a Value.
    let workflow_input = CreateWorkflowDefinitionInput {
        r#ref: workflow_ref,
        pack: pack.id,
        pack_ref: pack.r#ref.clone(),
        label: request.label,
        description: request.description,
        version: request.version,
        param_schema: request.param_schema,
        out_schema: request.out_schema,
        definition: request.definition,
        tags: request.tags.unwrap_or_default(),
        enabled: request.enabled.unwrap_or(true),
    };

    let workflow = WorkflowDefinitionRepository::create(&state.db, workflow_input).await?;

    let response = ApiResponse::with_message(
        WorkflowResponse::from(workflow),
        "Workflow file saved and synced successfully",
    );
    Ok((StatusCode::CREATED, Json(response)))
}
/// Update a workflow file on disk and sync changes to the database
///
/// # Errors
/// - 404 if the workflow or the pack named in the request does not exist
/// - 400 on validation failure, 500 on filesystem failure
#[utoipa::path(
    put,
    path = "/api/v1/workflows/{ref}/file",
    tag = "workflows",
    params(
        ("ref" = String, Path, description = "Workflow reference identifier")
    ),
    request_body = SaveWorkflowFileRequest,
    responses(
        (status = 200, description = "Workflow file updated and synced", body = inline(ApiResponse<WorkflowResponse>)),
        (status = 400, description = "Validation error"),
        (status = 404, description = "Workflow not found"),
        (status = 500, description = "Failed to write workflow file")
    ),
    security(("bearer_auth" = []))
)]
pub async fn update_workflow_file(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(workflow_ref): Path<String>,
    Json(request): Json<SaveWorkflowFileRequest>,
) -> ApiResult<impl IntoResponse> {
    request.validate()?;

    // Check if workflow exists
    let existing_workflow = WorkflowDefinitionRepository::find_by_ref(&state.db, &workflow_ref)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Workflow '{}' not found", workflow_ref)))?;

    // Verify pack exists
    let _pack = PackRepository::find_by_ref(&state.db, &request.pack_ref)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Pack '{}' not found", request.pack_ref)))?;

    // NOTE(review): `request.name`/`request.pack_ref` are not cross-checked
    // against `workflow_ref` — a mismatched name writes a *new* file while
    // updating the existing DB record, leaving the old file stale on disk.
    // Confirm this is intended.

    // Write updated YAML file to disk
    let packs_base_dir = PathBuf::from(&state.config.packs_base_dir);
    write_workflow_yaml(&packs_base_dir, &request.pack_ref, &request).await?;

    // Update workflow in database. `request.definition` is already a
    // JsonValue; the previous `serde_json::to_value` round-trip was a
    // needless clone whose error path cannot fire for a Value, so the
    // definition is stored directly.
    let update_input = UpdateWorkflowDefinitionInput {
        label: Some(request.label),
        description: request.description,
        version: Some(request.version),
        param_schema: request.param_schema,
        out_schema: request.out_schema,
        definition: Some(request.definition),
        tags: request.tags,
        enabled: request.enabled,
    };

    let workflow =
        WorkflowDefinitionRepository::update(&state.db, existing_workflow.id, update_input).await?;

    let response = ApiResponse::with_message(
        WorkflowResponse::from(workflow),
        "Workflow file updated and synced successfully",
    );
    Ok((StatusCode::OK, Json(response)))
}
/// Write a workflow definition to disk as YAML
///
/// The file lands at `{packs_base_dir}/{pack_ref}/actions/workflows/{name}.workflow.yaml`;
/// the directory chain is created if missing.
///
/// # Errors
/// - `ApiError::BadRequest` if `request.name` would escape the workflows
///   directory, or if the definition cannot be serialized to YAML.
/// - `ApiError::InternalServerError` on any filesystem failure.
async fn write_workflow_yaml(
    packs_base_dir: &std::path::Path,
    pack_ref: &str,
    request: &SaveWorkflowFileRequest,
) -> Result<(), ApiError> {
    // `name` is only length-validated upstream; without this check a name
    // like "../evil" would be joined straight into the path (path traversal).
    if request.name.contains(['/', '\\']) || request.name.contains("..") {
        return Err(ApiError::BadRequest(format!(
            "Invalid workflow name '{}': must not contain path separators or '..'",
            request.name
        )));
    }

    let workflows_dir = packs_base_dir
        .join(pack_ref)
        .join("actions")
        .join("workflows");

    // Ensure the directory exists
    tokio::fs::create_dir_all(&workflows_dir)
        .await
        .map_err(|e| {
            ApiError::InternalServerError(format!(
                "Failed to create workflows directory '{}': {}",
                workflows_dir.display(),
                e
            ))
        })?;

    let filename = format!("{}.workflow.yaml", request.name);
    let filepath = workflows_dir.join(&filename);

    // Serialize definition to YAML
    let yaml_content = serde_yaml_ng::to_string(&request.definition).map_err(|e| {
        ApiError::BadRequest(format!("Failed to serialize workflow to YAML: {}", e))
    })?;
    // Remember the size now: the original stat-ed the file afterwards via
    // std `metadata()`, a blocking syscall inside an async fn.
    let byte_count = yaml_content.len();

    // Write file
    tokio::fs::write(&filepath, yaml_content)
        .await
        .map_err(|e| {
            ApiError::InternalServerError(format!(
                "Failed to write workflow file '{}': {}",
                filepath.display(),
                e
            ))
        })?;

    tracing::info!(
        "Wrote workflow file: {} ({} bytes)",
        filepath.display(),
        byte_count
    );

    Ok(())
}
/// Create workflow routes
pub fn routes() -> Router<Arc<AppState>> {
Router::new()
@@ -350,7 +547,9 @@ pub fn routes() -> Router<Arc<AppState>> {
.put(update_workflow)
.delete(delete_workflow),
)
.route("/workflows/{ref}/file", put(update_workflow_file))
.route("/packs/{pack_ref}/workflows", get(list_workflows_by_pack))
.route("/packs/{pack_ref}/workflow-files", post(save_workflow_file))
}
#[cfg(test)]
@@ -362,4 +561,43 @@ mod tests {
// Just verify the router can be constructed
let _router = routes();
}
#[test]
fn test_save_request_validation() {
    // A fully-populated, well-formed request must pass validation.
    let request = SaveWorkflowFileRequest {
        name: String::from("test_workflow"),
        label: String::from("Test Workflow"),
        description: Some(String::from("A test workflow")),
        version: String::from("1.0.0"),
        pack_ref: String::from("core"),
        definition: serde_json::json!({
            "ref": "core.test_workflow",
            "label": "Test Workflow",
            "version": "1.0.0",
            "tasks": [{"name": "task1", "action": "core.echo"}]
        }),
        param_schema: None,
        out_schema: None,
        tags: None,
        enabled: None,
    };

    assert!(request.validate().is_ok());
}
#[test]
fn test_save_request_validation_empty_name() {
    // The `name` field requires length >= 1, so an empty name must fail.
    let request = SaveWorkflowFileRequest {
        name: String::new(),
        label: String::from("Test"),
        description: None,
        version: String::from("1.0.0"),
        pack_ref: String::from("core"),
        definition: serde_json::json!({}),
        param_schema: None,
        out_schema: None,
        tags: None,
        enabled: None,
    };

    assert!(request.validate().is_err());
}
}

View File

@@ -1,9 +1,14 @@
//! Parameter validation module
//!
//! Validates trigger and action parameters against their declared JSON schemas.
//! Template-aware: values containing `{{ }}` template expressions are replaced
//! with schema-appropriate placeholders before validation, so template expressions
//! pass type checks while literal values are still validated normally.
//! Validates trigger and action parameters against their declared schemas.
//! Schemas use the flat StackStorm-style format:
//! { "param_name": { "type": "string", "required": true, "secret": true, ... }, ... }
//!
//! Before validation, flat schemas are converted to standard JSON Schema so we
//! can reuse the `jsonschema` crate. Template-aware: values containing `{{ }}`
//! template expressions are replaced with schema-appropriate placeholders before
//! validation, so template expressions pass type checks while literal values are
//! still validated normally.
use attune_common::models::{action::Action, trigger::Trigger};
use jsonschema::Validator;
@@ -11,6 +16,68 @@ use serde_json::Value;
use crate::middleware::ApiError;
/// Convert a flat StackStorm-style parameter schema into a standard JSON Schema
/// object suitable for `jsonschema::Validator`.
///
/// Input (flat):
/// ```json
/// { "url": { "type": "string", "required": true }, "timeout": { "type": "integer", "default": 30 } }
/// ```
///
/// Output (JSON Schema):
/// ```json
/// { "type": "object", "properties": { "url": { "type": "string" }, "timeout": { "type": "integer", "default": 30 } }, "required": ["url"] }
/// ```
fn flat_to_json_schema(flat: &Value) -> Value {
    // Non-object input — fall back to a permissive schema.
    let Some(entries) = flat.as_object() else {
        return serde_json::json!({});
    };

    // Backward tolerance: inputs already shaped like a JSON Schema
    // ("type": "object" plus "properties") pass through untouched.
    let looks_like_json_schema = entries.get("type").and_then(Value::as_str) == Some("object")
        && entries.contains_key("properties");
    if looks_like_json_schema {
        return flat.clone();
    }

    let mut required_names: Vec<Value> = Vec::new();
    let mut properties = serde_json::Map::new();

    for (name, definition) in entries {
        // Non-object entries are ignored (shouldn't appear in valid schemas).
        let Some(def_obj) = definition.as_object() else {
            continue;
        };

        // Copy the property definition, removing the Attune-only extensions
        // `required`, `secret`, and `position` — none are JSON Schema keywords.
        let mut cleaned = def_obj.clone();
        let marked_required = cleaned
            .remove("required")
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        cleaned.remove("secret");
        cleaned.remove("position");

        if marked_required {
            required_names.push(Value::String(name.clone()));
        }
        properties.insert(name.clone(), Value::Object(cleaned));
    }

    // Assemble the standard schema; `required` is emitted only when non-empty.
    let mut schema = serde_json::json!({
        "type": "object",
        "properties": Value::Object(properties),
    });
    if !required_names.is_empty() {
        schema["required"] = Value::Array(required_names);
    }
    schema
}
/// Check if a JSON value is (or contains) a template expression.
fn is_template_expression(value: &Value) -> bool {
match value {
@@ -100,7 +167,8 @@ fn placeholder_for_schema(property_schema: &Value) -> Value {
/// schema-appropriate placeholders. Only replaces leaf values that match
/// `{{ ... }}`; non-template values are left untouched for normal validation.
///
/// `schema` should be the full JSON Schema object (with `properties`, `type`, etc).
/// `schema` must be a standard JSON Schema object (with `properties`, `type`, etc).
/// Call `flat_to_json_schema` first if starting from flat format.
fn replace_templates_with_placeholders(params: &Value, schema: &Value) -> Value {
match params {
Value::Object(map) => {
@@ -164,17 +232,23 @@ fn replace_templates_with_placeholders(params: &Value, schema: &Value) -> Value
/// Validate trigger parameters against the trigger's parameter schema.
/// Template expressions (`{{ ... }}`) are accepted for any field type.
///
/// The schema is expected in flat StackStorm format and is converted to
/// JSON Schema internally for validation.
pub fn validate_trigger_params(trigger: &Trigger, params: &Value) -> Result<(), ApiError> {
// If no schema is defined, accept any parameters
let Some(schema) = &trigger.param_schema else {
let Some(flat_schema) = &trigger.param_schema else {
return Ok(());
};
// Convert flat format to JSON Schema for validation
let schema = flat_to_json_schema(flat_schema);
// Replace template expressions with schema-appropriate placeholders
let sanitized = replace_templates_with_placeholders(params, schema);
let sanitized = replace_templates_with_placeholders(params, &schema);
// Compile the JSON schema
let compiled_schema = Validator::new(schema).map_err(|e| {
let compiled_schema = Validator::new(&schema).map_err(|e| {
ApiError::InternalServerError(format!(
"Invalid parameter schema for trigger '{}': {}",
trigger.r#ref, e
@@ -207,17 +281,23 @@ pub fn validate_trigger_params(trigger: &Trigger, params: &Value) -> Result<(),
/// Validate action parameters against the action's parameter schema.
/// Template expressions (`{{ ... }}`) are accepted for any field type.
///
/// The schema is expected in flat StackStorm format and is converted to
/// JSON Schema internally for validation.
pub fn validate_action_params(action: &Action, params: &Value) -> Result<(), ApiError> {
// If no schema is defined, accept any parameters
let Some(schema) = &action.param_schema else {
let Some(flat_schema) = &action.param_schema else {
return Ok(());
};
// Convert flat format to JSON Schema for validation
let schema = flat_to_json_schema(flat_schema);
// Replace template expressions with schema-appropriate placeholders
let sanitized = replace_templates_with_placeholders(params, schema);
let sanitized = replace_templates_with_placeholders(params, &schema);
// Compile the JSON schema
let compiled_schema = Validator::new(schema).map_err(|e| {
let compiled_schema = Validator::new(&schema).map_err(|e| {
ApiError::InternalServerError(format!(
"Invalid parameter schema for action '{}': {}",
action.r#ref, e
@@ -309,15 +389,65 @@ mod tests {
// ── Basic trigger validation (no templates) ──────────────────────
// ── flat_to_json_schema unit tests ───────────────────────────────
#[test]
fn test_flat_to_json_schema_basic() {
    let flat = json!({
        "url": { "type": "string", "required": true },
        "timeout": { "type": "integer", "default": 30 }
    });
    let converted = flat_to_json_schema(&flat);

    // The wrapper must be a standard JSON Schema object.
    assert_eq!(converted["type"], "object");

    // Property definitions carry over, minus the inline `required` flag.
    let url_prop = &converted["properties"]["url"];
    assert_eq!(url_prop["type"], "string");
    assert!(url_prop.get("required").is_none());
    assert_eq!(converted["properties"]["timeout"]["default"], 30);

    // Inline `required: true` is lifted into the top-level `required` array.
    let required = converted["required"].as_array().unwrap();
    assert!(required.contains(&json!("url")));
    assert!(!required.contains(&json!("timeout")));
}
#[test]
fn test_flat_to_json_schema_strips_secret_and_position() {
    let flat = json!({
        "token": { "type": "string", "secret": true, "position": 0, "required": true }
    });
    let converted = flat_to_json_schema(&flat);
    let token_prop = &converted["properties"]["token"];
    // None of the StackStorm-only metadata keys may survive conversion.
    for key in ["secret", "position", "required"] {
        assert!(token_prop.get(key).is_none());
    }
}
#[test]
fn test_flat_to_json_schema_empty() {
    // An empty flat schema still yields an object schema wrapper…
    let converted = flat_to_json_schema(&json!({}));
    assert_eq!(converted["type"], "object");
    // …but with no properties there must be no `required` array at all.
    assert!(converted.get("required").is_none());
}
#[test]
fn test_flat_to_json_schema_passthrough_json_schema() {
    // A schema already in JSON Schema form must come back unchanged.
    let already_json_schema = json!({
        "type": "object",
        "properties": { "x": { "type": "string" } },
        "required": ["x"]
    });
    assert_eq!(flat_to_json_schema(&already_json_schema), already_json_schema);
}
// ── Basic trigger validation (flat format) ──────────────────────
#[test]
fn test_validate_trigger_params_with_valid_params() {
let schema = json!({
"type": "object",
"properties": {
"unit": { "type": "string", "enum": ["seconds", "minutes", "hours"] },
"delta": { "type": "integer", "minimum": 1 }
},
"required": ["unit", "delta"]
"unit": { "type": "string", "enum": ["seconds", "minutes", "hours"], "required": true },
"delta": { "type": "integer", "minimum": 1, "required": true }
});
let trigger = make_trigger(Some(schema));
@@ -328,12 +458,8 @@ mod tests {
#[test]
fn test_validate_trigger_params_with_invalid_params() {
let schema = json!({
"type": "object",
"properties": {
"unit": { "type": "string", "enum": ["seconds", "minutes", "hours"] },
"delta": { "type": "integer", "minimum": 1 }
},
"required": ["unit", "delta"]
"unit": { "type": "string", "enum": ["seconds", "minutes", "hours"], "required": true },
"delta": { "type": "integer", "minimum": 1, "required": true }
});
let trigger = make_trigger(Some(schema));
@@ -351,16 +477,12 @@ mod tests {
assert!(validate_trigger_params(&trigger, &params).is_err());
}
// ── Basic action validation (no templates) ───────────────────────
// ── Basic action validation (flat format) ───────────────────────
#[test]
fn test_validate_action_params_with_valid_params() {
let schema = json!({
"type": "object",
"properties": {
"message": { "type": "string" }
},
"required": ["message"]
"message": { "type": "string", "required": true }
});
let action = make_action(Some(schema));
@@ -371,11 +493,7 @@ mod tests {
#[test]
fn test_validate_action_params_with_empty_params_but_required_fields() {
let schema = json!({
"type": "object",
"properties": {
"message": { "type": "string" }
},
"required": ["message"]
"message": { "type": "string", "required": true }
});
let action = make_action(Some(schema));
@@ -383,16 +501,12 @@ mod tests {
assert!(validate_action_params(&action, &params).is_err());
}
// ── Template-aware validation ────────────────────────────────────
// ── Template-aware validation (flat format) ──────────────────────
#[test]
fn test_template_in_integer_field_passes() {
let schema = json!({
"type": "object",
"properties": {
"counter": { "type": "integer" }
},
"required": ["counter"]
"counter": { "type": "integer", "required": true }
});
let action = make_action(Some(schema));
@@ -403,11 +517,7 @@ mod tests {
#[test]
fn test_template_in_boolean_field_passes() {
let schema = json!({
"type": "object",
"properties": {
"verbose": { "type": "boolean" }
},
"required": ["verbose"]
"verbose": { "type": "boolean", "required": true }
});
let action = make_action(Some(schema));
@@ -418,11 +528,7 @@ mod tests {
#[test]
fn test_template_in_number_field_passes() {
let schema = json!({
"type": "object",
"properties": {
"threshold": { "type": "number", "minimum": 0.0 }
},
"required": ["threshold"]
"threshold": { "type": "number", "minimum": 0.0, "required": true }
});
let action = make_action(Some(schema));
@@ -433,11 +539,7 @@ mod tests {
#[test]
fn test_template_in_enum_field_passes() {
let schema = json!({
"type": "object",
"properties": {
"level": { "type": "string", "enum": ["info", "warn", "error"] }
},
"required": ["level"]
"level": { "type": "string", "enum": ["info", "warn", "error"], "required": true }
});
let action = make_action(Some(schema));
@@ -448,11 +550,7 @@ mod tests {
#[test]
fn test_template_in_array_field_passes() {
let schema = json!({
"type": "object",
"properties": {
"recipients": { "type": "array", "items": { "type": "string" } }
},
"required": ["recipients"]
"recipients": { "type": "array", "items": { "type": "string" }, "required": true }
});
let action = make_action(Some(schema));
@@ -463,11 +561,7 @@ mod tests {
#[test]
fn test_template_in_object_field_passes() {
let schema = json!({
"type": "object",
"properties": {
"metadata": { "type": "object" }
},
"required": ["metadata"]
"metadata": { "type": "object", "required": true }
});
let action = make_action(Some(schema));
@@ -478,13 +572,9 @@ mod tests {
#[test]
fn test_mixed_template_and_literal_values() {
let schema = json!({
"type": "object",
"properties": {
"message": { "type": "string" },
"count": { "type": "integer" },
"verbose": { "type": "boolean" }
},
"required": ["message", "count", "verbose"]
"message": { "type": "string", "required": true },
"count": { "type": "integer", "required": true },
"verbose": { "type": "boolean", "required": true }
});
let action = make_action(Some(schema));
@@ -498,6 +588,26 @@ mod tests {
assert!(validate_action_params(&action, &params).is_ok());
}
// ── Secret fields are ignored during validation ──────────────────
#[test]
fn test_secret_field_validated_normally() {
    let schema = json!({
        "api_key": { "type": "string", "required": true, "secret": true },
        "endpoint": { "type": "string" }
    });
    let action = make_action(Some(schema));

    // Providing the secret satisfies the required constraint.
    let with_secret = json!({ "api_key": "sk-1234", "endpoint": "https://api.example.com" });
    assert!(validate_action_params(&action, &with_secret).is_ok());

    // Omitting a required secret is still a validation failure.
    let without_secret = json!({ "endpoint": "https://api.example.com" });
    assert!(validate_action_params(&action, &without_secret).is_err());
}
#[test]
fn test_literal_values_still_validated() {
let schema = json!({

View File

@@ -8,6 +8,26 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
/// Input for restoring an ad-hoc rule during pack reinstallation.
/// Unlike `CreateRuleInput`, action and trigger IDs are optional because
/// the referenced entities may not exist yet or may have been removed;
/// the string refs are retained so the rule can be re-linked afterwards.
#[derive(Debug, Clone)]
pub struct RestoreRuleInput {
    // Unique rule reference.
    pub r#ref: String,
    // ID of the owning pack.
    pub pack: Id,
    // String reference of the owning pack.
    pub pack_ref: String,
    // Human-readable rule label.
    pub label: String,
    // Rule description.
    pub description: String,
    // Action FK; `None` when the referenced action no longer exists.
    pub action: Option<Id>,
    // String reference of the action (kept even when the FK is `None`).
    pub action_ref: String,
    // Trigger FK; `None` when the referenced trigger no longer exists.
    pub trigger: Option<Id>,
    // String reference of the trigger (kept even when the FK is `None`).
    pub trigger_ref: String,
    // Rule condition expressions (JSON).
    pub conditions: serde_json::Value,
    // Parameters passed to the action (JSON).
    pub action_params: serde_json::Value,
    // Parameters for the trigger (JSON).
    pub trigger_params: serde_json::Value,
    // Whether the restored rule is active.
    pub enabled: bool,
}
/// Repository for Rule operations.
///
/// Stateless marker type: the operations visible here are associated
/// functions that take a database executor explicitly.
pub struct RuleRepository;
@@ -337,4 +357,121 @@ impl RuleRepository {
Ok(rules)
}
/// Find ad-hoc (user-created) rules belonging to a specific pack.
/// Used to preserve custom rules during pack reinstallation.
pub async fn find_adhoc_by_pack<'e, E>(executor: E, pack_id: Id) -> Result<Vec<Rule>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let sql = r#"
        SELECT id, ref, pack, pack_ref, label, description, action, action_ref,
               trigger, trigger_ref, conditions, action_params, trigger_params, enabled, is_adhoc, created, updated
        FROM rule
        WHERE pack = $1 AND is_adhoc = true
        ORDER BY ref ASC
        "#;
    // Single positional bind: the owning pack's ID.
    let adhoc_rules = sqlx::query_as::<_, Rule>(sql)
        .bind(pack_id)
        .fetch_all(executor)
        .await?;
    Ok(adhoc_rules)
}
/// Restore an ad-hoc rule after pack reinstallation.
/// Accepts `Option<Id>` for action and trigger so the rule is preserved
/// even if its referenced entities no longer exist.
pub async fn restore_rule<'e, E>(executor: E, input: RestoreRuleInput) -> Result<Rule>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let rule = sqlx::query_as::<_, Rule>(
r#"
INSERT INTO rule (ref, pack, pack_ref, label, description, action, action_ref,
trigger, trigger_ref, conditions, action_params, trigger_params, enabled, is_adhoc)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, true)
RETURNING id, ref, pack, pack_ref, label, description, action, action_ref,
trigger, trigger_ref, conditions, action_params, trigger_params, enabled, is_adhoc, created, updated
"#,
)
.bind(&input.r#ref)
.bind(input.pack)
.bind(&input.pack_ref)
.bind(&input.label)
.bind(&input.description)
.bind(input.action)
.bind(&input.action_ref)
.bind(input.trigger)
.bind(&input.trigger_ref)
.bind(&input.conditions)
.bind(&input.action_params)
.bind(&input.trigger_params)
.bind(input.enabled)
.fetch_one(executor)
.await
.map_err(|e| {
if let sqlx::Error::Database(ref db_err) = e {
if db_err.is_unique_violation() {
return Error::already_exists("Rule", "ref", &input.r#ref);
}
}
e.into()
})?;
Ok(rule)
}
/// Re-link rules whose action FK is NULL back to a newly recreated action,
/// matched by `action_ref`. Used after pack reinstallation to fix rules
/// from other packs that referenced actions in the reinstalled pack.
///
/// Returns the number of rules that were re-linked.
pub async fn relink_action_by_ref<'e, E>(
    executor: E,
    action_ref: &str,
    action_id: Id,
) -> Result<u64>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let sql = r#"
        UPDATE rule
        SET action = $1, updated = NOW()
        WHERE action IS NULL AND action_ref = $2
        "#;
    let outcome = sqlx::query(sql)
        .bind(action_id)
        .bind(action_ref)
        .execute(executor)
        .await?;
    // Callers use the affected-row count to report how many rules were fixed.
    Ok(outcome.rows_affected())
}
/// Re-link rules whose trigger FK is NULL back to a newly recreated trigger,
/// matched by `trigger_ref`. Used after pack reinstallation to fix rules
/// from other packs that referenced triggers in the reinstalled pack.
///
/// Returns the number of rules that were re-linked.
pub async fn relink_trigger_by_ref<'e, E>(
    executor: E,
    trigger_ref: &str,
    trigger_id: Id,
) -> Result<u64>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let sql = r#"
        UPDATE rule
        SET trigger = $1, updated = NOW()
        WHERE trigger IS NULL AND trigger_ref = $2
        "#;
    let outcome = sqlx::query(sql)
        .bind(trigger_id)
        .bind(trigger_ref)
        .execute(executor)
        .await?;
    // Callers use the affected-row count to report how many rules were fixed.
    Ok(outcome.rows_affected())
}
}

View File

@@ -15,7 +15,8 @@ pub use pack_service::{
};
pub use parser::{
parse_workflow_file, parse_workflow_yaml, workflow_to_json, BackoffStrategy, DecisionBranch,
ParseError, ParseResult, PublishDirective, RetryConfig, Task, TaskType, WorkflowDefinition,
ParseError, ParseResult, PublishDirective, RetryConfig, Task, TaskTransition, TaskType,
WorkflowDefinition,
};
pub use registrar::{RegistrationOptions, RegistrationResult, WorkflowRegistrar};
pub use validator::{ValidationError, ValidationResult, WorkflowValidator};

View File

@@ -2,6 +2,38 @@
//!
//! This module handles parsing workflow YAML files into structured Rust types
//! that can be validated and stored in the database.
//!
//! Supports two task transition formats:
//!
//! **New format (Orquesta-style `next` array):**
//! ```yaml
//! tasks:
//! - name: task1
//! action: core.echo
//! next:
//! - when: "{{ succeeded() }}"
//! publish:
//! - result: "{{ result() }}"
//! do:
//! - task2
//! - log
//! - when: "{{ failed() }}"
//! do:
//! - error_handler
//! ```
//!
//! **Legacy format (flat fields):**
//! ```yaml
//! tasks:
//! - name: task1
//! action: core.echo
//! on_success: task2
//! on_failure: error_handler
//! ```
//!
//! When legacy fields are present, they are automatically converted to `next`
//! transitions during parsing. The canonical internal representation always
//! uses the `next` array.
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
@@ -85,7 +117,40 @@ pub struct WorkflowDefinition {
pub tags: Vec<String>,
}
/// Task definition - can be action, parallel, or workflow type
// ---------------------------------------------------------------------------
// Task transition types (Orquesta-style)
// ---------------------------------------------------------------------------
/// A single task transition evaluated after task completion.
///
/// Transitions are evaluated in order. When `when` is not defined,
/// the transition is unconditional (fires on any completion).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskTransition {
    /// Condition expression (e.g., "{{ succeeded() }}", "{{ failed() }}").
    /// `None` means the transition always fires.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub when: Option<String>,

    /// Variables to publish into the workflow context on this transition.
    /// Empty when the transition only routes to next tasks.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub publish: Vec<PublishDirective>,

    /// Next tasks to invoke when transition criteria is met.
    /// `None` (absent in YAML) makes this a publish-only transition.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub r#do: Option<Vec<String>>,
}
// ---------------------------------------------------------------------------
// Task definition
// ---------------------------------------------------------------------------
/// Task definition - can be action, parallel, or workflow type.
///
/// Supports both the new `next` transition format and legacy flat fields
/// (`on_success`, `on_failure`, etc.) for backward compatibility. During
/// deserialization the legacy fields are captured; call
/// [`Task::normalize_transitions`] (done automatically during parsing) to
/// merge them into the canonical `next` array.
#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
pub struct Task {
/// Unique task name within the workflow
@@ -103,7 +168,7 @@ pub struct Task {
#[serde(default)]
pub input: HashMap<String, JsonValue>,
/// Conditional execution
/// Conditional execution (task-level — controls whether this task runs)
pub when: Option<String>,
/// With-items iteration
@@ -115,41 +180,195 @@ pub struct Task {
/// Concurrency limit for with-items
pub concurrency: Option<usize>,
/// Variable publishing
#[serde(default)]
pub publish: Vec<PublishDirective>,
/// Retry configuration
pub retry: Option<RetryConfig>,
/// Timeout in seconds
pub timeout: Option<u32>,
/// Transition on success
/// Orquesta-style transitions — the canonical representation.
/// Each entry can specify a `when` condition, `publish` directives,
/// and a list of next tasks (`do`).
#[serde(default)]
pub next: Vec<TaskTransition>,
// -- Legacy transition fields (read during deserialization) -------------
// These are kept for backward compatibility with older workflow YAML
// files. During [`normalize_transitions`] they are folded into `next`.
/// Legacy: transition on success
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_success: Option<String>,
/// Transition on failure
/// Legacy: transition on failure
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_failure: Option<String>,
/// Transition on complete (regardless of status)
/// Legacy: transition on complete (regardless of status)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_complete: Option<String>,
/// Transition on timeout
/// Legacy: transition on timeout
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_timeout: Option<String>,
/// Decision-based transitions
#[serde(default)]
/// Legacy: decision-based transitions
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub decision: Vec<DecisionBranch>,
/// Join barrier - wait for N inbound tasks to complete before executing
/// If not specified, task executes immediately when any predecessor completes
/// Special value "all" can be represented as the count of inbound edges
/// Legacy: task-level variable publishing (moved to per-transition in new model)
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub publish: Vec<PublishDirective>,
/// Join barrier - wait for N inbound tasks to complete before executing.
/// If not specified, task executes immediately when any predecessor completes.
/// Special value "all" can be represented as the count of inbound edges.
pub join: Option<usize>,
/// Parallel tasks (for parallel type)
pub tasks: Option<Vec<Task>>,
}
impl Task {
/// Returns `true` if any legacy transition fields are populated.
fn has_legacy_transitions(&self) -> bool {
self.on_success.is_some()
|| self.on_failure.is_some()
|| self.on_complete.is_some()
|| self.on_timeout.is_some()
|| !self.decision.is_empty()
}
/// Convert legacy flat transition fields into the `next` array.
///
/// If `next` is already populated, legacy fields are ignored (the new
/// format takes precedence). After normalization the legacy fields are
/// cleared so serialization only emits the canonical `next` form.
pub fn normalize_transitions(&mut self) {
// If `next` is already populated, the new format wins — clear legacy
if !self.next.is_empty() {
self.clear_legacy_fields();
return;
}
// Nothing to convert
if !self.has_legacy_transitions() && self.publish.is_empty() {
return;
}
let mut transitions: Vec<TaskTransition> = Vec::new();
if let Some(ref target) = self.on_success {
transitions.push(TaskTransition {
when: Some("{{ succeeded() }}".to_string()),
publish: Vec::new(),
r#do: Some(vec![target.clone()]),
});
}
if let Some(ref target) = self.on_failure {
transitions.push(TaskTransition {
when: Some("{{ failed() }}".to_string()),
publish: Vec::new(),
r#do: Some(vec![target.clone()]),
});
}
if let Some(ref target) = self.on_complete {
// on_complete = unconditional
transitions.push(TaskTransition {
when: None,
publish: Vec::new(),
r#do: Some(vec![target.clone()]),
});
}
if let Some(ref target) = self.on_timeout {
transitions.push(TaskTransition {
when: Some("{{ timed_out() }}".to_string()),
publish: Vec::new(),
r#do: Some(vec![target.clone()]),
});
}
// Convert legacy decision branches
for branch in &self.decision {
transitions.push(TaskTransition {
when: branch.when.clone(),
publish: Vec::new(),
r#do: Some(vec![branch.next.clone()]),
});
}
// Attach legacy task-level publish to the first succeeded transition,
// or create a publish-only transition if none exist
if !self.publish.is_empty() {
let succeeded_idx = transitions
.iter()
.position(|t| matches!(&t.when, Some(w) if w.contains("succeeded()")));
if let Some(idx) = succeeded_idx {
transitions[idx].publish = self.publish.clone();
} else if transitions.is_empty() {
transitions.push(TaskTransition {
when: Some("{{ succeeded() }}".to_string()),
publish: self.publish.clone(),
r#do: None,
});
} else {
// Attach to the first transition
transitions[0].publish = self.publish.clone();
}
}
self.next = transitions;
self.clear_legacy_fields();
}
/// Clear legacy transition fields after normalization
fn clear_legacy_fields(&mut self) {
self.on_success = None;
self.on_failure = None;
self.on_complete = None;
self.on_timeout = None;
self.decision.clear();
self.publish.clear();
}
/// Collect all task names referenced by transitions (both `next` and legacy).
/// Used for validation.
pub fn all_transition_targets(&self) -> Vec<&str> {
let mut targets: Vec<&str> = Vec::new();
// From `next` array
for transition in &self.next {
if let Some(ref do_list) = transition.r#do {
for target in do_list {
targets.push(target.as_str());
}
}
}
// From legacy fields (in case normalize hasn't been called yet)
if let Some(ref t) = self.on_success {
targets.push(t.as_str());
}
if let Some(ref t) = self.on_failure {
targets.push(t.as_str());
}
if let Some(ref t) = self.on_complete {
targets.push(t.as_str());
}
if let Some(ref t) = self.on_timeout {
targets.push(t.as_str());
}
for branch in &self.decision {
targets.push(branch.next.as_str());
}
targets
}
}
/// Default task type when none is specified — presumably wired up via
/// `#[serde(default = "default_task_type")]` on `Task::type`; the field's
/// attribute is outside this view, so confirm against the struct definition.
fn default_task_type() -> TaskType {
    TaskType::Action
}
@@ -214,7 +433,7 @@ pub enum BackoffStrategy {
Exponential,
}
/// Decision-based transition
/// Legacy decision-based transition (kept for backward compatibility)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DecisionBranch {
/// Condition to evaluate (template string)
@@ -228,10 +447,17 @@ pub struct DecisionBranch {
pub default: bool,
}
// ---------------------------------------------------------------------------
// Parsing & validation
// ---------------------------------------------------------------------------
/// Parse workflow YAML string into WorkflowDefinition
pub fn parse_workflow_yaml(yaml: &str) -> ParseResult<WorkflowDefinition> {
// Parse YAML
let workflow: WorkflowDefinition = serde_yaml_ng::from_str(yaml)?;
let mut workflow: WorkflowDefinition = serde_yaml_ng::from_str(yaml)?;
// Normalize legacy transitions into `next` arrays
normalize_all_transitions(&mut workflow);
// Validate structure
workflow.validate()?;
@@ -249,6 +475,19 @@ pub fn parse_workflow_file(path: &std::path::Path) -> ParseResult<WorkflowDefini
parse_workflow_yaml(&contents)
}
/// Normalize all tasks in a workflow definition, converting legacy fields to `next`.
fn normalize_all_transitions(workflow: &mut WorkflowDefinition) {
for task in &mut workflow.tasks {
task.normalize_transitions();
// Recursively normalize sub-tasks (parallel)
if let Some(ref mut sub_tasks) = task.tasks {
for sub in sub_tasks {
sub.normalize_transitions();
}
}
}
}
/// Validate workflow structure and references
fn validate_workflow_structure(workflow: &WorkflowDefinition) -> ParseResult<()> {
// Collect all task names
@@ -294,30 +533,12 @@ fn validate_task(task: &Task, task_names: &std::collections::HashSet<&str>) -> P
}
}
// Validate transitions reference existing tasks
for transition in [
&task.on_success,
&task.on_failure,
&task.on_complete,
&task.on_timeout,
]
.iter()
.filter_map(|t| t.as_ref())
{
if !task_names.contains(transition.as_str()) {
// Validate all transition targets reference existing tasks
for target in task.all_transition_targets() {
if !task_names.contains(target) {
return Err(ParseError::InvalidTaskReference(format!(
"Task '{}' references non-existent task '{}'",
task.name, transition
)));
}
}
// Validate decision branches
for branch in &task.decision {
if !task_names.contains(branch.next.as_str()) {
return Err(ParseError::InvalidTaskReference(format!(
"Task '{}' decision branch references non-existent task '{}'",
task.name, branch.next
task.name, target
)));
}
}
@@ -352,8 +573,12 @@ pub fn workflow_to_json(workflow: &WorkflowDefinition) -> Result<JsonValue, serd
mod tests {
use super::*;
// -----------------------------------------------------------------------
// Legacy format tests (backward compatibility)
// -----------------------------------------------------------------------
#[test]
fn test_parse_simple_workflow() {
fn test_parse_simple_workflow_legacy() {
let yaml = r#"
ref: test.simple_workflow
label: Simple Workflow
@@ -371,15 +596,26 @@ tasks:
"#;
let result = parse_workflow_yaml(yaml);
assert!(result.is_ok());
assert!(result.is_ok(), "Parse failed: {:?}", result.err());
let workflow = result.unwrap();
assert_eq!(workflow.tasks.len(), 2);
assert_eq!(workflow.tasks[0].name, "task1");
// Legacy on_success should have been normalized into `next`
assert!(workflow.tasks[0].on_success.is_none());
assert_eq!(workflow.tasks[0].next.len(), 1);
assert_eq!(
workflow.tasks[0].next[0].when.as_deref(),
Some("{{ succeeded() }}")
);
assert_eq!(
workflow.tasks[0].next[0].r#do,
Some(vec!["task2".to_string()])
);
}
#[test]
fn test_cycles_now_allowed() {
// After Orquesta-style refactoring, cycles are now supported
fn test_cycles_now_allowed_legacy() {
let yaml = r#"
ref: test.circular
label: Circular Workflow (Now Allowed)
@@ -403,7 +639,7 @@ tasks:
}
#[test]
fn test_invalid_task_reference() {
fn test_invalid_task_reference_legacy() {
let yaml = r#"
ref: test.invalid_ref
label: Invalid Reference
@@ -418,12 +654,12 @@ tasks:
assert!(result.is_err());
match result {
Err(ParseError::InvalidTaskReference(_)) => (),
_ => panic!("Expected InvalidTaskReference error"),
other => panic!("Expected InvalidTaskReference error, got: {:?}", other),
}
}
#[test]
fn test_parallel_task() {
fn test_parallel_task_legacy() {
let yaml = r#"
ref: test.parallel
label: Parallel Workflow
@@ -442,12 +678,357 @@ tasks:
"#;
let result = parse_workflow_yaml(yaml);
assert!(result.is_ok());
assert!(result.is_ok(), "Parse failed: {:?}", result.err());
let workflow = result.unwrap();
assert_eq!(workflow.tasks[0].r#type, TaskType::Parallel);
assert_eq!(workflow.tasks[0].tasks.as_ref().unwrap().len(), 2);
// Legacy on_success converted to next
assert_eq!(workflow.tasks[0].next.len(), 1);
}
// -----------------------------------------------------------------------
// New format tests (Orquesta-style `next`)
// -----------------------------------------------------------------------
#[test]
fn test_parse_next_format_simple() {
    let yaml = r#"
ref: test.next_simple
label: Next Format Workflow
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    input:
      message: "Hello"
    next:
      - when: "{{ succeeded() }}"
        do:
          - task2
  - name: task2
    action: core.echo
    input:
      message: "World"
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Parse failed: {:?}", parsed.err());
    let workflow = parsed.unwrap();
    assert_eq!(workflow.tasks.len(), 2);

    let first = &workflow.tasks[0];
    assert_eq!(first.next.len(), 1);
    let transition = &first.next[0];
    assert_eq!(transition.when.as_deref(), Some("{{ succeeded() }}"));
    assert_eq!(transition.r#do, Some(vec!["task2".to_string()]));
}
#[test]
fn test_parse_next_format_multiple_transitions() {
    let yaml = r#"
ref: test.next_multi
label: Multi-Transition Workflow
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    next:
      - when: "{{ succeeded() }}"
        publish:
          - msg: "task1 done"
          - result_val: "{{ result() }}"
        do:
          - log
          - task3
      - when: "{{ failed() }}"
        publish:
          - msg: "task1 failed"
        do:
          - log
          - error_handler
  - name: task3
    action: core.complete
  - name: log
    action: core.log
  - name: error_handler
    action: core.handle_error
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Parse failed: {:?}", parsed.err());
    let workflow = parsed.unwrap();
    let transitions = &workflow.tasks[0].next;
    assert_eq!(transitions.len(), 2);

    // First transition: succeeded, two publishes, fans out to two tasks.
    let on_success = &transitions[0];
    assert_eq!(on_success.when.as_deref(), Some("{{ succeeded() }}"));
    assert_eq!(on_success.publish.len(), 2);
    assert_eq!(
        on_success.r#do,
        Some(vec!["log".to_string(), "task3".to_string()])
    );

    // Second transition: failed, one publish, routes to the error handler.
    let on_failure = &transitions[1];
    assert_eq!(on_failure.when.as_deref(), Some("{{ failed() }}"));
    assert_eq!(on_failure.publish.len(), 1);
    assert_eq!(
        on_failure.r#do,
        Some(vec!["log".to_string(), "error_handler".to_string()])
    );
}
#[test]
fn test_parse_next_format_publish_only() {
    let yaml = r#"
ref: test.publish_only
label: Publish Only Workflow
version: 1.0.0
tasks:
  - name: compute
    action: math.add
    next:
      - when: "{{ succeeded() }}"
        publish:
          - result: "{{ result() }}"
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Parse failed: {:?}", parsed.err());
    let workflow = parsed.unwrap();
    let compute = &workflow.tasks[0];
    assert_eq!(compute.next.len(), 1);

    // A transition may publish without scheduling any follow-up tasks.
    let transition = &compute.next[0];
    assert!(transition.r#do.is_none());
    assert_eq!(transition.publish.len(), 1);
}
#[test]
fn test_parse_next_format_unconditional() {
    let yaml = r#"
ref: test.unconditional
label: Unconditional Transition
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    next:
      - do:
          - task2
  - name: task2
    action: core.echo
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Parse failed: {:?}", parsed.err());
    let workflow = parsed.unwrap();
    assert_eq!(workflow.tasks[0].next.len(), 1);

    // Omitting `when` yields an unconditional transition.
    let transition = &workflow.tasks[0].next[0];
    assert!(transition.when.is_none());
    assert_eq!(transition.r#do, Some(vec!["task2".to_string()]));
}
#[test]
fn test_next_takes_precedence_over_legacy() {
    // When both `next` and legacy fields are present, `next` wins.
    let yaml = r#"
ref: test.precedence
label: Precedence Test
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    on_success: task2
    next:
      - when: "{{ succeeded() }}"
        do:
          - task3
  - name: task2
    action: core.echo
  - name: task3
    action: core.echo
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Parse failed: {:?}", parsed.err());
    let workflow = parsed.unwrap();
    let task1 = &workflow.tasks[0];

    // Only the explicit `next` entry survives; the legacy field is dropped.
    assert_eq!(task1.next.len(), 1);
    assert_eq!(task1.next[0].r#do, Some(vec!["task3".to_string()]));
    assert!(task1.on_success.is_none());
}
#[test]
fn test_invalid_task_reference_in_next() {
    let yaml = r#"
ref: test.invalid_next_ref
label: Invalid Next Ref
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    next:
      - when: "{{ succeeded() }}"
        do:
          - nonexistent_task
"#;
    // A `do` target that names no task must fail with InvalidTaskReference,
    // and the error message must name the offending target.
    match parse_workflow_yaml(yaml) {
        Err(ParseError::InvalidTaskReference(msg)) => {
            assert!(msg.contains("nonexistent_task"));
        }
        other => panic!("Expected InvalidTaskReference error, got: {:?}", other),
    }
}
#[test]
fn test_cycles_allowed_in_next_format() {
    // task1 → task2 → task1: mutually-referencing tasks must still parse.
    let yaml = r#"
ref: test.cycle_next
label: Cycle with Next
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    next:
      - when: "{{ succeeded() }}"
        do:
          - task2
  - name: task2
    action: core.echo
    next:
      - when: "{{ succeeded() }}"
        do:
          - task1
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Cycles should be allowed");
}
#[test]
fn test_legacy_all_transition_types() {
    let yaml = r#"
ref: test.all_legacy
label: All Legacy Types
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    on_success: task_s
    on_failure: task_f
    on_complete: task_c
    on_timeout: task_t
  - name: task_s
    action: core.echo
  - name: task_f
    action: core.echo
  - name: task_c
    action: core.echo
  - name: task_t
    action: core.echo
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Parse failed: {:?}", parsed.err());
    let workflow = parsed.unwrap();
    let task1 = &workflow.tasks[0];

    // Every legacy field folds into `next`, leaving the flat fields empty.
    assert_eq!(task1.next.len(), 4);
    assert!(task1.on_success.is_none());
    assert!(task1.on_failure.is_none());
    assert!(task1.on_complete.is_none());
    assert!(task1.on_timeout.is_none());

    // Conversion order and conditions: success, failure,
    // complete (unconditional), timeout.
    let expectations = [
        (Some("{{ succeeded() }}"), "task_s"),
        (Some("{{ failed() }}"), "task_f"),
        (None, "task_c"),
        (Some("{{ timed_out() }}"), "task_t"),
    ];
    for (transition, (when, target)) in task1.next.iter().zip(expectations) {
        assert_eq!(transition.when.as_deref(), when);
        assert_eq!(transition.r#do, Some(vec![target.to_string()]));
    }
}
#[test]
fn test_legacy_publish_attached_to_succeeded_transition() {
    let yaml = r#"
ref: test.legacy_publish
label: Legacy Publish
version: 1.0.0
tasks:
  - name: task1
    action: core.echo
    on_success: task2
    publish:
      - result: "done"
  - name: task2
    action: core.echo
"#;
    let parsed = parse_workflow_yaml(yaml);
    assert!(parsed.is_ok(), "Parse failed: {:?}", parsed.err());
    let workflow = parsed.unwrap();
    let task1 = &workflow.tasks[0];
    assert_eq!(task1.next.len(), 1);

    // Publish directives ride along on the synthesized succeeded transition…
    assert_eq!(task1.next[0].publish.len(), 1);
    // …and the task-level list is cleared after normalization.
    assert!(task1.publish.is_empty());
}
#[test]
fn test_legacy_decision_branches() {
// Legacy `decision` branches (when + next) should be normalized into `next`
// transitions, preserving branch order and conditions, with `decision` cleared.
let yaml = r#"
ref: test.decision
label: Decision Workflow
version: 1.0.0
tasks:
- name: check
action: core.check
decision:
- when: "{{ result().status == 'ok' }}"
next: success_task
- when: "{{ result().status == 'error' }}"
next: error_task
- name: success_task
action: core.echo
- name: error_task
action: core.echo
"#;
let result = parse_workflow_yaml(yaml);
assert!(result.is_ok(), "Parse failed: {:?}", result.err());
let workflow = result.unwrap();
let task = &workflow.tasks[0];
// Two branches → two transitions, in the same order.
assert_eq!(task.next.len(), 2);
assert!(task.decision.is_empty()); // cleared
assert_eq!(
task.next[0].when.as_deref(),
Some("{{ result().status == 'ok' }}")
);
assert_eq!(task.next[0].r#do, Some(vec!["success_task".to_string()]));
}
// -----------------------------------------------------------------------
// Existing tests
// -----------------------------------------------------------------------
#[test]
fn test_with_items() {
let yaml = r#"
@@ -471,27 +1052,98 @@ tasks:
}
#[test]
fn test_retry_config() {
fn test_json_roundtrip() {
let yaml = r#"
ref: test.retry
label: Retry Workflow
ref: test.roundtrip
label: Roundtrip Test
version: 1.0.0
tasks:
- name: flaky_task
action: core.flaky
retry:
count: 5
delay: 10
backoff: exponential
max_delay: 60
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
publish:
- msg: "done"
do:
- task2
- name: task2
action: core.echo
"#;
let workflow = parse_workflow_yaml(yaml).unwrap();
let json = workflow_to_json(&workflow).unwrap();
// Verify the JSON has the `next` array
let tasks = json.get("tasks").unwrap().as_array().unwrap();
let task1_next = tasks[0].get("next").unwrap().as_array().unwrap();
assert_eq!(task1_next.len(), 1);
assert_eq!(
task1_next[0].get("when").unwrap().as_str().unwrap(),
"{{ succeeded() }}"
);
// Verify legacy fields are absent
assert!(tasks[0].get("on_success").is_none());
}
#[test]
fn test_workflow_with_join() {
let yaml = r#"
ref: test.join
label: Join Workflow
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- task3
- name: task2
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- task3
- name: task3
join: 2
action: core.echo
"#;
let result = parse_workflow_yaml(yaml);
assert!(result.is_ok());
assert!(result.is_ok(), "Parse failed: {:?}", result.err());
let workflow = result.unwrap();
let retry = workflow.tasks[0].retry.as_ref().unwrap();
assert_eq!(retry.count, 5);
assert_eq!(retry.delay, 10);
assert_eq!(retry.backoff, BackoffStrategy::Exponential);
assert_eq!(workflow.tasks[2].join, Some(2));
}
#[test]
fn test_multiple_do_targets() {
// A single transition may fan out to several tasks: the `do` list keeps
// all targets, in order, on one parsed transition.
let yaml = r#"
ref: test.multi_do
label: Multiple Do Targets
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- task2
- task3
- name: task2
action: core.echo
- name: task3
action: core.echo
"#;
let result = parse_workflow_yaml(yaml);
assert!(result.is_ok(), "Parse failed: {:?}", result.err());
let workflow = result.unwrap();
let task1 = &workflow.tasks[0];
// One transition carrying both targets — not one transition per target.
assert_eq!(task1.next.len(), 1);
assert_eq!(
task1.next[0].r#do,
Some(vec!["task2".to_string(), "task3".to_string()])
);
}
}

View File

@@ -254,24 +254,11 @@ impl WorkflowValidator {
let mut graph = HashMap::new();
for task in &workflow.tasks {
let mut transitions = Vec::new();
if let Some(ref next) = task.on_success {
transitions.push(next.clone());
}
if let Some(ref next) = task.on_failure {
transitions.push(next.clone());
}
if let Some(ref next) = task.on_complete {
transitions.push(next.clone());
}
if let Some(ref next) = task.on_timeout {
transitions.push(next.clone());
}
for branch in &task.decision {
transitions.push(branch.next.clone());
}
let transitions: Vec<String> = task
.all_transition_targets()
.into_iter()
.map(|s| s.to_string())
.collect();
graph.insert(task.name.clone(), transitions);
}
@@ -284,21 +271,8 @@ impl WorkflowValidator {
let mut has_predecessor = HashSet::new();
for task in &workflow.tasks {
if let Some(ref next) = task.on_success {
has_predecessor.insert(next.clone());
}
if let Some(ref next) = task.on_failure {
has_predecessor.insert(next.clone());
}
if let Some(ref next) = task.on_complete {
has_predecessor.insert(next.clone());
}
if let Some(ref next) = task.on_timeout {
has_predecessor.insert(next.clone());
}
for branch in &task.decision {
has_predecessor.insert(branch.next.clone());
for target in task.all_transition_targets() {
has_predecessor.insert(target.to_string());
}
}

View File

@@ -3,6 +3,12 @@
//! This module builds executable task graphs from workflow definitions.
//! Workflows are directed graphs where tasks are nodes and transitions are edges.
//! Execution follows transitions from completed tasks, naturally supporting cycles.
//!
//! Uses the Orquesta-style `next` transition model where each task has an ordered
//! list of transitions. Each transition can specify:
//! - `when` — a condition expression (e.g., "{{ succeeded() }}", "{{ failed() }}")
//! - `publish` — variables to publish into the workflow context
//! - `do` — next tasks to invoke when the condition is met
use attune_common::workflow::{Task, TaskType, WorkflowDefinition};
use std::collections::{HashMap, HashSet};
@@ -51,7 +57,7 @@ pub struct TaskNode {
/// Input template
pub input: serde_json::Value,
/// Conditional execution
/// Conditional execution (task-level — controls whether the task runs at all)
pub when: Option<String>,
/// With-items iteration
@@ -63,17 +69,14 @@ pub struct TaskNode {
/// Concurrency limit
pub concurrency: Option<usize>,
/// Variable publishing directives
pub publish: Vec<String>,
/// Retry configuration
pub retry: Option<RetryConfig>,
/// Timeout in seconds
pub timeout: Option<u32>,
/// Transitions
pub transitions: TaskTransitions,
/// Orquesta-style transitions — evaluated in order after task completes
pub transitions: Vec<GraphTransition>,
/// Sub-tasks (for parallel tasks)
pub sub_tasks: Option<Vec<TaskNode>>,
@@ -85,22 +88,27 @@ pub struct TaskNode {
pub join: Option<usize>,
}
/// Task transitions
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct TaskTransitions {
pub on_success: Option<String>,
pub on_failure: Option<String>,
pub on_complete: Option<String>,
pub on_timeout: Option<String>,
pub decision: Vec<DecisionBranch>,
/// A single transition in the task graph (Orquesta-style).
///
/// Transitions are evaluated in order after a task completes. When `when` is
/// `None` the transition is unconditional.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct GraphTransition {
/// Condition expression (e.g., "{{ succeeded() }}", "{{ failed() }}")
pub when: Option<String>,
/// Variable publishing directives (key-value pairs)
pub publish: Vec<PublishVar>,
/// Next tasks to invoke when transition criteria is met
pub do_tasks: Vec<String>,
}
/// Decision branch
/// A single publish variable (key = expression)
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct DecisionBranch {
pub when: Option<String>,
pub next: String,
pub default: bool,
pub struct PublishVar {
pub name: String,
pub expression: String,
}
/// Retry configuration
@@ -121,8 +129,56 @@ pub enum BackoffStrategy {
Exponential,
}
// ---------------------------------------------------------------------------
// Transition classification helpers
// ---------------------------------------------------------------------------
/// Classify a `when` expression for quick matching.
///
/// Returned by [`GraphTransition::kind`]. The well-known status conditions
/// get dedicated variants so callers can `match` on them without re-parsing
/// the expression text.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransitionKind {
/// Matches `succeeded()` expressions
Succeeded,
/// Matches `failed()` expressions
Failed,
/// Matches `timed_out()` expressions
TimedOut,
/// No condition — fires on any completion
Always,
/// Custom condition expression
Custom,
}
impl GraphTransition {
    /// Classify this transition's `when` expression into a [`TransitionKind`].
    ///
    /// Matching is a substring heuristic: the expression is lowercased and
    /// stripped of all whitespace, then scanned for the well-known status
    /// calls in priority order (`succeeded()`, then `failed()`, then
    /// `timed_out()`). Any other non-empty condition classifies as
    /// [`TransitionKind::Custom`]; no condition at all is
    /// [`TransitionKind::Always`].
    pub fn kind(&self) -> TransitionKind {
        let expr = match self.when.as_deref() {
            None => return TransitionKind::Always,
            Some(e) => e,
        };
        // Normalize so variations like `Succeeded ( )` still match.
        let compact: String = expr
            .to_lowercase()
            .chars()
            .filter(|c| !c.is_whitespace())
            .collect();
        if compact.contains("succeeded()") {
            TransitionKind::Succeeded
        } else if compact.contains("failed()") {
            TransitionKind::Failed
        } else if compact.contains("timed_out()") {
            TransitionKind::TimedOut
        } else {
            TransitionKind::Custom
        }
    }
}
// ---------------------------------------------------------------------------
// TaskGraph implementation
// ---------------------------------------------------------------------------
impl TaskGraph {
/// Create a graph from a workflow definition
/// Create a graph from a workflow definition.
///
/// The workflow's tasks should already have their transitions normalized
/// (legacy `on_success`/`on_failure` fields merged into `next`) — this is
/// done automatically by [`attune_common::workflow::parse_workflow_yaml`].
pub fn from_workflow(workflow: &WorkflowDefinition) -> GraphResult<Self> {
let mut builder = GraphBuilder::new();
@@ -149,40 +205,93 @@ impl TaskGraph {
}
/// Get the next tasks to execute after a task completes.
/// Evaluates transitions based on task status.
///
/// Evaluates transitions in order based on the task's completion status.
/// A transition fires if its `when` condition matches the task status:
/// - `succeeded()` fires when `success == true`
/// - `failed()` fires when `success == false`
/// - No condition (always) fires regardless
/// - Custom conditions are included (actual expression evaluation
/// happens in the workflow coordinator with runtime context)
///
/// Multiple transitions can fire — they are independent of each other.
///
/// # Arguments
/// * `task_name` - The name of the task that completed
/// * `success` - Whether the task succeeded
///
/// # Returns
/// A vector of task names to schedule next
/// A vector of (task_name, publish_vars) tuples to schedule next
pub fn next_tasks(&self, task_name: &str, success: bool) -> Vec<String> {
let mut next = Vec::new();
if let Some(node) = self.nodes.get(task_name) {
// Check explicit transitions based on task status
if success {
if let Some(ref next_task) = node.transitions.on_success {
next.push(next_task.clone());
for transition in &node.transitions {
let should_fire = match transition.kind() {
TransitionKind::Succeeded => success,
TransitionKind::Failed => !success,
TransitionKind::TimedOut => !success, // timeout is a form of failure
TransitionKind::Always => true,
TransitionKind::Custom => true, // include custom — real eval in coordinator
};
if should_fire {
for target in &transition.do_tasks {
if !next.contains(target) {
next.push(target.clone());
}
}
}
} else if let Some(ref next_task) = node.transitions.on_failure {
next.push(next_task.clone());
}
// on_complete runs regardless of success/failure
if let Some(ref next_task) = node.transitions.on_complete {
next.push(next_task.clone());
}
// Decision branches (evaluated separately in coordinator with context)
// We don't evaluate them here since they need runtime context
}
next
}
/// Get the next tasks with full transition information.
///
/// Returns every transition of `task_name` whose classification matches the
/// completion status: `succeeded()` transitions fire on success, `failed()`
/// and `timed_out()` transitions fire on failure, and unconditional or
/// custom-condition transitions are always included (custom expressions are
/// evaluated later by the coordinator with runtime context). Unknown task
/// names yield an empty list.
pub fn matching_transitions(&self, task_name: &str, success: bool) -> Vec<&GraphTransition> {
    self.nodes
        .get(task_name)
        .map(|node| {
            node.transitions
                .iter()
                .filter(|transition| match transition.kind() {
                    TransitionKind::Succeeded => success,
                    TransitionKind::Failed | TransitionKind::TimedOut => !success,
                    TransitionKind::Always | TransitionKind::Custom => true,
                })
                .collect()
        })
        .unwrap_or_default()
}
/// Collect all unique target task names from all transitions of a given task.
///
/// Unknown task names yield an empty set.
pub fn all_transition_targets(&self, task_name: &str) -> HashSet<String> {
    self.nodes
        .get(task_name)
        .into_iter()
        .flat_map(|node| node.transitions.iter())
        .flat_map(|transition| transition.do_tasks.iter().cloned())
        .collect()
}
}
// ---------------------------------------------------------------------------
// Graph builder
// ---------------------------------------------------------------------------
/// Graph builder helper
struct GraphBuilder {
nodes: HashMap<String, TaskNode>,
@@ -198,14 +307,12 @@ impl GraphBuilder {
}
fn add_task(&mut self, task: &Task) -> GraphResult<()> {
let node = self.task_to_node(task)?;
let node = Self::task_to_node(task)?;
self.nodes.insert(task.name.clone(), node);
Ok(())
}
fn task_to_node(&self, task: &Task) -> GraphResult<TaskNode> {
let publish = extract_publish_vars(&task.publish);
fn task_to_node(task: &Task) -> GraphResult<TaskNode> {
let retry = task.retry.as_ref().map(|r| RetryConfig {
count: r.count,
delay: r.delay,
@@ -220,26 +327,21 @@ impl GraphBuilder {
on_error: r.on_error.clone(),
});
let transitions = TaskTransitions {
on_success: task.on_success.clone(),
on_failure: task.on_failure.clone(),
on_complete: task.on_complete.clone(),
on_timeout: task.on_timeout.clone(),
decision: task
.decision
.iter()
.map(|d| DecisionBranch {
when: d.when.clone(),
next: d.next.clone(),
default: d.default,
})
.collect(),
};
// Convert parser TaskTransition list → graph GraphTransition list
let transitions: Vec<GraphTransition> = task
.next
.iter()
.map(|t| GraphTransition {
when: t.when.clone(),
publish: extract_publish_vars(&t.publish),
do_tasks: t.r#do.clone().unwrap_or_default(),
})
.collect();
let sub_tasks = if let Some(ref tasks) = task.tasks {
let mut sub_nodes = Vec::new();
for subtask in tasks {
sub_nodes.push(self.task_to_node(subtask)?);
sub_nodes.push(Self::task_to_node(subtask)?);
}
Some(sub_nodes)
} else {
@@ -255,7 +357,6 @@ impl GraphBuilder {
with_items: task.with_items.clone(),
batch_size: task.batch_size,
concurrency: task.concurrency,
publish,
retry,
timeout: task.timeout,
transitions,
@@ -268,7 +369,6 @@ impl GraphBuilder {
fn build(mut self) -> GraphResult<Self> {
// Compute inbound edges from transitions
self.compute_inbound_edges()?;
Ok(self)
}
@@ -276,44 +376,27 @@ impl GraphBuilder {
let node_names: Vec<String> = self.nodes.keys().cloned().collect();
for node_name in &node_names {
if let Some(node) = self.nodes.get(node_name) {
// Collect all tasks this task can transition to
let successors = vec![
node.transitions.on_success.as_ref(),
node.transitions.on_failure.as_ref(),
node.transitions.on_complete.as_ref(),
node.transitions.on_timeout.as_ref(),
];
// Collect all successor task names from this node's transitions
let successors: Vec<String> = {
let node = self.nodes.get(node_name).unwrap();
node.transitions
.iter()
.flat_map(|t| t.do_tasks.iter().cloned())
.collect()
};
// For each successor, record this task as an inbound edge
for successor in successors.into_iter().flatten() {
if !self.nodes.contains_key(successor) {
return Err(GraphError::InvalidTaskReference(format!(
"Task '{}' references non-existent task '{}'",
node_name, successor
)));
}
self.inbound_edges
.entry(successor.clone())
.or_insert_with(HashSet::new)
.insert(node_name.clone());
for successor in &successors {
if !self.nodes.contains_key(successor) {
return Err(GraphError::InvalidTaskReference(format!(
"Task '{}' references non-existent task '{}'",
node_name, successor
)));
}
// Add decision branch edges
for branch in &node.transitions.decision {
if !self.nodes.contains_key(&branch.next) {
return Err(GraphError::InvalidTaskReference(format!(
"Task '{}' decision references non-existent task '{}'",
node_name, branch.next
)));
}
self.inbound_edges
.entry(branch.next.clone())
.or_insert_with(HashSet::new)
.insert(node_name.clone());
}
self.inbound_edges
.entry(successor.clone())
.or_default()
.insert(node_name.clone());
}
}
@@ -350,7 +433,7 @@ impl From<GraphBuilder> for TaskGraph {
for source in inbound {
outbound_edges
.entry(source.clone())
.or_insert_with(HashSet::new)
.or_default()
.insert(task.clone());
}
}
@@ -364,24 +447,40 @@ impl From<GraphBuilder> for TaskGraph {
}
}
/// Extract variable names from publish directives
fn extract_publish_vars(publish: &[attune_common::workflow::PublishDirective]) -> Vec<String> {
// ---------------------------------------------------------------------------
// Publish variable extraction
// ---------------------------------------------------------------------------
/// Extract publish variable names and expressions from parser publish directives.
fn extract_publish_vars(publish: &[attune_common::workflow::PublishDirective]) -> Vec<PublishVar> {
use attune_common::workflow::PublishDirective;
let mut vars = Vec::new();
for directive in publish {
match directive {
PublishDirective::Simple(map) => {
vars.extend(map.keys().cloned());
for (key, value) in map {
vars.push(PublishVar {
name: key.clone(),
expression: value.clone(),
});
}
}
PublishDirective::Key(key) => {
vars.push(key.clone());
vars.push(PublishVar {
name: key.clone(),
expression: "{{ result() }}".to_string(),
});
}
}
}
vars
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
@@ -396,10 +495,16 @@ version: 1.0.0
tasks:
- name: task1
action: core.echo
on_success: task2
next:
- when: "{{ succeeded() }}"
do:
- task2
- name: task2
action: core.echo
on_success: task3
next:
- when: "{{ succeeded() }}"
do:
- task3
- name: task3
action: core.echo
"#;
@@ -422,7 +527,7 @@ tasks:
assert_eq!(graph.inbound_edges["task3"].len(), 1);
assert!(graph.inbound_edges["task3"].contains("task2"));
// Check transitions
// Check transitions via next_tasks
let next = graph.next_tasks("task1", true);
assert_eq!(next.len(), 1);
assert_eq!(next[0], "task2");
@@ -433,40 +538,11 @@ tasks:
}
#[test]
fn test_parallel_entry_points() {
fn test_simple_sequential_graph_legacy() {
// Legacy format should still work (parser normalizes to `next`)
let yaml = r#"
ref: test.parallel_start
label: Parallel Start
version: 1.0.0
tasks:
- name: task1
action: core.echo
on_success: final
- name: task2
action: core.echo
on_success: final
- name: final
action: core.complete
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
assert_eq!(graph.entry_points.len(), 2);
assert!(graph.entry_points.contains(&"task1".to_string()));
assert!(graph.entry_points.contains(&"task2".to_string()));
// final task should have both as inbound edges
assert_eq!(graph.inbound_edges["final"].len(), 2);
assert!(graph.inbound_edges["final"].contains("task1"));
assert!(graph.inbound_edges["final"].contains("task2"));
}
#[test]
fn test_transitions() {
let yaml = r#"
ref: test.transitions
label: Transition Test
ref: test.sequential_legacy
label: Sequential Workflow (Legacy)
version: 1.0.0
tasks:
- name: task1
@@ -482,18 +558,155 @@ tasks:
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
// Test next_tasks follows transitions
assert_eq!(graph.nodes.len(), 3);
assert_eq!(graph.entry_points.len(), 1);
let next = graph.next_tasks("task1", true);
assert_eq!(next, vec!["task2"]);
let next = graph.next_tasks("task2", true);
assert_eq!(next, vec!["task3"]);
}
// task3 has no transitions
let next = graph.next_tasks("task3", true);
#[test]
fn test_parallel_entry_points() {
// task1 and task2 have no inbound transitions, so both are entry points;
// final_task is targeted by both and must record both inbound edges.
let yaml = r#"
ref: test.parallel_start
label: Parallel Start
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- final_task
- name: task2
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- final_task
- name: final_task
action: core.complete
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
assert_eq!(graph.entry_points.len(), 2);
assert!(graph.entry_points.contains(&"task1".to_string()));
assert!(graph.entry_points.contains(&"task2".to_string()));
// final_task should have both as inbound edges
assert_eq!(graph.inbound_edges["final_task"].len(), 2);
assert!(graph.inbound_edges["final_task"].contains("task1"));
assert!(graph.inbound_edges["final_task"].contains("task2"));
}
#[test]
fn test_transitions_success_and_failure() {
// A succeeded() transition fires only on success and a failed() transition
// only on failure; a task with no transitions yields no successors.
let yaml = r#"
ref: test.transitions
label: Transition Test
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- task2
- when: "{{ failed() }}"
do:
- error_handler
- name: task2
action: core.echo
- name: error_handler
action: core.handle_error
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
// On success, should go to task2
let next = graph.next_tasks("task1", true);
assert_eq!(next, vec!["task2"]);
// On failure, should go to error_handler
let next = graph.next_tasks("task1", false);
assert_eq!(next, vec!["error_handler"]);
// task2 has no transitions
let next = graph.next_tasks("task2", true);
assert!(next.is_empty());
}
#[test]
fn test_multiple_do_targets() {
// One transition fanning out to two tasks: next_tasks returns both targets,
// and matching_transitions exposes the transition's publish directives.
let yaml = r#"
ref: test.multi_do
label: Multi Do Targets
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
publish:
- msg: "task1 done"
do:
- log
- task2
- name: task2
action: core.echo
- name: log
action: core.log
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
let next = graph.next_tasks("task1", true);
assert_eq!(next.len(), 2);
assert!(next.contains(&"log".to_string()));
assert!(next.contains(&"task2".to_string()));
// Check publish vars
// The single firing transition carries the name/expression pair from YAML.
let transitions = graph.matching_transitions("task1", true);
assert_eq!(transitions.len(), 1);
assert_eq!(transitions[0].publish.len(), 1);
assert_eq!(transitions[0].publish[0].name, "msg");
assert_eq!(transitions[0].publish[0].expression, "task1 done");
}
#[test]
fn test_unconditional_transition() {
// A transition without a `when` condition is classified as Always and
// must fire regardless of whether the task succeeded or failed.
let yaml = r#"
ref: test.unconditional
label: Unconditional
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- do:
- task2
- name: task2
action: core.echo
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
// Unconditional fires on both success and failure
let next = graph.next_tasks("task1", true);
assert_eq!(next, vec!["task2"]);
let next = graph.next_tasks("task1", false);
assert_eq!(next, vec!["task2"]);
}
#[test]
fn test_cycle_support() {
let yaml = r#"
@@ -503,8 +716,13 @@ version: 1.0.0
tasks:
- name: check
action: core.check
on_success: process
on_failure: check
next:
- when: "{{ succeeded() }}"
do:
- process
- when: "{{ failed() }}"
do:
- check
- name: process
action: core.process
"#;
@@ -513,13 +731,12 @@ tasks:
// Should not error on cycles
let graph = TaskGraph::from_workflow(&workflow).unwrap();
// Note: check has a self-reference (check -> check on failure)
// check has a self-reference (check -> check on failure)
// So it has an inbound edge and is not an entry point
// process also has an inbound edge (check -> process on success)
// Therefore, there are no entry points in this workflow
assert_eq!(graph.entry_points.len(), 0);
// check can transition to itself on failure (cycle)
// check transitions to itself on failure (cycle)
let next = graph.next_tasks("check", false);
assert_eq!(next, vec!["check"]);
@@ -537,18 +754,24 @@ version: 1.0.0
tasks:
- name: task1
action: core.echo
on_success: final
next:
- when: "{{ succeeded() }}"
do:
- final_task
- name: task2
action: core.echo
on_success: final
- name: final
next:
- when: "{{ succeeded() }}"
do:
- final_task
- name: final_task
action: core.complete
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
let inbound = graph.get_inbound_tasks("final");
let inbound = graph.get_inbound_tasks("final_task");
assert_eq!(inbound.len(), 2);
assert!(inbound.contains(&"task1".to_string()));
assert!(inbound.contains(&"task2".to_string()));
@@ -556,4 +779,156 @@ tasks:
let inbound = graph.get_inbound_tasks("task1");
assert_eq!(inbound.len(), 0);
}
#[test]
fn test_transition_kind_classification() {
    // (when expression, expected classification) — one case per variant.
    let cases: Vec<(Option<&str>, TransitionKind)> = vec![
        (Some("{{ succeeded() }}"), TransitionKind::Succeeded),
        (Some("{{ failed() }}"), TransitionKind::Failed),
        (Some("{{ timed_out() }}"), TransitionKind::TimedOut),
        (None, TransitionKind::Always),
        (Some("{{ result().status == 'ok' }}"), TransitionKind::Custom),
    ];
    for (when, expected) in cases {
        let transition = GraphTransition {
            when: when.map(str::to_string),
            publish: vec![],
            do_tasks: vec!["t".to_string()],
        };
        assert_eq!(transition.kind(), expected, "when = {:?}", when);
    }
}
#[test]
fn test_publish_extraction() {
// Publish directives on a transition are converted into PublishVar entries
// on the corresponding graph transition, one per key.
let yaml = r#"
ref: test.publish
label: Publish Test
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
publish:
- result_val: "{{ result() }}"
- msg: "done"
do:
- task2
- name: task2
action: core.echo
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
let task1 = graph.get_task("task1").unwrap();
assert_eq!(task1.transitions.len(), 1);
assert_eq!(task1.transitions[0].publish.len(), 2);
// Note: HashMap ordering is not guaranteed, so just check both exist
let publish_names: Vec<&str> = task1.transitions[0]
.publish
.iter()
.map(|p| p.name.as_str())
.collect();
assert!(publish_names.contains(&"result_val"));
assert!(publish_names.contains(&"msg"));
}
#[test]
fn test_all_transition_targets() {
// all_transition_targets should union the `do` targets of every transition
// of the task, regardless of each transition's condition.
let yaml = r#"
ref: test.all_targets
label: All Targets Test
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- task2
- task3
- when: "{{ failed() }}"
do:
- error_handler
- name: task2
action: core.echo
- name: task3
action: core.echo
- name: error_handler
action: core.handle_error
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
let targets = graph.all_transition_targets("task1");
// Two success targets plus one failure target — all three collected.
assert_eq!(targets.len(), 3);
assert!(targets.contains("task2"));
assert!(targets.contains("task3"));
assert!(targets.contains("error_handler"));
}
#[test]
fn test_mixed_success_failure_and_always() {
// Transitions are independent: on any completion, the unconditional one
// fires together with whichever status-conditioned transition matches.
let yaml = r#"
ref: test.mixed
label: Mixed Transitions
version: 1.0.0
tasks:
- name: task1
action: core.echo
next:
- when: "{{ succeeded() }}"
do:
- success_task
- when: "{{ failed() }}"
do:
- failure_task
- do:
- always_task
- name: success_task
action: core.echo
- name: failure_task
action: core.echo
- name: always_task
action: core.echo
"#;
let workflow = workflow::parse_workflow_yaml(yaml).unwrap();
let graph = TaskGraph::from_workflow(&workflow).unwrap();
// On success: succeeded + always fire
let next = graph.next_tasks("task1", true);
assert_eq!(next.len(), 2);
assert!(next.contains(&"success_task".to_string()));
assert!(next.contains(&"always_task".to_string()));
// On failure: failed + always fire
let next = graph.next_tasks("task1", false);
assert_eq!(next.len(), 2);
assert!(next.contains(&"failure_task".to_string()));
assert!(next.contains(&"always_task".to_string()));
}
}

View File

@@ -53,7 +53,7 @@ pub use coordinator::{
WorkflowCoordinator, WorkflowExecutionHandle, WorkflowExecutionResult, WorkflowExecutionState,
WorkflowExecutionStatus,
};
pub use graph::{GraphError, GraphResult, TaskGraph, TaskNode, TaskTransitions};
pub use graph::{GraphError, GraphResult, GraphTransition, TaskGraph, TaskNode};
pub use task_executor::{
TaskExecutionError, TaskExecutionResult, TaskExecutionStatus, TaskExecutor,
};

View File

@@ -132,10 +132,22 @@ impl TaskExecutor {
if let Some(ref output) = result.output {
context.set_task_result(&task.name, output.clone());
// Publish variables
if !task.publish.is_empty() {
if let Err(e) = context.publish_from_result(output, &task.publish, None) {
warn!("Failed to publish variables for task {}: {}", task.name, e);
// Publish variables from matching transitions
let success = matches!(result.status, TaskExecutionStatus::Success);
for transition in &task.transitions {
let should_fire = match transition.kind() {
super::graph::TransitionKind::Succeeded => success,
super::graph::TransitionKind::Failed => !success,
super::graph::TransitionKind::TimedOut => !success,
super::graph::TransitionKind::Always => true,
super::graph::TransitionKind::Custom => true,
};
if should_fire && !transition.publish.is_empty() {
let var_names: Vec<String> =
transition.publish.iter().map(|p| p.name.clone()).collect();
if let Err(e) = context.publish_from_result(output, &var_names, None) {
warn!("Failed to publish variables for task {}: {}", task.name, e);
}
}
}
}

View File

@@ -165,7 +165,17 @@ impl ActionExecutor {
}
}
// Otherwise, parse action_ref and query by pack.ref + action.ref
// Fallback: look up by the full qualified action ref directly
let action = sqlx::query_as::<_, Action>("SELECT * FROM action WHERE ref = $1")
.bind(&execution.action_ref)
.fetch_optional(&self.pool)
.await?;
if let Some(action) = action {
return Ok(action);
}
// Final fallback: parse action_ref as "pack.action" and query by pack ref
let parts: Vec<&str> = execution.action_ref.split('.').collect();
if parts.len() != 2 {
return Err(Error::validation(format!(
@@ -175,9 +185,8 @@ impl ActionExecutor {
}
let pack_ref = parts[0];
let action_ref = parts[1];
// Query action by pack ref and action ref
// Query action by pack ref and full action ref
let action = sqlx::query_as::<_, Action>(
r#"
SELECT a.*
@@ -187,7 +196,7 @@ impl ActionExecutor {
"#,
)
.bind(pack_ref)
.bind(action_ref)
.bind(&execution.action_ref)
.fetch_optional(&self.pool)
.await?
.ok_or_else(|| Error::not_found("Action", "ref", execution.action_ref.clone()))?;
@@ -368,9 +377,40 @@ impl ActionExecutor {
if action_file_path.exists() {
Some(action_file_path)
} else {
// Detailed diagnostics to help track down missing action files
let pack_dir_exists = pack_dir.exists();
let actions_dir = pack_dir.join("actions");
let actions_dir_exists = actions_dir.exists();
let actions_dir_contents: Vec<String> = if actions_dir_exists {
std::fs::read_dir(&actions_dir)
.map(|entries| {
entries
.filter_map(|e| e.ok())
.map(|e| e.file_name().to_string_lossy().to_string())
.collect()
})
.unwrap_or_default()
} else {
vec![]
};
warn!(
"Action file not found at {:?} for action {}",
action_file_path, action.r#ref
"Action file not found for action '{}': \
expected_path={}, \
packs_base_dir={}, \
pack_ref={}, \
entrypoint={}, \
pack_dir_exists={}, \
actions_dir_exists={}, \
actions_dir_contents={:?}",
action.r#ref,
action_file_path.display(),
self.packs_base_dir.display(),
action.pack_ref,
entry_point,
pack_dir_exists,
actions_dir_exists,
actions_dir_contents,
);
None
}
@@ -567,9 +607,7 @@ impl ActionExecutor {
warn!(
"Execution {} failed without ExecutionResult - {}: {}",
execution_id,
"early/catastrophic failure",
err_msg
execution_id, "early/catastrophic failure", err_msg
);
// Check if stderr log exists and is non-empty from artifact storage