working out the worker/execution interface

This commit is contained in:
2026-02-08 12:55:33 -06:00
parent c62f41669d
commit a74e13fa0b
108 changed files with 21162 additions and 674 deletions

View File

@@ -17,7 +17,6 @@ path = "src/main.rs"
[dependencies]
# Internal dependencies
attune-common = { path = "../common" }
attune-worker = { path = "../worker" }
# Async runtime
tokio = { workspace = true }

View File

@@ -17,6 +17,10 @@ pub struct CreateExecutionRequest {
/// Execution parameters/configuration
#[schema(value_type = Object, example = json!({"channel": "#alerts", "message": "Manual test"}))]
pub parameters: Option<JsonValue>,
/// Environment variables for this execution
#[schema(value_type = Object, example = json!({"DEBUG": "true", "LOG_LEVEL": "info"}))]
pub env_vars: Option<JsonValue>,
}
/// Response DTO for execution information

View File

@@ -336,10 +336,455 @@ pub struct PackWorkflowValidationResponse {
pub errors: std::collections::HashMap<String, Vec<String>>,
}
/// Request DTO for downloading packs
/// Request DTO for downloading packs
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct DownloadPacksRequest {
    /// List of pack sources (git URLs, HTTP URLs, or registry refs)
    #[validate(length(min = 1))]
    #[schema(example = json!(["https://github.com/attune/pack-slack.git", "aws@2.0.0"]))]
    pub packs: Vec<String>,
    /// Destination directory for downloaded packs
    // NOTE(review): the download handler currently installs into a system
    // temp directory; confirm this field is (or should be) honored.
    #[validate(length(min = 1))]
    #[schema(example = "/tmp/attune-packs")]
    pub destination_dir: String,
    /// Pack registry URL for resolving references
    #[schema(example = "https://registry.attune.io/index.json")]
    pub registry_url: Option<String>,
    /// Git reference (branch, tag, or commit) for git sources
    #[schema(example = "v1.0.0")]
    pub ref_spec: Option<String>,
    /// Download timeout in seconds (defaults to 300)
    #[serde(default = "default_download_timeout")]
    #[schema(example = 300)]
    pub timeout: u64,
    /// Verify SSL certificates (defaults to true)
    #[serde(default = "default_true")]
    #[schema(example = true)]
    pub verify_ssl: bool,
}
/// Response DTO for download packs operation
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct DownloadPacksResponse {
    /// Successfully downloaded packs
    pub downloaded_packs: Vec<DownloadedPack>,
    /// Failed pack downloads
    pub failed_packs: Vec<FailedPack>,
    /// Total number of packs requested
    pub total_count: usize,
    /// Number of successful downloads
    pub success_count: usize,
    /// Number of failed downloads
    pub failure_count: usize,
}
/// Information about a downloaded pack
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct DownloadedPack {
    /// Original source string as given in the request
    pub source: String,
    /// Source type (git, http, registry)
    pub source_type: String,
    /// Local path to downloaded pack
    pub pack_path: String,
    /// Pack reference from pack.yaml
    pub pack_ref: String,
    /// Pack version from pack.yaml
    pub pack_version: String,
    /// Git commit hash (for git sources)
    // NOTE(review): currently always None in the download handler — confirm
    // whether it should be populated for git sources.
    pub git_commit: Option<String>,
    /// Directory checksum
    pub checksum: Option<String>,
}
/// Information about a failed pack download
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct FailedPack {
    /// Pack source that failed
    pub source: String,
    /// Error message
    pub error: String,
}
/// Request DTO for getting pack dependencies
/// Request DTO for getting pack dependencies
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct GetPackDependenciesRequest {
    /// List of pack directory paths to analyze
    #[validate(length(min = 1))]
    #[schema(example = json!(["/tmp/attune-packs/slack"]))]
    pub pack_paths: Vec<String>,
    /// Skip pack.yaml validation
    // NOTE(review): not currently referenced by the dependencies handler —
    // confirm whether validation skipping is still planned.
    #[serde(default)]
    #[schema(example = false)]
    pub skip_validation: bool,
}
/// Response DTO for get pack dependencies operation
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct GetPackDependenciesResponse {
    /// All dependencies found
    pub dependencies: Vec<PackDependency>,
    /// Runtime requirements by pack (keyed by pack ref)
    pub runtime_requirements: std::collections::HashMap<String, RuntimeRequirements>,
    /// Dependencies not yet installed (subset of `dependencies`)
    pub missing_dependencies: Vec<PackDependency>,
    /// Packs that were analyzed
    pub analyzed_packs: Vec<AnalyzedPack>,
    /// Errors encountered during analysis
    pub errors: Vec<DependencyError>,
}
/// Pack dependency information
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct PackDependency {
    /// Pack reference
    pub pack_ref: String,
    /// Version specification (e.g. "2.0.0"; "*" when unspecified)
    pub version_spec: String,
    /// Pack that requires this dependency
    pub required_by: String,
    /// Whether dependency is already installed
    pub already_installed: bool,
}
/// Runtime requirements for a pack
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct RuntimeRequirements {
    /// Pack reference
    pub pack_ref: String,
    /// Python requirements (None when pack.yaml declares no `python` key)
    pub python: Option<PythonRequirements>,
    /// Node.js requirements (None when pack.yaml declares no `nodejs` key)
    pub nodejs: Option<NodeJsRequirements>,
}
/// Python runtime requirements
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct PythonRequirements {
    /// Python version requirement
    pub version: Option<String>,
    /// Path to requirements.txt (None when the file does not exist)
    pub requirements_file: Option<String>,
}
/// Node.js runtime requirements
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct NodeJsRequirements {
    /// Node.js version requirement
    pub version: Option<String>,
    /// Path to package.json (None when the file does not exist)
    pub package_file: Option<String>,
}
/// Information about an analyzed pack
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct AnalyzedPack {
    /// Pack reference
    pub pack_ref: String,
    /// Pack directory path
    pub pack_path: String,
    /// Whether pack has dependencies
    pub has_dependencies: bool,
    /// Number of dependencies
    pub dependency_count: usize,
}
/// Dependency analysis error
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct DependencyError {
    /// Pack path where error occurred
    pub pack_path: String,
    /// Error message
    pub error: String,
}
/// Request DTO for building pack environments
/// Request DTO for building pack environments
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct BuildPackEnvsRequest {
    /// List of pack directory paths
    #[validate(length(min = 1))]
    #[schema(example = json!(["/tmp/attune-packs/slack"]))]
    pub pack_paths: Vec<String>,
    /// Base directory for permanent pack storage
    // NOTE(review): not used by the current detection-mode handler — confirm.
    #[schema(example = "/opt/attune/packs")]
    pub packs_base_dir: Option<String>,
    /// Python version to use (defaults to "3.11")
    // NOTE(review): the handler probes the system `python3` instead — confirm.
    #[serde(default = "default_python_version")]
    #[schema(example = "3.11")]
    pub python_version: String,
    /// Node.js version to use (defaults to "20")
    // NOTE(review): the handler probes the system `node` instead — confirm.
    #[serde(default = "default_nodejs_version")]
    #[schema(example = "20")]
    pub nodejs_version: String,
    /// Skip building Python environments
    #[serde(default)]
    #[schema(example = false)]
    pub skip_python: bool,
    /// Skip building Node.js environments
    #[serde(default)]
    #[schema(example = false)]
    pub skip_nodejs: bool,
    /// Force rebuild of existing environments
    #[serde(default)]
    #[schema(example = false)]
    pub force_rebuild: bool,
    /// Timeout in seconds for building each environment (defaults to 600)
    #[serde(default = "default_build_timeout")]
    #[schema(example = 600)]
    pub timeout: u64,
}
/// Response DTO for build pack environments operation
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct BuildPackEnvsResponse {
    /// Successfully built environments
    pub built_environments: Vec<BuiltEnvironment>,
    /// Failed environment builds
    pub failed_environments: Vec<FailedEnvironment>,
    /// Summary statistics
    pub summary: BuildSummary,
}
/// Information about a built environment
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct BuiltEnvironment {
    /// Pack reference
    pub pack_ref: String,
    /// Pack directory path
    pub pack_path: String,
    /// Built environments
    pub environments: Environments,
    /// Build duration in milliseconds
    pub duration_ms: u64,
}
/// Environment details
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct Environments {
    /// Python environment
    pub python: Option<PythonEnvironment>,
    /// Node.js environment
    pub nodejs: Option<NodeJsEnvironment>,
}
/// Python environment details
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct PythonEnvironment {
    /// Path to virtualenv
    pub virtualenv_path: String,
    /// Whether requirements were installed
    pub requirements_installed: bool,
    /// Number of packages installed
    // NOTE(review): currently always 0 (detection mode) — confirm.
    pub package_count: usize,
    /// Python version used
    pub python_version: String,
}
/// Node.js environment details
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct NodeJsEnvironment {
    /// Path to node_modules
    pub node_modules_path: String,
    /// Whether dependencies were installed
    pub dependencies_installed: bool,
    /// Number of packages installed
    // NOTE(review): currently always 0 (detection mode) — confirm.
    pub package_count: usize,
    /// Node.js version used
    pub nodejs_version: String,
}
/// Failed environment build
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct FailedEnvironment {
    /// Pack reference
    pub pack_ref: String,
    /// Pack directory path
    pub pack_path: String,
    /// Runtime that failed ("python", "nodejs", or "unknown")
    pub runtime: String,
    /// Error message
    pub error: String,
}
/// Build summary statistics
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct BuildSummary {
    /// Total packs processed
    pub total_packs: usize,
    /// Successfully built
    pub success_count: usize,
    /// Failed builds
    pub failure_count: usize,
    /// Python environments built
    pub python_envs_built: usize,
    /// Node.js environments built
    pub nodejs_envs_built: usize,
    /// Total duration in milliseconds
    pub total_duration_ms: u64,
}
/// Request DTO for registering multiple packs
/// Request DTO for registering multiple packs
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct RegisterPacksRequest {
    /// List of pack directory paths to register
    #[validate(length(min = 1))]
    #[schema(example = json!(["/tmp/attune-packs/slack"]))]
    pub pack_paths: Vec<String>,
    /// Base directory for permanent storage
    #[schema(example = "/opt/attune/packs")]
    pub packs_base_dir: Option<String>,
    /// Skip schema validation
    #[serde(default)]
    #[schema(example = false)]
    pub skip_validation: bool,
    /// Skip running pack tests
    #[serde(default)]
    #[schema(example = false)]
    pub skip_tests: bool,
    /// Force registration (replace if exists)
    #[serde(default)]
    #[schema(example = false)]
    pub force: bool,
}
/// Response DTO for register packs operation
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct RegisterPacksResponse {
    /// Successfully registered packs
    pub registered_packs: Vec<RegisteredPack>,
    /// Failed pack registrations
    pub failed_packs: Vec<FailedPackRegistration>,
    /// Summary statistics
    pub summary: RegistrationSummary,
}
/// Information about a registered pack
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct RegisteredPack {
    /// Pack reference
    pub pack_ref: String,
    /// Pack database ID
    pub pack_id: i64,
    /// Pack version
    pub pack_version: String,
    /// Permanent storage path
    pub storage_path: String,
    /// Registered components by type
    // NOTE(review): currently reported as zeros — component counting is not
    // yet implemented in the batch handler.
    pub components_registered: ComponentCounts,
    /// Test results
    // NOTE(review): currently always None in the batch handler — confirm.
    pub test_result: Option<TestResult>,
    /// Validation results
    pub validation_results: ValidationResults,
}
/// Component counts
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ComponentCounts {
    /// Number of actions
    pub actions: usize,
    /// Number of sensors
    pub sensors: usize,
    /// Number of triggers
    pub triggers: usize,
    /// Number of rules
    pub rules: usize,
    /// Number of workflows
    pub workflows: usize,
    /// Number of policies
    pub policies: usize,
}
/// Test result
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct TestResult {
    /// Test status
    pub status: String,
    /// Total number of tests
    pub total_tests: usize,
    /// Number passed
    pub passed: usize,
    /// Number failed
    pub failed: usize,
}
/// Validation results
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ValidationResults {
    /// Whether validation passed
    pub valid: bool,
    /// Validation errors
    pub errors: Vec<String>,
}
/// Failed pack registration
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct FailedPackRegistration {
    /// Pack reference ("unknown" when registration failed before parsing)
    pub pack_ref: String,
    /// Pack path
    pub pack_path: String,
    /// Error message
    pub error: String,
    /// Error stage
    pub error_stage: String,
}
/// Registration summary
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct RegistrationSummary {
    /// Total packs processed
    pub total_packs: usize,
    /// Successfully registered
    pub success_count: usize,
    /// Failed registrations
    pub failure_count: usize,
    /// Total components registered
    pub total_components: usize,
    /// Duration in milliseconds
    pub duration_ms: u64,
}
/// serde `default` helper: an empty JSON object (`{}`).
fn default_empty_object() -> JsonValue {
    serde_json::json!({})
}
/// Default download timeout in seconds for `DownloadPacksRequest::timeout`.
const DEFAULT_DOWNLOAD_TIMEOUT_SECS: u64 = 300;
/// Default per-environment build timeout in seconds for `BuildPackEnvsRequest::timeout`.
const DEFAULT_BUILD_TIMEOUT_SECS: u64 = 600;

fn default_download_timeout() -> u64 {
    DEFAULT_DOWNLOAD_TIMEOUT_SECS
}

fn default_build_timeout() -> u64 {
    DEFAULT_BUILD_TIMEOUT_SECS
}

/// serde `default` helper: default Python runtime version.
fn default_python_version() -> String {
    String::from("3.11")
}

/// serde `default` helper: default Node.js runtime version.
fn default_nodejs_version() -> String {
    String::from("20")
}

/// serde `default` helper for boolean fields that default to `true`.
fn default_true() -> bool {
    true
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -69,6 +69,10 @@ pub async fn create_execution(
.parameters
.as_ref()
.and_then(|p| serde_json::from_value(p.clone()).ok()),
env_vars: request
.env_vars
.as_ref()
.and_then(|e| serde_json::from_value(e.clone()).ok()),
parent: None,
enforcement: None,
executor: None,

View File

@@ -23,9 +23,11 @@ use crate::{
dto::{
common::{PaginatedResponse, PaginationParams},
pack::{
CreatePackRequest, InstallPackRequest, PackInstallResponse, PackResponse, PackSummary,
BuildPackEnvsRequest, BuildPackEnvsResponse, CreatePackRequest, DownloadPacksRequest,
DownloadPacksResponse, GetPackDependenciesRequest, GetPackDependenciesResponse,
InstallPackRequest, PackInstallResponse, PackResponse, PackSummary,
PackWorkflowSyncResponse, PackWorkflowValidationResponse, RegisterPackRequest,
UpdatePackRequest, WorkflowSyncResult,
RegisterPacksRequest, RegisterPacksResponse, UpdatePackRequest, WorkflowSyncResult,
},
ApiResponse, SuccessResponse,
},
@@ -307,7 +309,7 @@ async fn execute_and_store_pack_tests(
pack_version: &str,
trigger_type: &str,
) -> Result<attune_common::models::pack_test::PackTestResult, ApiError> {
use attune_worker::{TestConfig, TestExecutor};
use attune_common::test_executor::{TestConfig, TestExecutor};
use serde_yaml_ng;
// Load pack.yaml from filesystem
@@ -1036,7 +1038,7 @@ pub async fn test_pack(
RequireAuth(_user): RequireAuth,
Path(pack_ref): Path<String>,
) -> ApiResult<impl IntoResponse> {
use attune_worker::{TestConfig, TestExecutor};
use attune_common::test_executor::{TestConfig, TestExecutor};
use serde_yaml_ng;
// Get pack from database
@@ -1202,11 +1204,547 @@ pub async fn get_pack_latest_test(
/// Note: Nested resource routes (e.g., /packs/:ref/actions) are defined
/// in their respective modules (actions.rs, triggers.rs, rules.rs) to avoid
/// route conflicts and maintain proper separation of concerns.
/// Download packs from various sources
///
/// Accepts git URLs, HTTP URLs, and registry references, installs each into a
/// temp directory, and reports per-source success/failure.
///
/// Fix: a pack that installs successfully but whose `pack.yaml` is missing,
/// unreadable, or unparseable was previously dropped from BOTH result lists,
/// so `total_count` could exceed `success_count + failure_count`. Such packs
/// are now reported in `failed_packs`.
#[utoipa::path(
    post,
    path = "/api/v1/packs/download",
    tag = "packs",
    request_body = DownloadPacksRequest,
    responses(
        (status = 200, description = "Packs downloaded", body = ApiResponse<DownloadPacksResponse>),
        (status = 400, description = "Invalid request"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn download_packs(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Json(request): Json<DownloadPacksRequest>,
) -> ApiResult<Json<ApiResponse<DownloadPacksResponse>>> {
    use attune_common::pack_registry::PackInstaller;
    // NOTE(review): packs are installed under a system temp directory;
    // request.destination_dir is currently not honored — confirm intended.
    let temp_dir = std::env::temp_dir().join("attune-pack-downloads");
    std::fs::create_dir_all(&temp_dir)
        .map_err(|e| ApiError::InternalServerError(format!("Failed to create temp dir: {}", e)))?;
    // Only pass registry config when the registry integration is enabled.
    let registry_config = if state.config.pack_registry.enabled {
        Some(state.config.pack_registry.clone())
    } else {
        None
    };
    let installer = PackInstaller::new(&temp_dir, registry_config)
        .await
        .map_err(|e| ApiError::InternalServerError(format!("Failed to create installer: {}", e)))?;
    let mut downloaded = Vec::new();
    let mut failed = Vec::new();
    for source in &request.packs {
        let pack_source = detect_pack_source(source, request.ref_spec.as_deref())?;
        let source_type_str = get_source_type(&pack_source).to_string();
        match installer.install(pack_source).await {
            Ok(installed) => {
                // Read pack metadata from pack.yaml; any failure here is
                // recorded as a failed download rather than silently dropped.
                let pack_yaml_path = installed.path.join("pack.yaml");
                let parsed = std::fs::read_to_string(&pack_yaml_path)
                    .map_err(|e| format!("Failed to read pack.yaml: {}", e))
                    .and_then(|content| {
                        serde_yaml_ng::from_str::<serde_yaml_ng::Value>(&content)
                            .map_err(|e| format!("Failed to parse pack.yaml: {}", e))
                    });
                match parsed {
                    Ok(yaml) => {
                        let pack_ref = yaml
                            .get("ref")
                            .and_then(|v| v.as_str())
                            .unwrap_or("unknown")
                            .to_string();
                        let pack_version = yaml
                            .get("version")
                            .and_then(|v| v.as_str())
                            .unwrap_or("0.0.0")
                            .to_string();
                        downloaded.push(crate::dto::pack::DownloadedPack {
                            source: source.clone(),
                            source_type: source_type_str.clone(),
                            pack_path: installed.path.to_string_lossy().to_string(),
                            pack_ref,
                            pack_version,
                            git_commit: None, // TODO: populate for git sources
                            checksum: installed.checksum,
                        });
                    }
                    Err(error) => {
                        failed.push(crate::dto::pack::FailedPack {
                            source: source.clone(),
                            error,
                        });
                    }
                }
            }
            Err(e) => {
                failed.push(crate::dto::pack::FailedPack {
                    source: source.clone(),
                    error: e.to_string(),
                });
            }
        }
    }
    // Every requested pack is now accounted for:
    // total_count == success_count + failure_count.
    let response = DownloadPacksResponse {
        success_count: downloaded.len(),
        failure_count: failed.len(),
        total_count: request.packs.len(),
        downloaded_packs: downloaded,
        failed_packs: failed,
    };
    Ok(Json(ApiResponse::new(response)))
}
/// Get pack dependencies
///
/// Parses each pack directory's `pack.yaml`, collects declared dependencies
/// (entries of the form `ref@version`, defaulting to `*` when no version is
/// given) and runtime requirements (Python / Node.js), and flags dependencies
/// whose ref is not present among the packs already installed in the database.
/// Per-pack parse failures are collected into `errors`; analysis continues
/// with the remaining packs.
#[utoipa::path(
    post,
    path = "/api/v1/packs/dependencies",
    tag = "packs",
    request_body = GetPackDependenciesRequest,
    responses(
        (status = 200, description = "Dependencies analyzed", body = ApiResponse<GetPackDependenciesResponse>),
        (status = 400, description = "Invalid request"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_pack_dependencies(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Json(request): Json<GetPackDependenciesRequest>,
) -> ApiResult<Json<ApiResponse<GetPackDependenciesResponse>>> {
    use attune_common::repositories::List;
    // NOTE(review): request.skip_validation is not referenced in this handler
    // — confirm whether validation skipping is still planned.
    let mut dependencies = Vec::new();
    let mut runtime_requirements = std::collections::HashMap::new();
    let mut analyzed_packs = Vec::new();
    let mut errors = Vec::new();
    // Get installed packs; their refs decide `already_installed` below.
    let installed_packs_list = PackRepository::list(&state.db).await?;
    let installed_refs: std::collections::HashSet<String> =
        installed_packs_list.into_iter().map(|p| p.r#ref).collect();
    for pack_path in &request.pack_paths {
        let pack_yaml_path = std::path::Path::new(pack_path).join("pack.yaml");
        if !pack_yaml_path.exists() {
            errors.push(crate::dto::pack::DependencyError {
                pack_path: pack_path.clone(),
                error: "pack.yaml not found".to_string(),
            });
            continue;
        }
        let content = match std::fs::read_to_string(&pack_yaml_path) {
            Ok(c) => c,
            Err(e) => {
                errors.push(crate::dto::pack::DependencyError {
                    pack_path: pack_path.clone(),
                    error: format!("Failed to read pack.yaml: {}", e),
                });
                continue;
            }
        };
        let yaml: serde_yaml_ng::Value = match serde_yaml_ng::from_str(&content) {
            Ok(y) => y,
            Err(e) => {
                errors.push(crate::dto::pack::DependencyError {
                    pack_path: pack_path.clone(),
                    error: format!("Failed to parse pack.yaml: {}", e),
                });
                continue;
            }
        };
        let pack_ref = yaml
            .get("ref")
            .and_then(|v| v.as_str())
            .unwrap_or("unknown")
            .to_string();
        // Extract dependencies: each entry is "ref" or "ref@version_spec".
        let mut dep_count = 0;
        if let Some(deps) = yaml.get("dependencies").and_then(|d| d.as_sequence()) {
            for dep in deps {
                if let Some(dep_str) = dep.as_str() {
                    // splitn(2, '@') keeps any extra '@' inside the version spec.
                    let parts: Vec<&str> = dep_str.splitn(2, '@').collect();
                    let dep_ref = parts[0].to_string();
                    let version_spec = parts.get(1).unwrap_or(&"*").to_string();
                    let already_installed = installed_refs.contains(&dep_ref);
                    dependencies.push(crate::dto::pack::PackDependency {
                        pack_ref: dep_ref.clone(),
                        version_spec: version_spec.clone(),
                        required_by: pack_ref.clone(),
                        already_installed,
                    });
                    dep_count += 1;
                }
            }
        }
        // Extract runtime requirements from the optional `python` / `nodejs`
        // keys; the requirements/package file path is only reported if present.
        let mut runtime_req = crate::dto::pack::RuntimeRequirements {
            pack_ref: pack_ref.clone(),
            python: None,
            nodejs: None,
        };
        if let Some(python_ver) = yaml.get("python").and_then(|v| v.as_str()) {
            let req_file = std::path::Path::new(pack_path).join("requirements.txt");
            runtime_req.python = Some(crate::dto::pack::PythonRequirements {
                version: Some(python_ver.to_string()),
                requirements_file: if req_file.exists() {
                    Some(req_file.to_string_lossy().to_string())
                } else {
                    None
                },
            });
        }
        if let Some(nodejs_ver) = yaml.get("nodejs").and_then(|v| v.as_str()) {
            let pkg_file = std::path::Path::new(pack_path).join("package.json");
            runtime_req.nodejs = Some(crate::dto::pack::NodeJsRequirements {
                version: Some(nodejs_ver.to_string()),
                package_file: if pkg_file.exists() {
                    Some(pkg_file.to_string_lossy().to_string())
                } else {
                    None
                },
            });
        }
        // Only record packs that declared at least one runtime requirement.
        if runtime_req.python.is_some() || runtime_req.nodejs.is_some() {
            runtime_requirements.insert(pack_ref.clone(), runtime_req);
        }
        analyzed_packs.push(crate::dto::pack::AnalyzedPack {
            pack_ref: pack_ref.clone(),
            pack_path: pack_path.clone(),
            has_dependencies: dep_count > 0,
            dependency_count: dep_count,
        });
    }
    // Missing = declared but not installed (subset of `dependencies`).
    let missing_dependencies: Vec<_> = dependencies
        .iter()
        .filter(|d| !d.already_installed)
        .cloned()
        .collect();
    let response = GetPackDependenciesResponse {
        dependencies,
        runtime_requirements,
        missing_dependencies,
        analyzed_packs,
        errors,
    };
    Ok(Json(ApiResponse::new(response)))
}
/// Build pack environments
///
/// Detection-mode implementation: verifies that the required runtimes
/// (`python3` / `node`) are available on the host and reports the expected
/// environment paths, but does not actually create virtualenvs or install
/// packages (see the "would be built here in production" log lines).
/// `python_envs_built` / `nodejs_envs_built` therefore count detected
/// environments, not real builds.
#[utoipa::path(
    post,
    path = "/api/v1/packs/build-envs",
    tag = "packs",
    request_body = BuildPackEnvsRequest,
    responses(
        (status = 200, description = "Environments built", body = ApiResponse<BuildPackEnvsResponse>),
        (status = 400, description = "Invalid request"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn build_pack_envs(
    State(_state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Json(request): Json<BuildPackEnvsRequest>,
) -> ApiResult<Json<ApiResponse<BuildPackEnvsResponse>>> {
    use std::path::Path;
    use std::process::Command;
    // NOTE(review): request.python_version, request.nodejs_version,
    // request.timeout and request.packs_base_dir are not used in detection
    // mode — confirm they get wired up when real builds are implemented.
    let start = std::time::Instant::now();
    let mut built_environments = Vec::new();
    let mut failed_environments = Vec::new();
    let mut python_envs_built = 0;
    let mut nodejs_envs_built = 0;
    for pack_path in &request.pack_paths {
        let pack_path_obj = Path::new(pack_path);
        let pack_start = std::time::Instant::now();
        // Read pack.yaml to get pack_ref and runtime requirements
        let pack_yaml_path = pack_path_obj.join("pack.yaml");
        if !pack_yaml_path.exists() {
            failed_environments.push(crate::dto::pack::FailedEnvironment {
                pack_ref: "unknown".to_string(),
                pack_path: pack_path.clone(),
                runtime: "unknown".to_string(),
                error: "pack.yaml not found".to_string(),
            });
            continue;
        }
        let content = match std::fs::read_to_string(&pack_yaml_path) {
            Ok(c) => c,
            Err(e) => {
                failed_environments.push(crate::dto::pack::FailedEnvironment {
                    pack_ref: "unknown".to_string(),
                    pack_path: pack_path.clone(),
                    runtime: "unknown".to_string(),
                    error: format!("Failed to read pack.yaml: {}", e),
                });
                continue;
            }
        };
        let yaml: serde_yaml_ng::Value = match serde_yaml_ng::from_str(&content) {
            Ok(y) => y,
            Err(e) => {
                failed_environments.push(crate::dto::pack::FailedEnvironment {
                    pack_ref: "unknown".to_string(),
                    pack_path: pack_path.clone(),
                    runtime: "unknown".to_string(),
                    error: format!("Failed to parse pack.yaml: {}", e),
                });
                continue;
            }
        };
        let pack_ref = yaml
            .get("ref")
            .and_then(|v| v.as_str())
            .unwrap_or("unknown")
            .to_string();
        let mut python_env = None;
        let mut nodejs_env = None;
        let mut has_error = false;
        // Check for Python environment (only when pack.yaml declares `python`
        // and a requirements.txt exists alongside it)
        if !request.skip_python {
            if let Some(_python_ver) = yaml.get("python").and_then(|v| v.as_str()) {
                let requirements_file = pack_path_obj.join("requirements.txt");
                if requirements_file.exists() {
                    // Check if Python is available
                    match Command::new("python3").arg("--version").output() {
                        Ok(output) if output.status.success() => {
                            let version_str = String::from_utf8_lossy(&output.stdout);
                            let venv_path = pack_path_obj.join("venv");
                            // Check if venv exists or if force_rebuild is set
                            if !venv_path.exists() || request.force_rebuild {
                                tracing::info!(
                                    pack_ref = %pack_ref,
                                    "Python environment would be built here in production"
                                );
                            }
                            // Report environment status (detection mode)
                            python_env = Some(crate::dto::pack::PythonEnvironment {
                                virtualenv_path: venv_path.to_string_lossy().to_string(),
                                requirements_installed: venv_path.exists(),
                                package_count: 0, // Would count from pip freeze in production
                                python_version: version_str.trim().to_string(),
                            });
                            python_envs_built += 1;
                        }
                        _ => {
                            failed_environments.push(crate::dto::pack::FailedEnvironment {
                                pack_ref: pack_ref.clone(),
                                pack_path: pack_path.clone(),
                                runtime: "python".to_string(),
                                error: "Python 3 not available in system".to_string(),
                            });
                            has_error = true;
                        }
                    }
                }
            }
        }
        // Check for Node.js environment
        // Note: a Python failure above sets has_error, which also skips the
        // Node.js check for this pack.
        if !has_error && !request.skip_nodejs {
            if let Some(_nodejs_ver) = yaml.get("nodejs").and_then(|v| v.as_str()) {
                let package_file = pack_path_obj.join("package.json");
                if package_file.exists() {
                    // Check if Node.js is available
                    match Command::new("node").arg("--version").output() {
                        Ok(output) if output.status.success() => {
                            let version_str = String::from_utf8_lossy(&output.stdout);
                            let node_modules = pack_path_obj.join("node_modules");
                            // Check if node_modules exists or if force_rebuild is set
                            if !node_modules.exists() || request.force_rebuild {
                                tracing::info!(
                                    pack_ref = %pack_ref,
                                    "Node.js environment would be built here in production"
                                );
                            }
                            // Report environment status (detection mode)
                            nodejs_env = Some(crate::dto::pack::NodeJsEnvironment {
                                node_modules_path: node_modules.to_string_lossy().to_string(),
                                dependencies_installed: node_modules.exists(),
                                package_count: 0, // Would count from package.json in production
                                nodejs_version: version_str.trim().to_string(),
                            });
                            nodejs_envs_built += 1;
                        }
                        _ => {
                            failed_environments.push(crate::dto::pack::FailedEnvironment {
                                pack_ref: pack_ref.clone(),
                                pack_path: pack_path.clone(),
                                runtime: "nodejs".to_string(),
                                error: "Node.js not available in system".to_string(),
                            });
                            has_error = true;
                        }
                    }
                }
            }
        }
        // A pack with no detected runtimes appears in neither list.
        if !has_error && (python_env.is_some() || nodejs_env.is_some()) {
            built_environments.push(crate::dto::pack::BuiltEnvironment {
                pack_ref,
                pack_path: pack_path.clone(),
                environments: crate::dto::pack::Environments {
                    python: python_env,
                    nodejs: nodejs_env,
                },
                duration_ms: pack_start.elapsed().as_millis() as u64,
            });
        }
    }
    let success_count = built_environments.len();
    let failure_count = failed_environments.len();
    let response = BuildPackEnvsResponse {
        built_environments,
        failed_environments,
        summary: crate::dto::pack::BuildSummary {
            total_packs: request.pack_paths.len(),
            success_count,
            failure_count,
            python_envs_built,
            nodejs_envs_built,
            total_duration_ms: start.elapsed().as_millis() as u64,
        },
    };
    Ok(Json(ApiResponse::new(response)))
}
/// Register multiple packs
///
/// Registers each pack path via `register_pack_internal` and aggregates
/// per-pack results. Fixes over the previous revision:
/// - a pack that registered successfully but could not be re-fetched from the
///   database was silently dropped from BOTH result lists; it is now reported
///   in `failed_packs` (stage "lookup") so counts always add up;
/// - summary counts are computed before the vectors are moved into the
///   response, removing two needless full-vector clones;
/// - `total_components` is summed from the per-pack component counts instead
///   of being hard-coded to 0 (currently still 0 until counting is wired up).
#[utoipa::path(
    post,
    path = "/api/v1/packs/register-batch",
    tag = "packs",
    request_body = RegisterPacksRequest,
    responses(
        (status = 200, description = "Packs registered", body = ApiResponse<RegisterPacksResponse>),
        (status = 400, description = "Invalid request"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn register_packs_batch(
    State(state): State<Arc<AppState>>,
    RequireAuth(user): RequireAuth,
    Json(request): Json<RegisterPacksRequest>,
) -> ApiResult<Json<ApiResponse<RegisterPacksResponse>>> {
    let start = std::time::Instant::now();
    let mut registered = Vec::new();
    let mut failed = Vec::new();
    for pack_path in &request.pack_paths {
        // Delegate to the existing single-pack registration path.
        match register_pack_internal(
            state.clone(),
            user.claims.sub.clone(),
            pack_path.clone(),
            request.force,
            request.skip_tests,
        )
        .await
        {
            Ok(pack_id) => match PackRepository::find_by_id(&state.db, pack_id).await {
                Ok(Some(pack)) => {
                    // Component counting is not yet implemented; report zeros.
                    registered.push(crate::dto::pack::RegisteredPack {
                        pack_ref: pack.r#ref.clone(),
                        pack_id,
                        pack_version: pack.version.clone(),
                        storage_path: format!("{}/{}", state.config.packs_base_dir, pack.r#ref),
                        components_registered: crate::dto::pack::ComponentCounts {
                            actions: 0,
                            sensors: 0,
                            triggers: 0,
                            rules: 0,
                            workflows: 0,
                            policies: 0,
                        },
                        test_result: None,
                        validation_results: crate::dto::pack::ValidationResults {
                            valid: true,
                            errors: Vec::new(),
                        },
                    });
                }
                // Registration succeeded but the pack could not be re-read;
                // report it instead of silently dropping it.
                Ok(None) => {
                    failed.push(crate::dto::pack::FailedPackRegistration {
                        pack_ref: "unknown".to_string(),
                        pack_path: pack_path.clone(),
                        error: format!("registered pack {} not found in database", pack_id),
                        error_stage: "lookup".to_string(),
                    });
                }
                Err(e) => {
                    failed.push(crate::dto::pack::FailedPackRegistration {
                        pack_ref: "unknown".to_string(),
                        pack_path: pack_path.clone(),
                        error: e.to_string(),
                        error_stage: "lookup".to_string(),
                    });
                }
            },
            Err(e) => {
                failed.push(crate::dto::pack::FailedPackRegistration {
                    pack_ref: "unknown".to_string(),
                    pack_path: pack_path.clone(),
                    error: e.to_string(),
                    error_stage: "registration".to_string(),
                });
            }
        }
    }
    // Sum component counts so the summary stays correct once counting is
    // implemented (all zeros today).
    let total_components = registered
        .iter()
        .map(|p| {
            let c = &p.components_registered;
            c.actions + c.sensors + c.triggers + c.rules + c.workflows + c.policies
        })
        .sum();
    let summary = crate::dto::pack::RegistrationSummary {
        total_packs: request.pack_paths.len(),
        success_count: registered.len(),
        failure_count: failed.len(),
        total_components,
        duration_ms: start.elapsed().as_millis() as u64,
    };
    let response = RegisterPacksResponse {
        registered_packs: registered,
        failed_packs: failed,
        summary,
    };
    Ok(Json(ApiResponse::new(response)))
}
pub fn routes() -> Router<Arc<AppState>> {
Router::new()
.route("/packs", get(list_packs).post(create_pack))
.route("/packs/register", axum::routing::post(register_pack))
.route(
"/packs/register-batch",
axum::routing::post(register_packs_batch),
)
.route("/packs/install", axum::routing::post(install_pack))
.route("/packs/download", axum::routing::post(download_packs))
.route(
"/packs/dependencies",
axum::routing::post(get_pack_dependencies),
)
.route("/packs/build-envs", axum::routing::post(build_pack_envs))
.route(
"/packs/{ref}",
get(get_pack).put(update_pack).delete(delete_pack),

View File

@@ -69,6 +69,7 @@ async fn create_test_execution(pool: &PgPool, action_id: i64) -> Result<Executio
action: Some(action_id),
action_ref: format!("action_{}", action_id),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,

View File

@@ -17,6 +17,7 @@ pub mod pack_registry;
pub mod repositories;
pub mod runtime_detection;
pub mod schema;
pub mod test_executor;
pub mod utils;
pub mod workflow;

View File

@@ -37,8 +37,132 @@ pub type JsonSchema = JsonValue;
pub mod enums {
use serde::{Deserialize, Serialize};
use sqlx::Type;
use std::fmt;
use std::str::FromStr;
use utoipa::ToSchema;
/// How parameters should be delivered to an action
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "lowercase")]
pub enum ParameterDelivery {
    /// Pass parameters via stdin (secure, recommended for most cases)
    Stdin,
    /// Pass parameters via temporary file (secure, best for large payloads)
    File,
}
impl ParameterDelivery {
    /// Canonical lowercase name, shared by `Display` and the SQL encoding.
    fn as_str(self) -> &'static str {
        match self {
            Self::Stdin => "stdin",
            Self::File => "file",
        }
    }
}
impl Default for ParameterDelivery {
    /// Stdin is the default delivery mechanism.
    fn default() -> Self {
        Self::Stdin
    }
}
impl fmt::Display for ParameterDelivery {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
impl FromStr for ParameterDelivery {
    type Err = String;
    /// Case-insensitive parse of "stdin" / "file".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let lowered = s.to_lowercase();
        match lowered.as_str() {
            "stdin" => Ok(Self::Stdin),
            "file" => Ok(Self::File),
            _ => Err(format!("Invalid parameter delivery method: {}", s)),
        }
    }
}
// Stored as TEXT in Postgres; round-trips through the string forms above.
impl sqlx::Type<sqlx::Postgres> for ParameterDelivery {
    fn type_info() -> sqlx::postgres::PgTypeInfo {
        <String as sqlx::Type<sqlx::Postgres>>::type_info()
    }
}
impl<'r> sqlx::Decode<'r, sqlx::Postgres> for ParameterDelivery {
    fn decode(value: sqlx::postgres::PgValueRef<'r>) -> Result<Self, sqlx::error::BoxDynError> {
        let text = <String as sqlx::Decode<sqlx::Postgres>>::decode(value)?;
        text.parse::<Self>().map_err(|e: String| e.into())
    }
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for ParameterDelivery {
    fn encode_by_ref(
        &self,
        buf: &mut sqlx::postgres::PgArgumentBuffer,
    ) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
        <String as sqlx::Encode<sqlx::Postgres>>::encode(self.as_str().to_owned(), buf)
    }
}
/// Format for parameter serialization
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "lowercase")]
pub enum ParameterFormat {
/// KEY='VALUE' format (one per line)
Dotenv,
/// JSON object
Json,
/// YAML format
Yaml,
}
impl Default for ParameterFormat {
fn default() -> Self {
Self::Json
}
}
impl fmt::Display for ParameterFormat {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Json => write!(f, "json"),
Self::Dotenv => write!(f, "dotenv"),
Self::Yaml => write!(f, "yaml"),
}
}
}
impl FromStr for ParameterFormat {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"json" => Ok(Self::Json),
"dotenv" => Ok(Self::Dotenv),
"yaml" => Ok(Self::Yaml),
_ => Err(format!("Invalid parameter format: {}", s)),
}
}
}
impl sqlx::Type<sqlx::Postgres> for ParameterFormat {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<String as sqlx::Type<sqlx::Postgres>>::type_info()
}
}
// Decode from a TEXT column: read the raw String, then parse it through the
// case-insensitive `FromStr` impl; unknown values become boxed decode errors.
impl<'r> sqlx::Decode<'r, sqlx::Postgres> for ParameterFormat {
    fn decode(value: sqlx::postgres::PgValueRef<'r>) -> Result<Self, sqlx::error::BoxDynError> {
        let s = <String as sqlx::Decode<sqlx::Postgres>>::decode(value)?;
        // FromStr's error type is String; box it into sqlx's error type.
        s.parse().map_err(|e: String| e.into())
    }
}
// Encode into a TEXT column by rendering through `Display` (lowercase name)
// and delegating the actual buffer write to String's encoder.
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for ParameterFormat {
    fn encode_by_ref(
        &self,
        buf: &mut sqlx::postgres::PgArgumentBuffer,
    ) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
        Ok(<String as sqlx::Encode<sqlx::Postgres>>::encode(self.to_string(), buf)?)
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Type, ToSchema)]
#[sqlx(type_name = "worker_type_enum", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
@@ -310,6 +434,10 @@ pub mod action {
pub is_workflow: bool,
pub workflow_def: Option<Id>,
pub is_adhoc: bool,
#[sqlx(default)]
pub parameter_delivery: ParameterDelivery,
#[sqlx(default)]
pub parameter_format: ParameterFormat,
pub created: DateTime<Utc>,
pub updated: DateTime<Utc>,
}
@@ -493,6 +621,11 @@ pub mod execution {
pub action_ref: String,
pub config: Option<JsonDict>,
/// Environment variables for this execution (string -> string mapping)
/// These are set as environment variables in the action's process.
/// Separate from parameters which are passed via stdin/file.
pub env_vars: Option<JsonDict>,
/// Parent execution ID (generic hierarchy for all execution types)
///
/// Used for:

View File

@@ -20,6 +20,7 @@ pub struct CreateExecutionInput {
pub action: Option<Id>,
pub action_ref: String,
pub config: Option<JsonDict>,
pub env_vars: Option<JsonDict>,
pub parent: Option<Id>,
pub enforcement: Option<Id>,
pub executor: Option<Id>,
@@ -54,7 +55,7 @@ impl FindById for ExecutionRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Execution>(
"SELECT id, action, action_ref, config, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution WHERE id = $1"
"SELECT id, action, action_ref, config, env_vars, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution WHERE id = $1"
).bind(id).fetch_optional(executor).await.map_err(Into::into)
}
}
@@ -66,7 +67,7 @@ impl List for ExecutionRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Execution>(
"SELECT id, action, action_ref, config, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution ORDER BY created DESC LIMIT 1000"
"SELECT id, action, action_ref, config, env_vars, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution ORDER BY created DESC LIMIT 1000"
).fetch_all(executor).await.map_err(Into::into)
}
}
@@ -79,8 +80,8 @@ impl Create for ExecutionRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Execution>(
"INSERT INTO execution (action, action_ref, config, parent, enforcement, executor, status, result, workflow_task) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING id, action, action_ref, config, parent, enforcement, executor, status, result, workflow_task, created, updated"
).bind(input.action).bind(&input.action_ref).bind(&input.config).bind(input.parent).bind(input.enforcement).bind(input.executor).bind(input.status).bind(&input.result).bind(sqlx::types::Json(&input.workflow_task)).fetch_one(executor).await.map_err(Into::into)
"INSERT INTO execution (action, action_ref, config, env_vars, parent, enforcement, executor, status, result, workflow_task) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING id, action, action_ref, config, env_vars, parent, enforcement, executor, status, result, workflow_task, created, updated"
).bind(input.action).bind(&input.action_ref).bind(&input.config).bind(&input.env_vars).bind(input.parent).bind(input.enforcement).bind(input.executor).bind(input.status).bind(&input.result).bind(sqlx::types::Json(&input.workflow_task)).fetch_one(executor).await.map_err(Into::into)
}
}
@@ -129,7 +130,7 @@ impl Update for ExecutionRepository {
}
query.push(", updated = NOW() WHERE id = ").push_bind(id);
query.push(" RETURNING id, action, action_ref, config, parent, enforcement, executor, status, result, workflow_task, created, updated");
query.push(" RETURNING id, action, action_ref, config, env_vars, parent, enforcement, executor, status, result, workflow_task, created, updated");
query
.build_query_as::<Execution>()
@@ -162,7 +163,7 @@ impl ExecutionRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Execution>(
"SELECT id, action, action_ref, config, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution WHERE status = $1 ORDER BY created DESC"
"SELECT id, action, action_ref, config, env_vars, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution WHERE status = $1 ORDER BY created DESC"
).bind(status).fetch_all(executor).await.map_err(Into::into)
}
@@ -174,7 +175,7 @@ impl ExecutionRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Execution>(
"SELECT id, action, action_ref, config, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution WHERE enforcement = $1 ORDER BY created DESC"
"SELECT id, action, action_ref, config, env_vars, parent, enforcement, executor, status, result, workflow_task, created, updated FROM execution WHERE enforcement = $1 ORDER BY created DESC"
).bind(enforcement_id).fetch_all(executor).await.map_err(Into::into)
}
}

View File

@@ -2,10 +2,8 @@
//!
//! Executes pack tests by running test runners and collecting results.
use attune_common::error::{Error, Result};
use attune_common::models::pack_test::{
PackTestResult, TestCaseResult, TestStatus, TestSuiteResult,
};
use crate::error::{Error, Result};
use crate::models::pack_test::{PackTestResult, TestCaseResult, TestStatus, TestSuiteResult};
use chrono::Utc;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

View File

@@ -37,6 +37,7 @@ async fn test_create_execution_basic() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: Some(json!({"param1": "value1"})),
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -69,6 +70,7 @@ async fn test_create_execution_without_action() {
action: None,
action_ref: action_ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -101,6 +103,7 @@ async fn test_create_execution_with_all_fields() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: Some(json!({"timeout": 300, "retry": true})),
env_vars: None,
parent: None,
enforcement: None,
executor: None, // Don't reference non-existent identity
@@ -135,6 +138,7 @@ async fn test_create_execution_with_parent() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -152,6 +156,7 @@ async fn test_create_execution_with_parent() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: Some(parent.id),
enforcement: None,
executor: None,
@@ -189,6 +194,7 @@ async fn test_find_execution_by_id() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -240,6 +246,7 @@ async fn test_list_executions() {
action: Some(action.id),
action_ref: format!("{}_{}", action.r#ref, i),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -284,6 +291,7 @@ async fn test_list_executions_ordered_by_created_desc() {
action: Some(action.id),
action_ref: format!("{}_{}", action.r#ref, i),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -333,6 +341,7 @@ async fn test_update_execution_status() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -376,6 +385,7 @@ async fn test_update_execution_result() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -420,6 +430,7 @@ async fn test_update_execution_executor() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -462,6 +473,7 @@ async fn test_update_execution_status_transitions() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -551,6 +563,7 @@ async fn test_update_execution_failed_status() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -594,6 +607,7 @@ async fn test_update_execution_no_changes() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -636,6 +650,7 @@ async fn test_delete_execution() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -700,6 +715,7 @@ async fn test_find_executions_by_status() {
action: Some(action.id),
action_ref: format!("{}_{}", action.r#ref, i),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -745,6 +761,7 @@ async fn test_find_executions_by_enforcement() {
action: Some(action.id),
action_ref: format!("{}_1", action.r#ref),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -762,6 +779,7 @@ async fn test_find_executions_by_enforcement() {
action: Some(action.id),
action_ref: format!("{}_{}", action.r#ref, i),
config: None,
env_vars: None,
parent: None,
enforcement: if i == 2 { None } else { None }, // Can't reference non-existent enforcement
executor: None,
@@ -804,6 +822,7 @@ async fn test_parent_child_execution_hierarchy() {
action: Some(action.id),
action_ref: format!("{}.parent", action.r#ref),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -823,6 +842,7 @@ async fn test_parent_child_execution_hierarchy() {
action: Some(action.id),
action_ref: format!("{}.child_{}", action.r#ref, i),
config: None,
env_vars: None,
parent: Some(parent.id),
enforcement: None,
executor: None,
@@ -865,6 +885,7 @@ async fn test_nested_execution_hierarchy() {
action: Some(action.id),
action_ref: format!("{}.grandparent", action.r#ref),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -882,6 +903,7 @@ async fn test_nested_execution_hierarchy() {
action: Some(action.id),
action_ref: format!("{}.parent", action.r#ref),
config: None,
env_vars: None,
parent: Some(grandparent.id),
enforcement: None,
executor: None,
@@ -899,6 +921,7 @@ async fn test_nested_execution_hierarchy() {
action: Some(action.id),
action_ref: format!("{}.child", action.r#ref),
config: None,
env_vars: None,
parent: Some(parent.id),
enforcement: None,
executor: None,
@@ -939,6 +962,7 @@ async fn test_execution_timestamps() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -1008,6 +1032,7 @@ async fn test_execution_config_json() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: Some(complex_config.clone()),
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -1039,6 +1064,7 @@ async fn test_execution_result_json() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,

View File

@@ -44,6 +44,7 @@ async fn test_create_inquiry_minimal() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -102,6 +103,7 @@ async fn test_create_inquiry_with_response_schema() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -158,6 +160,7 @@ async fn test_create_inquiry_with_timeout() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -210,6 +213,7 @@ async fn test_create_inquiry_with_assigned_user() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -296,6 +300,7 @@ async fn test_find_inquiry_by_id() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -355,6 +360,7 @@ async fn test_get_inquiry_by_id() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -422,6 +428,7 @@ async fn test_list_inquiries() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -481,6 +488,7 @@ async fn test_update_inquiry_status() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -535,6 +543,7 @@ async fn test_update_inquiry_status_transitions() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -618,6 +627,7 @@ async fn test_update_inquiry_response() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -674,6 +684,7 @@ async fn test_update_inquiry_with_response_and_status() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -730,6 +741,7 @@ async fn test_update_inquiry_assignment() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -795,6 +807,7 @@ async fn test_update_inquiry_no_changes() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -869,6 +882,7 @@ async fn test_delete_inquiry() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -926,6 +940,7 @@ async fn test_delete_execution_cascades_to_inquiries() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -991,6 +1006,7 @@ async fn test_find_inquiries_by_status() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -1068,6 +1084,7 @@ async fn test_find_inquiries_by_execution() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -1085,6 +1102,7 @@ async fn test_find_inquiries_by_execution() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -1147,6 +1165,7 @@ async fn test_inquiry_timestamps_auto_managed() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
@@ -1212,6 +1231,7 @@ async fn test_inquiry_complex_response_schema() {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,

View File

@@ -224,7 +224,8 @@ impl EnforcementProcessor {
action: Some(action_id),
action_ref: action_ref.clone(),
config: enforcement.config.clone(),
parent: None, // TODO: Handle workflow parent-child relationships
env_vars: None, // No custom env vars for rule-triggered executions
parent: None, // TODO: Handle workflow parent-child relationships
enforcement: Some(enforcement.id),
executor: None, // Will be assigned during scheduling
status: attune_common::models::enums::ExecutionStatus::Requested,

View File

@@ -194,6 +194,7 @@ impl ExecutionManager {
action: None,
action_ref: action_ref.clone(),
config: parent.config.clone(), // Pass parent config to child
env_vars: parent.env_vars.clone(), // Pass parent env vars to child
parent: Some(parent.id), // Link to parent execution
enforcement: parent.enforcement,
executor: None, // Will be assigned during scheduling

View File

@@ -18,11 +18,13 @@ use attune_common::{
FindById, FindByRef, Update,
},
};
use chrono::Utc;
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use sqlx::PgPool;
use std::sync::Arc;
use tracing::{debug, error, info};
use std::time::Duration;
use tracing::{debug, error, info, warn};
/// Payload for execution scheduled messages
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -40,6 +42,13 @@ pub struct ExecutionScheduler {
consumer: Arc<Consumer>,
}
/// Default heartbeat interval in seconds (should match worker config default)
const DEFAULT_HEARTBEAT_INTERVAL: u64 = 30;
/// Maximum age multiplier for heartbeat staleness check
/// Workers are considered stale if heartbeat is older than HEARTBEAT_INTERVAL * HEARTBEAT_STALENESS_MULTIPLIER
const HEARTBEAT_STALENESS_MULTIPLIER: u64 = 3;
impl ExecutionScheduler {
/// Create a new execution scheduler
pub fn new(pool: PgPool, publisher: Arc<Publisher>, consumer: Arc<Consumer>) -> Self {
@@ -196,6 +205,20 @@ impl ExecutionScheduler {
return Err(anyhow::anyhow!("No active workers available"));
}
// Filter by heartbeat freshness (only workers with recent heartbeats)
let fresh_workers: Vec<_> = active_workers
.into_iter()
.filter(|w| Self::is_worker_heartbeat_fresh(w))
.collect();
if fresh_workers.is_empty() {
warn!("No workers with fresh heartbeats available. All active workers have stale heartbeats.");
return Err(anyhow::anyhow!(
"No workers with fresh heartbeats available (heartbeat older than {} seconds)",
DEFAULT_HEARTBEAT_INTERVAL * HEARTBEAT_STALENESS_MULTIPLIER
));
}
// TODO: Implement intelligent worker selection:
// - Consider worker load/capacity
// - Consider worker affinity (same pack, same runtime)
@@ -203,7 +226,7 @@ impl ExecutionScheduler {
// - Round-robin or least-connections strategy
// For now, just select the first available worker
Ok(active_workers
Ok(fresh_workers
.into_iter()
.next()
.expect("Worker list should not be empty"))
@@ -253,6 +276,43 @@ impl ExecutionScheduler {
false
}
/// Check if a worker's heartbeat is fresh enough to schedule work
///
/// A worker is fresh when its last heartbeat is strictly younger than
/// `HEARTBEAT_STALENESS_MULTIPLIER * DEFAULT_HEARTBEAT_INTERVAL` seconds.
/// A worker with no recorded heartbeat is always treated as stale.
fn is_worker_heartbeat_fresh(worker: &attune_common::models::Worker) -> bool {
    let Some(last_heartbeat) = worker.last_heartbeat else {
        warn!(
            "Worker {} has no heartbeat recorded, considering stale",
            worker.name
        );
        return false;
    };

    let now = Utc::now();
    let age = now.signed_duration_since(last_heartbeat);
    let max_age = Duration::from_secs(DEFAULT_HEARTBEAT_INTERVAL * HEARTBEAT_STALENESS_MULTIPLIER);

    // A heartbeat timestamp slightly in the future (clock skew between the
    // scheduler and the worker host) yields a negative age, which fails
    // `to_std()`. Treat that as "just seen" (ZERO) rather than stale —
    // mapping it to Duration::MAX would wrongly reject healthy workers.
    //
    // The comparison is strict: a heartbeat exactly at the staleness
    // boundary counts as stale, matching the documented boundary semantics
    // in the test suite.
    let is_fresh = age.to_std().unwrap_or(Duration::ZERO) < max_age;

    if !is_fresh {
        warn!(
            "Worker {} heartbeat is stale: last seen {} seconds ago (max: {} seconds)",
            worker.name,
            age.num_seconds(),
            max_age.as_secs()
        );
    } else {
        debug!(
            "Worker {} heartbeat is fresh: last seen {} seconds ago",
            worker.name,
            age.num_seconds()
        );
    }

    is_fresh
}
/// Queue execution to a specific worker
async fn queue_to_worker(
publisher: &Publisher,
@@ -294,6 +354,86 @@ impl ExecutionScheduler {
#[cfg(test)]
mod tests {
use super::*;
use attune_common::models::{Worker, WorkerRole, WorkerStatus, WorkerType};
use chrono::{Duration as ChronoDuration, Utc};
/// Build a `Worker` fixture for heartbeat-freshness tests.
///
/// `heartbeat_offset_secs` is how many seconds in the past the worker's last
/// heartbeat was recorded. NOTE: 0 is a sentinel meaning "no heartbeat at
/// all" (`last_heartbeat = None`), not "heartbeat just now".
fn create_test_worker(name: &str, heartbeat_offset_secs: i64) -> Worker {
    let last_heartbeat = if heartbeat_offset_secs == 0 {
        None
    } else {
        Some(Utc::now() - ChronoDuration::seconds(heartbeat_offset_secs))
    };

    Worker {
        id: 1,
        name: name.to_string(),
        worker_type: WorkerType::Local,
        worker_role: WorkerRole::Action,
        runtime: None,
        host: Some("localhost".to_string()),
        port: Some(8080),
        status: Some(WorkerStatus::Active),
        // Minimal capabilities blob; contents are irrelevant to the
        // freshness check under test.
        capabilities: Some(serde_json::json!({
            "runtimes": ["shell", "python"]
        })),
        meta: None,
        last_heartbeat,
        created: Utc::now(),
        updated: Utc::now(),
    }
}
#[test]
fn test_heartbeat_freshness_with_recent_heartbeat() {
    // A heartbeat 30 seconds old sits well inside the 90-second window.
    let recent = create_test_worker("test-worker", 30);
    let fresh = ExecutionScheduler::is_worker_heartbeat_fresh(&recent);
    assert!(
        fresh,
        "Worker with 30s old heartbeat should be considered fresh"
    );
}
#[test]
fn test_heartbeat_freshness_with_stale_heartbeat() {
    // 100 seconds exceeds the 3 x 30s = 90s staleness limit.
    let stale = create_test_worker("test-worker", 100);
    let fresh = ExecutionScheduler::is_worker_heartbeat_fresh(&stale);
    assert!(
        !fresh,
        "Worker with 100s old heartbeat should be considered stale"
    );
}
#[test]
fn test_heartbeat_freshness_at_boundary() {
    // A heartbeat sitting right on the 90-second limit must count as stale.
    let boundary = create_test_worker("test-worker", 90);
    let fresh = ExecutionScheduler::is_worker_heartbeat_fresh(&boundary);
    assert!(
        !fresh,
        "Worker with 90s old heartbeat should be considered stale (at boundary)"
    );
}
#[test]
fn test_heartbeat_freshness_with_no_heartbeat() {
    // Offset 0 is the fixture's sentinel for "never sent a heartbeat".
    let silent = create_test_worker("test-worker", 0);
    let fresh = ExecutionScheduler::is_worker_heartbeat_fresh(&silent);
    assert!(
        !fresh,
        "Worker with no heartbeat should be considered stale"
    );
}
#[test]
fn test_heartbeat_freshness_with_very_recent() {
    // A 5-second-old heartbeat is far inside the freshness window.
    let just_seen = create_test_worker("test-worker", 5);
    let fresh = ExecutionScheduler::is_worker_heartbeat_fresh(&just_seen);
    assert!(
        fresh,
        "Worker with 5s old heartbeat should be considered fresh"
    );
}
#[test]
fn test_scheduler_creation() {
// This is a placeholder test

View File

@@ -113,6 +113,7 @@ async fn create_test_execution(
action: Some(action_id),
action_ref: action_ref.to_string(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,

View File

@@ -108,6 +108,7 @@ async fn create_test_execution(
action: Some(action_id),
action_ref: action_ref.to_string(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,

View File

@@ -250,6 +250,7 @@ impl SensorManager {
let mut child = Command::new(&sensor_script)
.env("ATTUNE_API_URL", &self.inner.api_url)
.env("ATTUNE_API_TOKEN", &token_response.token)
.env("ATTUNE_SENSOR_ID", &sensor.id.to_string())
.env("ATTUNE_SENSOR_REF", &sensor.r#ref)
.env("ATTUNE_SENSOR_TRIGGERS", &trigger_instances_json)
.env("ATTUNE_MQ_URL", &self.inner.mq_url)

View File

@@ -16,6 +16,7 @@ tokio = { workspace = true }
sqlx = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml_ng = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
anyhow = { workspace = true }
@@ -30,6 +31,6 @@ thiserror = { workspace = true }
aes-gcm = { workspace = true }
sha2 = { workspace = true }
base64 = { workspace = true }
tempfile = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }

View File

@@ -27,6 +27,7 @@ pub struct ActionExecutor {
max_stdout_bytes: usize,
max_stderr_bytes: usize,
packs_base_dir: PathBuf,
api_url: String,
}
impl ActionExecutor {
@@ -39,6 +40,7 @@ impl ActionExecutor {
max_stdout_bytes: usize,
max_stderr_bytes: usize,
packs_base_dir: PathBuf,
api_url: String,
) -> Self {
Self {
pool,
@@ -48,6 +50,7 @@ impl ActionExecutor {
max_stdout_bytes,
max_stderr_bytes,
packs_base_dir,
api_url,
}
}
@@ -100,7 +103,16 @@ impl ActionExecutor {
}
// Update execution with result
if result.is_success() {
let is_success = result.is_success();
debug!(
"Execution {} result: exit_code={}, error={:?}, is_success={}",
execution_id,
result.exit_code,
result.error,
is_success
);
if is_success {
self.handle_execution_success(execution_id, &result).await?;
} else {
self.handle_execution_failure(execution_id, Some(&result))
@@ -190,35 +202,63 @@ impl ActionExecutor {
let mut parameters = HashMap::new();
if let Some(config) = &execution.config {
info!("Execution config present: {:?}", config);
// Try to get parameters from config.parameters first
if let Some(params) = config.get("parameters") {
info!("Found config.parameters key");
if let JsonValue::Object(map) = params {
for (key, value) in map {
parameters.insert(key.clone(), value.clone());
}
}
} else if let JsonValue::Object(map) = config {
info!("No config.parameters key, treating entire config as parameters");
// If no parameters key, treat entire config as parameters
// (this handles rule action_params being placed at root level)
for (key, value) in map {
// Skip special keys that aren't action parameters
if key != "context" && key != "env" {
info!("Adding parameter: {} = {:?}", key, value);
parameters.insert(key.clone(), value.clone());
} else {
info!("Skipping special key: {}", key);
}
}
} else {
info!("Config is not an Object, cannot extract parameters");
}
} else {
info!("No execution config present");
}
// Prepare environment variables
let mut env = HashMap::new();
env.insert("ATTUNE_EXECUTION_ID".to_string(), execution.id.to_string());
env.insert(
"ATTUNE_ACTION_REF".to_string(),
execution.action_ref.clone(),
);
info!("Extracted {} parameters: {:?}", parameters.len(), parameters);
if let Some(action_id) = execution.action {
env.insert("ATTUNE_ACTION_ID".to_string(), action_id.to_string());
// Prepare standard environment variables
let mut env = HashMap::new();
// Standard execution context variables (see docs/QUICKREF-execution-environment.md)
env.insert("ATTUNE_EXEC_ID".to_string(), execution.id.to_string());
env.insert("ATTUNE_ACTION".to_string(), execution.action_ref.clone());
env.insert("ATTUNE_API_URL".to_string(), self.api_url.clone());
// TODO: Generate execution-scoped API token
// For now, set placeholder to maintain interface compatibility
env.insert("ATTUNE_API_TOKEN".to_string(), "".to_string());
// Add rule and trigger context if execution was triggered by enforcement
if let Some(enforcement_id) = execution.enforcement {
if let Ok(Some(enforcement)) = sqlx::query_as::<
_,
attune_common::models::event::Enforcement,
>("SELECT * FROM enforcement WHERE id = $1")
.bind(enforcement_id)
.fetch_optional(&self.pool)
.await
{
env.insert("ATTUNE_RULE".to_string(), enforcement.rule_ref);
env.insert("ATTUNE_TRIGGER".to_string(), enforcement.trigger_ref);
}
}
// Add context data as environment variables from config
@@ -341,6 +381,8 @@ impl ActionExecutor {
runtime_name,
max_stdout_bytes: self.max_stdout_bytes,
max_stderr_bytes: self.max_stderr_bytes,
parameter_delivery: action.parameter_delivery,
parameter_format: action.parameter_format,
};
Ok(context)
@@ -392,7 +434,10 @@ impl ActionExecutor {
execution_id: i64,
result: &ExecutionResult,
) -> Result<()> {
info!("Execution {} succeeded", execution_id);
info!(
"Execution {} succeeded (exit_code={}, duration={}ms)",
execution_id, result.exit_code, result.duration_ms
);
// Build comprehensive result with execution metadata
let exec_dir = self.artifact_manager.get_execution_dir(execution_id);
@@ -402,29 +447,15 @@ impl ActionExecutor {
"succeeded": true,
});
// Add log file paths if logs exist
// Include stdout content directly in result
if !result.stdout.is_empty() {
let stdout_path = exec_dir.join("stdout.log");
result_data["stdout_log"] = serde_json::json!(stdout_path.to_string_lossy());
// Include stdout preview (first 1000 chars)
let stdout_preview = if result.stdout.len() > 1000 {
format!("{}...", &result.stdout[..1000])
} else {
result.stdout.clone()
};
result_data["stdout"] = serde_json::json!(stdout_preview);
result_data["stdout"] = serde_json::json!(result.stdout);
}
if !result.stderr.is_empty() {
// Include stderr log path only if stderr is non-empty and non-whitespace
if !result.stderr.trim().is_empty() {
let stderr_path = exec_dir.join("stderr.log");
result_data["stderr_log"] = serde_json::json!(stderr_path.to_string_lossy());
// Include stderr preview (first 1000 chars)
let stderr_preview = if result.stderr.len() > 1000 {
format!("{}...", &result.stderr[..1000])
} else {
result.stderr.clone()
};
result_data["stderr"] = serde_json::json!(stderr_preview);
}
// Include parsed result if available
@@ -450,7 +481,14 @@ impl ActionExecutor {
execution_id: i64,
result: Option<&ExecutionResult>,
) -> Result<()> {
error!("Execution {} failed", execution_id);
if let Some(r) = result {
error!(
"Execution {} failed (exit_code={}, error={:?}, duration={}ms)",
execution_id, r.exit_code, r.error, r.duration_ms
);
} else {
error!("Execution {} failed during preparation", execution_id);
}
let exec_dir = self.artifact_manager.get_execution_dir(execution_id);
let mut result_data = serde_json::json!({
@@ -466,29 +504,15 @@ impl ActionExecutor {
result_data["error"] = serde_json::json!(error);
}
// Add log file paths and previews if logs exist
// Include stdout content directly in result
if !exec_result.stdout.is_empty() {
let stdout_path = exec_dir.join("stdout.log");
result_data["stdout_log"] = serde_json::json!(stdout_path.to_string_lossy());
// Include stdout preview (first 1000 chars)
let stdout_preview = if exec_result.stdout.len() > 1000 {
format!("{}...", &exec_result.stdout[..1000])
} else {
exec_result.stdout.clone()
};
result_data["stdout"] = serde_json::json!(stdout_preview);
result_data["stdout"] = serde_json::json!(exec_result.stdout);
}
if !exec_result.stderr.is_empty() {
// Include stderr log path only if stderr is non-empty and non-whitespace
if !exec_result.stderr.trim().is_empty() {
let stderr_path = exec_dir.join("stderr.log");
result_data["stderr_log"] = serde_json::json!(stderr_path.to_string_lossy());
// Include stderr preview (first 1000 chars)
let stderr_preview = if exec_result.stderr.len() > 1000 {
format!("{}...", &exec_result.stderr[..1000])
} else {
exec_result.stderr.clone()
};
result_data["stderr"] = serde_json::json!(stderr_preview);
}
// Add truncation warnings if applicable
@@ -509,33 +533,23 @@ impl ActionExecutor {
warn!("Execution {} failed without ExecutionResult - this indicates an early/catastrophic failure", execution_id);
// Check if stderr log exists from artifact storage
// Check if stderr log exists and is non-empty from artifact storage
let stderr_path = exec_dir.join("stderr.log");
if stderr_path.exists() {
result_data["stderr_log"] = serde_json::json!(stderr_path.to_string_lossy());
// Try to read a preview if file exists
if let Ok(contents) = tokio::fs::read_to_string(&stderr_path).await {
let preview = if contents.len() > 1000 {
format!("{}...", &contents[..1000])
} else {
contents
};
result_data["stderr"] = serde_json::json!(preview);
if !contents.trim().is_empty() {
result_data["stderr_log"] = serde_json::json!(stderr_path.to_string_lossy());
}
}
}
// Check if stdout log exists from artifact storage
let stdout_path = exec_dir.join("stdout.log");
if stdout_path.exists() {
result_data["stdout_log"] = serde_json::json!(stdout_path.to_string_lossy());
// Try to read a preview if file exists
if let Ok(contents) = tokio::fs::read_to_string(&stdout_path).await {
let preview = if contents.len() > 1000 {
format!("{}...", &contents[..1000])
} else {
contents
};
result_data["stdout"] = serde_json::json!(preview);
if !contents.is_empty() {
result_data["stdout"] = serde_json::json!(contents);
}
}
}
}

View File

@@ -10,7 +10,6 @@ pub mod registration;
pub mod runtime;
pub mod secrets;
pub mod service;
pub mod test_executor;
// Re-export commonly used types
pub use executor::ActionExecutor;
@@ -22,4 +21,5 @@ pub use runtime::{
};
pub use secrets::SecretManager;
pub use service::WorkerService;
pub use test_executor::{TestConfig, TestExecutor};
// Re-export test executor from common (shared business logic)
pub use attune_common::test_executor::{TestConfig, TestExecutor};

View File

@@ -3,6 +3,7 @@
use anyhow::Result;
use attune_common::config::Config;
use clap::Parser;
use tokio::signal::unix::{signal, SignalKind};
use tracing::info;
use attune_worker::service::WorkerService;
@@ -70,8 +71,26 @@ async fn main() -> Result<()> {
info!("Attune Worker Service is ready");
// Run until interrupted
service.run().await?;
// Start the service
service.start().await?;
// Setup signal handlers for graceful shutdown
let mut sigint = signal(SignalKind::interrupt())?;
let mut sigterm = signal(SignalKind::terminate())?;
tokio::select! {
_ = sigint.recv() => {
info!("Received SIGINT signal");
}
_ = sigterm.recv() => {
info!("Received SIGTERM signal");
}
}
info!("Shutting down gracefully...");
// Stop the service and mark worker as inactive
service.stop().await?;
info!("Attune Worker Service shutdown complete");

View File

@@ -7,6 +7,7 @@ pub mod dependency;
pub mod local;
pub mod log_writer;
pub mod native;
pub mod parameter_passing;
pub mod python;
pub mod python_venv;
pub mod shell;
@@ -18,6 +19,7 @@ pub use python::PythonRuntime;
pub use shell::ShellRuntime;
use async_trait::async_trait;
use attune_common::models::{ParameterDelivery, ParameterFormat};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
@@ -29,6 +31,7 @@ pub use dependency::{
DependencySpec, EnvironmentInfo,
};
pub use log_writer::{BoundedLogResult, BoundedLogWriter};
pub use parameter_passing::{ParameterDeliveryConfig, PreparedParameters};
pub use python_venv::PythonVenvManager;
/// Runtime execution result
@@ -108,6 +111,14 @@ pub struct ExecutionContext {
/// Maximum stderr size in bytes (for log truncation)
#[serde(default = "default_max_log_bytes")]
pub max_stderr_bytes: usize,
/// How parameters should be delivered to the action
#[serde(default)]
pub parameter_delivery: ParameterDelivery,
/// Format for parameter serialization
#[serde(default)]
pub parameter_format: ParameterFormat,
}
fn default_max_log_bytes() -> usize {
@@ -133,6 +144,8 @@ impl ExecutionContext {
runtime_name: None,
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: ParameterDelivery::default(),
parameter_format: ParameterFormat::default(),
}
}
}

View File

@@ -4,14 +4,16 @@
//! This runtime is used for Rust binaries and other compiled executables.
use super::{
parameter_passing::{self, ParameterDeliveryConfig},
BoundedLogWriter, ExecutionContext, ExecutionResult, Runtime, RuntimeError, RuntimeResult,
};
use async_trait::async_trait;
use std::path::PathBuf;
use std::process::Stdio;
use std::time::Instant;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::process::Command;
use tokio::time::{timeout, Duration};
use tokio::time::Duration;
use tracing::{debug, info, warn};
/// Native runtime for executing compiled binaries
@@ -35,11 +37,11 @@ impl NativeRuntime {
/// Execute a native binary with parameters and environment variables
async fn execute_binary(
&self,
binary_path: std::path::PathBuf,
parameters: &std::collections::HashMap<String, serde_json::Value>,
binary_path: PathBuf,
secrets: &std::collections::HashMap<String, String>,
env: &std::collections::HashMap<String, String>,
exec_timeout: Option<u64>,
parameters_stdin: Option<&str>,
timeout: Option<u64>,
max_stdout_bytes: usize,
max_stderr_bytes: usize,
) -> RuntimeResult<ExecutionResult> {
@@ -76,22 +78,11 @@ impl NativeRuntime {
cmd.current_dir(work_dir);
}
// Add environment variables
// Add environment variables (including parameter delivery metadata)
for (key, value) in env {
cmd.env(key, value);
}
// Add parameters as environment variables with ATTUNE_ACTION_ prefix
for (key, value) in parameters {
let value_str = match value {
serde_json::Value::String(s) => s.clone(),
serde_json::Value::Number(n) => n.to_string(),
serde_json::Value::Bool(b) => b.to_string(),
_ => serde_json::to_string(value)?,
};
cmd.env(format!("ATTUNE_ACTION_{}", key.to_uppercase()), value_str);
}
// Configure stdio
cmd.stdin(Stdio::piped())
.stdout(Stdio::piped())
@@ -102,29 +93,42 @@ impl NativeRuntime {
.spawn()
.map_err(|e| RuntimeError::ExecutionFailed(format!("Failed to spawn binary: {}", e)))?;
// Write secrets to stdin - if this fails, the process has already started
// so we should continue and capture whatever output we can
let stdin_write_error = if !secrets.is_empty() {
if let Some(mut stdin) = child.stdin.take() {
// Write to stdin - parameters (if using stdin delivery) and/or secrets
// If this fails, the process has already started, so we continue and capture output
let stdin_write_error = if let Some(mut stdin) = child.stdin.take() {
let mut error = None;
// Write parameters first if using stdin delivery
if let Some(params_data) = parameters_stdin {
if let Err(e) = stdin.write_all(params_data.as_bytes()).await {
error = Some(format!("Failed to write parameters to stdin: {}", e));
} else if let Err(e) = stdin.write_all(b"\n---ATTUNE_PARAMS_END---\n").await {
error = Some(format!("Failed to write parameter delimiter: {}", e));
}
}
// Write secrets as JSON (always, for backward compatibility)
if error.is_none() && !secrets.is_empty() {
match serde_json::to_string(secrets) {
Ok(secrets_json) => {
if let Err(e) = stdin.write_all(secrets_json.as_bytes()).await {
Some(format!("Failed to write secrets to stdin: {}", e))
} else if let Err(e) = stdin.shutdown().await {
Some(format!("Failed to close stdin: {}", e))
} else {
None
error = Some(format!("Failed to write secrets to stdin: {}", e));
} else if let Err(e) = stdin.write_all(b"\n").await {
error = Some(format!("Failed to write newline to stdin: {}", e));
}
}
Err(e) => Some(format!("Failed to serialize secrets: {}", e)),
Err(e) => error = Some(format!("Failed to serialize secrets: {}", e)),
}
} else {
None
}
// Close stdin
if let Err(e) = stdin.shutdown().await {
if error.is_none() {
error = Some(format!("Failed to close stdin: {}", e));
}
}
error
} else {
if let Some(stdin) = child.stdin.take() {
drop(stdin); // Close stdin if no secrets
}
None
};
@@ -184,8 +188,8 @@ impl NativeRuntime {
let (stdout_writer, stderr_writer) = tokio::join!(stdout_task, stderr_task);
// Wait for process with timeout
let wait_result = if let Some(timeout_secs) = exec_timeout {
match timeout(Duration::from_secs(timeout_secs), child.wait()).await {
let wait_result = if let Some(timeout_secs) = timeout {
match tokio::time::timeout(Duration::from_secs(timeout_secs), child.wait()).await {
Ok(result) => result,
Err(_) => {
warn!(
@@ -317,10 +321,26 @@ impl Runtime for NativeRuntime {
async fn execute(&self, context: ExecutionContext) -> RuntimeResult<ExecutionResult> {
info!(
"Executing native action: {} (execution_id: {})",
context.action_ref, context.execution_id
"Executing native action: {} (execution_id: {}) with parameter delivery: {:?}, format: {:?}",
context.action_ref, context.execution_id, context.parameter_delivery, context.parameter_format
);
// Prepare environment and parameters according to delivery method
let mut env = context.env.clone();
let config = ParameterDeliveryConfig {
delivery: context.parameter_delivery,
format: context.parameter_format,
};
let prepared_params = parameter_passing::prepare_parameters(
&context.parameters,
&mut env,
config,
)?;
// Get stdin content if parameters are delivered via stdin
let parameters_stdin = prepared_params.stdin_content();
// Get the binary path
let binary_path = context.code_path.ok_or_else(|| {
RuntimeError::InvalidAction("Native runtime requires code_path to be set".to_string())
@@ -328,9 +348,9 @@ impl Runtime for NativeRuntime {
self.execute_binary(
binary_path,
&context.parameters,
&context.secrets,
&context.env,
&env,
parameters_stdin,
context.timeout,
context.max_stdout_bytes,
context.max_stderr_bytes,

View File

@@ -0,0 +1,320 @@
//! Parameter Passing Module
//!
//! Provides utilities for formatting and delivering action parameters
//! in different formats (dotenv, JSON, YAML) via different methods
//! (environment variables, stdin, temporary files).
use attune_common::models::{ParameterDelivery, ParameterFormat};
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use std::io::Write;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tracing::debug;
use super::RuntimeError;
/// Format parameters according to the specified format
pub fn format_parameters(
parameters: &HashMap<String, JsonValue>,
format: ParameterFormat,
) -> Result<String, RuntimeError> {
match format {
ParameterFormat::Dotenv => format_dotenv(parameters),
ParameterFormat::Json => format_json(parameters),
ParameterFormat::Yaml => format_yaml(parameters),
}
}
/// Format parameters as dotenv (`key='value'`) lines.
///
/// Note: Parameter names are preserved as-is (case-sensitive).
///
/// Keys are emitted in sorted order so the output is deterministic instead of
/// following `HashMap` iteration order (which varies run to run). Single
/// quotes inside values are escaped shell-style (`'` -> `'\''`) so the output
/// can be sourced by a POSIX shell.
fn format_dotenv(parameters: &HashMap<String, JsonValue>) -> Result<String, RuntimeError> {
    // Sort entries by key for stable, reproducible output.
    let mut entries: Vec<(&String, &JsonValue)> = parameters.iter().collect();
    entries.sort_by_key(|(key, _)| *key);
    let lines: Vec<String> = entries
        .into_iter()
        .map(|(key, value)| {
            let value_str = value_to_string(value);
            // Escape single quotes in value
            let escaped_value = value_str.replace('\'', "'\\''");
            format!("{}='{}'", key, escaped_value)
        })
        .collect();
    Ok(lines.join("\n"))
}
/// Format parameters as pretty-printed JSON.
fn format_json(parameters: &HashMap<String, JsonValue>) -> Result<String, RuntimeError> {
    match serde_json::to_string_pretty(parameters) {
        Ok(json) => Ok(json),
        Err(e) => Err(RuntimeError::ExecutionFailed(format!(
            "Failed to serialize parameters to JSON: {}",
            e
        ))),
    }
}
/// Format parameters as YAML.
fn format_yaml(parameters: &HashMap<String, JsonValue>) -> Result<String, RuntimeError> {
    match serde_yaml_ng::to_string(parameters) {
        Ok(yaml) => Ok(yaml),
        Err(e) => Err(RuntimeError::ExecutionFailed(format!(
            "Failed to serialize parameters to YAML: {}",
            e
        ))),
    }
}
/// Render a JSON value as the bare string used in dotenv output.
///
/// Scalars render without surrounding quotes, `null` becomes the empty
/// string, and arrays/objects fall back to their compact JSON encoding.
fn value_to_string(value: &JsonValue) -> String {
    match value {
        JsonValue::Null => String::new(),
        JsonValue::Bool(b) => b.to_string(),
        JsonValue::Number(n) => n.to_string(),
        JsonValue::String(s) => s.to_owned(),
        // Arrays and objects: compact JSON; serialization of a Value cannot
        // realistically fail, but fall back to "" rather than panic.
        other => serde_json::to_string(other).unwrap_or_default(),
    }
}
/// Write `parameters` to a fresh temporary file in the requested `format`.
///
/// On Unix the file is restricted to owner read-only (0o400) before any
/// parameter data is written; our already-open handle remains writable since
/// permissions are checked at open time. The caller owns the returned
/// `NamedTempFile`, which deletes the file when dropped.
pub fn create_parameter_file(
    parameters: &HashMap<String, JsonValue>,
    format: ParameterFormat,
) -> Result<NamedTempFile, RuntimeError> {
    let formatted = format_parameters(parameters, format)?;
    let mut temp_file = NamedTempFile::new().map_err(RuntimeError::IoError)?;
    // Tighten permissions before the payload hits the disk.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let file = temp_file.as_file();
        let mut perms = file
            .metadata()
            .map_err(RuntimeError::IoError)?
            .permissions();
        perms.set_mode(0o400); // Read-only for owner
        file.set_permissions(perms).map_err(RuntimeError::IoError)?;
    }
    temp_file
        .write_all(formatted.as_bytes())
        .map_err(RuntimeError::IoError)?;
    temp_file.flush().map_err(RuntimeError::IoError)?;
    debug!(
        "Created parameter file at {:?} with format {:?}",
        temp_file.path(),
        format
    );
    Ok(temp_file)
}
/// Parameter delivery configuration
///
/// Bundles the delivery mechanism with the serialization format so runtimes
/// can hand both to `prepare_parameters` as a single unit.
#[derive(Debug, Clone)]
pub struct ParameterDeliveryConfig {
    /// How parameters should be delivered to the action (see `ParameterDelivery`).
    pub delivery: ParameterDelivery,
    /// Serialization format used when the parameters are written out.
    pub format: ParameterFormat,
}
/// Prepared parameters ready for execution
///
/// Produced by `prepare_parameters`; tells the runtime where the formatted
/// parameter payload lives so it can be handed to the child process.
#[derive(Debug)]
pub enum PreparedParameters {
    /// Parameters are in environment variables
    Environment,
    /// Parameters will be passed via stdin (carries the formatted payload to write)
    Stdin(String),
    /// Parameters are in a temporary file
    File {
        /// Path of the temporary file holding the formatted parameters.
        path: PathBuf,
        /// Held only to keep the temp file alive; it is deleted when this
        /// value is dropped.
        #[allow(dead_code)]
        temp_file: NamedTempFile,
    },
}
impl PreparedParameters {
    /// Path of the parameter file, if parameters were staged in a temp file.
    pub fn file_path(&self) -> Option<&PathBuf> {
        if let PreparedParameters::File { path, .. } = self {
            Some(path)
        } else {
            None
        }
    }

    /// Formatted payload to write to stdin, if stdin delivery was selected.
    pub fn stdin_content(&self) -> Option<&str> {
        if let PreparedParameters::Stdin(content) = self {
            Some(content.as_str())
        } else {
            None
        }
    }
}
/// Prepare parameters for delivery according to the specified method and format
///
/// Formats `parameters` per `config.format` and stages them for the chosen
/// delivery mechanism. Both visible paths also record delivery metadata into
/// `env` (`ATTUNE_PARAMETER_DELIVERY`, `ATTUNE_PARAMETER_FORMAT`, plus
/// `ATTUNE_PARAMETER_FILE` for file delivery) so the child process can
/// discover how its parameters arrive.
///
/// NOTE(review): only the `Stdin` and `File` variants are handled here, yet
/// `PreparedParameters::Environment` exists and is never constructed — confirm
/// whether `ParameterDelivery` has an environment-variable variant that needs
/// an arm (a missing arm would be a compile error, so verify against the
/// `attune_common` enum definition).
pub fn prepare_parameters(
    parameters: &HashMap<String, JsonValue>,
    env: &mut HashMap<String, String>,
    config: ParameterDeliveryConfig,
) -> Result<PreparedParameters, RuntimeError> {
    match config.delivery {
        ParameterDelivery::Stdin => {
            // Format parameters for stdin
            let formatted = format_parameters(parameters, config.format)?;
            // Add environment variables to indicate delivery method
            env.insert(
                "ATTUNE_PARAMETER_DELIVERY".to_string(),
                "stdin".to_string(),
            );
            env.insert(
                "ATTUNE_PARAMETER_FORMAT".to_string(),
                config.format.to_string(),
            );
            Ok(PreparedParameters::Stdin(formatted))
        }
        ParameterDelivery::File => {
            // Create temporary file with parameters
            let temp_file = create_parameter_file(parameters, config.format)?;
            let path = temp_file.path().to_path_buf();
            // Add environment variables to indicate delivery method and file location
            env.insert(
                "ATTUNE_PARAMETER_DELIVERY".to_string(),
                "file".to_string(),
            );
            env.insert(
                "ATTUNE_PARAMETER_FORMAT".to_string(),
                config.format.to_string(),
            );
            env.insert(
                "ATTUNE_PARAMETER_FILE".to_string(),
                path.to_string_lossy().to_string(),
            );
            Ok(PreparedParameters::File { path, temp_file })
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    #[test]
    fn test_format_dotenv() {
        let mut params = HashMap::new();
        params.insert("message".to_string(), json!("Hello, World!"));
        params.insert("count".to_string(), json!(42));
        params.insert("enabled".to_string(), json!(true));
        let result = format_dotenv(&params).unwrap();
        assert!(result.contains("message='Hello, World!'"));
        assert!(result.contains("count='42'"));
        assert!(result.contains("enabled='true'"));
    }

    #[test]
    fn test_format_dotenv_escaping() {
        let mut params = HashMap::new();
        params.insert("message".to_string(), json!("It's a test"));
        let result = format_dotenv(&params).unwrap();
        assert!(result.contains("message='It'\\''s a test'"));
    }

    #[test]
    fn test_format_json() {
        let mut params = HashMap::new();
        params.insert("message".to_string(), json!("Hello"));
        params.insert("count".to_string(), json!(42));
        let result = format_json(&params).unwrap();
        let parsed: HashMap<String, JsonValue> = serde_json::from_str(&result).unwrap();
        assert_eq!(parsed.get("message"), Some(&json!("Hello")));
        assert_eq!(parsed.get("count"), Some(&json!(42)));
    }

    #[test]
    fn test_format_yaml() {
        let mut params = HashMap::new();
        params.insert("message".to_string(), json!("Hello"));
        params.insert("count".to_string(), json!(42));
        let result = format_yaml(&params).unwrap();
        assert!(result.contains("message:"));
        assert!(result.contains("Hello"));
        assert!(result.contains("count:"));
        assert!(result.contains("42"));
    }

    // Fix: the original had a duplicated `#[test]` attribute on this
    // function, which rustc rejects.
    #[test]
    fn test_create_parameter_file() {
        let mut params = HashMap::new();
        params.insert("key".to_string(), json!("value"));
        let temp_file = create_parameter_file(&params, ParameterFormat::Json).unwrap();
        let content = std::fs::read_to_string(temp_file.path()).unwrap();
        assert!(content.contains("key"));
        assert!(content.contains("value"));
    }

    #[test]
    fn test_prepare_parameters_stdin() {
        let mut params = HashMap::new();
        params.insert("test".to_string(), json!("value"));
        let mut env = HashMap::new();
        let config = ParameterDeliveryConfig {
            delivery: ParameterDelivery::Stdin,
            format: ParameterFormat::Json,
        };
        let result = prepare_parameters(&params, &mut env, config).unwrap();
        assert!(matches!(result, PreparedParameters::Stdin(_)));
        assert_eq!(
            env.get("ATTUNE_PARAMETER_DELIVERY"),
            Some(&"stdin".to_string())
        );
        assert_eq!(
            env.get("ATTUNE_PARAMETER_FORMAT"),
            Some(&"json".to_string())
        );
    }

    #[test]
    fn test_prepare_parameters_file() {
        let mut params = HashMap::new();
        params.insert("test".to_string(), json!("value"));
        let mut env = HashMap::new();
        let config = ParameterDeliveryConfig {
            delivery: ParameterDelivery::File,
            format: ParameterFormat::Yaml,
        };
        let result = prepare_parameters(&params, &mut env, config).unwrap();
        assert!(matches!(result, PreparedParameters::File { .. }));
        assert_eq!(
            env.get("ATTUNE_PARAMETER_DELIVERY"),
            Some(&"file".to_string())
        );
        assert_eq!(
            env.get("ATTUNE_PARAMETER_FORMAT"),
            Some(&"yaml".to_string())
        );
        assert!(env.contains_key("ATTUNE_PARAMETER_FILE"));
    }
}

View File

@@ -3,6 +3,7 @@
//! Executes shell scripts and commands using subprocess execution.
use super::{
parameter_passing::{self, ParameterDeliveryConfig},
BoundedLogWriter, ExecutionContext, ExecutionResult, Runtime, RuntimeError, RuntimeResult,
};
use async_trait::async_trait;
@@ -53,6 +54,7 @@ impl ShellRuntime {
&self,
mut cmd: Command,
secrets: &std::collections::HashMap<String, String>,
parameters_stdin: Option<&str>,
timeout_secs: Option<u64>,
max_stdout_bytes: usize,
max_stderr_bytes: usize,
@@ -66,22 +68,36 @@ impl ShellRuntime {
.stderr(Stdio::piped())
.spawn()?;
// Write secrets to stdin - if this fails, the process has already started
// so we should continue and capture whatever output we can
// Write to stdin - parameters (if using stdin delivery) and/or secrets
// If this fails, the process has already started, so we continue and capture output
let stdin_write_error = if let Some(mut stdin) = child.stdin.take() {
match serde_json::to_string(secrets) {
Ok(secrets_json) => {
if let Err(e) = stdin.write_all(secrets_json.as_bytes()).await {
Some(format!("Failed to write secrets to stdin: {}", e))
} else if let Err(e) = stdin.write_all(b"\n").await {
Some(format!("Failed to write newline to stdin: {}", e))
} else {
drop(stdin);
None
}
let mut error = None;
// Write parameters first if using stdin delivery
if let Some(params_data) = parameters_stdin {
if let Err(e) = stdin.write_all(params_data.as_bytes()).await {
error = Some(format!("Failed to write parameters to stdin: {}", e));
} else if let Err(e) = stdin.write_all(b"\n---ATTUNE_PARAMS_END---\n").await {
error = Some(format!("Failed to write parameter delimiter: {}", e));
}
Err(e) => Some(format!("Failed to serialize secrets: {}", e)),
}
// Write secrets as JSON (always, for backward compatibility)
if error.is_none() && !secrets.is_empty() {
match serde_json::to_string(secrets) {
Ok(secrets_json) => {
if let Err(e) = stdin.write_all(secrets_json.as_bytes()).await {
error = Some(format!("Failed to write secrets to stdin: {}", e));
} else if let Err(e) = stdin.write_all(b"\n").await {
error = Some(format!("Failed to write newline to stdin: {}", e));
}
}
Err(e) => error = Some(format!("Failed to serialize secrets: {}", e)),
}
}
drop(stdin);
error
} else {
None
};
@@ -315,9 +331,10 @@ impl ShellRuntime {
/// Execute shell script directly
async fn execute_shell_code(
&self,
script: String,
code: String,
secrets: &std::collections::HashMap<String, String>,
env: &std::collections::HashMap<String, String>,
parameters_stdin: Option<&str>,
timeout_secs: Option<u64>,
max_stdout_bytes: usize,
max_stderr_bytes: usize,
@@ -329,7 +346,7 @@ impl ShellRuntime {
// Build command
let mut cmd = Command::new(&self.shell_path);
cmd.arg("-c").arg(&script);
cmd.arg("-c").arg(&code);
// Add environment variables
for (key, value) in env {
@@ -339,6 +356,7 @@ impl ShellRuntime {
self.execute_with_streaming(
cmd,
secrets,
parameters_stdin,
timeout_secs,
max_stdout_bytes,
max_stderr_bytes,
@@ -349,22 +367,23 @@ impl ShellRuntime {
/// Execute shell script from file
async fn execute_shell_file(
&self,
code_path: PathBuf,
script_path: PathBuf,
secrets: &std::collections::HashMap<String, String>,
env: &std::collections::HashMap<String, String>,
parameters_stdin: Option<&str>,
timeout_secs: Option<u64>,
max_stdout_bytes: usize,
max_stderr_bytes: usize,
) -> RuntimeResult<ExecutionResult> {
debug!(
"Executing shell file: {:?} with {} secrets",
code_path,
script_path,
secrets.len()
);
// Build command
let mut cmd = Command::new(&self.shell_path);
cmd.arg(&code_path);
cmd.arg(&script_path);
// Add environment variables
for (key, value) in env {
@@ -374,6 +393,7 @@ impl ShellRuntime {
self.execute_with_streaming(
cmd,
secrets,
parameters_stdin,
timeout_secs,
max_stdout_bytes,
max_stderr_bytes,
@@ -412,29 +432,49 @@ impl Runtime for ShellRuntime {
async fn execute(&self, context: ExecutionContext) -> RuntimeResult<ExecutionResult> {
info!(
"Executing shell action: {} (execution_id: {})",
context.action_ref, context.execution_id
"Executing shell action: {} (execution_id: {}) with parameter delivery: {:?}, format: {:?}",
context.action_ref, context.execution_id, context.parameter_delivery, context.parameter_format
);
info!(
"Action parameters (count: {}): {:?}",
context.parameters.len(),
context.parameters
);
// Prepare environment and parameters according to delivery method
let mut env = context.env.clone();
let config = ParameterDeliveryConfig {
delivery: context.parameter_delivery,
format: context.parameter_format,
};
let prepared_params = parameter_passing::prepare_parameters(
&context.parameters,
&mut env,
config,
)?;
// Get stdin content if parameters are delivered via stdin
let parameters_stdin = prepared_params.stdin_content();
if let Some(stdin_data) = parameters_stdin {
info!(
"Parameters to be sent via stdin (length: {} bytes):\n{}",
stdin_data.len(),
stdin_data
);
} else {
info!("No parameters will be sent via stdin");
}
// If code_path is provided, execute the file directly
if let Some(code_path) = &context.code_path {
// Merge parameters into environment variables with ATTUNE_ACTION_ prefix
let mut env = context.env.clone();
for (key, value) in &context.parameters {
let value_str = match value {
serde_json::Value::String(s) => s.clone(),
serde_json::Value::Number(n) => n.to_string(),
serde_json::Value::Bool(b) => b.to_string(),
_ => serde_json::to_string(value)?,
};
env.insert(format!("ATTUNE_ACTION_{}", key.to_uppercase()), value_str);
}
return self
.execute_shell_file(
code_path.clone(),
&context.secrets,
&env,
parameters_stdin,
context.timeout,
context.max_stdout_bytes,
context.max_stderr_bytes,
@@ -447,7 +487,8 @@ impl Runtime for ShellRuntime {
self.execute_shell_code(
script,
&context.secrets,
&context.env,
&env,
parameters_stdin,
context.timeout,
context.max_stdout_bytes,
context.max_stderr_bytes,
@@ -534,6 +575,8 @@ mod tests {
runtime_name: Some("shell".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_common::models::ParameterDelivery::default(),
parameter_format: attune_common::models::ParameterFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
@@ -564,6 +607,8 @@ mod tests {
runtime_name: Some("shell".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_common::models::ParameterDelivery::default(),
parameter_format: attune_common::models::ParameterFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
@@ -589,6 +634,8 @@ mod tests {
runtime_name: Some("shell".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_common::models::ParameterDelivery::default(),
parameter_format: attune_common::models::ParameterFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
@@ -616,6 +663,8 @@ mod tests {
runtime_name: Some("shell".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_common::models::ParameterDelivery::default(),
parameter_format: attune_common::models::ParameterFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
@@ -658,6 +707,8 @@ echo "missing=$missing"
runtime_name: Some("shell".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_common::models::ParameterDelivery::default(),
parameter_format: attune_common::models::ParameterFormat::default(),
};
let result = runtime.execute(context).await.unwrap();

View File

@@ -10,7 +10,7 @@ use attune_common::models::ExecutionStatus;
use attune_common::mq::{
config::MessageQueueConfig as MqConfig, Connection, Consumer, ConsumerConfig,
ExecutionCompletedPayload, ExecutionStatusChangedPayload, MessageEnvelope, MessageType,
Publisher, PublisherConfig, QueueConfig,
Publisher, PublisherConfig,
};
use attune_common::repositories::{execution::ExecutionRepository, FindById};
use chrono::Utc;
@@ -230,6 +230,11 @@ impl WorkerService {
.map(|w| w.max_stderr_bytes)
.unwrap_or(10 * 1024 * 1024);
let packs_base_dir = std::path::PathBuf::from(&config.packs_base_dir);
// Get API URL from environment or construct from server config
let api_url = std::env::var("ATTUNE_API_URL")
.unwrap_or_else(|_| format!("http://{}:{}", config.server.host, config.server.port));
let executor = Arc::new(ActionExecutor::new(
pool.clone(),
runtime_registry,
@@ -238,6 +243,7 @@ impl WorkerService {
max_stdout_bytes,
max_stderr_bytes,
packs_base_dir,
api_url,
));
// Initialize heartbeat manager
@@ -430,8 +436,13 @@ impl WorkerService {
}
// Publish completion notification for queue management
if let Err(e) =
Self::publish_completion_notification(&db_pool, &publisher, execution_id).await
if let Err(e) = Self::publish_completion_notification(
&db_pool,
&publisher,
execution_id,
ExecutionStatus::Completed,
)
.await
{
error!(
"Failed to publish completion notification for execution {}: {}",
@@ -458,8 +469,13 @@ impl WorkerService {
}
// Publish completion notification for queue management
if let Err(e) =
Self::publish_completion_notification(&db_pool, &publisher, execution_id).await
if let Err(e) = Self::publish_completion_notification(
&db_pool,
&publisher,
execution_id,
ExecutionStatus::Failed,
)
.await
{
error!(
"Failed to publish completion notification for execution {}: {}",
@@ -528,6 +544,7 @@ impl WorkerService {
db_pool: &PgPool,
publisher: &Publisher,
execution_id: i64,
final_status: ExecutionStatus,
) -> Result<()> {
// Fetch execution to get action_id and other required fields
let execution = ExecutionRepository::find_by_id(db_pool, execution_id)
@@ -556,7 +573,7 @@ impl WorkerService {
execution_id: execution.id,
action_id,
action_ref: execution.action_ref.clone(),
status: format!("{:?}", execution.status),
status: format!("{:?}", final_status),
result: execution.result.clone(),
completed_at: Utc::now(),
};
@@ -576,21 +593,7 @@ impl WorkerService {
Ok(())
}
/// Run the worker service until interrupted
pub async fn run(&mut self) -> Result<()> {
self.start().await?;
// Wait for shutdown signal
tokio::signal::ctrl_c()
.await
.map_err(|e| Error::Internal(format!("Failed to wait for shutdown signal: {}", e)))?;
info!("Received shutdown signal");
self.stop().await?;
Ok(())
}
}
#[cfg(test)]