working on runtime executions

This commit is contained in:
2026-02-16 22:04:20 -06:00
parent f52320f889
commit 904ede04be
99 changed files with 6778 additions and 5929 deletions

View File

@@ -117,17 +117,17 @@ pub struct RuleResponse {
#[schema(example = "Send Slack notification when an error occurs")]
pub description: String,
/// Action ID
/// Action ID (null if the referenced action has been deleted)
#[schema(example = 1)]
pub action: i64,
pub action: Option<i64>,
/// Action reference
#[schema(example = "slack.post_message")]
pub action_ref: String,
/// Trigger ID
/// Trigger ID (null if the referenced trigger has been deleted)
#[schema(example = 1)]
pub trigger: i64,
pub trigger: Option<i64>,
/// Trigger reference
#[schema(example = "system.error_event")]

View File

@@ -12,6 +12,7 @@ use std::sync::Arc;
use validator::Validate;
use attune_common::models::pack_test::PackTestResult;
use attune_common::mq::{MessageEnvelope, MessageType, PackRegisteredPayload};
use attune_common::repositories::{
pack::{CreatePackInput, UpdatePackInput},
Create, Delete, FindById, FindByRef, PackRepository, PackTestRepository, Pagination, Update,
@@ -291,13 +292,30 @@ pub async fn delete_pack(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Pack '{}' not found", pack_ref)))?;
// Delete the pack
// Delete the pack from the database (cascades to actions, triggers, sensors, rules, etc.
// Foreign keys on execution, event, enforcement, and rule tables use ON DELETE SET NULL
// so historical records are preserved with their text ref fields intact.)
let deleted = PackRepository::delete(&state.db, pack.id).await?;
if !deleted {
return Err(ApiError::NotFound(format!("Pack '{}' not found", pack_ref)));
}
// Remove pack directory from permanent storage
let pack_dir = PathBuf::from(&state.config.packs_base_dir).join(&pack_ref);
if pack_dir.exists() {
if let Err(e) = std::fs::remove_dir_all(&pack_dir) {
tracing::warn!(
"Pack '{}' deleted from database but failed to remove directory {}: {}",
pack_ref,
pack_dir.display(),
e
);
} else {
tracing::info!("Removed pack directory: {}", pack_dir.display());
}
}
let response = SuccessResponse::new(format!("Pack '{}' deleted successfully", pack_ref));
Ok((StatusCode::OK, Json(response)))
@@ -310,77 +328,121 @@ async fn execute_and_store_pack_tests(
pack_ref: &str,
pack_version: &str,
trigger_type: &str,
) -> Result<attune_common::models::pack_test::PackTestResult, ApiError> {
pack_dir_override: Option<&std::path::Path>,
) -> Option<Result<attune_common::models::pack_test::PackTestResult, ApiError>> {
use attune_common::test_executor::{TestConfig, TestExecutor};
use serde_yaml_ng;
// Load pack.yaml from filesystem
let packs_base_dir = PathBuf::from(&state.config.packs_base_dir);
let pack_dir = packs_base_dir.join(pack_ref);
let pack_dir = match pack_dir_override {
Some(dir) => dir.to_path_buf(),
None => packs_base_dir.join(pack_ref),
};
if !pack_dir.exists() {
return Err(ApiError::NotFound(format!(
return Some(Err(ApiError::NotFound(format!(
"Pack directory not found: {}",
pack_dir.display()
)));
))));
}
let pack_yaml_path = pack_dir.join("pack.yaml");
if !pack_yaml_path.exists() {
return Err(ApiError::NotFound(format!(
return Some(Err(ApiError::NotFound(format!(
"pack.yaml not found for pack '{}'",
pack_ref
)));
))));
}
// Parse pack.yaml
let pack_yaml_content = tokio::fs::read_to_string(&pack_yaml_path)
.await
.map_err(|e| ApiError::InternalServerError(format!("Failed to read pack.yaml: {}", e)))?;
let pack_yaml_content = match tokio::fs::read_to_string(&pack_yaml_path).await {
Ok(content) => content,
Err(e) => {
return Some(Err(ApiError::InternalServerError(format!(
"Failed to read pack.yaml: {}",
e
))))
}
};
let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)
.map_err(|e| ApiError::InternalServerError(format!("Failed to parse pack.yaml: {}", e)))?;
let pack_yaml: serde_yaml_ng::Value = match serde_yaml_ng::from_str(&pack_yaml_content) {
Ok(v) => v,
Err(e) => {
return Some(Err(ApiError::InternalServerError(format!(
"Failed to parse pack.yaml: {}",
e
))))
}
};
// Extract test configuration
let testing_config = pack_yaml.get("testing").ok_or_else(|| {
ApiError::BadRequest(format!(
"No testing configuration found in pack.yaml for pack '{}'",
pack_ref
))
})?;
// Extract test configuration - if absent or disabled, skip tests gracefully
let testing_config = match pack_yaml.get("testing") {
Some(config) => config,
None => {
tracing::info!(
"No testing configuration found in pack.yaml for pack '{}', skipping tests",
pack_ref
);
return None;
}
};
let test_config: TestConfig =
serde_yaml_ng::from_value(testing_config.clone()).map_err(|e| {
ApiError::InternalServerError(format!("Failed to parse test configuration: {}", e))
})?;
let test_config: TestConfig = match serde_yaml_ng::from_value(testing_config.clone()) {
Ok(config) => config,
Err(e) => {
return Some(Err(ApiError::InternalServerError(format!(
"Failed to parse test configuration: {}",
e
))))
}
};
if !test_config.enabled {
return Err(ApiError::BadRequest(format!(
"Testing is disabled for pack '{}'",
tracing::info!(
"Testing is disabled for pack '{}', skipping tests",
pack_ref
)));
);
return None;
}
// Create test executor
let executor = TestExecutor::new(packs_base_dir);
// Execute tests
let result = executor
.execute_pack_tests(pack_ref, pack_version, &test_config)
.await
.map_err(|e| ApiError::InternalServerError(format!("Test execution failed: {}", e)))?;
// Execute tests - use execute_pack_tests_at when we have a specific directory
// (e.g., temp dir during installation before pack is moved to permanent storage)
let result = match if pack_dir_override.is_some() {
executor
.execute_pack_tests_at(&pack_dir, pack_ref, pack_version, &test_config)
.await
} else {
executor
.execute_pack_tests(pack_ref, pack_version, &test_config)
.await
} {
Ok(r) => r,
Err(e) => {
return Some(Err(ApiError::InternalServerError(format!(
"Test execution failed: {}",
e
))))
}
};
// Store test results in database
let pack_test_repo = PackTestRepository::new(state.db.clone());
pack_test_repo
if let Err(e) = pack_test_repo
.create(pack_id, pack_version, trigger_type, &result)
.await
.map_err(|e| {
tracing::warn!("Failed to store test results: {}", e);
ApiError::DatabaseError(format!("Failed to store test results: {}", e))
})?;
{
tracing::warn!("Failed to store test results: {}", e);
return Some(Err(ApiError::DatabaseError(format!(
"Failed to store test results: {}",
e
))));
}
Ok(result)
Some(Ok(result))
}
/// Register a pack from local filesystem
@@ -578,38 +640,313 @@ async fn register_pack_internal(
}
}
// Execute tests if not skipped
if !skip_tests {
match execute_and_store_pack_tests(&state, pack.id, &pack.r#ref, &pack.version, "register")
.await
{
Ok(result) => {
let test_passed = result.status == "passed";
// Load pack components (triggers, actions, sensors) into the database
{
use attune_common::pack_registry::PackComponentLoader;
if !test_passed && !force {
// Tests failed and force is not set - rollback pack creation
let _ = PackRepository::delete(&state.db, pack.id).await;
return Err(ApiError::BadRequest(format!(
"Pack registration failed: tests did not pass. Use force=true to register anyway."
)));
}
if !test_passed && force {
tracing::warn!(
"Pack '{}' tests failed but force=true, continuing with registration",
pack.r#ref
);
let component_loader = PackComponentLoader::new(&state.db, pack.id, &pack.r#ref);
match component_loader.load_all(&pack_path).await {
Ok(load_result) => {
tracing::info!(
"Pack '{}' components loaded: {} runtimes, {} triggers, {} actions, {} sensors ({} skipped, {} warnings)",
pack.r#ref,
load_result.runtimes_loaded,
load_result.triggers_loaded,
load_result.actions_loaded,
load_result.sensors_loaded,
load_result.total_skipped(),
load_result.warnings.len()
);
for warning in &load_result.warnings {
tracing::warn!("Pack component warning: {}", warning);
}
}
Err(e) => {
tracing::warn!("Failed to execute tests for pack '{}': {}", pack.r#ref, e);
// If tests can't be executed and force is not set, fail the registration
if !force {
let _ = PackRepository::delete(&state.db, pack.id).await;
return Err(ApiError::BadRequest(format!(
"Pack registration failed: could not execute tests. Error: {}. Use force=true to register anyway.",
e
)));
tracing::warn!(
"Failed to load components for pack '{}': {}. Components can be loaded manually.",
pack.r#ref,
e
);
}
}
}
// Set up runtime environments for the pack's actions.
// This creates virtualenvs, installs dependencies, etc. based on each
// runtime's execution_config from the database.
//
// Environment directories are placed at:
// {runtime_envs_dir}/{pack_ref}/{runtime_name}
// e.g., /opt/attune/runtime_envs/python_example/python
// This keeps the pack directory clean and read-only.
{
use attune_common::repositories::runtime::RuntimeRepository;
use attune_common::repositories::FindById as _;
let runtime_envs_base = PathBuf::from(&state.config.runtime_envs_dir);
// Collect unique runtime IDs from the pack's actions
let actions =
attune_common::repositories::ActionRepository::find_by_pack(&state.db, pack.id)
.await
.unwrap_or_default();
let mut seen_runtime_ids = std::collections::HashSet::new();
for action in &actions {
if let Some(runtime_id) = action.runtime {
seen_runtime_ids.insert(runtime_id);
}
}
for runtime_id in seen_runtime_ids {
match RuntimeRepository::find_by_id(&state.db, runtime_id).await {
Ok(Some(rt)) => {
let exec_config = rt.parsed_execution_config();
let rt_name = rt.name.to_lowercase();
// Check if this runtime has environment/dependency config
if exec_config.environment.is_some() || exec_config.has_dependencies(&pack_path)
{
// Compute external env_dir: {runtime_envs_dir}/{pack_ref}/{runtime_name}
let env_dir = runtime_envs_base.join(&pack.r#ref).join(&rt_name);
tracing::info!(
"Runtime '{}' for pack '{}' requires environment setup (env_dir: {})",
rt.name,
pack.r#ref,
env_dir.display()
);
// Attempt to create environment if configured.
// NOTE: In Docker deployments the API container typically does NOT
// have runtime interpreters (e.g., python3) installed, so this will
// fail. That is expected — the worker service will create the
// environment on-demand before the first execution. This block is
// a best-effort optimisation for non-Docker (bare-metal) setups
// where the API host has the interpreter available.
if let Some(ref env_cfg) = exec_config.environment {
if env_cfg.env_type != "none" {
if !env_dir.exists() && !env_cfg.create_command.is_empty() {
// Ensure parent directories exist
if let Some(parent) = env_dir.parent() {
let _ = std::fs::create_dir_all(parent);
}
let vars = exec_config
.build_template_vars_with_env(&pack_path, Some(&env_dir));
let resolved_cmd = attune_common::models::runtime::RuntimeExecutionConfig::resolve_command(
&env_cfg.create_command,
&vars,
);
tracing::info!(
"Attempting to create {} environment (best-effort) at {}: {:?}",
env_cfg.env_type,
env_dir.display(),
resolved_cmd
);
if let Some((program, args)) = resolved_cmd.split_first() {
match tokio::process::Command::new(program)
.args(args)
.current_dir(&pack_path)
.output()
.await
{
Ok(output) if output.status.success() => {
tracing::info!(
"Created {} environment at {}",
env_cfg.env_type,
env_dir.display()
);
}
Ok(output) => {
let stderr =
String::from_utf8_lossy(&output.stderr);
tracing::info!(
"Environment creation skipped in API service (exit {}): {}. \
The worker will create it on first execution.",
output.status.code().unwrap_or(-1),
stderr.trim()
);
}
Err(e) => {
tracing::info!(
"Runtime '{}' not available in API service: {}. \
The worker will create the environment on first execution.",
program, e
);
}
}
}
}
}
}
// Attempt to install dependencies if manifest file exists.
// Same caveat as above — this is best-effort in the API service.
if let Some(ref dep_cfg) = exec_config.dependencies {
let manifest_path = pack_path.join(&dep_cfg.manifest_file);
if manifest_path.exists() && !dep_cfg.install_command.is_empty() {
// Only attempt if the environment directory already exists
// (i.e., the venv creation above succeeded).
let env_exists = env_dir.exists();
if env_exists {
let vars = exec_config
.build_template_vars_with_env(&pack_path, Some(&env_dir));
let resolved_cmd = attune_common::models::runtime::RuntimeExecutionConfig::resolve_command(
&dep_cfg.install_command,
&vars,
);
tracing::info!(
"Installing dependencies for pack '{}': {:?}",
pack.r#ref,
resolved_cmd
);
if let Some((program, args)) = resolved_cmd.split_first() {
match tokio::process::Command::new(program)
.args(args)
.current_dir(&pack_path)
.output()
.await
{
Ok(output) if output.status.success() => {
tracing::info!(
"Dependencies installed for pack '{}'",
pack.r#ref
);
}
Ok(output) => {
let stderr =
String::from_utf8_lossy(&output.stderr);
tracing::info!(
"Dependency installation skipped in API service (exit {}): {}. \
The worker will handle this on first execution.",
output.status.code().unwrap_or(-1),
stderr.trim()
);
}
Err(e) => {
tracing::info!(
"Dependency installer not available in API service: {}. \
The worker will handle this on first execution.",
e
);
}
}
}
} else {
tracing::info!(
"Skipping dependency installation for pack '{}' — \
environment not yet created. The worker will handle \
environment setup and dependency installation on first execution.",
pack.r#ref
);
}
}
}
}
}
Ok(None) => {
tracing::debug!(
"Runtime ID {} not found, skipping environment setup",
runtime_id
);
}
Err(e) => {
tracing::warn!("Failed to load runtime {}: {}", runtime_id, e);
}
}
}
}
// Execute tests if not skipped
if !skip_tests {
if let Some(test_outcome) = execute_and_store_pack_tests(
&state,
pack.id,
&pack.r#ref,
&pack.version,
"register",
Some(&pack_path),
)
.await
{
match test_outcome {
Ok(result) => {
let test_passed = result.status == "passed";
if !test_passed && !force {
// Tests failed and force is not set - rollback pack creation
let _ = PackRepository::delete(&state.db, pack.id).await;
return Err(ApiError::BadRequest(format!(
"Pack registration failed: tests did not pass. Use force=true to register anyway."
)));
}
if !test_passed && force {
tracing::warn!(
"Pack '{}' tests failed but force=true, continuing with registration",
pack.r#ref
);
}
}
Err(e) => {
tracing::warn!("Failed to execute tests for pack '{}': {}", pack.r#ref, e);
// If tests can't be executed and force is not set, fail the registration
if !force {
let _ = PackRepository::delete(&state.db, pack.id).await;
return Err(ApiError::BadRequest(format!(
"Pack registration failed: could not execute tests. Error: {}. Use force=true to register anyway.",
e
)));
}
}
}
} else {
tracing::info!(
"No tests to run for pack '{}', proceeding with registration",
pack.r#ref
);
}
}
// Publish pack.registered event so workers can proactively set up
// runtime environments (virtualenvs, node_modules, etc.).
if let Some(ref publisher) = state.publisher {
let runtime_names = attune_common::pack_environment::collect_runtime_names_for_pack(
&state.db, pack.id, &pack_path,
)
.await;
if !runtime_names.is_empty() {
let payload = PackRegisteredPayload {
pack_id: pack.id,
pack_ref: pack.r#ref.clone(),
version: pack.version.clone(),
runtime_names: runtime_names.clone(),
};
let envelope = MessageEnvelope::new(MessageType::PackRegistered, payload);
match publisher.publish_envelope(&envelope).await {
Ok(()) => {
tracing::info!(
"Published pack.registered event for pack '{}' (runtimes: {:?})",
pack.r#ref,
runtime_names,
);
}
Err(e) => {
tracing::warn!(
"Failed to publish pack.registered event for pack '{}': {}. \
Workers will set up environments lazily on first execution.",
pack.r#ref,
e,
);
}
}
}
@@ -756,36 +1093,54 @@ pub async fn install_pack(
tracing::info!("Skipping dependency validation (disabled by user)");
}
// Register the pack in database (from temp location)
let register_request = crate::dto::pack::RegisterPackRequest {
path: installed.path.to_string_lossy().to_string(),
force: request.force,
skip_tests: request.skip_tests,
// Read pack.yaml to get pack_ref so we can move to permanent storage first.
// This ensures virtualenvs and dependencies are created at the final location
// (Python venvs are NOT relocatable — they contain hardcoded paths).
let pack_yaml_path_for_ref = installed.path.join("pack.yaml");
let pack_ref_for_storage = {
let content = std::fs::read_to_string(&pack_yaml_path_for_ref).map_err(|e| {
ApiError::InternalServerError(format!("Failed to read pack.yaml: {}", e))
})?;
let yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&content).map_err(|e| {
ApiError::InternalServerError(format!("Failed to parse pack.yaml: {}", e))
})?;
yaml.get("ref")
.and_then(|v| v.as_str())
.ok_or_else(|| ApiError::BadRequest("Missing 'ref' field in pack.yaml".to_string()))?
.to_string()
};
let pack_id = register_pack_internal(
state.clone(),
user_sub,
register_request.path.clone(),
register_request.force,
register_request.skip_tests,
)
.await?;
// Fetch the registered pack to get pack_ref and version
let pack = PackRepository::find_by_id(&state.db, pack_id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Pack with ID {} not found", pack_id)))?;
// Move pack to permanent storage
// Move pack to permanent storage BEFORE registration so that environment
// setup (virtualenv creation, dependency installation) happens at the
// final location rather than a temporary directory.
let storage = PackStorage::new(&state.config.packs_base_dir);
let final_path = storage
.install_pack(&installed.path, &pack.r#ref, Some(&pack.version))
.install_pack(&installed.path, &pack_ref_for_storage, None)
.map_err(|e| {
ApiError::InternalServerError(format!("Failed to move pack to storage: {}", e))
})?;
tracing::info!("Pack installed to permanent storage: {:?}", final_path);
tracing::info!("Pack moved to permanent storage: {:?}", final_path);
// Register the pack in database (from permanent storage location)
let pack_id = register_pack_internal(
state.clone(),
user_sub,
final_path.to_string_lossy().to_string(),
request.force,
request.skip_tests,
)
.await
.map_err(|e| {
// Clean up the permanent storage if registration fails
let _ = std::fs::remove_dir_all(&final_path);
e
})?;
// Fetch the registered pack
let pack = PackRepository::find_by_id(&state.db, pack_id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Pack with ID {} not found", pack_id)))?;
// Calculate checksum of installed pack
let checksum = calculate_directory_checksum(&final_path)
@@ -823,7 +1178,7 @@ pub async fn install_pack(
let response = PackInstallResponse {
pack: PackResponse::from(pack),
test_result: None, // TODO: Include test results
tests_skipped: register_request.skip_tests,
tests_skipped: request.skip_tests,
};
Ok((StatusCode::OK, Json(crate::dto::ApiResponse::new(response))))
@@ -1105,7 +1460,7 @@ pub async fn test_pack(
// Execute tests
let result = executor
.execute_pack_tests(&pack_ref, &pack.version, &test_config)
.execute_pack_tests_at(&pack_dir, &pack_ref, &pack.version, &test_config)
.await
.map_err(|e| ApiError::InternalServerError(format!("Test execution failed: {}", e)))?;

View File

@@ -345,9 +345,9 @@ pub async fn create_rule(
let payload = RuleCreatedPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
trigger_id: Some(rule.trigger),
trigger_id: rule.trigger,
trigger_ref: rule.trigger_ref.clone(),
action_id: Some(rule.action),
action_id: rule.action,
action_ref: rule.action_ref.clone(),
trigger_params: Some(rule.trigger_params.clone()),
enabled: rule.enabled,

View File

@@ -219,6 +219,7 @@ mod tests {
is_adhoc: false,
parameter_delivery: attune_common::models::ParameterDelivery::default(),
parameter_format: attune_common::models::ParameterFormat::default(),
output_format: attune_common::models::OutputFormat::default(),
created: chrono::Utc::now(),
updated: chrono::Utc::now(),
};
@@ -238,7 +239,7 @@ mod tests {
});
let action = Action {
id: 1,
id: 2,
r#ref: "test.action".to_string(),
pack: 1,
pack_ref: "test".to_string(),
@@ -253,6 +254,7 @@ mod tests {
is_adhoc: false,
parameter_delivery: attune_common::models::ParameterDelivery::default(),
parameter_format: attune_common::models::ParameterFormat::default(),
output_format: attune_common::models::OutputFormat::default(),
created: chrono::Utc::now(),
updated: chrono::Utc::now(),
};

View File

@@ -576,6 +576,12 @@ pub struct Config {
#[serde(default = "default_packs_base_dir")]
pub packs_base_dir: String,
/// Runtime environments directory (isolated envs like virtualenvs, node_modules).
/// Pattern: {runtime_envs_dir}/{pack_ref}/{runtime_name}
/// e.g., /opt/attune/runtime_envs/python_example/python
#[serde(default = "default_runtime_envs_dir")]
pub runtime_envs_dir: String,
/// Notifier configuration (optional, for notifier service)
pub notifier: Option<NotifierConfig>,
@@ -599,6 +605,10 @@ fn default_packs_base_dir() -> String {
"/opt/attune/packs".to_string()
}
fn default_runtime_envs_dir() -> String {
"/opt/attune/runtime_envs".to_string()
}
impl Default for DatabaseConfig {
fn default() -> Self {
Self {
@@ -833,8 +843,10 @@ mod tests {
worker: None,
sensor: None,
packs_base_dir: default_packs_base_dir(),
runtime_envs_dir: default_runtime_envs_dir(),
notifier: None,
pack_registry: PackRegistryConfig::default(),
executor: None,
};
assert_eq!(config.service_name, "attune");
@@ -904,8 +916,10 @@ mod tests {
worker: None,
sensor: None,
packs_base_dir: default_packs_base_dir(),
runtime_envs_dir: default_runtime_envs_dir(),
notifier: None,
pack_registry: PackRegistryConfig::default(),
executor: None,
};
assert!(config.validate().is_ok());

View File

@@ -414,6 +414,324 @@ pub mod pack {
/// Runtime model
pub mod runtime {
use super::*;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tracing::{debug, warn};
/// Configuration for how a runtime executes actions.
///
/// Stored as JSONB in the `runtime.execution_config` column.
/// Uses template variables that are resolved at execution time:
/// - `{pack_dir}` — absolute path to the pack directory
/// - `{env_dir}` — resolved environment directory
/// When an external `env_dir` is provided (e.g., from `runtime_envs_dir`
/// config), that path is used directly. Otherwise falls back to
/// `pack_dir/dir_name` for backward compatibility.
/// - `{interpreter}` — resolved interpreter path
/// - `{action_file}` — absolute path to the action script file
/// - `{manifest_path}` — absolute path to the dependency manifest file
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct RuntimeExecutionConfig {
/// Interpreter configuration (how to invoke the action script)
#[serde(default)]
pub interpreter: InterpreterConfig,
/// Optional isolated environment configuration (venv, node_modules, etc.)
#[serde(default)]
pub environment: Option<EnvironmentConfig>,
/// Optional dependency management configuration
#[serde(default)]
pub dependencies: Option<DependencyConfig>,
}
/// Describes the interpreter binary and how it invokes action scripts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InterpreterConfig {
/// Path or name of the interpreter binary (e.g., "python3", "/bin/bash").
#[serde(default = "default_interpreter_binary")]
pub binary: String,
/// Additional arguments inserted before the action file path
/// (e.g., `["-u"]` for unbuffered Python output).
#[serde(default)]
pub args: Vec<String>,
/// File extension this runtime handles (e.g., ".py", ".sh").
/// Used to match actions to runtimes when runtime_name is not explicit.
#[serde(default)]
pub file_extension: Option<String>,
}
fn default_interpreter_binary() -> String {
"/bin/sh".to_string()
}
impl Default for InterpreterConfig {
fn default() -> Self {
Self {
binary: default_interpreter_binary(),
args: Vec::new(),
file_extension: None,
}
}
}
/// Describes how to create and manage an isolated runtime environment
/// (e.g., Python virtualenv, Node.js node_modules).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvironmentConfig {
/// Type of environment: "virtualenv", "node_modules", "none".
pub env_type: String,
/// Fallback directory name relative to the pack directory (e.g., ".venv").
/// Only used when no external `env_dir` is provided (legacy/bare-metal).
/// In production, the env_dir is computed externally as
/// `{runtime_envs_dir}/{pack_ref}/{runtime_name}`.
#[serde(default = "super::runtime::default_env_dir_name")]
pub dir_name: String,
/// Command(s) to create the environment.
/// Template variables: `{env_dir}`, `{pack_dir}`.
/// Example: `["python3", "-m", "venv", "{env_dir}"]`
#[serde(default)]
pub create_command: Vec<String>,
/// Path to the interpreter inside the environment.
/// When the environment exists, this overrides `interpreter.binary`.
/// Template variables: `{env_dir}`.
/// Example: `"{env_dir}/bin/python3"`
pub interpreter_path: Option<String>,
}
/// Describes how to detect and install dependencies for a pack.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DependencyConfig {
/// Name of the manifest file to look for in the pack directory
/// (e.g., "requirements.txt", "package.json").
pub manifest_file: String,
/// Command to install dependencies.
/// Template variables: `{interpreter}`, `{env_dir}`, `{manifest_path}`, `{pack_dir}`.
/// Example: `["{interpreter}", "-m", "pip", "install", "-r", "{manifest_path}"]`
#[serde(default)]
pub install_command: Vec<String>,
}
fn default_env_dir_name() -> String {
".venv".to_string()
}
impl RuntimeExecutionConfig {
/// Resolve template variables in a single string.
pub fn resolve_template(template: &str, vars: &HashMap<&str, String>) -> String {
let mut result = template.to_string();
for (key, value) in vars {
result = result.replace(&format!("{{{}}}", key), value);
}
result
}
/// Resolve the interpreter binary path using a pack-relative env_dir
/// (legacy fallback — prefers [`resolve_interpreter_with_env`]).
pub fn resolve_interpreter(&self, pack_dir: &Path) -> PathBuf {
let fallback_env_dir = self
.environment
.as_ref()
.map(|cfg| pack_dir.join(&cfg.dir_name));
self.resolve_interpreter_with_env(pack_dir, fallback_env_dir.as_deref())
}
/// Resolve the interpreter binary path for a given pack directory and
/// an explicit environment directory.
///
/// If `env_dir` is provided and exists on disk, returns the
/// environment's interpreter. Otherwise returns the system interpreter.
pub fn resolve_interpreter_with_env(
&self,
pack_dir: &Path,
env_dir: Option<&Path>,
) -> PathBuf {
if let Some(ref env_cfg) = self.environment {
if let Some(ref interp_path_template) = env_cfg.interpreter_path {
if let Some(env_dir) = env_dir {
if env_dir.exists() {
let mut vars = HashMap::new();
vars.insert("env_dir", env_dir.to_string_lossy().to_string());
vars.insert("pack_dir", pack_dir.to_string_lossy().to_string());
let resolved = Self::resolve_template(interp_path_template, &vars);
let resolved_path = PathBuf::from(&resolved);
// Path::exists() follows symlinks — returns true only
// if the final target is reachable. A valid symlink to
// an existing executable passes this check just fine.
if resolved_path.exists() {
debug!(
"Using environment interpreter: {} (template: '{}', env_dir: {})",
resolved_path.display(),
interp_path_template,
env_dir.display(),
);
return resolved_path;
}
// exists() returned false — check whether the path is
// a broken symlink (symlink_metadata succeeds for the
// link itself even when its target is missing).
let is_broken_symlink = std::fs::symlink_metadata(&resolved_path)
.map(|m| m.file_type().is_symlink())
.unwrap_or(false);
if is_broken_symlink {
// Read the dangling target for the diagnostic
let target = std::fs::read_link(&resolved_path)
.map(|t| t.display().to_string())
.unwrap_or_else(|_| "<unreadable>".to_string());
warn!(
"Environment interpreter at '{}' is a broken symlink \
(target '{}' does not exist). This typically happens \
when the venv was created by a different container \
where python3 lives at a different path. \
Recreate the venv with `--copies` or delete '{}' \
and restart the worker. \
Falling back to system interpreter '{}'",
resolved_path.display(),
target,
env_dir.display(),
self.interpreter.binary,
);
} else {
warn!(
"Environment interpreter not found at resolved path '{}' \
(template: '{}', env_dir: {}). \
Falling back to system interpreter '{}'",
resolved_path.display(),
interp_path_template,
env_dir.display(),
self.interpreter.binary,
);
}
} else {
warn!(
"Environment directory does not exist: {}. \
Expected interpreter template '{}' cannot be resolved. \
Falling back to system interpreter '{}'",
env_dir.display(),
interp_path_template,
self.interpreter.binary,
);
}
} else {
debug!(
"No env_dir provided; skipping environment interpreter resolution. \
Using system interpreter '{}'",
self.interpreter.binary,
);
}
} else {
debug!(
"No interpreter_path configured in environment config. \
Using system interpreter '{}'",
self.interpreter.binary,
);
}
} else {
debug!(
"No environment config present. Using system interpreter '{}'",
self.interpreter.binary,
);
}
PathBuf::from(&self.interpreter.binary)
}
/// Resolve the working directory for action execution.
/// Returns the pack directory.
pub fn resolve_working_dir(&self, pack_dir: &Path) -> PathBuf {
pack_dir.to_path_buf()
}
/// Resolve the environment directory for a pack (legacy pack-relative
/// fallback — callers should prefer computing `env_dir` externally
/// from `runtime_envs_dir`).
pub fn resolve_env_dir(&self, pack_dir: &Path) -> Option<PathBuf> {
self.environment
.as_ref()
.map(|env_cfg| pack_dir.join(&env_cfg.dir_name))
}
/// Check whether the pack directory has a dependency manifest file.
pub fn has_dependencies(&self, pack_dir: &Path) -> bool {
if let Some(ref dep_cfg) = self.dependencies {
pack_dir.join(&dep_cfg.manifest_file).exists()
} else {
false
}
}
/// Build template variables using a pack-relative env_dir
/// (legacy fallback — prefers [`build_template_vars_with_env`]).
pub fn build_template_vars(&self, pack_dir: &Path) -> HashMap<&'static str, String> {
let fallback_env_dir = self
.environment
.as_ref()
.map(|cfg| pack_dir.join(&cfg.dir_name));
self.build_template_vars_with_env(pack_dir, fallback_env_dir.as_deref())
}
/// Build template variables for a given pack directory and an explicit
/// environment directory.
///
/// The `env_dir` should be the external runtime environment path
/// (e.g., `/opt/attune/runtime_envs/{pack_ref}/{runtime_name}`).
/// If `None`, falls back to the pack-relative `dir_name`.
pub fn build_template_vars_with_env(
&self,
pack_dir: &Path,
env_dir: Option<&Path>,
) -> HashMap<&'static str, String> {
let mut vars = HashMap::new();
vars.insert("pack_dir", pack_dir.to_string_lossy().to_string());
if let Some(env_dir) = env_dir {
vars.insert("env_dir", env_dir.to_string_lossy().to_string());
} else if let Some(ref env_cfg) = self.environment {
let fallback = pack_dir.join(&env_cfg.dir_name);
vars.insert("env_dir", fallback.to_string_lossy().to_string());
}
let interpreter = self.resolve_interpreter_with_env(pack_dir, env_dir);
vars.insert("interpreter", interpreter.to_string_lossy().to_string());
if let Some(ref dep_cfg) = self.dependencies {
let manifest_path = pack_dir.join(&dep_cfg.manifest_file);
vars.insert("manifest_path", manifest_path.to_string_lossy().to_string());
}
vars
}
/// Resolve a command template (Vec<String>) with the given variables.
pub fn resolve_command(
cmd_template: &[String],
vars: &HashMap<&str, String>,
) -> Vec<String> {
cmd_template
.iter()
.map(|part| Self::resolve_template(part, vars))
.collect()
}
/// Check if this runtime can execute a file based on its extension.
pub fn matches_file_extension(&self, file_path: &Path) -> bool {
if let Some(ref ext) = self.interpreter.file_extension {
let expected = ext.trim_start_matches('.');
file_path
.extension()
.and_then(|e| e.to_str())
.map(|e| e.eq_ignore_ascii_case(expected))
.unwrap_or(false)
} else {
false
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Runtime {
@@ -426,10 +744,18 @@ pub mod runtime {
pub distributions: JsonDict,
pub installation: Option<JsonDict>,
pub installers: JsonDict,
pub execution_config: JsonDict,
pub created: DateTime<Utc>,
pub updated: DateTime<Utc>,
}
impl Runtime {
/// Parse the `execution_config` JSONB into a typed `RuntimeExecutionConfig`.
pub fn parsed_execution_config(&self) -> RuntimeExecutionConfig {
serde_json::from_value(self.execution_config.clone()).unwrap_or_default()
}
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Worker {
pub id: Id,
@@ -552,9 +878,9 @@ pub mod rule {
pub pack_ref: String,
pub label: String,
pub description: String,
pub action: Id,
pub action: Option<Id>,
pub action_ref: String,
pub trigger: Id,
pub trigger: Option<Id>,
pub trigger_ref: String,
pub conditions: JsonValue,
pub action_params: JsonValue,

View File

@@ -459,6 +459,13 @@ impl Connection {
worker_id
);
let dlx = if config.rabbitmq.dead_letter.enabled {
Some(config.rabbitmq.dead_letter.exchange.as_str())
} else {
None
};
// --- Execution dispatch queue ---
let queue_name = format!("worker.{}.executions", worker_id);
let queue_config = QueueConfig {
name: queue_name.clone(),
@@ -467,12 +474,6 @@ impl Connection {
auto_delete: false,
};
let dlx = if config.rabbitmq.dead_letter.enabled {
Some(config.rabbitmq.dead_letter.exchange.as_str())
} else {
None
};
// Worker queues use TTL to expire unprocessed messages
let ttl_ms = Some(config.rabbitmq.worker_queue_ttl_ms);
@@ -487,6 +488,29 @@ impl Connection {
)
.await?;
// --- Pack registration queue ---
// Each worker gets its own queue for pack.registered events so that
// every worker instance can independently set up runtime environments
// (e.g., Python virtualenvs) when a new pack is registered.
let packs_queue_name = format!("worker.{}.packs", worker_id);
let packs_queue_config = QueueConfig {
name: packs_queue_name.clone(),
durable: true,
exclusive: false,
auto_delete: false,
};
self.declare_queue_with_optional_dlx(&packs_queue_config, dlx)
.await?;
// Bind to pack.registered routing key on the events exchange
self.bind_queue(
&packs_queue_name,
&config.rabbitmq.exchanges.events.name,
"pack.registered",
)
.await?;
info!(
"Worker infrastructure setup complete for worker ID {}",
worker_id

View File

@@ -65,6 +65,8 @@ pub enum MessageType {
RuleEnabled,
/// Rule disabled
RuleDisabled,
/// Pack registered or installed (triggers runtime environment setup in workers)
PackRegistered,
}
impl MessageType {
@@ -82,6 +84,7 @@ impl MessageType {
Self::RuleCreated => "rule.created".to_string(),
Self::RuleEnabled => "rule.enabled".to_string(),
Self::RuleDisabled => "rule.disabled".to_string(),
Self::PackRegistered => "pack.registered".to_string(),
}
}
@@ -98,6 +101,7 @@ impl MessageType {
Self::RuleCreated | Self::RuleEnabled | Self::RuleDisabled => {
"attune.events".to_string()
}
Self::PackRegistered => "attune.events".to_string(),
}
}
@@ -115,6 +119,7 @@ impl MessageType {
Self::RuleCreated => "RuleCreated",
Self::RuleEnabled => "RuleEnabled",
Self::RuleDisabled => "RuleDisabled",
Self::PackRegistered => "PackRegistered",
}
}
}
@@ -433,6 +438,23 @@ pub struct RuleDisabledPayload {
pub trigger_ref: String,
}
/// Payload for PackRegistered message
///
/// Published when a pack is registered or installed so that workers can
/// proactively create runtime environments (virtualenvs, node_modules, etc.)
/// instead of waiting until the first execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackRegisteredPayload {
/// Pack ID
pub pack_id: Id,
/// Pack reference (e.g., "python_example")
pub pack_ref: String,
/// Pack version
pub version: String,
/// Runtime names that require environment setup (lowercase, e.g., ["python"])
pub runtime_names: Vec<String>,
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -60,7 +60,7 @@ pub use messages::{
EnforcementCreatedPayload, EventCreatedPayload, ExecutionCompletedPayload,
ExecutionRequestedPayload, ExecutionStatusChangedPayload, InquiryCreatedPayload,
InquiryRespondedPayload, Message, MessageEnvelope, MessageType, NotificationCreatedPayload,
RuleCreatedPayload, RuleDisabledPayload, RuleEnabledPayload,
PackRegisteredPayload, RuleCreatedPayload, RuleDisabledPayload, RuleEnabledPayload,
};
pub use publisher::{Publisher, PublisherConfig};
@@ -220,6 +220,8 @@ pub mod routing_keys {
pub const INQUIRY_RESPONDED: &str = "inquiry.responded";
/// Notification created routing key
pub const NOTIFICATION_CREATED: &str = "notification.created";
/// Pack registered routing key
pub const PACK_REGISTERED: &str = "pack.registered";
}
#[cfg(test)]

View File

@@ -9,9 +9,12 @@
use crate::config::Config;
use crate::error::{Error, Result};
use crate::models::Runtime;
use crate::repositories::action::ActionRepository;
use crate::repositories::runtime::RuntimeRepository;
use crate::repositories::FindById as _;
use serde_json::Value as JsonValue;
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::process::Command;
use tokio::fs;
@@ -370,7 +373,8 @@ impl PackEnvironmentManager {
sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE id = $1
"#,
@@ -818,6 +822,53 @@ impl PackEnvironmentManager {
}
}
/// Collect the lowercase runtime names that require environment setup for a pack.
///
/// This queries the pack's actions, resolves their runtimes, and returns the names
/// of any runtimes that have environment or dependency configuration. It is used by
/// the API when publishing `PackRegistered` MQ events so that workers know which
/// runtimes to set up without re-querying the database.
pub async fn collect_runtime_names_for_pack(
db_pool: &PgPool,
pack_id: i64,
pack_path: &Path,
) -> Vec<String> {
let actions = match ActionRepository::find_by_pack(db_pool, pack_id).await {
Ok(a) => a,
Err(e) => {
warn!("Failed to load actions for pack ID {}: {}", pack_id, e);
return Vec::new();
}
};
let mut seen_runtime_ids = HashSet::new();
for action in &actions {
if let Some(runtime_id) = action.runtime {
seen_runtime_ids.insert(runtime_id);
}
}
let mut runtime_names = Vec::new();
for runtime_id in seen_runtime_ids {
match RuntimeRepository::find_by_id(db_pool, runtime_id).await {
Ok(Some(rt)) => {
let exec_config = rt.parsed_execution_config();
if exec_config.environment.is_some() || exec_config.has_dependencies(pack_path) {
runtime_names.push(rt.name.to_lowercase());
}
}
Ok(None) => {
debug!("Runtime ID {} not found, skipping", runtime_id);
}
Err(e) => {
warn!("Failed to load runtime {}: {}", runtime_id, e);
}
}
}
runtime_names
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -0,0 +1,776 @@
//! Pack Component Loader
//!
//! Reads runtime, action, trigger, and sensor YAML definitions from a pack directory
//! and registers them in the database. This is the Rust-native equivalent of
//! the Python `load_core_pack.py` script used during init-packs.
//!
//! Components are loaded in dependency order:
//! 1. Runtimes (no dependencies)
//! 2. Triggers (no dependencies)
//! 3. Actions (depend on runtime)
//! 4. Sensors (depend on triggers and runtime)
use std::collections::HashMap;
use std::path::Path;
use sqlx::PgPool;
use tracing::{info, warn};
use crate::error::{Error, Result};
use crate::models::Id;
use crate::repositories::action::ActionRepository;
use crate::repositories::runtime::{CreateRuntimeInput, RuntimeRepository};
use crate::repositories::trigger::{
CreateSensorInput, CreateTriggerInput, SensorRepository, TriggerRepository,
};
use crate::repositories::{Create, FindByRef};
/// Result of loading pack components into the database.
#[derive(Debug, Default)]
pub struct PackLoadResult {
/// Number of runtimes loaded
pub runtimes_loaded: usize,
/// Number of runtimes skipped (already exist)
pub runtimes_skipped: usize,
/// Number of triggers loaded
pub triggers_loaded: usize,
/// Number of triggers skipped (already exist)
pub triggers_skipped: usize,
/// Number of actions loaded
pub actions_loaded: usize,
/// Number of actions skipped (already exist)
pub actions_skipped: usize,
/// Number of sensors loaded
pub sensors_loaded: usize,
/// Number of sensors skipped (already exist)
pub sensors_skipped: usize,
/// Warnings encountered during loading
pub warnings: Vec<String>,
}
impl PackLoadResult {
pub fn total_loaded(&self) -> usize {
self.runtimes_loaded + self.triggers_loaded + self.actions_loaded + self.sensors_loaded
}
pub fn total_skipped(&self) -> usize {
self.runtimes_skipped + self.triggers_skipped + self.actions_skipped + self.sensors_skipped
}
}
/// Loads pack components (triggers, actions, sensors) from YAML files on disk
/// into the database.
pub struct PackComponentLoader<'a> {
pool: &'a PgPool,
pack_id: Id,
pack_ref: String,
}
impl<'a> PackComponentLoader<'a> {
pub fn new(pool: &'a PgPool, pack_id: Id, pack_ref: &str) -> Self {
Self {
pool,
pack_id,
pack_ref: pack_ref.to_string(),
}
}
/// Load all components from the pack directory.
///
/// Reads triggers, actions, and sensors from their respective subdirectories
/// and registers them in the database. Components that already exist (by ref)
/// are skipped.
pub async fn load_all(&self, pack_dir: &Path) -> Result<PackLoadResult> {
let mut result = PackLoadResult::default();
info!(
"Loading components for pack '{}' from {}",
self.pack_ref,
pack_dir.display()
);
// 1. Load runtimes first (no dependencies)
self.load_runtimes(pack_dir, &mut result).await?;
// 2. Load triggers (no dependencies)
let trigger_ids = self.load_triggers(pack_dir, &mut result).await?;
// 3. Load actions (depend on runtime)
self.load_actions(pack_dir, &mut result).await?;
// 4. Load sensors (depend on triggers and runtime)
self.load_sensors(pack_dir, &trigger_ids, &mut result)
.await?;
info!(
"Pack '{}' component loading complete: {} loaded, {} skipped, {} warnings",
self.pack_ref,
result.total_loaded(),
result.total_skipped(),
result.warnings.len()
);
Ok(result)
}
/// Load trigger definitions from `pack_dir/triggers/*.yaml`.
///
/// Returns a map of trigger ref -> trigger ID for use by sensor loading.
/// Load runtime definitions from `pack_dir/runtimes/*.yaml`.
///
/// Runtimes define how actions and sensors are executed (interpreter,
/// environment setup, dependency management). They are loaded first
/// since actions reference them.
async fn load_runtimes(&self, pack_dir: &Path, result: &mut PackLoadResult) -> Result<()> {
let runtimes_dir = pack_dir.join("runtimes");
if !runtimes_dir.exists() {
info!("No runtimes directory found for pack '{}'", self.pack_ref);
return Ok(());
}
let yaml_files = read_yaml_files(&runtimes_dir)?;
info!(
"Found {} runtime definition(s) for pack '{}'",
yaml_files.len(),
self.pack_ref
);
for (filename, content) in &yaml_files {
let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
Error::validation(format!("Failed to parse runtime YAML {}: {}", filename, e))
})?;
let runtime_ref = match data.get("ref").and_then(|v| v.as_str()) {
Some(r) => r.to_string(),
None => {
let msg = format!(
"Runtime YAML {} missing 'ref' field, skipping",
filename
);
warn!("{}", msg);
result.warnings.push(msg);
continue;
}
};
// Check if runtime already exists
if let Some(existing) =
RuntimeRepository::find_by_ref(self.pool, &runtime_ref).await?
{
info!(
"Runtime '{}' already exists (ID: {}), skipping",
runtime_ref, existing.id
);
result.runtimes_skipped += 1;
continue;
}
let name = data
.get("name")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_else(|| extract_name_from_ref(&runtime_ref));
let description = data
.get("description")
.and_then(|v| v.as_str())
.map(|s| s.to_string());
let distributions = data
.get("distributions")
.and_then(|v| serde_json::to_value(v).ok())
.unwrap_or_else(|| serde_json::json!({}));
let installation = data
.get("installation")
.and_then(|v| serde_json::to_value(v).ok());
let execution_config = data
.get("execution_config")
.and_then(|v| serde_json::to_value(v).ok())
.unwrap_or_else(|| serde_json::json!({}));
let input = CreateRuntimeInput {
r#ref: runtime_ref.clone(),
pack: Some(self.pack_id),
pack_ref: Some(self.pack_ref.clone()),
description,
name,
distributions,
installation,
execution_config,
};
match RuntimeRepository::create(self.pool, input).await {
Ok(rt) => {
info!(
"Created runtime '{}' (ID: {})",
runtime_ref, rt.id
);
result.runtimes_loaded += 1;
}
Err(e) => {
// Check for unique constraint violation (race condition)
if let Error::Database(ref db_err) = e {
if let sqlx::Error::Database(ref inner) = db_err {
if inner.is_unique_violation() {
info!(
"Runtime '{}' already exists (concurrent creation), skipping",
runtime_ref
);
result.runtimes_skipped += 1;
continue;
}
}
}
let msg = format!("Failed to create runtime '{}': {}", runtime_ref, e);
warn!("{}", msg);
result.warnings.push(msg);
}
}
}
Ok(())
}
async fn load_triggers(
&self,
pack_dir: &Path,
result: &mut PackLoadResult,
) -> Result<HashMap<String, Id>> {
let triggers_dir = pack_dir.join("triggers");
let mut trigger_ids = HashMap::new();
if !triggers_dir.exists() {
info!("No triggers directory found for pack '{}'", self.pack_ref);
return Ok(trigger_ids);
}
let yaml_files = read_yaml_files(&triggers_dir)?;
info!(
"Found {} trigger definition(s) for pack '{}'",
yaml_files.len(),
self.pack_ref
);
for (filename, content) in &yaml_files {
let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
Error::validation(format!("Failed to parse trigger YAML {}: {}", filename, e))
})?;
let trigger_ref = match data.get("ref").and_then(|v| v.as_str()) {
Some(r) => r.to_string(),
None => {
let msg = format!("Trigger YAML {} missing 'ref' field, skipping", filename);
warn!("{}", msg);
result.warnings.push(msg);
continue;
}
};
// Check if trigger already exists
if let Some(existing) = TriggerRepository::find_by_ref(self.pool, &trigger_ref).await? {
info!(
"Trigger '{}' already exists (ID: {}), skipping",
trigger_ref, existing.id
);
trigger_ids.insert(trigger_ref, existing.id);
result.triggers_skipped += 1;
continue;
}
let name = extract_name_from_ref(&trigger_ref);
let label = data
.get("label")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_else(|| generate_label(&name));
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let enabled = data
.get("enabled")
.and_then(|v| v.as_bool())
.unwrap_or(true);
let param_schema = data
.get("parameters")
.and_then(|v| serde_json::to_value(v).ok());
let out_schema = data
.get("output")
.and_then(|v| serde_json::to_value(v).ok());
let input = CreateTriggerInput {
r#ref: trigger_ref.clone(),
pack: Some(self.pack_id),
pack_ref: Some(self.pack_ref.clone()),
label,
description: Some(description),
enabled,
param_schema,
out_schema,
is_adhoc: false,
};
match TriggerRepository::create(self.pool, input).await {
Ok(trigger) => {
info!("Created trigger '{}' (ID: {})", trigger_ref, trigger.id);
trigger_ids.insert(trigger_ref, trigger.id);
result.triggers_loaded += 1;
}
Err(e) => {
let msg = format!("Failed to create trigger '{}': {}", trigger_ref, e);
warn!("{}", msg);
result.warnings.push(msg);
}
}
}
Ok(trigger_ids)
}
/// Load action definitions from `pack_dir/actions/*.yaml`.
async fn load_actions(&self, pack_dir: &Path, result: &mut PackLoadResult) -> Result<()> {
let actions_dir = pack_dir.join("actions");
if !actions_dir.exists() {
info!("No actions directory found for pack '{}'", self.pack_ref);
return Ok(());
}
let yaml_files = read_yaml_files(&actions_dir)?;
info!(
"Found {} action definition(s) for pack '{}'",
yaml_files.len(),
self.pack_ref
);
for (filename, content) in &yaml_files {
let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
Error::validation(format!("Failed to parse action YAML {}: {}", filename, e))
})?;
let action_ref = match data.get("ref").and_then(|v| v.as_str()) {
Some(r) => r.to_string(),
None => {
let msg = format!("Action YAML {} missing 'ref' field, skipping", filename);
warn!("{}", msg);
result.warnings.push(msg);
continue;
}
};
// Check if action already exists
if let Some(existing) = ActionRepository::find_by_ref(self.pool, &action_ref).await? {
info!(
"Action '{}' already exists (ID: {}), skipping",
action_ref, existing.id
);
result.actions_skipped += 1;
continue;
}
let name = extract_name_from_ref(&action_ref);
let label = data
.get("label")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_else(|| generate_label(&name));
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let entrypoint = data
.get("entry_point")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
// Resolve runtime ID from runner_type
let runner_type = data
.get("runner_type")
.and_then(|v| v.as_str())
.unwrap_or("shell");
let runtime_id = self.resolve_runtime_id(runner_type).await?;
let param_schema = data
.get("parameters")
.and_then(|v| serde_json::to_value(v).ok());
let out_schema = data
.get("output")
.and_then(|v| serde_json::to_value(v).ok());
// Read optional fields for parameter delivery/format and output format.
// The database has defaults (stdin, json, text), so we only set these
// in the INSERT if the YAML specifies them.
let parameter_delivery = data
.get("parameter_delivery")
.and_then(|v| v.as_str())
.unwrap_or("stdin")
.to_lowercase();
let parameter_format = data
.get("parameter_format")
.and_then(|v| v.as_str())
.unwrap_or("json")
.to_lowercase();
let output_format = data
.get("output_format")
.and_then(|v| v.as_str())
.unwrap_or("text")
.to_lowercase();
// Use raw SQL to include parameter_delivery, parameter_format,
// output_format which are not in CreateActionInput
let create_result = sqlx::query_scalar::<_, i64>(
r#"
INSERT INTO action (
ref, pack, pack_ref, label, description, entrypoint,
runtime, param_schema, out_schema, is_adhoc,
parameter_delivery, parameter_format, output_format
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
RETURNING id
"#,
)
.bind(&action_ref)
.bind(self.pack_id)
.bind(&self.pack_ref)
.bind(&label)
.bind(&description)
.bind(&entrypoint)
.bind(runtime_id)
.bind(&param_schema)
.bind(&out_schema)
.bind(false) // is_adhoc
.bind(&parameter_delivery)
.bind(&parameter_format)
.bind(&output_format)
.fetch_one(self.pool)
.await;
match create_result {
Ok(id) => {
info!("Created action '{}' (ID: {})", action_ref, id);
result.actions_loaded += 1;
}
Err(e) => {
// Check for unique constraint violation (already exists race condition)
if let sqlx::Error::Database(ref db_err) = e {
if db_err.is_unique_violation() {
info!(
"Action '{}' already exists (concurrent creation), skipping",
action_ref
);
result.actions_skipped += 1;
continue;
}
}
let msg = format!("Failed to create action '{}': {}", action_ref, e);
warn!("{}", msg);
result.warnings.push(msg);
}
}
}
Ok(())
}
/// Load sensor definitions from `pack_dir/sensors/*.yaml`.
async fn load_sensors(
&self,
pack_dir: &Path,
trigger_ids: &HashMap<String, Id>,
result: &mut PackLoadResult,
) -> Result<()> {
let sensors_dir = pack_dir.join("sensors");
if !sensors_dir.exists() {
info!("No sensors directory found for pack '{}'", self.pack_ref);
return Ok(());
}
let yaml_files = read_yaml_files(&sensors_dir)?;
info!(
"Found {} sensor definition(s) for pack '{}'",
yaml_files.len(),
self.pack_ref
);
// Resolve sensor runtime
let sensor_runtime_id = self.resolve_runtime_id("builtin").await?;
let sensor_runtime_ref = "core.builtin".to_string();
for (filename, content) in &yaml_files {
let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
Error::validation(format!("Failed to parse sensor YAML {}: {}", filename, e))
})?;
let sensor_ref = match data.get("ref").and_then(|v| v.as_str()) {
Some(r) => r.to_string(),
None => {
let msg = format!("Sensor YAML {} missing 'ref' field, skipping", filename);
warn!("{}", msg);
result.warnings.push(msg);
continue;
}
};
// Check if sensor already exists
if let Some(existing) = SensorRepository::find_by_ref(self.pool, &sensor_ref).await? {
info!(
"Sensor '{}' already exists (ID: {}), skipping",
sensor_ref, existing.id
);
result.sensors_skipped += 1;
continue;
}
let name = extract_name_from_ref(&sensor_ref);
let label = data
.get("label")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_else(|| generate_label(&name));
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let enabled = data
.get("enabled")
.and_then(|v| v.as_bool())
.unwrap_or(true);
let entrypoint = data
.get("entry_point")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
// Resolve trigger reference
let (trigger_id, trigger_ref) = self.resolve_sensor_trigger(&data, trigger_ids).await;
let param_schema = data
.get("parameters")
.and_then(|v| serde_json::to_value(v).ok());
let config = data
.get("config")
.and_then(|v| serde_json::to_value(v).ok())
.unwrap_or_else(|| serde_json::json!({}));
let input = CreateSensorInput {
r#ref: sensor_ref.clone(),
pack: Some(self.pack_id),
pack_ref: Some(self.pack_ref.clone()),
label,
description,
entrypoint,
runtime: sensor_runtime_id.unwrap_or(0),
runtime_ref: sensor_runtime_ref.clone(),
trigger: trigger_id.unwrap_or(0),
trigger_ref: trigger_ref.unwrap_or_default(),
enabled,
param_schema,
config: Some(config),
};
match SensorRepository::create(self.pool, input).await {
Ok(sensor) => {
info!("Created sensor '{}' (ID: {})", sensor_ref, sensor.id);
result.sensors_loaded += 1;
}
Err(e) => {
let msg = format!("Failed to create sensor '{}': {}", sensor_ref, e);
warn!("{}", msg);
result.warnings.push(msg);
}
}
}
Ok(())
}
/// Resolve a runtime ID from a runner type string (e.g., "shell", "python", "builtin").
///
/// Looks up the runtime in the database by `core.{name}` ref pattern,
/// then falls back to name-based lookup (case-insensitive).
///
/// - "shell" -> "core.shell"
/// - "python" -> "core.python"
/// - "node" -> "core.nodejs"
/// - "builtin" -> "core.builtin"
async fn resolve_runtime_id(&self, runner_type: &str) -> Result<Option<Id>> {
let runner_lower = runner_type.to_lowercase();
// Runtime refs use the format `{pack_ref}.{name}` (e.g., "core.python").
let refs_to_try = match runner_lower.as_str() {
"shell" | "bash" | "sh" => vec!["core.shell"],
"python" | "python3" => vec!["core.python"],
"node" | "nodejs" | "node.js" => vec!["core.nodejs"],
"native" => vec!["core.native"],
"builtin" => vec!["core.builtin"],
other => vec![other],
};
for runtime_ref in &refs_to_try {
if let Some(runtime) = RuntimeRepository::find_by_ref(self.pool, runtime_ref).await? {
return Ok(Some(runtime.id));
}
}
// Fall back to name-based lookup (case-insensitive)
use crate::repositories::runtime::RuntimeRepository as RR;
if let Some(runtime) = RR::find_by_name(self.pool, &runner_lower).await? {
return Ok(Some(runtime.id));
}
warn!(
"Could not find runtime for runner_type '{}', action will have no runtime",
runner_type
);
Ok(None)
}
/// Resolve the trigger reference and ID for a sensor.
///
/// Handles both `trigger_type` (singular) and `trigger_types` (array) fields.
async fn resolve_sensor_trigger(
&self,
data: &serde_yaml_ng::Value,
trigger_ids: &HashMap<String, Id>,
) -> (Option<Id>, Option<String>) {
// Try trigger_types (array) first, then trigger_type (singular)
let trigger_type_str = data
.get("trigger_types")
.and_then(|v| v.as_sequence())
.and_then(|seq| seq.first())
.and_then(|v| v.as_str())
.or_else(|| data.get("trigger_type").and_then(|v| v.as_str()));
let trigger_ref = match trigger_type_str {
Some(t) => {
if t.contains('.') {
t.to_string()
} else {
format!("{}.{}", self.pack_ref, t)
}
}
None => return (None, None),
};
// Look up trigger ID from our loaded triggers map first
if let Some(&id) = trigger_ids.get(&trigger_ref) {
return (Some(id), Some(trigger_ref));
}
// Fall back to database lookup
match TriggerRepository::find_by_ref(self.pool, &trigger_ref).await {
Ok(Some(trigger)) => (Some(trigger.id), Some(trigger_ref)),
_ => {
warn!("Could not resolve trigger ref '{}' for sensor", trigger_ref);
(None, Some(trigger_ref))
}
}
}
}
/// Read all `.yaml` and `.yml` files from a directory, sorted by filename.
///
/// Returns a Vec of (filename, content) pairs.
fn read_yaml_files(dir: &Path) -> Result<Vec<(String, String)>> {
let mut files = Vec::new();
let entries = std::fs::read_dir(dir)
.map_err(|e| Error::io(format!("Failed to read directory {}: {}", dir.display(), e)))?;
let mut paths: Vec<_> = entries
.filter_map(|e| e.ok())
.filter(|e| {
let path = e.path();
path.is_file()
&& matches!(
path.extension().and_then(|ext| ext.to_str()),
Some("yaml") | Some("yml")
)
})
.collect();
// Sort by filename for deterministic ordering
paths.sort_by_key(|e| e.file_name());
for entry in paths {
let path = entry.path();
let filename = entry.file_name().to_string_lossy().to_string();
let content = std::fs::read_to_string(&path)
.map_err(|e| Error::io(format!("Failed to read file {}: {}", path.display(), e)))?;
files.push((filename, content));
}
Ok(files)
}
/// Extract the short name from a dotted ref (e.g., "core.echo" -> "echo").
fn extract_name_from_ref(r: &str) -> String {
r.rsplit('.').next().unwrap_or(r).to_string()
}
/// Generate a human-readable label from a snake_case name.
///
/// Examples:
/// - "echo" -> "Echo"
/// - "http_request" -> "Http Request"
/// - "datetime_timer" -> "Datetime Timer"
fn generate_label(name: &str) -> String {
name.split('_')
.map(|word| {
let mut chars = word.chars();
match chars.next() {
Some(c) => {
let upper: String = c.to_uppercase().collect();
format!("{}{}", upper, chars.as_str())
}
None => String::new(),
}
})
.collect::<Vec<_>>()
.join(" ")
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_extract_name_from_ref() {
assert_eq!(extract_name_from_ref("core.echo"), "echo");
assert_eq!(extract_name_from_ref("python_example.greet"), "greet");
assert_eq!(extract_name_from_ref("simple"), "simple");
assert_eq!(extract_name_from_ref("a.b.c"), "c");
}
#[test]
fn test_generate_label() {
assert_eq!(generate_label("echo"), "Echo");
assert_eq!(generate_label("http_request"), "Http Request");
assert_eq!(generate_label("datetime_timer"), "Datetime Timer");
assert_eq!(generate_label("a_b_c"), "A B C");
}
}

View File

@@ -9,17 +9,19 @@
pub mod client;
pub mod dependency;
pub mod installer;
pub mod loader;
pub mod storage;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
// Re-export client, installer, storage, and dependency utilities
// Re-export client, installer, loader, storage, and dependency utilities
pub use client::RegistryClient;
pub use dependency::{
DependencyValidation, DependencyValidator, PackDepValidation, RuntimeDepValidation,
};
pub use installer::{InstalledPack, PackInstaller, PackSource};
pub use loader::{PackComponentLoader, PackLoadResult};
pub use storage::{
calculate_directory_checksum, calculate_file_checksum, verify_checksum, PackStorage,
};
@@ -245,7 +247,10 @@ impl Checksum {
pub fn parse(s: &str) -> Result<Self, String> {
let parts: Vec<&str> = s.splitn(2, ':').collect();
if parts.len() != 2 {
return Err(format!("Invalid checksum format: {}. Expected 'algorithm:hash'", s));
return Err(format!(
"Invalid checksum format: {}. Expected 'algorithm:hash'",
s
));
}
let algorithm = parts[0].to_lowercase();
@@ -259,7 +264,10 @@ impl Checksum {
// Basic validation of hash format (hex string)
if !hash.chars().all(|c| c.is_ascii_hexdigit()) {
return Err(format!("Invalid hash format: {}. Must be hexadecimal", hash));
return Err(format!(
"Invalid hash format: {}. Must be hexadecimal",
hash
));
}
Ok(Self { algorithm, hash })

View File

@@ -33,6 +33,7 @@ pub struct CreateRuntimeInput {
pub name: String,
pub distributions: JsonDict,
pub installation: Option<JsonDict>,
pub execution_config: JsonDict,
}
/// Input for updating a runtime
@@ -42,6 +43,7 @@ pub struct UpdateRuntimeInput {
pub name: Option<String>,
pub distributions: Option<JsonDict>,
pub installation: Option<JsonDict>,
pub execution_config: Option<JsonDict>,
}
#[async_trait::async_trait]
@@ -53,7 +55,8 @@ impl FindById for RuntimeRepository {
let runtime = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE id = $1
"#,
@@ -75,7 +78,8 @@ impl FindByRef for RuntimeRepository {
let runtime = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE ref = $1
"#,
@@ -97,7 +101,8 @@ impl List for RuntimeRepository {
let runtimes = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
ORDER BY ref ASC
"#,
@@ -120,10 +125,11 @@ impl Create for RuntimeRepository {
let runtime = sqlx::query_as::<_, Runtime>(
r#"
INSERT INTO runtime (ref, pack, pack_ref, description, name,
distributions, installation, installers)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
distributions, installation, installers, execution_config)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
RETURNING id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
"#,
)
.bind(&input.r#ref)
@@ -134,6 +140,7 @@ impl Create for RuntimeRepository {
.bind(&input.distributions)
.bind(&input.installation)
.bind(serde_json::json!({}))
.bind(&input.execution_config)
.fetch_one(executor)
.await?;
@@ -187,6 +194,15 @@ impl Update for RuntimeRepository {
has_updates = true;
}
if let Some(execution_config) = &input.execution_config {
if has_updates {
query.push(", ");
}
query.push("execution_config = ");
query.push_bind(execution_config);
has_updates = true;
}
if !has_updates {
// No updates requested, fetch and return existing entity
return Self::get_by_id(executor, id).await;
@@ -194,7 +210,10 @@ impl Update for RuntimeRepository {
query.push(", updated = NOW() WHERE id = ");
query.push_bind(id);
query.push(" RETURNING id, ref, pack, pack_ref, description, name, distributions, installation, installers, created, updated");
query.push(
" RETURNING id, ref, pack, pack_ref, description, name, \
distributions, installation, installers, execution_config, created, updated",
);
let runtime = query
.build_query_as::<Runtime>()
@@ -229,7 +248,8 @@ impl RuntimeRepository {
let runtimes = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE pack = $1
ORDER BY ref ASC
@@ -241,6 +261,29 @@ impl RuntimeRepository {
Ok(runtimes)
}
/// Find a runtime by name (case-insensitive)
pub async fn find_by_name<'e, E>(executor: E, name: &str) -> Result<Option<Runtime>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let runtime = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE LOWER(name) = LOWER($1)
LIMIT 1
"#,
)
.bind(name)
.fetch_optional(executor)
.await?;
Ok(runtime)
}
}
// ============================================================================
@@ -338,7 +381,7 @@ impl Create for WorkerRepository {
INSERT INTO worker (name, worker_type, runtime, host, port, status,
capabilities, meta)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
RETURNING id, name, worker_type, runtime, host, port, status,
RETURNING id, name, worker_type, worker_role, runtime, host, port, status,
capabilities, meta, last_heartbeat, created, updated
"#,
)
@@ -428,7 +471,10 @@ impl Update for WorkerRepository {
query.push(", updated = NOW() WHERE id = ");
query.push_bind(id);
query.push(" RETURNING id, name, worker_type, worker_role, runtime, host, port, status, capabilities, meta, last_heartbeat, created, updated");
query.push(
" RETURNING id, name, worker_type, worker_role, runtime, host, port, status, \
capabilities, meta, last_heartbeat, created, updated",
);
let worker = query.build_query_as::<Worker>().fetch_one(executor).await?;

View File

@@ -109,13 +109,13 @@ impl RuntimeDetector {
pub async fn detect_from_database(&self) -> Result<Vec<String>> {
info!("Querying database for runtime definitions...");
// Query all runtimes from database (no longer filtered by type)
// Query all runtimes from database
let runtimes = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE ref NOT LIKE '%.sensor.builtin'
ORDER BY ref
"#,
)

View File

@@ -174,24 +174,18 @@ impl RefValidator {
Ok(())
}
/// Validate pack.type.component format (e.g., "core.action.webhook")
/// Validate pack.name format (e.g., "core.python", "core.shell")
pub fn validate_runtime_ref(ref_str: &str) -> Result<()> {
let parts: Vec<&str> = ref_str.split('.').collect();
if parts.len() != 3 {
if parts.len() != 2 {
return Err(Error::validation(format!(
"Invalid runtime reference format: '{}'. Expected 'pack.type.component'",
"Invalid runtime reference format: '{}'. Expected 'pack.name' (e.g., 'core.python')",
ref_str
)));
}
Self::validate_identifier(parts[0])?;
if parts[1] != "action" && parts[1] != "sensor" {
return Err(Error::validation(format!(
"Invalid runtime type: '{}'. Must be 'action' or 'sensor'",
parts[1]
)));
}
Self::validate_identifier(parts[2])?;
Self::validate_identifier(parts[1])?;
Ok(())
}
@@ -267,13 +261,15 @@ mod tests {
#[test]
fn test_ref_validator_runtime() {
assert!(RefValidator::validate_runtime_ref("core.action.webhook").is_ok());
assert!(RefValidator::validate_runtime_ref("mypack.sensor.monitor").is_ok());
assert!(RefValidator::validate_runtime_ref("core.python").is_ok());
assert!(RefValidator::validate_runtime_ref("core.shell").is_ok());
assert!(RefValidator::validate_runtime_ref("mypack.nodejs").is_ok());
assert!(RefValidator::validate_runtime_ref("core.builtin").is_ok());
// Invalid formats
assert!(RefValidator::validate_runtime_ref("core.webhook").is_err());
assert!(RefValidator::validate_runtime_ref("core.invalid.webhook").is_err());
assert!(RefValidator::validate_runtime_ref("Core.action.webhook").is_err());
assert!(RefValidator::validate_runtime_ref("core.action.webhook").is_err()); // 3-part no longer valid
assert!(RefValidator::validate_runtime_ref("python").is_err()); // missing pack
assert!(RefValidator::validate_runtime_ref("Core.python").is_err()); // uppercase
}
#[test]

View File

@@ -54,12 +54,29 @@ impl TestExecutor {
Self { pack_base_dir }
}
/// Execute all tests for a pack
/// Execute all tests for a pack, looking up the pack directory from the base dir
pub async fn execute_pack_tests(
&self,
pack_ref: &str,
pack_version: &str,
test_config: &TestConfig,
) -> Result<PackTestResult> {
let pack_dir = self.pack_base_dir.join(pack_ref);
self.execute_pack_tests_at(&pack_dir, pack_ref, pack_version, test_config)
.await
}
/// Execute all tests for a pack at a specific directory path.
///
/// Use this when the pack files are not yet at the standard
/// `packs_base_dir/pack_ref` location (e.g., during installation
/// from a temp directory).
pub async fn execute_pack_tests_at(
&self,
pack_dir: &Path,
pack_ref: &str,
pack_version: &str,
test_config: &TestConfig,
) -> Result<PackTestResult> {
info!("Executing tests for pack: {} v{}", pack_ref, pack_version);
@@ -69,7 +86,6 @@ impl TestExecutor {
));
}
let pack_dir = self.pack_base_dir.join(pack_ref);
if !pack_dir.exists() {
return Err(Error::not_found(
"pack_directory",

View File

@@ -874,6 +874,7 @@ pub struct RuntimeFixture {
pub name: String,
pub distributions: serde_json::Value,
pub installation: Option<serde_json::Value>,
pub execution_config: serde_json::Value,
}
impl RuntimeFixture {
@@ -896,6 +897,13 @@ impl RuntimeFixture {
"darwin": { "supported": true }
}),
installation: None,
execution_config: json!({
"interpreter": {
"binary": "/bin/bash",
"args": [],
"file_extension": ".sh"
}
}),
}
}
@@ -920,6 +928,13 @@ impl RuntimeFixture {
"darwin": { "supported": true }
}),
installation: None,
execution_config: json!({
"interpreter": {
"binary": "/bin/bash",
"args": [],
"file_extension": ".sh"
}
}),
}
}
@@ -947,6 +962,7 @@ impl RuntimeFixture {
name: self.name,
distributions: self.distributions,
installation: self.installation,
execution_config: self.execution_config,
};
RuntimeRepository::create(pool, input).await

View File

@@ -555,7 +555,6 @@ async fn test_enum_types_exist() {
"notification_status_enum",
"owner_type_enum",
"policy_method_enum",
"runtime_type_enum",
"worker_status_enum",
"worker_type_enum",
];

View File

@@ -72,6 +72,13 @@ impl RuntimeFixture {
"method": "pip",
"packages": ["requests", "pyyaml"]
})),
execution_config: json!({
"interpreter": {
"binary": "python3",
"args": ["-u"],
"file_extension": ".py"
}
}),
}
}
@@ -88,6 +95,13 @@ impl RuntimeFixture {
name,
distributions: json!({}),
installation: None,
execution_config: json!({
"interpreter": {
"binary": "/bin/bash",
"args": [],
"file_extension": ".sh"
}
}),
}
}
}
@@ -245,6 +259,7 @@ async fn test_update_runtime() {
installation: Some(json!({
"method": "npm"
})),
execution_config: None,
};
let updated = RuntimeRepository::update(&pool, created.id, update_input.clone())
@@ -274,6 +289,7 @@ async fn test_update_runtime_partial() {
name: None,
distributions: None,
installation: None,
execution_config: None,
};
let updated = RuntimeRepository::update(&pool, created.id, update_input.clone())
@@ -428,16 +444,6 @@ async fn test_find_by_pack_empty() {
assert_eq!(runtimes.len(), 0);
}
// ============================================================================
// Enum Tests
// ============================================================================
// Test removed - runtime_type field no longer exists
// #[tokio::test]
// async fn test_runtime_type_enum() {
// // runtime_type field removed from Runtime model
// }
#[tokio::test]
async fn test_runtime_created_successfully() {
let pool = setup_db().await;
@@ -515,13 +521,13 @@ async fn test_list_ordering() {
let fixture = RuntimeFixture::new("list_ordering");
let mut input1 = fixture.create_input("z_last");
input1.r#ref = format!("{}.action.zzz", fixture.test_id);
input1.r#ref = format!("{}.zzz", fixture.test_id);
let mut input2 = fixture.create_input("a_first");
input2.r#ref = format!("{}.sensor.aaa", fixture.test_id);
input2.r#ref = format!("{}.aaa", fixture.test_id);
let mut input3 = fixture.create_input("m_middle");
input3.r#ref = format!("{}.action.mmm", fixture.test_id);
input3.r#ref = format!("{}.mmm", fixture.test_id);
RuntimeRepository::create(&pool, input1)
.await

View File

@@ -550,13 +550,20 @@ async fn test_worker_with_runtime() {
// Create a runtime first
let runtime_input = CreateRuntimeInput {
r#ref: format!("{}.action.test_runtime", fixture.test_id),
r#ref: format!("{}.test_runtime", fixture.test_id),
pack: None,
pack_ref: None,
description: Some("Test runtime".to_string()),
name: "test_runtime".to_string(),
distributions: json!({}),
installation: None,
execution_config: json!({
"interpreter": {
"binary": "/bin/bash",
"args": [],
"file_extension": ".sh"
}
}),
};
let runtime = RuntimeRepository::create(&pool, runtime_input)

View File

@@ -66,9 +66,9 @@ async fn test_create_rule() {
assert_eq!(rule.pack_ref, pack.r#ref);
assert_eq!(rule.label, "Test Rule");
assert_eq!(rule.description, "A test rule");
assert_eq!(rule.action, action.id);
assert_eq!(rule.action, Some(action.id));
assert_eq!(rule.action_ref, action.r#ref);
assert_eq!(rule.trigger, trigger.id);
assert_eq!(rule.trigger, Some(trigger.id));
assert_eq!(rule.trigger_ref, trigger.r#ref);
assert_eq!(
rule.conditions,
@@ -1091,14 +1091,14 @@ async fn test_find_rules_by_action() {
.unwrap();
assert_eq!(action1_rules.len(), 2);
assert!(action1_rules.iter().all(|r| r.action == action1.id));
assert!(action1_rules.iter().all(|r| r.action == Some(action1.id)));
let action2_rules = RuleRepository::find_by_action(&pool, action2.id)
.await
.unwrap();
assert_eq!(action2_rules.len(), 1);
assert_eq!(action2_rules[0].action, action2.id);
assert_eq!(action2_rules[0].action, Some(action2.id));
}
#[tokio::test]
@@ -1172,14 +1172,14 @@ async fn test_find_rules_by_trigger() {
.unwrap();
assert_eq!(trigger1_rules.len(), 2);
assert!(trigger1_rules.iter().all(|r| r.trigger == trigger1.id));
assert!(trigger1_rules.iter().all(|r| r.trigger == Some(trigger1.id)));
let trigger2_rules = RuleRepository::find_by_trigger(&pool, trigger2.id)
.await
.unwrap();
assert_eq!(trigger2_rules.len(), 1);
assert_eq!(trigger2_rules[0].trigger, trigger2.id);
assert_eq!(trigger2_rules[0].trigger, Some(trigger2.id));
}
#[tokio::test]

View File

@@ -9,7 +9,7 @@
//! - Creating execution records
//! - Publishing ExecutionRequested messages
use anyhow::Result;
use anyhow::{bail, Result};
use attune_common::{
models::{Enforcement, Event, Rule},
mq::{
@@ -166,6 +166,24 @@ impl EnforcementProcessor {
return Ok(false);
}
// Check if the rule's action still exists (may have been deleted with its pack)
if rule.action.is_none() {
warn!(
"Rule {} references a deleted action (action_ref: {}), skipping execution",
rule.id, rule.action_ref
);
return Ok(false);
}
// Check if the rule's trigger still exists
if rule.trigger.is_none() {
warn!(
"Rule {} references a deleted trigger (trigger_ref: {}), skipping execution",
rule.id, rule.trigger_ref
);
return Ok(false);
}
// TODO: Evaluate rule conditions against event payload
// For now, we'll create executions for all valid enforcements
@@ -186,13 +204,27 @@ impl EnforcementProcessor {
enforcement: &Enforcement,
rule: &Rule,
) -> Result<()> {
// Extract action ID — should_create_execution already verified it's Some,
// but guard defensively here as well.
let action_id = match rule.action {
Some(id) => id,
None => {
error!(
"Rule {} has no action ID (deleted?), cannot create execution for enforcement {}",
rule.id, enforcement.id
);
bail!(
"Rule {} references a deleted action (action_ref: {})",
rule.id, rule.action_ref
);
}
};
info!(
"Creating execution for enforcement: {}, rule: {}, action: {}",
enforcement.id, rule.id, rule.action
enforcement.id, rule.id, action_id
);
// Get action and pack IDs from rule
let action_id = rule.action;
let pack_id = rule.pack;
let action_ref = &rule.action_ref;
@@ -305,9 +337,9 @@ mod tests {
label: "Test Rule".to_string(),
description: "Test rule description".to_string(),
trigger_ref: "test.trigger".to_string(),
trigger: 1,
trigger: Some(1),
action_ref: "test.action".to_string(),
action: 1,
action: Some(1),
enabled: false, // Disabled
conditions: json!({}),
action_params: json!({}),

View File

@@ -345,22 +345,7 @@ impl RetryManager {
/// Calculate exponential backoff with jitter
fn calculate_backoff(&self, retry_count: i32) -> Duration {
let base_secs = self.config.base_backoff_secs as f64;
let multiplier = self.config.backoff_multiplier;
let max_secs = self.config.max_backoff_secs as f64;
let jitter_factor = self.config.jitter_factor;
// Calculate exponential backoff: base * multiplier^retry_count
let backoff_secs = base_secs * multiplier.powi(retry_count);
// Cap at max
let backoff_secs = backoff_secs.min(max_secs);
// Add jitter: random value between (1 - jitter) and (1 + jitter)
let jitter = 1.0 + (rand::random::<f64>() * 2.0 - 1.0) * jitter_factor;
let backoff_with_jitter = backoff_secs * jitter;
Duration::from_secs(backoff_with_jitter.max(0.0) as u64)
calculate_backoff_duration(&self.config, retry_count)
}
/// Update execution with retry metadata
@@ -408,6 +393,28 @@ impl RetryManager {
}
}
/// Calculate exponential backoff with jitter from a retry config.
///
/// Extracted as a free function so it can be tested without a database pool.
fn calculate_backoff_duration(config: &RetryConfig, retry_count: i32) -> Duration {
let base_secs = config.base_backoff_secs as f64;
let multiplier = config.backoff_multiplier;
let max_secs = config.max_backoff_secs as f64;
let jitter_factor = config.jitter_factor;
// Calculate exponential backoff: base * multiplier^retry_count
let backoff_secs = base_secs * multiplier.powi(retry_count);
// Cap at max
let backoff_secs = backoff_secs.min(max_secs);
// Add jitter: random value between (1 - jitter) and (1 + jitter)
let jitter = 1.0 + (rand::random::<f64>() * 2.0 - 1.0) * jitter_factor;
let backoff_with_jitter = backoff_secs * jitter;
Duration::from_secs(backoff_with_jitter.max(0.0) as u64)
}
/// Check if an error message indicates a retriable failure
#[allow(dead_code)]
pub fn is_error_retriable(error_msg: &str) -> bool {
@@ -466,17 +473,14 @@ mod tests {
#[test]
fn test_backoff_calculation() {
let manager = RetryManager::with_defaults(
// Mock pool - won't be used in this test
unsafe { std::mem::zeroed() },
);
let config = RetryConfig::default();
let backoff0 = manager.calculate_backoff(0);
let backoff1 = manager.calculate_backoff(1);
let backoff2 = manager.calculate_backoff(2);
let backoff0 = calculate_backoff_duration(&config, 0);
let backoff1 = calculate_backoff_duration(&config, 1);
let backoff2 = calculate_backoff_duration(&config, 2);
// First attempt: ~1s
assert!(backoff0.as_secs() >= 0 && backoff0.as_secs() <= 2);
// First attempt: ~1s (with jitter 0..2s)
assert!(backoff0.as_secs() <= 2);
// Second attempt: ~2s
assert!(backoff1.as_secs() >= 1 && backoff1.as_secs() <= 3);
// Third attempt: ~4s

View File

@@ -237,9 +237,7 @@ impl ExecutionTimeoutMonitor {
#[cfg(test)]
mod tests {
use super::*;
use attune_common::mq::MessageQueue;
use chrono::Duration as ChronoDuration;
use sqlx::PgPool;
fn create_test_config() -> TimeoutMonitorConfig {
TimeoutMonitorConfig {
@@ -259,46 +257,39 @@ mod tests {
#[test]
fn test_cutoff_calculation() {
let config = create_test_config();
let pool = PgPool::connect("postgresql://localhost/test")
.await
.expect("DB connection");
let mq = MessageQueue::connect("amqp://localhost")
.await
.expect("MQ connection");
// Test that cutoff is calculated as now - scheduled_timeout
let config = create_test_config(); // scheduled_timeout = 60s
let monitor = ExecutionTimeoutMonitor::new(pool, Arc::new(mq.publisher), config);
let before = Utc::now() - ChronoDuration::seconds(60);
let cutoff = monitor.calculate_cutoff_time();
let now = Utc::now();
let expected_cutoff = now - ChronoDuration::seconds(60);
// calculate_cutoff uses Utc::now() internally, so we compute expected bounds
let timeout_duration =
chrono::Duration::from_std(config.scheduled_timeout).expect("Invalid timeout duration");
let cutoff = Utc::now() - timeout_duration;
// Allow 1 second tolerance
let diff = (cutoff - expected_cutoff).num_seconds().abs();
assert!(diff <= 1, "Cutoff time calculation incorrect");
let after = Utc::now() - ChronoDuration::seconds(60);
// cutoff should be between before and after (both ~60s ago)
let diff_before = (cutoff - before).num_seconds().abs();
let diff_after = (cutoff - after).num_seconds().abs();
assert!(
diff_before <= 1,
"Cutoff time should be ~60s ago (before check)"
);
assert!(
diff_after <= 1,
"Cutoff time should be ~60s ago (after check)"
);
}
#[test]
fn test_disabled_monitor() {
fn test_disabled_config() {
let mut config = create_test_config();
config.enabled = false;
let pool = PgPool::connect("postgresql://localhost/test")
.await
.expect("DB connection");
let mq = MessageQueue::connect("amqp://localhost")
.await
.expect("MQ connection");
let monitor = Arc::new(ExecutionTimeoutMonitor::new(
pool,
Arc::new(mq.publisher),
config,
));
// Should return immediately without error
let result = tokio::time::timeout(Duration::from_secs(1), monitor.start()).await;
assert!(result.is_ok(), "Disabled monitor should return immediately");
// Verify the config is properly set to disabled
assert!(!config.enabled);
assert_eq!(config.scheduled_timeout.as_secs(), 60);
assert_eq!(config.check_interval.as_secs(), 1);
}
}

View File

@@ -297,64 +297,73 @@ impl WorkerHealthProbe {
/// Extract health metrics from worker capabilities
fn extract_health_metrics(&self, worker: &Worker) -> HealthMetrics {
let mut metrics = HealthMetrics {
last_check: Utc::now(),
..Default::default()
extract_health_metrics(worker)
}
}
/// Extract health metrics from worker capabilities.
///
/// Extracted as a free function so it can be tested without a database pool.
fn extract_health_metrics(worker: &Worker) -> HealthMetrics {
let mut metrics = HealthMetrics {
last_check: Utc::now(),
..Default::default()
};
let Some(capabilities) = &worker.capabilities else {
return metrics;
};
let Some(health_obj) = capabilities.get("health") else {
return metrics;
};
// Extract metrics from health object
if let Some(status_str) = health_obj.get("status").and_then(|v| v.as_str()) {
metrics.status = match status_str {
"healthy" => HealthStatus::Healthy,
"degraded" => HealthStatus::Degraded,
"unhealthy" => HealthStatus::Unhealthy,
_ => HealthStatus::Healthy,
};
let Some(capabilities) = &worker.capabilities else {
return metrics;
};
let Some(health_obj) = capabilities.get("health") else {
return metrics;
};
// Extract metrics from health object
if let Some(status_str) = health_obj.get("status").and_then(|v| v.as_str()) {
metrics.status = match status_str {
"healthy" => HealthStatus::Healthy,
"degraded" => HealthStatus::Degraded,
"unhealthy" => HealthStatus::Unhealthy,
_ => HealthStatus::Healthy,
};
}
if let Some(last_check_str) = health_obj.get("last_check").and_then(|v| v.as_str()) {
if let Ok(last_check) = DateTime::parse_from_rfc3339(last_check_str) {
metrics.last_check = last_check.with_timezone(&Utc);
}
}
if let Some(failures) = health_obj
.get("consecutive_failures")
.and_then(|v| v.as_u64())
{
metrics.consecutive_failures = failures as u32;
}
if let Some(total) = health_obj.get("total_executions").and_then(|v| v.as_u64()) {
metrics.total_executions = total;
}
if let Some(failed) = health_obj.get("failed_executions").and_then(|v| v.as_u64()) {
metrics.failed_executions = failed;
}
if let Some(avg_time) = health_obj
.get("average_execution_time_ms")
.and_then(|v| v.as_u64())
{
metrics.average_execution_time_ms = avg_time;
}
if let Some(depth) = health_obj.get("queue_depth").and_then(|v| v.as_u64()) {
metrics.queue_depth = depth as u32;
}
metrics
}
if let Some(last_check_str) = health_obj.get("last_check").and_then(|v| v.as_str()) {
if let Ok(last_check) = DateTime::parse_from_rfc3339(last_check_str) {
metrics.last_check = last_check.with_timezone(&Utc);
}
}
if let Some(failures) = health_obj
.get("consecutive_failures")
.and_then(|v| v.as_u64())
{
metrics.consecutive_failures = failures as u32;
}
if let Some(total) = health_obj.get("total_executions").and_then(|v| v.as_u64()) {
metrics.total_executions = total;
}
if let Some(failed) = health_obj.get("failed_executions").and_then(|v| v.as_u64()) {
metrics.failed_executions = failed;
}
if let Some(avg_time) = health_obj
.get("average_execution_time_ms")
.and_then(|v| v.as_u64())
{
metrics.average_execution_time_ms = avg_time;
}
if let Some(depth) = health_obj.get("queue_depth").and_then(|v| v.as_u64()) {
metrics.queue_depth = depth as u32;
}
metrics
}
impl WorkerHealthProbe {
/// Get recommended worker for execution based on health
#[allow(dead_code)]
pub async fn get_best_worker(&self, runtime_name: &str) -> Result<Option<Worker>> {
@@ -435,8 +444,6 @@ mod tests {
#[test]
fn test_extract_health_metrics() {
let probe = WorkerHealthProbe::with_defaults(Arc::new(unsafe { std::mem::zeroed() }));
let worker = Worker {
id: 1,
name: "test-worker".to_string(),
@@ -461,7 +468,7 @@ mod tests {
updated: Utc::now(),
};
let metrics = probe.extract_health_metrics(&worker);
let metrics = extract_health_metrics(&worker);
assert_eq!(metrics.status, HealthStatus::Degraded);
assert_eq!(metrics.consecutive_failures, 5);
assert_eq!(metrics.queue_depth, 25);

View File

@@ -74,6 +74,13 @@ async fn _create_test_runtime(pool: &PgPool, suffix: &str) -> i64 {
name: format!("Python {}", suffix),
distributions: json!({"ubuntu": "python3"}),
installation: Some(json!({"method": "apt"})),
execution_config: json!({
"interpreter": {
"binary": "python3",
"args": ["-u"],
"file_extension": ".py"
}
}),
};
RuntimeRepository::create(pool, runtime_input)

View File

@@ -69,6 +69,13 @@ async fn create_test_runtime(pool: &PgPool, suffix: &str) -> i64 {
name: format!("Python {}", suffix),
distributions: json!({"ubuntu": "python3"}),
installation: Some(json!({"method": "apt"})),
execution_config: json!({
"interpreter": {
"binary": "python3",
"args": ["-u"],
"file_extension": ".py"
}
}),
};
let runtime = RuntimeRepository::create(pool, runtime_input)

View File

@@ -0,0 +1,497 @@
//! Proactive Runtime Environment Setup
//!
//! This module provides functions for setting up runtime environments (Python
//! virtualenvs, Node.js node_modules, etc.) proactively — either at worker
//! startup (scanning all registered packs) or in response to a `pack.registered`
//! MQ event.
//!
//! The goal is to ensure environments are ready *before* the first execution,
//! eliminating the first-run penalty and potential permission errors that occur
//! when setup is deferred to execution time.
use std::collections::{HashMap, HashSet};
use std::path::Path;
use sqlx::PgPool;
use tracing::{debug, error, info, warn};
use attune_common::mq::PackRegisteredPayload;
use attune_common::repositories::action::ActionRepository;
use attune_common::repositories::pack::PackRepository;
use attune_common::repositories::runtime::RuntimeRepository;
use attune_common::repositories::{FindById, List};
// Re-export the utility that the API also uses so callers can reach it from
// either crate without adding a direct common dependency for this one function.
pub use attune_common::pack_environment::collect_runtime_names_for_pack;
use crate::runtime::process::ProcessRuntime;
/// Result of setting up environments for a single pack.
#[derive(Debug)]
pub struct PackEnvSetupResult {
pub pack_ref: String,
pub environments_created: Vec<String>,
pub environments_skipped: Vec<String>,
pub errors: Vec<String>,
}
/// Result of the full startup scan across all packs.
#[derive(Debug)]
pub struct StartupScanResult {
pub packs_scanned: usize,
pub environments_created: usize,
pub environments_skipped: usize,
pub errors: Vec<String>,
}
/// Scan all registered packs and create missing runtime environments.
///
/// This is called at worker startup, before the worker begins consuming
/// execution messages. It ensures that environments for all known packs
/// are ready to go.
///
/// # Arguments
/// * `db_pool` - Database connection pool
/// * `runtime_filter` - Optional list of runtime names this worker supports
/// (from `ATTUNE_WORKER_RUNTIMES`). If `None`, all runtimes are considered.
/// * `packs_base_dir` - Base directory where pack files are stored
/// * `runtime_envs_dir` - Base directory for isolated runtime environments
pub async fn scan_and_setup_all_environments(
db_pool: &PgPool,
runtime_filter: Option<&[String]>,
packs_base_dir: &Path,
runtime_envs_dir: &Path,
) -> StartupScanResult {
info!("Starting runtime environment scan for all registered packs");
let mut result = StartupScanResult {
packs_scanned: 0,
environments_created: 0,
environments_skipped: 0,
errors: Vec::new(),
};
// Load all runtimes from DB, indexed by ID for quick lookup
let runtimes = match RuntimeRepository::list(db_pool).await {
Ok(rts) => rts,
Err(e) => {
let msg = format!("Failed to load runtimes from database: {}", e);
error!("{}", msg);
result.errors.push(msg);
return result;
}
};
let runtime_map: HashMap<i64, _> = runtimes.into_iter().map(|r| (r.id, r)).collect();
// Load all packs
let packs = match PackRepository::list(db_pool).await {
Ok(p) => p,
Err(e) => {
let msg = format!("Failed to load packs from database: {}", e);
error!("{}", msg);
result.errors.push(msg);
return result;
}
};
info!("Found {} registered pack(s) to scan", packs.len());
for pack in &packs {
result.packs_scanned += 1;
let pack_result = setup_environments_for_pack(
db_pool,
&pack.r#ref,
pack.id,
runtime_filter,
packs_base_dir,
runtime_envs_dir,
&runtime_map,
)
.await;
result.environments_created += pack_result.environments_created.len();
result.environments_skipped += pack_result.environments_skipped.len();
result.errors.extend(pack_result.errors);
}
info!(
"Environment scan complete: {} pack(s) scanned, {} environment(s) created, \
{} skipped, {} error(s)",
result.packs_scanned,
result.environments_created,
result.environments_skipped,
result.errors.len(),
);
result
}
/// Set up environments for a single pack, triggered by a `pack.registered` MQ event.
///
/// This is called when the worker receives a `PackRegistered` message. It only
/// sets up environments for the runtimes listed in the event payload (intersection
/// with this worker's supported runtimes).
pub async fn setup_environments_for_registered_pack(
db_pool: &PgPool,
event: &PackRegisteredPayload,
runtime_filter: Option<&[String]>,
packs_base_dir: &Path,
runtime_envs_dir: &Path,
) -> PackEnvSetupResult {
info!(
"Setting up environments for newly registered pack '{}' (version {})",
event.pack_ref, event.version
);
let mut pack_result = PackEnvSetupResult {
pack_ref: event.pack_ref.clone(),
environments_created: Vec::new(),
environments_skipped: Vec::new(),
errors: Vec::new(),
};
let pack_dir = packs_base_dir.join(&event.pack_ref);
if !pack_dir.exists() {
let msg = format!(
"Pack directory does not exist: {}. Skipping environment setup.",
pack_dir.display()
);
warn!("{}", msg);
pack_result.errors.push(msg);
return pack_result;
}
// Filter to runtimes this worker supports
let target_runtimes: Vec<&String> = event
.runtime_names
.iter()
.filter(|name| {
if let Some(filter) = runtime_filter {
filter.contains(name)
} else {
true
}
})
.collect();
if target_runtimes.is_empty() {
debug!(
"No matching runtimes for pack '{}' on this worker (event runtimes: {:?}, worker filter: {:?})",
event.pack_ref, event.runtime_names, runtime_filter,
);
return pack_result;
}
// Load runtime configs from DB by name
let all_runtimes = match RuntimeRepository::list(db_pool).await {
Ok(rts) => rts,
Err(e) => {
let msg = format!("Failed to load runtimes from database: {}", e);
error!("{}", msg);
pack_result.errors.push(msg);
return pack_result;
}
};
for rt_name in target_runtimes {
// Find the runtime in DB (match by lowercase name)
let rt = match all_runtimes
.iter()
.find(|r| r.name.to_lowercase() == *rt_name)
{
Some(r) => r,
None => {
debug!("Runtime '{}' not found in database, skipping", rt_name);
continue;
}
};
let exec_config = rt.parsed_execution_config();
if exec_config.environment.is_none() && !exec_config.has_dependencies(&pack_dir) {
debug!(
"Runtime '{}' has no environment config, skipping for pack '{}'",
rt_name, event.pack_ref,
);
pack_result.environments_skipped.push(rt_name.clone());
continue;
}
let env_dir = runtime_envs_dir.join(&event.pack_ref).join(rt_name);
let process_runtime = ProcessRuntime::new(
rt_name.clone(),
exec_config,
packs_base_dir.to_path_buf(),
runtime_envs_dir.to_path_buf(),
);
match process_runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
{
Ok(()) => {
info!(
"Environment for runtime '{}' ready for pack '{}'",
rt_name, event.pack_ref,
);
pack_result.environments_created.push(rt_name.clone());
}
Err(e) => {
let msg = format!(
"Failed to set up '{}' environment for pack '{}': {}",
rt_name, event.pack_ref, e,
);
warn!("{}", msg);
pack_result.errors.push(msg);
}
}
}
pack_result
}
/// Internal helper: set up environments for a single pack during the startup scan.
///
/// Discovers which runtimes the pack's actions use, filters by this worker's
/// capabilities, and creates any missing environments.
#[allow(clippy::too_many_arguments)]
async fn setup_environments_for_pack(
db_pool: &PgPool,
pack_ref: &str,
pack_id: i64,
runtime_filter: Option<&[String]>,
packs_base_dir: &Path,
runtime_envs_dir: &Path,
runtime_map: &HashMap<i64, attune_common::models::Runtime>,
) -> PackEnvSetupResult {
let mut pack_result = PackEnvSetupResult {
pack_ref: pack_ref.to_string(),
environments_created: Vec::new(),
environments_skipped: Vec::new(),
errors: Vec::new(),
};
let pack_dir = packs_base_dir.join(pack_ref);
if !pack_dir.exists() {
debug!(
"Pack directory '{}' does not exist on disk, skipping",
pack_dir.display()
);
return pack_result;
}
// Get all actions for this pack
let actions = match ActionRepository::find_by_pack(db_pool, pack_id).await {
Ok(a) => a,
Err(e) => {
let msg = format!("Failed to load actions for pack '{}': {}", pack_ref, e);
warn!("{}", msg);
pack_result.errors.push(msg);
return pack_result;
}
};
// Collect unique runtime IDs referenced by actions in this pack
let mut seen_runtime_ids = HashSet::new();
for action in &actions {
if let Some(runtime_id) = action.runtime {
seen_runtime_ids.insert(runtime_id);
}
}
if seen_runtime_ids.is_empty() {
debug!("Pack '{}' has no actions with runtimes, skipping", pack_ref);
return pack_result;
}
for runtime_id in seen_runtime_ids {
let rt = match runtime_map.get(&runtime_id) {
Some(r) => r,
None => {
// Try fetching from DB directly (might be a newly added runtime)
match RuntimeRepository::find_by_id(db_pool, runtime_id).await {
Ok(Some(r)) => {
// Can't insert into the borrowed map, so just use it inline
let rt_name = r.name.to_lowercase();
process_runtime_for_pack(
&r,
&rt_name,
pack_ref,
runtime_filter,
&pack_dir,
packs_base_dir,
runtime_envs_dir,
&mut pack_result,
)
.await;
continue;
}
Ok(None) => {
debug!("Runtime ID {} not found in database, skipping", runtime_id);
continue;
}
Err(e) => {
warn!("Failed to load runtime {}: {}", runtime_id, e);
continue;
}
}
}
};
let rt_name = rt.name.to_lowercase();
process_runtime_for_pack(
rt,
&rt_name,
pack_ref,
runtime_filter,
&pack_dir,
packs_base_dir,
runtime_envs_dir,
&mut pack_result,
)
.await;
}
if !pack_result.environments_created.is_empty() {
info!(
"Pack '{}': created environments for {:?}",
pack_ref, pack_result.environments_created,
);
}
pack_result
}
/// Process a single runtime for a pack: check filters, check if env exists, create if needed.
#[allow(clippy::too_many_arguments)]
async fn process_runtime_for_pack(
rt: &attune_common::models::Runtime,
rt_name: &str,
pack_ref: &str,
runtime_filter: Option<&[String]>,
pack_dir: &Path,
packs_base_dir: &Path,
runtime_envs_dir: &Path,
pack_result: &mut PackEnvSetupResult,
) {
// Apply worker runtime filter
if let Some(filter) = runtime_filter {
if !filter.iter().any(|f| f == rt_name) {
debug!(
"Runtime '{}' not in worker filter, skipping for pack '{}'",
rt_name, pack_ref,
);
return;
}
}
let exec_config = rt.parsed_execution_config();
// Check if this runtime actually needs an environment
if exec_config.environment.is_none() && !exec_config.has_dependencies(pack_dir) {
debug!(
"Runtime '{}' has no environment config, skipping for pack '{}'",
rt_name, pack_ref,
);
pack_result.environments_skipped.push(rt_name.to_string());
return;
}
let env_dir = runtime_envs_dir.join(pack_ref).join(rt_name);
// Create a temporary ProcessRuntime to perform the setup
let process_runtime = ProcessRuntime::new(
rt_name.to_string(),
exec_config,
packs_base_dir.to_path_buf(),
runtime_envs_dir.to_path_buf(),
);
match process_runtime
.setup_pack_environment(pack_dir, &env_dir)
.await
{
Ok(()) => {
// setup_pack_environment is idempotent — it logs whether it created
// the env or found it already existing.
pack_result.environments_created.push(rt_name.to_string());
}
Err(e) => {
let msg = format!(
"Failed to set up '{}' environment for pack '{}': {}",
rt_name, pack_ref, e,
);
warn!("{}", msg);
pack_result.errors.push(msg);
}
}
}
/// Determine the runtime filter from the `ATTUNE_WORKER_RUNTIMES` environment variable.
///
/// Returns `None` if the variable is not set (meaning all runtimes are accepted).
pub fn runtime_filter_from_env() -> Option<Vec<String>> {
std::env::var("ATTUNE_WORKER_RUNTIMES").ok().map(|val| {
val.split(',')
.map(|s| s.trim().to_lowercase())
.filter(|s| !s.is_empty())
.collect()
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_runtime_filter_from_env_not_set() {
// When ATTUNE_WORKER_RUNTIMES is not set, filter should be None
std::env::remove_var("ATTUNE_WORKER_RUNTIMES");
assert!(runtime_filter_from_env().is_none());
}
#[test]
fn test_runtime_filter_from_env_set() {
std::env::set_var("ATTUNE_WORKER_RUNTIMES", "shell,Python, Node");
let filter = runtime_filter_from_env().unwrap();
assert_eq!(filter, vec!["shell", "python", "node"]);
std::env::remove_var("ATTUNE_WORKER_RUNTIMES");
}
#[test]
fn test_runtime_filter_from_env_empty() {
std::env::set_var("ATTUNE_WORKER_RUNTIMES", "");
let filter = runtime_filter_from_env().unwrap();
assert!(filter.is_empty());
std::env::remove_var("ATTUNE_WORKER_RUNTIMES");
}
#[test]
fn test_pack_env_setup_result_defaults() {
let result = PackEnvSetupResult {
pack_ref: "test".to_string(),
environments_created: vec![],
environments_skipped: vec![],
errors: vec![],
};
assert_eq!(result.pack_ref, "test");
assert!(result.environments_created.is_empty());
assert!(result.errors.is_empty());
}
#[test]
fn test_startup_scan_result_defaults() {
let result = StartupScanResult {
packs_scanned: 0,
environments_created: 0,
environments_skipped: 0,
errors: vec![],
};
assert_eq!(result.packs_scanned, 0);
assert_eq!(result.environments_created, 0);
assert!(result.errors.is_empty());
}
}

View File

@@ -7,6 +7,7 @@ use attune_common::error::{Error, Result};
use attune_common::models::{runtime::Runtime as RuntimeModel, Action, Execution, ExecutionStatus};
use attune_common::repositories::execution::{ExecutionRepository, UpdateExecutionInput};
use attune_common::repositories::{FindById, Update};
use std::path::PathBuf as StdPathBuf;
use serde_json::Value as JsonValue;
use sqlx::PgPool;
@@ -78,7 +79,12 @@ impl ActionExecutor {
Ok(ctx) => ctx,
Err(e) => {
error!("Failed to prepare execution context: {}", e);
self.handle_execution_failure(execution_id, None).await?;
self.handle_execution_failure(
execution_id,
None,
Some(&format!("Failed to prepare execution context: {}", e)),
)
.await?;
return Err(e);
}
};
@@ -91,7 +97,12 @@ impl ActionExecutor {
Err(e) => {
error!("Action execution failed catastrophically: {}", e);
// This should only happen for unrecoverable errors like runtime not found
self.handle_execution_failure(execution_id, None).await?;
self.handle_execution_failure(
execution_id,
None,
Some(&format!("Action execution failed: {}", e)),
)
.await?;
return Err(e);
}
};
@@ -112,7 +123,7 @@ impl ActionExecutor {
if is_success {
self.handle_execution_success(execution_id, &result).await?;
} else {
self.handle_execution_failure(execution_id, Some(&result))
self.handle_execution_failure(execution_id, Some(&result), None)
.await?;
}
@@ -306,18 +317,23 @@ impl ActionExecutor {
let timeout = Some(300_u64);
// Load runtime information if specified
let runtime_name = if let Some(runtime_id) = action.runtime {
match sqlx::query_as::<_, RuntimeModel>("SELECT * FROM runtime WHERE id = $1")
.bind(runtime_id)
.fetch_optional(&self.pool)
.await
let runtime_record = if let Some(runtime_id) = action.runtime {
match sqlx::query_as::<_, RuntimeModel>(
r#"SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, execution_config,
created, updated
FROM runtime WHERE id = $1"#,
)
.bind(runtime_id)
.fetch_optional(&self.pool)
.await
{
Ok(Some(runtime)) => {
debug!(
"Loaded runtime '{}' for action '{}'",
runtime.name, action.r#ref
"Loaded runtime '{}' (ref: {}) for action '{}'",
runtime.name, runtime.r#ref, action.r#ref
);
Some(runtime.name.to_lowercase())
Some(runtime)
}
Ok(None) => {
warn!(
@@ -338,15 +354,16 @@ impl ActionExecutor {
None
};
let runtime_name = runtime_record.as_ref().map(|r| r.name.to_lowercase());
// Determine the pack directory for this action
let pack_dir = self.packs_base_dir.join(&action.pack_ref);
// Construct code_path for pack actions
// Pack actions have their script files in packs/{pack_ref}/actions/{entrypoint}
let code_path = if action.pack_ref.starts_with("core") || !action.is_adhoc {
// This is a pack action, construct the file path
let action_file_path = self
.packs_base_dir
.join(&action.pack_ref)
.join("actions")
.join(&entry_point);
let action_file_path = pack_dir.join("actions").join(&entry_point);
if action_file_path.exists() {
Some(action_file_path)
@@ -368,6 +385,15 @@ impl ActionExecutor {
None
};
// Resolve the working directory from the runtime's execution_config.
// The ProcessRuntime also does this internally, but setting it in the
// context allows the executor to override if needed.
let working_dir: Option<StdPathBuf> = if pack_dir.exists() {
Some(pack_dir)
} else {
None
};
let context = ExecutionContext {
execution_id: execution.id,
action_ref: execution.action_ref.clone(),
@@ -375,7 +401,7 @@ impl ActionExecutor {
env,
secrets, // Passed securely via stdin
timeout,
working_dir: None, // Could be configured per action
working_dir,
entry_point,
code,
code_path,
@@ -482,6 +508,7 @@ impl ActionExecutor {
&self,
execution_id: i64,
result: Option<&ExecutionResult>,
error_message: Option<&str>,
) -> Result<()> {
if let Some(r) = result {
error!(
@@ -489,7 +516,11 @@ impl ActionExecutor {
execution_id, r.exit_code, r.error, r.duration_ms
);
} else {
error!("Execution {} failed during preparation", execution_id);
error!(
"Execution {} failed during preparation: {}",
execution_id,
error_message.unwrap_or("unknown error")
);
}
let exec_dir = self.artifact_manager.get_execution_dir(execution_id);
@@ -531,9 +562,15 @@ impl ActionExecutor {
} else {
// No execution result available (early failure during setup/preparation)
// This should be rare - most errors should be captured in ExecutionResult
result_data["error"] = serde_json::json!("Execution failed during preparation");
let err_msg = error_message.unwrap_or("Execution failed during preparation");
result_data["error"] = serde_json::json!(err_msg);
warn!("Execution {} failed without ExecutionResult - this indicates an early/catastrophic failure", execution_id);
warn!(
"Execution {} failed without ExecutionResult - {}: {}",
execution_id,
"early/catastrophic failure",
err_msg
);
// Check if stderr log exists and is non-empty from artifact storage
let stderr_path = exec_dir.join("stderr.log");

View File

@@ -4,6 +4,7 @@
//! which executes actions in various runtime environments.
pub mod artifacts;
pub mod env_setup;
pub mod executor;
pub mod heartbeat;
pub mod registration;
@@ -16,7 +17,7 @@ pub use executor::ActionExecutor;
pub use heartbeat::HeartbeatManager;
pub use registration::WorkerRegistration;
pub use runtime::{
ExecutionContext, ExecutionResult, LocalRuntime, NativeRuntime, PythonRuntime, Runtime,
ExecutionContext, ExecutionResult, LocalRuntime, NativeRuntime, ProcessRuntime, Runtime,
RuntimeError, RuntimeResult, ShellRuntime,
};
pub use secrets::SecretManager;

View File

@@ -1,28 +1,51 @@
//! Local Runtime Module
//!
//! Provides local execution capabilities by combining Python and Shell runtimes.
//! Provides local execution capabilities by combining Process and Shell runtimes.
//! This module serves as a facade for all local process-based execution.
//!
//! The `ProcessRuntime` is used for Python (and other interpreted languages),
//! driven by `RuntimeExecutionConfig` rather than language-specific Rust code.
use super::native::NativeRuntime;
use super::python::PythonRuntime;
use super::process::ProcessRuntime;
use super::shell::ShellRuntime;
use super::{ExecutionContext, ExecutionResult, Runtime, RuntimeError, RuntimeResult};
use async_trait::async_trait;
use attune_common::models::runtime::{InterpreterConfig, RuntimeExecutionConfig};
use std::path::PathBuf;
use tracing::{debug, info};
/// Local runtime that delegates to Python, Shell, or Native based on action type
/// Local runtime that delegates to Process, Shell, or Native based on action type
pub struct LocalRuntime {
native: NativeRuntime,
python: PythonRuntime,
python: ProcessRuntime,
shell: ShellRuntime,
}
impl LocalRuntime {
/// Create a new local runtime with default settings
/// Create a new local runtime with default settings.
///
/// Uses a default Python `RuntimeExecutionConfig` for the process runtime,
/// since this is a fallback when runtimes haven't been loaded from the database.
pub fn new() -> Self {
let python_config = RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "python3".to_string(),
args: vec![],
file_extension: Some(".py".to_string()),
},
environment: None,
dependencies: None,
};
Self {
native: NativeRuntime::new(),
python: PythonRuntime::new(),
python: ProcessRuntime::new(
"python".to_string(),
python_config,
PathBuf::from("/opt/attune/packs"),
PathBuf::from("/opt/attune/runtime_envs"),
),
shell: ShellRuntime::new(),
}
}
@@ -30,7 +53,7 @@ impl LocalRuntime {
/// Create a local runtime with custom runtimes
pub fn with_runtimes(
native: NativeRuntime,
python: PythonRuntime,
python: ProcessRuntime,
shell: ShellRuntime,
) -> Self {
Self {
@@ -46,7 +69,10 @@ impl LocalRuntime {
debug!("Selected Native runtime for action: {}", context.action_ref);
Ok(&self.native)
} else if self.python.can_execute(context) {
debug!("Selected Python runtime for action: {}", context.action_ref);
debug!(
"Selected Python (ProcessRuntime) for action: {}",
context.action_ref
);
Ok(&self.python)
} else if self.shell.can_execute(context) {
debug!("Selected Shell runtime for action: {}", context.action_ref);
@@ -126,40 +152,6 @@ mod tests {
use crate::runtime::{OutputFormat, ParameterDelivery, ParameterFormat};
use std::collections::HashMap;
#[tokio::test]
async fn test_local_runtime_python() {
let runtime = LocalRuntime::new();
let context = ExecutionContext {
execution_id: 1,
action_ref: "test.python_action".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "run".to_string(),
code: Some(
r#"
def run():
return "hello from python"
"#
.to_string(),
),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: ParameterDelivery::default(),
parameter_format: ParameterFormat::default(),
output_format: OutputFormat::default(),
};
assert!(runtime.can_execute(&context));
let result = runtime.execute(context).await.unwrap();
assert!(result.is_success());
}
#[tokio::test]
async fn test_local_runtime_shell() {
let runtime = LocalRuntime::new();

View File

@@ -1,21 +1,28 @@
//! Runtime Module
//!
//! Provides runtime abstraction and implementations for executing actions
//! in different environments (Python, Shell, Node.js, Containers).
//! in different environments. The primary runtime is `ProcessRuntime`, a
//! generic, configuration-driven runtime that reads its behavior from the
//! database `runtime.execution_config` JSONB column.
//!
//! Language-specific runtimes (Python, Node.js, etc.) are NOT implemented
//! as separate Rust types. Instead, the `ProcessRuntime` handles all
//! languages by using the interpreter, environment, and dependency
//! configuration stored in the database.
pub mod dependency;
pub mod local;
pub mod log_writer;
pub mod native;
pub mod parameter_passing;
pub mod python;
pub mod python_venv;
pub mod process;
pub mod process_executor;
pub mod shell;
// Re-export runtime implementations
pub use local::LocalRuntime;
pub use native::NativeRuntime;
pub use python::PythonRuntime;
pub use process::ProcessRuntime;
pub use shell::ShellRuntime;
use async_trait::async_trait;
@@ -31,7 +38,6 @@ pub use dependency::{
};
pub use log_writer::{BoundedLogResult, BoundedLogWriter};
pub use parameter_passing::{ParameterDeliveryConfig, PreparedParameters};
pub use python_venv::PythonVenvManager;
// Re-export parameter types from common
pub use attune_common::models::{OutputFormat, ParameterDelivery, ParameterFormat};

View File

@@ -92,9 +92,13 @@ fn format_dotenv(parameters: &HashMap<String, JsonValue>) -> Result<String, Runt
Ok(lines.join("\n"))
}
/// Format parameters as JSON
/// Format parameters as JSON (compact, single-line)
///
/// Uses compact format so that actions reading stdin line-by-line
/// (e.g., `json.loads(sys.stdin.readline())`) receive the entire
/// JSON object on a single line.
fn format_json(parameters: &HashMap<String, JsonValue>) -> Result<String, RuntimeError> {
serde_json::to_string_pretty(parameters).map_err(|e| {
serde_json::to_string(parameters).map_err(|e| {
RuntimeError::ExecutionFailed(format!("Failed to serialize parameters to JSON: {}", e))
})
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,495 @@
//! Shared Process Executor
//!
//! Provides common subprocess execution infrastructure used by all runtime
//! implementations. Handles streaming stdout/stderr capture, bounded log
//! collection, timeout management, stdin parameter/secret delivery, and
//! output format parsing.
use super::{BoundedLogWriter, ExecutionResult, OutputFormat, RuntimeResult};
use std::collections::HashMap;
use std::path::Path;
use std::time::Instant;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::process::Command;
use tokio::time::timeout;
use tracing::{debug, warn};
/// Execute a subprocess command with streaming output capture.
///
/// This is the core execution function used by all runtime implementations.
/// It handles:
/// - Spawning the process with piped I/O
/// - Writing parameters and secrets to stdin
/// - Streaming stdout/stderr with bounded log collection
/// - Timeout management
/// - Output format parsing (JSON, YAML, JSONL, text)
///
/// # Arguments
/// * `cmd` - Pre-configured `Command` (interpreter, args, env vars, working dir already set)
/// * `secrets` - Secrets to pass via stdin (as JSON)
/// * `parameters_stdin` - Optional parameter data to write to stdin before secrets
/// * `timeout_secs` - Optional execution timeout in seconds
/// * `max_stdout_bytes` - Maximum stdout size before truncation
/// * `max_stderr_bytes` - Maximum stderr size before truncation
/// * `output_format` - How to parse stdout (Text, Json, Yaml, Jsonl)
pub async fn execute_streaming(
mut cmd: Command,
secrets: &HashMap<String, String>,
parameters_stdin: Option<&str>,
timeout_secs: Option<u64>,
max_stdout_bytes: usize,
max_stderr_bytes: usize,
output_format: OutputFormat,
) -> RuntimeResult<ExecutionResult> {
let start = Instant::now();
// Spawn process with piped I/O
let mut child = cmd
.stdin(std::process::Stdio::piped())
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()?;
// Write to stdin - parameters (if using stdin delivery) and/or secrets.
// If this fails, the process has already started, so we continue and capture output.
let stdin_write_error = if let Some(mut stdin) = child.stdin.take() {
let mut error = None;
// Write parameters first if using stdin delivery
if let Some(params_data) = parameters_stdin {
if let Err(e) = stdin.write_all(params_data.as_bytes()).await {
error = Some(format!("Failed to write parameters to stdin: {}", e));
} else if let Err(e) = stdin.write_all(b"\n---ATTUNE_PARAMS_END---\n").await {
error = Some(format!("Failed to write parameter delimiter: {}", e));
}
}
// Write secrets as JSON (always, for backward compatibility)
if error.is_none() && !secrets.is_empty() {
match serde_json::to_string(secrets) {
Ok(secrets_json) => {
if let Err(e) = stdin.write_all(secrets_json.as_bytes()).await {
error = Some(format!("Failed to write secrets to stdin: {}", e));
} else if let Err(e) = stdin.write_all(b"\n").await {
error = Some(format!("Failed to write newline to stdin: {}", e));
}
}
Err(e) => error = Some(format!("Failed to serialize secrets: {}", e)),
}
}
drop(stdin);
error
} else {
None
};
// Create bounded writers
let mut stdout_writer = BoundedLogWriter::new_stdout(max_stdout_bytes);
let mut stderr_writer = BoundedLogWriter::new_stderr(max_stderr_bytes);
// Take stdout and stderr streams
let stdout = child.stdout.take().expect("stdout not captured");
let stderr = child.stderr.take().expect("stderr not captured");
// Create buffered readers
let mut stdout_reader = BufReader::new(stdout);
let mut stderr_reader = BufReader::new(stderr);
// Stream both outputs concurrently
let stdout_task = async {
let mut line = Vec::new();
loop {
line.clear();
match stdout_reader.read_until(b'\n', &mut line).await {
Ok(0) => break, // EOF
Ok(_) => {
if stdout_writer.write_all(&line).await.is_err() {
break;
}
}
Err(_) => break,
}
}
stdout_writer
};
let stderr_task = async {
let mut line = Vec::new();
loop {
line.clear();
match stderr_reader.read_until(b'\n', &mut line).await {
Ok(0) => break, // EOF
Ok(_) => {
if stderr_writer.write_all(&line).await.is_err() {
break;
}
}
Err(_) => break,
}
}
stderr_writer
};
// Wait for both streams and the process
let (stdout_writer, stderr_writer, wait_result) =
tokio::join!(stdout_task, stderr_task, async {
if let Some(timeout_secs) = timeout_secs {
timeout(std::time::Duration::from_secs(timeout_secs), child.wait()).await
} else {
Ok(child.wait().await)
}
});
let duration_ms = start.elapsed().as_millis() as u64;
// Get results from bounded writers
let stdout_result = stdout_writer.into_result();
let stderr_result = stderr_writer.into_result();
// Handle process wait result
let (exit_code, process_error) = match wait_result {
Ok(Ok(status)) => (status.code().unwrap_or(-1), None),
Ok(Err(e)) => {
warn!("Process wait failed but captured output: {}", e);
(-1, Some(format!("Process wait failed: {}", e)))
}
Err(_) => {
// Timeout occurred
return Ok(ExecutionResult {
exit_code: -1,
stdout: stdout_result.content.clone(),
stderr: stderr_result.content.clone(),
result: None,
duration_ms,
error: Some(format!(
"Execution timed out after {} seconds",
timeout_secs.unwrap()
)),
stdout_truncated: stdout_result.truncated,
stderr_truncated: stderr_result.truncated,
stdout_bytes_truncated: stdout_result.bytes_truncated,
stderr_bytes_truncated: stderr_result.bytes_truncated,
});
}
};
debug!(
"Process execution completed: exit_code={}, duration={}ms, stdout_truncated={}, stderr_truncated={}",
exit_code, duration_ms, stdout_result.truncated, stderr_result.truncated
);
// Parse result from stdout based on output_format
let result = if exit_code == 0 && !stdout_result.content.trim().is_empty() {
parse_output(&stdout_result.content, output_format)
} else {
None
};
// Determine error message
let error = if let Some(proc_err) = process_error {
Some(proc_err)
} else if let Some(stdin_err) = stdin_write_error {
// Ignore broken pipe errors for fast-exiting successful actions.
// These occur when the process exits before we finish writing secrets to stdin.
let is_broken_pipe = stdin_err.contains("Broken pipe") || stdin_err.contains("os error 32");
let is_fast_exit = duration_ms < 500;
let is_success = exit_code == 0;
if is_broken_pipe && is_fast_exit && is_success {
debug!(
"Ignoring broken pipe error for fast-exiting successful action ({}ms)",
duration_ms
);
None
} else {
Some(stdin_err)
}
} else if exit_code != 0 {
Some(if stderr_result.content.is_empty() {
format!("Command exited with code {}", exit_code)
} else {
// Use last line of stderr as error, or full stderr if short
if stderr_result.content.lines().count() > 5 {
stderr_result
.content
.lines()
.last()
.unwrap_or("")
.to_string()
} else {
stderr_result.content.clone()
}
})
} else {
None
};
Ok(ExecutionResult {
exit_code,
// Only populate stdout if result wasn't parsed (avoid duplication)
stdout: if result.is_some() {
String::new()
} else {
stdout_result.content.clone()
},
stderr: stderr_result.content.clone(),
result,
duration_ms,
error,
stdout_truncated: stdout_result.truncated,
stderr_truncated: stderr_result.truncated,
stdout_bytes_truncated: stdout_result.bytes_truncated,
stderr_bytes_truncated: stderr_result.bytes_truncated,
})
}
/// Parse stdout content according to the specified output format.
fn parse_output(stdout: &str, format: OutputFormat) -> Option<serde_json::Value> {
let trimmed = stdout.trim();
if trimmed.is_empty() {
return None;
}
match format {
OutputFormat::Text => {
// No parsing - text output is captured in stdout field
None
}
OutputFormat::Json => {
// Try to parse full stdout as JSON first (handles multi-line JSON),
// then fall back to last line only (for scripts that log before output)
serde_json::from_str(trimmed).ok().or_else(|| {
trimmed
.lines()
.last()
.and_then(|line| serde_json::from_str(line).ok())
})
}
OutputFormat::Yaml => {
// Try to parse stdout as YAML
serde_yaml_ng::from_str(trimmed).ok()
}
OutputFormat::Jsonl => {
// Parse each line as JSON and collect into array
let mut items = Vec::new();
for line in trimmed.lines() {
if let Ok(value) = serde_json::from_str::<serde_json::Value>(line) {
items.push(value);
}
}
if items.is_empty() {
None
} else {
Some(serde_json::Value::Array(items))
}
}
}
}
/// Build a `Command` for executing an action script with the given interpreter.
///
/// This configures the command with:
/// - The interpreter binary and any additional args
/// - The action file path as the final argument
/// - Environment variables from the execution context
/// - Working directory (pack directory)
///
/// # Arguments
/// * `interpreter` - Path to the interpreter binary
/// * `interpreter_args` - Additional args before the action file
/// * `action_file` - Path to the action script file
/// * `working_dir` - Working directory for the process (typically the pack dir)
/// * `env_vars` - Environment variables to set
pub fn build_action_command(
interpreter: &Path,
interpreter_args: &[String],
action_file: &Path,
working_dir: Option<&Path>,
env_vars: &HashMap<String, String>,
) -> Command {
let mut cmd = Command::new(interpreter);
// Add interpreter args (e.g., "-u" for unbuffered Python)
for arg in interpreter_args {
cmd.arg(arg);
}
// Add the action file as the last argument
cmd.arg(action_file);
// Set working directory
if let Some(dir) = working_dir {
if dir.exists() {
cmd.current_dir(dir);
}
}
// Set environment variables
for (key, value) in env_vars {
cmd.env(key, value);
}
cmd
}
/// Build a `Command` for executing inline code with the given interpreter.
///
/// This is used for ad-hoc/inline actions where code is passed as a string
/// rather than a file path.
///
/// # Arguments
/// * `interpreter` - Path to the interpreter binary
/// * `code` - The inline code to execute
/// * `env_vars` - Environment variables to set
pub fn build_inline_command(
interpreter: &Path,
code: &str,
env_vars: &HashMap<String, String>,
) -> Command {
let mut cmd = Command::new(interpreter);
// Pass code via -c flag (works for bash, python, etc.)
cmd.arg("-c").arg(code);
// Set environment variables
for (key, value) in env_vars {
cmd.env(key, value);
}
cmd
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_output_text() {
let result = parse_output("hello world", OutputFormat::Text);
assert!(result.is_none());
}
#[test]
fn test_parse_output_json() {
let result = parse_output(r#"{"key": "value"}"#, OutputFormat::Json);
assert!(result.is_some());
assert_eq!(result.unwrap()["key"], "value");
}
#[test]
fn test_parse_output_json_with_log_prefix() {
let result = parse_output(
"some log line\nanother log\n{\"key\": \"value\"}",
OutputFormat::Json,
);
assert!(result.is_some());
assert_eq!(result.unwrap()["key"], "value");
}
#[test]
fn test_parse_output_jsonl() {
let result = parse_output("{\"a\": 1}\n{\"b\": 2}\n{\"c\": 3}", OutputFormat::Jsonl);
assert!(result.is_some());
let arr = result.unwrap();
assert_eq!(arr.as_array().unwrap().len(), 3);
}
#[test]
fn test_parse_output_yaml() {
let result = parse_output("key: value\nother: 42", OutputFormat::Yaml);
assert!(result.is_some());
let val = result.unwrap();
assert_eq!(val["key"], "value");
assert_eq!(val["other"], 42);
}
#[test]
fn test_parse_output_empty() {
assert!(parse_output("", OutputFormat::Json).is_none());
assert!(parse_output(" ", OutputFormat::Yaml).is_none());
assert!(parse_output("\n", OutputFormat::Jsonl).is_none());
}
#[tokio::test]
async fn test_execute_streaming_simple() {
let mut cmd = Command::new("/bin/echo");
cmd.arg("hello world");
let result = execute_streaming(
cmd,
&HashMap::new(),
None,
Some(10),
1024 * 1024,
1024 * 1024,
OutputFormat::Text,
)
.await
.unwrap();
assert_eq!(result.exit_code, 0);
assert!(result.stdout.contains("hello world"));
assert!(result.error.is_none());
}
#[tokio::test]
async fn test_execute_streaming_json_output() {
let mut cmd = Command::new("/bin/bash");
cmd.arg("-c").arg(r#"echo '{"status": "ok", "count": 42}'"#);
let result = execute_streaming(
cmd,
&HashMap::new(),
None,
Some(10),
1024 * 1024,
1024 * 1024,
OutputFormat::Json,
)
.await
.unwrap();
assert_eq!(result.exit_code, 0);
assert!(result.result.is_some());
let parsed = result.result.unwrap();
assert_eq!(parsed["status"], "ok");
assert_eq!(parsed["count"], 42);
}
#[tokio::test]
async fn test_execute_streaming_failure() {
let mut cmd = Command::new("/bin/bash");
cmd.arg("-c").arg("echo 'error msg' >&2; exit 1");
let result = execute_streaming(
cmd,
&HashMap::new(),
None,
Some(10),
1024 * 1024,
1024 * 1024,
OutputFormat::Text,
)
.await
.unwrap();
assert_eq!(result.exit_code, 1);
assert!(result.error.is_some());
assert!(result.stderr.contains("error msg"));
}
#[tokio::test]
async fn test_build_action_command() {
let interpreter = Path::new("/usr/bin/python3");
let args = vec!["-u".to_string()];
let action_file = Path::new("/opt/attune/packs/mypack/actions/hello.py");
let mut env = HashMap::new();
env.insert("ATTUNE_EXEC_ID".to_string(), "123".to_string());
let cmd = build_action_command(interpreter, &args, action_file, None, &env);
// We can't easily inspect Command internals, but at least verify it builds without panic
let _ = cmd;
}
}

View File

@@ -10,29 +10,34 @@ use attune_common::models::ExecutionStatus;
use attune_common::mq::{
config::MessageQueueConfig as MqConfig, Connection, Consumer, ConsumerConfig,
ExecutionCompletedPayload, ExecutionStatusChangedPayload, MessageEnvelope, MessageType,
Publisher, PublisherConfig,
PackRegisteredPayload, Publisher, PublisherConfig,
};
use attune_common::repositories::{execution::ExecutionRepository, FindById};
use chrono::Utc;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tokio::task::JoinHandle;
use tracing::{error, info, warn};
use tracing::{debug, error, info, warn};
use crate::artifacts::ArtifactManager;
use crate::env_setup;
use crate::executor::ActionExecutor;
use crate::heartbeat::HeartbeatManager;
use crate::registration::WorkerRegistration;
use crate::runtime::local::LocalRuntime;
use crate::runtime::native::NativeRuntime;
use crate::runtime::python::PythonRuntime;
use crate::runtime::process::ProcessRuntime;
use crate::runtime::shell::ShellRuntime;
use crate::runtime::{DependencyManagerRegistry, PythonVenvManager, RuntimeRegistry};
use crate::runtime::RuntimeRegistry;
use crate::secrets::SecretManager;
use attune_common::repositories::runtime::RuntimeRepository;
use attune_common::repositories::List;
/// Message payload for execution.scheduled events
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutionScheduledPayload {
@@ -53,7 +58,15 @@ pub struct WorkerService {
publisher: Arc<Publisher>,
consumer: Option<Arc<Consumer>>,
consumer_handle: Option<JoinHandle<()>>,
pack_consumer: Option<Arc<Consumer>>,
pack_consumer_handle: Option<JoinHandle<()>>,
worker_id: Option<i64>,
/// Runtime filter derived from ATTUNE_WORKER_RUNTIMES
runtime_filter: Option<Vec<String>>,
/// Base directory for pack files
packs_base_dir: PathBuf,
/// Base directory for isolated runtime environments
runtime_envs_dir: PathBuf,
}
impl WorkerService {
@@ -119,86 +132,104 @@ impl WorkerService {
let artifact_manager = ArtifactManager::new(artifact_base_dir);
artifact_manager.initialize().await?;
let packs_base_dir = std::path::PathBuf::from(&config.packs_base_dir);
let runtime_envs_dir = std::path::PathBuf::from(&config.runtime_envs_dir);
// Determine which runtimes to register based on configuration
// This reads from ATTUNE_WORKER_RUNTIMES env var (highest priority)
let configured_runtimes = if let Ok(runtimes_env) = std::env::var("ATTUNE_WORKER_RUNTIMES")
{
info!(
"Registering runtimes from ATTUNE_WORKER_RUNTIMES: {}",
runtimes_env
);
runtimes_env
.split(',')
.map(|s| s.trim().to_lowercase())
.filter(|s| !s.is_empty())
.collect::<Vec<String>>()
} else {
// Fallback to auto-detection if not configured
info!("No ATTUNE_WORKER_RUNTIMES found, registering all available runtimes");
vec![
"shell".to_string(),
"python".to_string(),
"native".to_string(),
]
};
info!("Configured runtimes: {:?}", configured_runtimes);
// Initialize dependency manager registry for isolated environments
let mut dependency_manager_registry = DependencyManagerRegistry::new();
// Only setup Python virtual environment manager if Python runtime is needed
if configured_runtimes.contains(&"python".to_string()) {
let venv_base_dir = std::path::PathBuf::from(
config
.worker
.as_ref()
.and_then(|w| w.name.clone())
.map(|name| format!("/tmp/attune/venvs/{}", name))
.unwrap_or_else(|| "/tmp/attune/venvs".to_string()),
);
let python_venv_manager = PythonVenvManager::new(venv_base_dir);
dependency_manager_registry.register(Box::new(python_venv_manager));
info!("Dependency manager initialized with Python venv support");
}
let dependency_manager_arc = Arc::new(dependency_manager_registry);
// ATTUNE_WORKER_RUNTIMES env var filters which runtimes this worker handles.
// If not set, all action runtimes from the database are loaded.
let runtime_filter: Option<Vec<String>> =
std::env::var("ATTUNE_WORKER_RUNTIMES").ok().map(|env_val| {
info!(
"Filtering runtimes from ATTUNE_WORKER_RUNTIMES: {}",
env_val
);
env_val
.split(',')
.map(|s| s.trim().to_lowercase())
.filter(|s| !s.is_empty())
.collect()
});
// Initialize runtime registry
let mut runtime_registry = RuntimeRegistry::new();
// Register runtimes based on configuration
for runtime_name in &configured_runtimes {
match runtime_name.as_str() {
"python" => {
let python_runtime = PythonRuntime::with_dependency_manager(
std::path::PathBuf::from("python3"),
std::path::PathBuf::from("/tmp/attune/actions"),
dependency_manager_arc.clone(),
// Load runtimes from the database and create ProcessRuntime instances.
// Each runtime row's `execution_config` JSONB drives how the ProcessRuntime
// invokes interpreters, manages environments, and installs dependencies.
// We skip runtimes with empty execution_config (e.g., the built-in sensor
// runtime) since they have no interpreter and cannot execute as a process.
match RuntimeRepository::list(&pool).await {
Ok(db_runtimes) => {
let executable_runtimes: Vec<_> = db_runtimes
.into_iter()
.filter(|r| {
let config = r.parsed_execution_config();
// A runtime is executable if it has a non-default interpreter
// (the default is "/bin/sh" from InterpreterConfig::default,
// but runtimes with no execution_config at all will have an
// empty JSON object that deserializes to defaults with no
// file_extension — those are not real process runtimes).
config.interpreter.file_extension.is_some()
|| r.execution_config != serde_json::json!({})
})
.collect();
info!(
"Found {} executable runtime(s) in database",
executable_runtimes.len()
);
for rt in executable_runtimes {
let rt_name = rt.name.to_lowercase();
// Apply filter if ATTUNE_WORKER_RUNTIMES is set
if let Some(ref filter) = runtime_filter {
if !filter.contains(&rt_name) {
debug!(
"Skipping runtime '{}' (not in ATTUNE_WORKER_RUNTIMES filter)",
rt_name
);
continue;
}
}
let exec_config = rt.parsed_execution_config();
let process_runtime = ProcessRuntime::new(
rt_name.clone(),
exec_config,
packs_base_dir.clone(),
runtime_envs_dir.clone(),
);
runtime_registry.register(Box::new(process_runtime));
info!(
"Registered ProcessRuntime '{}' from database (ref: {})",
rt_name, rt.r#ref
);
runtime_registry.register(Box::new(python_runtime));
info!("Registered Python runtime");
}
"shell" => {
runtime_registry.register(Box::new(ShellRuntime::new()));
info!("Registered Shell runtime");
}
"native" => {
runtime_registry.register(Box::new(NativeRuntime::new()));
info!("Registered Native runtime");
}
"node" => {
warn!("Node.js runtime requested but not yet implemented, skipping");
}
_ => {
warn!("Unknown runtime type '{}', skipping", runtime_name);
}
}
Err(e) => {
warn!(
"Failed to load runtimes from database: {}. \
Falling back to built-in defaults.",
e
);
}
}
// Only register local runtime as fallback if no specific runtimes configured
// (LocalRuntime contains Python/Shell/Native and tries to validate all)
if configured_runtimes.is_empty() {
// If no runtimes were loaded from the DB, register built-in defaults
if runtime_registry.list_runtimes().is_empty() {
info!("No runtimes loaded from database, registering built-in defaults");
// Shell runtime (always available)
runtime_registry.register(Box::new(ShellRuntime::new()));
info!("Registered built-in Shell runtime");
// Native runtime (for compiled binaries)
runtime_registry.register(Box::new(NativeRuntime::new()));
info!("Registered built-in Native runtime");
// Local runtime as catch-all fallback
let local_runtime = LocalRuntime::new();
runtime_registry.register(Box::new(local_runtime));
info!("Registered Local runtime (fallback)");
@@ -231,7 +262,6 @@ impl WorkerService {
.as_ref()
.map(|w| w.max_stderr_bytes)
.unwrap_or(10 * 1024 * 1024);
let packs_base_dir = std::path::PathBuf::from(&config.packs_base_dir);
// Get API URL from environment or construct from server config
let api_url = std::env::var("ATTUNE_API_URL")
@@ -244,7 +274,7 @@ impl WorkerService {
secret_manager,
max_stdout_bytes,
max_stderr_bytes,
packs_base_dir,
packs_base_dir.clone(),
api_url,
));
@@ -259,6 +289,9 @@ impl WorkerService {
heartbeat_interval,
));
// Capture the runtime filter for use in env setup
let runtime_filter_for_service = runtime_filter.clone();
Ok(Self {
config,
db_pool: pool,
@@ -269,7 +302,12 @@ impl WorkerService {
publisher: Arc::new(publisher),
consumer: None,
consumer_handle: None,
pack_consumer: None,
pack_consumer_handle: None,
worker_id: None,
runtime_filter: runtime_filter_for_service,
packs_base_dir,
runtime_envs_dir,
})
}
@@ -288,6 +326,7 @@ impl WorkerService {
info!("Worker registered with ID: {}", worker_id);
// Setup worker-specific message queue infrastructure
// (includes per-worker execution queue AND pack registration queue)
let mq_config = MqConfig::default();
self.mq_connection
.setup_worker_infrastructure(worker_id, &mq_config)
@@ -297,12 +336,20 @@ impl WorkerService {
})?;
info!("Worker-specific message queue infrastructure setup completed");
// Proactively set up runtime environments for all registered packs.
// This runs before we start consuming execution messages so that
// environments are ready by the time the first execution arrives.
self.scan_and_setup_environments().await;
// Start heartbeat
self.heartbeat.start().await?;
// Start consuming execution messages
self.start_execution_consumer().await?;
// Start consuming pack registration events
self.start_pack_consumer().await?;
info!("Worker Service started successfully");
Ok(())
@@ -316,6 +363,137 @@ impl WorkerService {
/// 3. Wait for in-flight tasks with timeout
/// 4. Close MQ connection
/// 5. Close DB connection
/// Scan all registered packs and create missing runtime environments.
async fn scan_and_setup_environments(&self) {
let filter_refs: Option<Vec<String>> = self.runtime_filter.clone();
let filter_slice: Option<&[String]> = filter_refs.as_deref();
let result = env_setup::scan_and_setup_all_environments(
&self.db_pool,
filter_slice,
&self.packs_base_dir,
&self.runtime_envs_dir,
)
.await;
if !result.errors.is_empty() {
warn!(
"Environment startup scan completed with {} error(s): {:?}",
result.errors.len(),
result.errors,
);
} else {
info!(
"Environment startup scan completed: {} pack(s) scanned, \
{} environment(s) ensured, {} skipped",
result.packs_scanned, result.environments_created, result.environments_skipped,
);
}
}
/// Start consuming pack.registered events from the per-worker packs queue.
async fn start_pack_consumer(&mut self) -> Result<()> {
let worker_id = self
.worker_id
.ok_or_else(|| Error::Internal("Worker not registered".to_string()))?;
let queue_name = format!("worker.{}.packs", worker_id);
info!(
"Starting pack registration consumer for queue: {}",
queue_name
);
let consumer = Arc::new(
Consumer::new(
&self.mq_connection,
ConsumerConfig {
queue: queue_name.clone(),
tag: format!("worker-{}-packs", worker_id),
prefetch_count: 5,
auto_ack: false,
exclusive: false,
},
)
.await
.map_err(|e| Error::Internal(format!("Failed to create pack consumer: {}", e)))?,
);
let db_pool = self.db_pool.clone();
let consumer_for_task = consumer.clone();
let queue_name_for_log = queue_name.clone();
let runtime_filter = self.runtime_filter.clone();
let packs_base_dir = self.packs_base_dir.clone();
let runtime_envs_dir = self.runtime_envs_dir.clone();
let handle = tokio::spawn(async move {
info!(
"Pack consumer loop started for queue '{}'",
queue_name_for_log
);
let result = consumer_for_task
.consume_with_handler(move |envelope: MessageEnvelope<PackRegisteredPayload>| {
let db_pool = db_pool.clone();
let runtime_filter = runtime_filter.clone();
let packs_base_dir = packs_base_dir.clone();
let runtime_envs_dir = runtime_envs_dir.clone();
async move {
info!(
"Received pack.registered event for pack '{}' (version {})",
envelope.payload.pack_ref, envelope.payload.version,
);
let filter_slice: Option<Vec<String>> = runtime_filter;
let filter_ref: Option<&[String]> = filter_slice.as_deref();
let pack_result = env_setup::setup_environments_for_registered_pack(
&db_pool,
&envelope.payload,
filter_ref,
&packs_base_dir,
&runtime_envs_dir,
)
.await;
if !pack_result.errors.is_empty() {
warn!(
"Pack '{}' environment setup had {} error(s): {:?}",
pack_result.pack_ref,
pack_result.errors.len(),
pack_result.errors,
);
} else if !pack_result.environments_created.is_empty() {
info!(
"Pack '{}' environments set up: {:?}",
pack_result.pack_ref, pack_result.environments_created,
);
}
Ok(())
}
})
.await;
match result {
Ok(()) => info!(
"Pack consumer loop for queue '{}' ended",
queue_name_for_log
),
Err(e) => error!(
"Pack consumer loop for queue '{}' failed: {}",
queue_name_for_log, e
),
}
});
self.pack_consumer = Some(consumer);
self.pack_consumer_handle = Some(handle);
info!("Pack registration consumer initialized");
Ok(())
}
pub async fn stop(&mut self) -> Result<()> {
info!("Stopping Worker Service - initiating graceful shutdown");
@@ -355,14 +533,20 @@ impl WorkerService {
Err(_) => warn!("Shutdown timeout reached - some tasks may have been interrupted"),
}
// 4. Abort consumer task and close message queue connection
// 4. Abort consumer tasks and close message queue connection
if let Some(handle) = self.consumer_handle.take() {
info!("Stopping consumer task...");
info!("Stopping execution consumer task...");
handle.abort();
// Wait briefly for the task to finish
let _ = handle.await;
}
if let Some(handle) = self.pack_consumer_handle.take() {
info!("Stopping pack consumer task...");
handle.abort();
let _ = handle.await;
}
info!("Closing message queue connection...");
if let Err(e) = self.mq_connection.close().await {
warn!("Error closing message queue: {}", e);

View File

@@ -1,248 +1,542 @@
//! Integration tests for Python virtual environment dependency isolation
//! Integration tests for runtime environment and dependency isolation
//!
//! Tests the end-to-end flow of creating isolated Python environments
//! for packs with dependencies.
//! Tests the end-to-end flow of creating isolated runtime environments
//! for packs using the ProcessRuntime configuration-driven approach.
//!
//! Environment directories are placed at:
//! {runtime_envs_dir}/{pack_ref}/{runtime_name}
//! e.g., /tmp/.../runtime_envs/testpack/python
//! This keeps the pack directory clean and read-only.
use attune_worker::runtime::{
DependencyManager, DependencyManagerRegistry, DependencySpec, PythonVenvManager,
use attune_common::models::runtime::{
DependencyConfig, EnvironmentConfig, InterpreterConfig, RuntimeExecutionConfig,
};
use attune_worker::runtime::process::ProcessRuntime;
use attune_worker::runtime::ExecutionContext;
use attune_worker::runtime::Runtime;
use attune_worker::runtime::{OutputFormat, ParameterDelivery, ParameterFormat};
use std::collections::HashMap;
use std::path::PathBuf;
use tempfile::TempDir;
#[tokio::test]
async fn test_python_venv_creation() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
fn make_python_config() -> RuntimeExecutionConfig {
RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "python3".to_string(),
args: vec!["-u".to_string()],
file_extension: Some(".py".to_string()),
},
environment: Some(EnvironmentConfig {
env_type: "virtualenv".to_string(),
dir_name: ".venv".to_string(),
create_command: vec![
"python3".to_string(),
"-m".to_string(),
"venv".to_string(),
"{env_dir}".to_string(),
],
interpreter_path: Some("{env_dir}/bin/python3".to_string()),
}),
dependencies: Some(DependencyConfig {
manifest_file: "requirements.txt".to_string(),
install_command: vec![
"{interpreter}".to_string(),
"-m".to_string(),
"pip".to_string(),
"install".to_string(),
"-r".to_string(),
"{manifest_path}".to_string(),
],
}),
}
}
let spec = DependencySpec::new("python").with_dependency("requests==2.28.0");
fn make_shell_config() -> RuntimeExecutionConfig {
RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "/bin/bash".to_string(),
args: vec![],
file_extension: Some(".sh".to_string()),
},
environment: None,
dependencies: None,
}
}
let env_info = manager
.ensure_environment("test_pack", &spec)
.await
.expect("Failed to create environment");
assert_eq!(env_info.runtime, "python");
assert!(env_info.is_valid);
assert!(env_info.path.exists());
assert!(env_info.executable_path.exists());
fn make_context(action_ref: &str, entry_point: &str, runtime_name: &str) -> ExecutionContext {
ExecutionContext {
execution_id: 1,
action_ref: action_ref.to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(30),
working_dir: None,
entry_point: entry_point.to_string(),
code: None,
code_path: None,
runtime_name: Some(runtime_name.to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: ParameterDelivery::default(),
parameter_format: ParameterFormat::default(),
output_format: OutputFormat::default(),
}
}
#[tokio::test]
async fn test_venv_idempotency() {
async fn test_python_venv_creation_via_process_runtime() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let spec = DependencySpec::new("python").with_dependency("requests==2.28.0");
let env_dir = runtime_envs_dir.join("testpack").join("python");
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// Setup the pack environment (creates venv at external location)
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Failed to create venv environment");
// Verify venv was created at the external runtime_envs location
assert!(env_dir.exists(), "Virtualenv directory should exist at external location");
let venv_python = env_dir.join("bin").join("python3");
assert!(
venv_python.exists(),
"Virtualenv python3 binary should exist"
);
// Verify pack directory was NOT modified
assert!(
!pack_dir.join(".venv").exists(),
"Pack directory should not contain .venv — environments are external"
);
}
#[tokio::test]
async fn test_venv_creation_is_idempotent() {
let temp_dir = TempDir::new().unwrap();
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let env_dir = runtime_envs_dir.join("testpack").join("python");
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// Create environment first time
let env_info1 = manager
.ensure_environment("test_pack", &spec)
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Failed to create environment");
let created_at1 = env_info1.created_at;
assert!(env_dir.exists());
// Call ensure_environment again with same dependencies
let env_info2 = manager
.ensure_environment("test_pack", &spec)
// Create environment second time — should succeed without error
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Failed to ensure environment");
.expect("Second setup should succeed (idempotent)");
// Should return existing environment (same created_at)
assert_eq!(env_info1.created_at, env_info2.created_at);
assert_eq!(created_at1, env_info2.created_at);
assert!(env_dir.exists());
}
#[tokio::test]
async fn test_venv_update_on_dependency_change() {
async fn test_dependency_installation() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let spec1 = DependencySpec::new("python").with_dependency("requests==2.28.0");
let env_dir = runtime_envs_dir.join("testpack").join("python");
// Create environment with first set of dependencies
let env_info1 = manager
.ensure_environment("test_pack", &spec1)
// Write a requirements.txt with a simple, fast-to-install package
std::fs::write(
pack_dir.join("requirements.txt"),
"pip>=21.0\n", // pip is already installed, so this is fast
)
.unwrap();
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// Setup creates the venv and installs dependencies
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Failed to setup environment with dependencies");
assert!(env_dir.exists());
}
#[tokio::test]
async fn test_no_environment_for_shell_runtime() {
let temp_dir = TempDir::new().unwrap();
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let env_dir = runtime_envs_dir.join("testpack").join("shell");
let runtime = ProcessRuntime::new(
"shell".to_string(),
make_shell_config(),
packs_base_dir,
runtime_envs_dir,
);
// Shell runtime has no environment config — should be a no-op
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Shell setup should succeed (no environment to create)");
// No environment should exist
assert!(!env_dir.exists());
assert!(!pack_dir.join(".venv").exists());
assert!(!pack_dir.join("node_modules").exists());
}
#[tokio::test]
async fn test_pack_has_dependencies_detection() {
let temp_dir = TempDir::new().unwrap();
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// No requirements.txt yet
assert!(
!runtime.pack_has_dependencies(&pack_dir),
"Should not detect dependencies without manifest file"
);
// Create requirements.txt
std::fs::write(pack_dir.join("requirements.txt"), "requests>=2.28.0\n").unwrap();
assert!(
runtime.pack_has_dependencies(&pack_dir),
"Should detect dependencies when manifest file exists"
);
}
#[tokio::test]
async fn test_environment_exists_detection() {
let temp_dir = TempDir::new().unwrap();
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let env_dir = runtime_envs_dir.join("testpack").join("python");
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// No venv yet — environment_exists uses pack_ref string
assert!(
!runtime.environment_exists("testpack"),
"Environment should not exist before setup"
);
// Create the venv
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Failed to create environment");
let created_at1 = env_info1.created_at;
// Give it a moment to ensure timestamp difference
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
// Change dependencies
let spec2 = DependencySpec::new("python").with_dependency("requests==2.29.0");
// Should recreate environment
let env_info2 = manager
.ensure_environment("test_pack", &spec2)
.await
.expect("Failed to update environment");
// Updated timestamp should be newer
assert!(env_info2.updated_at >= created_at1);
assert!(
runtime.environment_exists("testpack"),
"Environment should exist after setup"
);
}
#[tokio::test]
async fn test_multiple_pack_isolation() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let spec1 = DependencySpec::new("python").with_dependency("requests==2.28.0");
let spec2 = DependencySpec::new("python").with_dependency("flask==2.3.0");
let pack_a_dir = packs_base_dir.join("pack_a");
let pack_b_dir = packs_base_dir.join("pack_b");
std::fs::create_dir_all(&pack_a_dir).unwrap();
std::fs::create_dir_all(&pack_b_dir).unwrap();
// Create environments for two different packs
let env1 = manager
.ensure_environment("pack_a", &spec1)
let env_dir_a = runtime_envs_dir.join("pack_a").join("python");
let env_dir_b = runtime_envs_dir.join("pack_b").join("python");
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// Setup environments for two different packs
runtime
.setup_pack_environment(&pack_a_dir, &env_dir_a)
.await
.expect("Failed to create environment for pack_a");
.expect("Failed to setup pack_a");
let env2 = manager
.ensure_environment("pack_b", &spec2)
runtime
.setup_pack_environment(&pack_b_dir, &env_dir_b)
.await
.expect("Failed to create environment for pack_b");
.expect("Failed to setup pack_b");
// Should have different paths
assert_ne!(env1.path, env2.path);
assert_ne!(env1.executable_path, env2.executable_path);
// Each pack should have its own venv at the external location
assert!(env_dir_a.exists(), "pack_a should have its own venv");
assert!(env_dir_b.exists(), "pack_b should have its own venv");
assert_ne!(env_dir_a, env_dir_b, "Venvs should be in different directories");
// Both should be valid
assert!(env1.is_valid);
assert!(env2.is_valid);
// Pack directories should remain clean
assert!(!pack_a_dir.join(".venv").exists(), "pack_a dir should not contain .venv");
assert!(!pack_b_dir.join(".venv").exists(), "pack_b dir should not contain .venv");
}
#[tokio::test]
async fn test_get_executable_path() {
async fn test_execute_python_action_with_venv() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
let actions_dir = pack_dir.join("actions");
std::fs::create_dir_all(&actions_dir).unwrap();
let spec = DependencySpec::new("python");
let env_dir = runtime_envs_dir.join("testpack").join("python");
manager
.ensure_environment("test_pack", &spec)
// Write a Python script
std::fs::write(
actions_dir.join("hello.py"),
r#"
import sys
print(f"Python from: {sys.executable}")
print("Hello from venv action!")
"#,
)
.unwrap();
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// Setup the venv first
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Failed to create environment");
.expect("Failed to setup venv");
let python_path = manager
.get_executable_path("test_pack")
.await
.expect("Failed to get executable path");
// Now execute the action
let mut context = make_context("testpack.hello", "hello.py", "python");
context.code_path = Some(actions_dir.join("hello.py"));
assert!(python_path.exists());
assert!(python_path.to_string_lossy().contains("test_pack"));
let result = runtime.execute(context).await.unwrap();
assert_eq!(result.exit_code, 0, "Action should succeed");
assert!(
result.stdout.contains("Hello from venv action!"),
"Should see output from action. Got: {}",
result.stdout
);
// Verify it's using the venv Python (at external runtime_envs location)
assert!(
result.stdout.contains("runtime_envs"),
"Should be using the venv python from external runtime_envs dir. Got: {}",
result.stdout
);
}
#[tokio::test]
async fn test_validate_environment() {
async fn test_execute_shell_action_no_venv() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
let actions_dir = pack_dir.join("actions");
std::fs::create_dir_all(&actions_dir).unwrap();
// Non-existent environment should not be valid
let is_valid = manager
.validate_environment("nonexistent")
.await
.expect("Validation check failed");
assert!(!is_valid);
std::fs::write(
actions_dir.join("greet.sh"),
"#!/bin/bash\necho 'Hello from shell!'",
)
.unwrap();
// Create environment
let spec = DependencySpec::new("python");
manager
.ensure_environment("test_pack", &spec)
.await
.expect("Failed to create environment");
let runtime = ProcessRuntime::new(
"shell".to_string(),
make_shell_config(),
packs_base_dir,
runtime_envs_dir,
);
// Should now be valid
let is_valid = manager
.validate_environment("test_pack")
.await
.expect("Validation check failed");
assert!(is_valid);
let mut context = make_context("testpack.greet", "greet.sh", "shell");
context.code_path = Some(actions_dir.join("greet.sh"));
let result = runtime.execute(context).await.unwrap();
assert_eq!(result.exit_code, 0);
assert!(result.stdout.contains("Hello from shell!"));
}
#[tokio::test]
async fn test_remove_environment() {
async fn test_working_directory_is_pack_dir() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
let actions_dir = pack_dir.join("actions");
std::fs::create_dir_all(&actions_dir).unwrap();
let spec = DependencySpec::new("python");
// Script that prints the working directory
std::fs::write(actions_dir.join("cwd.sh"), "#!/bin/bash\npwd").unwrap();
// Create environment
let env_info = manager
.ensure_environment("test_pack", &spec)
.await
.expect("Failed to create environment");
let runtime = ProcessRuntime::new(
"shell".to_string(),
make_shell_config(),
packs_base_dir,
runtime_envs_dir,
);
let path = env_info.path.clone();
assert!(path.exists());
let mut context = make_context("testpack.cwd", "cwd.sh", "shell");
context.code_path = Some(actions_dir.join("cwd.sh"));
// Remove environment
manager
.remove_environment("test_pack")
.await
.expect("Failed to remove environment");
let result = runtime.execute(context).await.unwrap();
assert!(!path.exists());
// Get environment should return None
let env = manager
.get_environment("test_pack")
.await
.expect("Failed to get environment");
assert!(env.is_none());
assert_eq!(result.exit_code, 0);
let output_path = result.stdout.trim();
assert_eq!(
output_path,
pack_dir.to_string_lossy().as_ref(),
"Working directory should be the pack directory"
);
}
#[tokio::test]
async fn test_list_environments() {
async fn test_interpreter_resolution_with_venv() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let spec = DependencySpec::new("python");
let env_dir = runtime_envs_dir.join("testpack").join("python");
// Create multiple environments
manager
.ensure_environment("pack_a", &spec)
let config = make_python_config();
let runtime = ProcessRuntime::new(
"python".to_string(),
config.clone(),
packs_base_dir,
runtime_envs_dir,
);
// Before venv creation — should resolve to system python
let interpreter = config.resolve_interpreter_with_env(&pack_dir, Some(&env_dir));
assert_eq!(
interpreter,
PathBuf::from("python3"),
"Without venv, should use system python"
);
// Create venv at external location
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Failed to create pack_a");
.expect("Failed to create venv");
manager
.ensure_environment("pack_b", &spec)
.await
.expect("Failed to create pack_b");
manager
.ensure_environment("pack_c", &spec)
.await
.expect("Failed to create pack_c");
// List should return all three
let environments = manager
.list_environments()
.await
.expect("Failed to list environments");
assert_eq!(environments.len(), 3);
// After venv creation — should resolve to venv python at external location
let interpreter = config.resolve_interpreter_with_env(&pack_dir, Some(&env_dir));
let expected_venv_python = env_dir.join("bin").join("python3");
assert_eq!(
interpreter, expected_venv_python,
"With venv, should use venv python from external runtime_envs dir"
);
}
#[tokio::test]
async fn test_dependency_manager_registry() {
async fn test_skip_deps_install_without_manifest() {
let temp_dir = TempDir::new().unwrap();
let mut registry = DependencyManagerRegistry::new();
let packs_base_dir = temp_dir.path().join("packs");
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let pack_dir = packs_base_dir.join("testpack");
std::fs::create_dir_all(&pack_dir).unwrap();
let python_manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
registry.register(Box::new(python_manager));
let env_dir = runtime_envs_dir.join("testpack").join("python");
// Should support python
assert!(registry.supports("python"));
assert!(!registry.supports("nodejs"));
// No requirements.txt — install_dependencies should be a no-op
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
packs_base_dir,
runtime_envs_dir,
);
// Should be able to get manager
let manager = registry.get("python");
assert!(manager.is_some());
assert_eq!(manager.unwrap().runtime_type(), "python");
// Setup should still create the venv but skip dependency installation
runtime
.setup_pack_environment(&pack_dir, &env_dir)
.await
.expect("Setup should succeed without manifest");
assert!(
env_dir.exists(),
"Venv should still be created at external location"
);
}
#[tokio::test]
async fn test_dependency_spec_builder() {
async fn test_runtime_config_matches_file_extension() {
let config = make_python_config();
assert!(config.matches_file_extension(std::path::Path::new("hello.py")));
assert!(config.matches_file_extension(std::path::Path::new(
"/opt/attune/packs/mypack/actions/script.py"
)));
assert!(!config.matches_file_extension(std::path::Path::new("hello.sh")));
assert!(!config.matches_file_extension(std::path::Path::new("hello.js")));
let shell_config = make_shell_config();
assert!(shell_config.matches_file_extension(std::path::Path::new("run.sh")));
assert!(!shell_config.matches_file_extension(std::path::Path::new("run.py")));
}
#[tokio::test]
async fn test_dependency_spec_builder_still_works() {
// The DependencySpec types are still available for generic use
use attune_worker::runtime::DependencySpec;
let spec = DependencySpec::new("python")
.with_dependency("requests==2.28.0")
.with_dependency("flask>=2.0.0")
@@ -256,122 +550,68 @@ async fn test_dependency_spec_builder() {
}
#[tokio::test]
async fn test_requirements_file_content() {
async fn test_process_runtime_setup_and_validate() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let runtime_envs_dir = temp_dir.path().join("runtime_envs");
let requirements = "requests==2.28.0\nflask==2.3.0\npydantic>=2.0.0";
let spec = DependencySpec::new("python").with_requirements_file(requirements.to_string());
let shell_runtime = ProcessRuntime::new(
"shell".to_string(),
make_shell_config(),
temp_dir.path().to_path_buf(),
runtime_envs_dir.clone(),
);
let env_info = manager
.ensure_environment("test_pack", &spec)
.await
.expect("Failed to create environment with requirements file");
// Setup and validate should succeed for shell
shell_runtime.setup().await.unwrap();
shell_runtime.validate().await.unwrap();
assert!(env_info.is_valid);
assert!(env_info.installed_dependencies.len() > 0);
let python_runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
temp_dir.path().to_path_buf(),
runtime_envs_dir,
);
// Setup and validate should succeed for python (warns if not available)
python_runtime.setup().await.unwrap();
python_runtime.validate().await.unwrap();
}
#[tokio::test]
async fn test_pack_ref_sanitization() {
async fn test_can_execute_by_runtime_name() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let spec = DependencySpec::new("python");
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
temp_dir.path().to_path_buf(),
temp_dir.path().join("runtime_envs"),
);
// Pack refs with special characters should be sanitized
let env_info = manager
.ensure_environment("core.http", &spec)
.await
.expect("Failed to create environment");
let context = make_context("mypack.hello", "hello.py", "python");
assert!(runtime.can_execute(&context));
// Path should not contain dots
let path_str = env_info.path.to_string_lossy();
assert!(path_str.contains("core_http"));
assert!(!path_str.contains("core.http"));
let wrong_context = make_context("mypack.hello", "hello.py", "shell");
assert!(!runtime.can_execute(&wrong_context));
}
#[tokio::test]
async fn test_needs_update_detection() {
async fn test_can_execute_by_file_extension() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let spec1 = DependencySpec::new("python").with_dependency("requests==2.28.0");
let runtime = ProcessRuntime::new(
"python".to_string(),
make_python_config(),
temp_dir.path().to_path_buf(),
temp_dir.path().join("runtime_envs"),
);
// Non-existent environment needs update
let needs_update = manager
.needs_update("test_pack", &spec1)
.await
.expect("Failed to check update status");
assert!(needs_update);
let mut context = make_context("mypack.hello", "hello.py", "");
context.runtime_name = None;
context.code_path = Some(PathBuf::from("/tmp/packs/mypack/actions/hello.py"));
assert!(runtime.can_execute(&context));
// Create environment
manager
.ensure_environment("test_pack", &spec1)
.await
.expect("Failed to create environment");
// Same spec should not need update
let needs_update = manager
.needs_update("test_pack", &spec1)
.await
.expect("Failed to check update status");
assert!(!needs_update);
// Different spec should need update
let spec2 = DependencySpec::new("python").with_dependency("requests==2.29.0");
let needs_update = manager
.needs_update("test_pack", &spec2)
.await
.expect("Failed to check update status");
assert!(needs_update);
}
#[tokio::test]
async fn test_empty_dependencies() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
// Pack with no dependencies should still create venv
let spec = DependencySpec::new("python");
assert!(!spec.has_dependencies());
let env_info = manager
.ensure_environment("test_pack", &spec)
.await
.expect("Failed to create environment without dependencies");
assert!(env_info.is_valid);
assert!(env_info.path.exists());
}
#[tokio::test]
async fn test_get_environment_caching() {
let temp_dir = TempDir::new().unwrap();
let manager = PythonVenvManager::new(temp_dir.path().to_path_buf());
let spec = DependencySpec::new("python");
// Create environment
manager
.ensure_environment("test_pack", &spec)
.await
.expect("Failed to create environment");
// First get_environment should read from disk
let env1 = manager
.get_environment("test_pack")
.await
.expect("Failed to get environment")
.expect("Environment not found");
// Second get_environment should use cache
let env2 = manager
.get_environment("test_pack")
.await
.expect("Failed to get environment")
.expect("Environment not found");
assert_eq!(env1.id, env2.id);
assert_eq!(env1.path, env2.path);
context.code_path = Some(PathBuf::from("/tmp/packs/mypack/actions/hello.sh"));
context.entry_point = "hello.sh".to_string();
assert!(!runtime.can_execute(&context));
}

View File

@@ -3,89 +3,99 @@
//! Tests that verify stdout/stderr are properly truncated when they exceed
//! configured size limits, preventing OOM issues with large output.
use attune_worker::runtime::{ExecutionContext, PythonRuntime, Runtime, ShellRuntime};
use attune_common::models::runtime::{InterpreterConfig, RuntimeExecutionConfig};
use attune_worker::runtime::process::ProcessRuntime;
use attune_worker::runtime::{ExecutionContext, Runtime, ShellRuntime};
use std::collections::HashMap;
use std::path::PathBuf;
use tempfile::TempDir;
#[tokio::test]
async fn test_python_stdout_truncation() {
let runtime = PythonRuntime::new();
fn make_python_process_runtime(packs_base_dir: PathBuf) -> ProcessRuntime {
let config = RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "python3".to_string(),
args: vec!["-u".to_string()],
file_extension: Some(".py".to_string()),
},
environment: None,
dependencies: None,
};
ProcessRuntime::new("python".to_string(), config, packs_base_dir.clone(), packs_base_dir.join("../runtime_envs"))
}
// Create a Python script that outputs more than the limit
let code = r#"
import sys
# Output 1KB of data (will exceed 500 byte limit)
for i in range(100):
print("x" * 10)
"#;
let context = ExecutionContext {
execution_id: 1,
action_ref: "test.large_output".to_string(),
fn make_python_context(
execution_id: i64,
action_ref: &str,
code: &str,
max_stdout_bytes: usize,
max_stderr_bytes: usize,
) -> ExecutionContext {
ExecutionContext {
execution_id,
action_ref: action_ref.to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "test_script".to_string(),
entry_point: "inline".to_string(),
code: Some(code.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 500, // Small limit to trigger truncation
max_stderr_bytes: 1024,
max_stdout_bytes,
max_stderr_bytes,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
};
output_format: attune_worker::runtime::OutputFormat::default(),
}
}
#[tokio::test]
async fn test_python_stdout_truncation() {
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// Create a Python one-liner that outputs more than the limit
let code = "import sys\nfor i in range(100):\n print('x' * 10)";
let context = make_python_context(1, "test.large_output", code, 500, 1024);
let result = runtime.execute(context).await.unwrap();
// Should succeed but with truncated output
assert!(result.is_success());
assert_eq!(result.exit_code, 0);
assert!(result.stdout_truncated);
assert!(result.stdout.contains("[OUTPUT TRUNCATED"));
assert!(
result.stdout.contains("[OUTPUT TRUNCATED"),
"Expected truncation marker in stdout, got: {}",
result.stdout
);
assert!(result.stdout_bytes_truncated > 0);
assert!(result.stdout.len() <= 500);
assert!(result.stdout.len() <= 600); // some overhead for the truncation message
}
#[tokio::test]
async fn test_python_stderr_truncation() {
let runtime = PythonRuntime::new();
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// Create a Python script that outputs to stderr
let code = r#"
import sys
# Output 1KB of data to stderr
for i in range(100):
sys.stderr.write("error message line\n")
"#;
// Python one-liner that outputs to stderr
let code = "import sys\nfor i in range(100):\n sys.stderr.write('error message line\\n')";
let context = ExecutionContext {
execution_id: 2,
action_ref: "test.large_stderr".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "test_script".to_string(),
code: Some(code.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 300, // Small limit for stderr
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
};
let context = make_python_context(2, "test.large_stderr", code, 10 * 1024 * 1024, 300);
let result = runtime.execute(context).await.unwrap();
// Should succeed but with truncated stderr
assert!(result.is_success());
assert_eq!(result.exit_code, 0);
assert!(!result.stdout_truncated);
assert!(result.stderr_truncated);
assert!(result.stderr.contains("[OUTPUT TRUNCATED"));
assert!(result.stderr.contains("stderr exceeded size limit"));
assert!(
result.stderr.contains("[OUTPUT TRUNCATED"),
"Expected truncation marker in stderr, got: {}",
result.stderr
);
assert!(result.stderr_bytes_truncated > 0);
assert!(result.stderr.len() <= 300);
}
#[tokio::test]
@@ -94,7 +104,7 @@ async fn test_shell_stdout_truncation() {
// Shell script that outputs more than the limit
let code = r#"
for i in {1..100}; do
for i in $(seq 1 100); do
echo "This is a long line of text that will add up quickly"
done
"#;
@@ -115,177 +125,167 @@ done
max_stderr_bytes: 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
// Should succeed but with truncated output
assert!(result.is_success());
assert_eq!(result.exit_code, 0);
assert!(result.stdout_truncated);
assert!(result.stdout.contains("[OUTPUT TRUNCATED"));
assert!(
result.stdout.contains("[OUTPUT TRUNCATED"),
"Expected truncation marker, got: {}",
result.stdout
);
assert!(result.stdout_bytes_truncated > 0);
assert!(result.stdout.len() <= 400);
}
#[tokio::test]
async fn test_no_truncation_under_limit() {
let runtime = PythonRuntime::new();
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// Small output that won't trigger truncation
let code = r#"
print("Hello, World!")
"#;
let code = "print('Hello, World!')";
let context = ExecutionContext {
execution_id: 4,
action_ref: "test.small_output".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "test_script".to_string(),
code: Some(code.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024, // Large limit
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
};
let context = make_python_context(
4,
"test.small_output",
code,
10 * 1024 * 1024,
10 * 1024 * 1024,
);
let result = runtime.execute(context).await.unwrap();
// Should succeed without truncation
assert!(result.is_success());
assert_eq!(result.exit_code, 0);
assert!(!result.stdout_truncated);
assert!(!result.stderr_truncated);
assert_eq!(result.stdout_bytes_truncated, 0);
assert_eq!(result.stderr_bytes_truncated, 0);
assert!(result.stdout.contains("Hello, World!"));
assert!(
result.stdout.contains("Hello, World!"),
"Expected Hello, World! in stdout, got: {}",
result.stdout
);
}
#[tokio::test]
async fn test_both_streams_truncated() {
let runtime = PythonRuntime::new();
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// Script that outputs to both stdout and stderr
let code = r#"
import sys
# Output to both streams
for i in range(50):
print("stdout line " + str(i))
sys.stderr.write("stderr line " + str(i) + "\n")
"#;
let code = "import sys\nfor i in range(50):\n print('stdout line ' + str(i))\n sys.stderr.write('stderr line ' + str(i) + '\\n')";
let context = ExecutionContext {
execution_id: 5,
action_ref: "test.dual_truncation".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "test_script".to_string(),
code: Some(code.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 300, // Both limits are small
max_stderr_bytes: 300,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
};
let context = make_python_context(5, "test.dual_truncation", code, 300, 300);
let result = runtime.execute(context).await.unwrap();
// Should succeed but with both streams truncated
assert!(result.is_success());
assert_eq!(result.exit_code, 0);
assert!(result.stdout_truncated);
assert!(result.stderr_truncated);
assert!(result.stdout.contains("[OUTPUT TRUNCATED"));
assert!(result.stderr.contains("[OUTPUT TRUNCATED"));
assert!(result.stdout_bytes_truncated > 0);
assert!(result.stderr_bytes_truncated > 0);
assert!(result.stdout.len() <= 300);
assert!(result.stderr.len() <= 300);
}
#[tokio::test]
async fn test_truncation_with_timeout() {
let runtime = PythonRuntime::new();
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// Script that times out but should still capture truncated logs
let code = r#"
import time
for i in range(1000):
print(f"Line {i}")
time.sleep(30) # Will timeout before this
"#;
// Script that produces output then times out
let code = "import time\nfor i in range(1000):\n print(f'Line {i}')\ntime.sleep(30)";
let context = ExecutionContext {
execution_id: 6,
action_ref: "test.timeout_truncation".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(2), // Short timeout
working_dir: None,
entry_point: "test_script".to_string(),
code: Some(code.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 500,
max_stderr_bytes: 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
};
let mut context = make_python_context(6, "test.timeout_truncation", code, 500, 1024);
context.timeout = Some(2); // Short timeout
let result = runtime.execute(context).await.unwrap();
// Should timeout with truncated logs
assert!(!result.is_success());
assert!(result.error.is_some());
assert!(result.error.as_ref().unwrap().contains("timed out"));
// Logs may or may not be truncated depending on how fast it runs
assert!(
result.error.as_ref().unwrap().contains("timed out"),
"Expected timeout error, got: {:?}",
result.error
);
}
#[tokio::test]
async fn test_exact_limit_no_truncation() {
let runtime = PythonRuntime::new();
async fn test_small_output_no_truncation() {
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// Output a small amount that won't trigger truncation
// The Python wrapper adds JSON result output, so we need headroom
let code = r#"
import sys
sys.stdout.write("Small output")
"#;
let code = "import sys; sys.stdout.write('Small output')";
let context = make_python_context(
7,
"test.exact_limit",
code,
10 * 1024 * 1024,
10 * 1024 * 1024,
);
let result = runtime.execute(context).await.unwrap();
// Should succeed without truncation
assert_eq!(result.exit_code, 0);
assert!(!result.stdout_truncated);
assert!(
result.stdout.contains("Small output"),
"Expected 'Small output' in stdout, got: {:?}",
result.stdout
);
}
#[tokio::test]
async fn test_shell_process_runtime_truncation() {
// Test truncation through ProcessRuntime with shell config too
let tmp = TempDir::new().unwrap();
let config = RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "/bin/bash".to_string(),
args: vec![],
file_extension: Some(".sh".to_string()),
},
environment: None,
dependencies: None,
};
let runtime = ProcessRuntime::new("shell".to_string(), config, tmp.path().to_path_buf(), tmp.path().join("runtime_envs"));
let context = ExecutionContext {
execution_id: 7,
action_ref: "test.exact_limit".to_string(),
execution_id: 8,
action_ref: "test.shell_process_truncation".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "test_script".to_string(),
code: Some(code.to_string()),
entry_point: "inline".to_string(),
code: Some(
"for i in $(seq 1 200); do echo \"output line $i padding text here\"; done".to_string(),
),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024, // Large limit to avoid truncation
max_stderr_bytes: 10 * 1024 * 1024,
runtime_name: Some("shell".to_string()),
max_stdout_bytes: 500,
max_stderr_bytes: 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
// Should succeed without truncation
eprintln!(
"test_exact_limit_no_truncation: exit_code={}, error={:?}, stdout={:?}, stderr={:?}",
result.exit_code, result.error, result.stdout, result.stderr
);
assert!(result.is_success());
assert!(!result.stdout_truncated);
assert!(result.stdout.contains("Small output"));
assert_eq!(result.exit_code, 0);
assert!(result.stdout_truncated);
assert!(result.stdout.contains("[OUTPUT TRUNCATED"));
assert!(result.stdout_bytes_truncated > 0);
}

View File

@@ -3,14 +3,50 @@
//! These tests verify that secrets are NOT exposed in process environment
//! or command-line arguments, ensuring secure secret passing via stdin.
use attune_worker::runtime::python::PythonRuntime;
use attune_common::models::runtime::{InterpreterConfig, RuntimeExecutionConfig};
use attune_worker::runtime::process::ProcessRuntime;
use attune_worker::runtime::shell::ShellRuntime;
use attune_worker::runtime::{ExecutionContext, Runtime};
use std::collections::HashMap;
use std::path::PathBuf;
use tempfile::TempDir;
fn make_python_process_runtime(packs_base_dir: PathBuf) -> ProcessRuntime {
let config = RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "python3".to_string(),
args: vec!["-u".to_string()],
file_extension: Some(".py".to_string()),
},
environment: None,
dependencies: None,
};
let runtime_envs_dir = packs_base_dir.parent().unwrap_or(&packs_base_dir).join("runtime_envs");
ProcessRuntime::new("python".to_string(), config, packs_base_dir, runtime_envs_dir)
}
#[tokio::test]
async fn test_python_secrets_not_in_environ() {
let runtime = PythonRuntime::new();
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// Inline Python code that checks environment for secrets
let code = r#"
import os, json
environ_str = str(os.environ)
# Secrets should NOT be in environment
has_secret_in_env = 'super_secret_key_do_not_expose' in environ_str
has_password_in_env = 'secret_pass_123' in environ_str
has_secret_prefix = any(k.startswith('SECRET_') for k in os.environ)
result = {
'secrets_in_environ': has_secret_in_env or has_password_in_env or has_secret_prefix,
'environ_check': 'SECRET_' not in environ_str
}
print(json.dumps(result))
"#;
let context = ExecutionContext {
execution_id: 1,
@@ -28,69 +64,36 @@ async fn test_python_secrets_not_in_environ() {
},
timeout: Some(10),
working_dir: None,
entry_point: "run".to_string(),
code: Some(
r#"
import os
def run():
# Check if secrets are in environment variables
environ_str = str(os.environ)
# Secrets should NOT be in environment
has_secret_in_env = 'super_secret_key_do_not_expose' in environ_str
has_password_in_env = 'secret_pass_123' in environ_str
has_secret_prefix = 'SECRET_API_KEY' in os.environ or 'SECRET_PASSWORD' in os.environ
# But they SHOULD be accessible via get_secret()
api_key_accessible = get_secret('api_key') == 'super_secret_key_do_not_expose'
password_accessible = get_secret('password') == 'secret_pass_123'
return {
'secrets_in_environ': has_secret_in_env or has_password_in_env or has_secret_prefix,
'api_key_accessible': api_key_accessible,
'password_accessible': password_accessible,
'environ_check': 'SECRET_' not in environ_str
}
"#
.to_string(),
),
entry_point: "inline".to_string(),
code: Some(code.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::Json,
};
let result = runtime.execute(context).await.unwrap();
assert!(result.is_success(), "Execution should succeed");
assert_eq!(
result.exit_code, 0,
"Execution should succeed. stderr: {}",
result.stderr
);
let result_data = result.result.unwrap();
let result_obj = result_data.get("result").unwrap();
let result_data = result.result.expect("Should have parsed JSON result");
// Critical security check: secrets should NOT be in environment
assert_eq!(
result_obj.get("secrets_in_environ").unwrap(),
result_data.get("secrets_in_environ").unwrap(),
&serde_json::json!(false),
"SECURITY FAILURE: Secrets found in process environment!"
);
// Verify secrets ARE accessible via secure method
assert_eq!(
result_obj.get("api_key_accessible").unwrap(),
&serde_json::json!(true),
"Secrets should be accessible via get_secret()"
);
assert_eq!(
result_obj.get("password_accessible").unwrap(),
&serde_json::json!(true),
"Secrets should be accessible via get_secret()"
);
// Verify no SECRET_ prefix in environment
assert_eq!(
result_obj.get("environ_check").unwrap(),
result_data.get("environ_check").unwrap(),
&serde_json::json!(true),
"Environment should not contain SECRET_ prefix variables"
);
@@ -159,30 +162,47 @@ echo "SECURITY_PASS: Secrets not in environment but accessible via get_secret"
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
// Check execution succeeded
assert!(result.is_success(), "Execution should succeed");
assert!(
result.is_success(),
"Execution should succeed. stderr: {}",
result.stderr
);
assert_eq!(result.exit_code, 0, "Exit code should be 0");
// Verify security pass message
assert!(
result.stdout.contains("SECURITY_PASS"),
"Security checks should pass"
"Security checks should pass. stdout: {}",
result.stdout
);
assert!(
!result.stdout.contains("SECURITY_FAIL"),
"Should not have security failures"
"Should not have security failures. stdout: {}",
result.stdout
);
}
#[tokio::test]
async fn test_python_secret_isolation_between_actions() {
let runtime = PythonRuntime::new();
async fn test_python_secrets_isolated_between_actions() {
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// First action with secret A — read it from stdin
let code1 = r#"
import sys, json
# Read secrets from stdin (the process executor writes them as JSON on stdin)
secrets_line = sys.stdin.readline().strip()
secrets = json.loads(secrets_line) if secrets_line else {}
print(json.dumps({'secret_a': secrets.get('secret_a')}))
"#;
// First action with secret A
let context1 = ExecutionContext {
execution_id: 3,
action_ref: "security.action1".to_string(),
@@ -195,26 +215,36 @@ async fn test_python_secret_isolation_between_actions() {
},
timeout: Some(10),
working_dir: None,
entry_point: "run".to_string(),
code: Some(
r#"
def run():
return {'secret_a': get_secret('secret_a')}
"#
.to_string(),
),
entry_point: "inline".to_string(),
code: Some(code1.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::Json,
};
let result1 = runtime.execute(context1).await.unwrap();
assert!(result1.is_success());
assert_eq!(
result1.exit_code, 0,
"First action should succeed. stderr: {}",
result1.stderr
);
// Second action with secret B — should NOT see secret A
let code2 = r#"
import sys, json
secrets_line = sys.stdin.readline().strip()
secrets = json.loads(secrets_line) if secrets_line else {}
print(json.dumps({
'secret_a_leaked': secrets.get('secret_a') is not None,
'secret_b_present': secrets.get('secret_b') == 'value_b'
}))
"#;
// Second action with secret B (should not see secret A)
let context2 = ExecutionContext {
execution_id: 4,
action_ref: "security.action2".to_string(),
@@ -227,42 +257,34 @@ def run():
},
timeout: Some(10),
working_dir: None,
entry_point: "run".to_string(),
code: Some(
r#"
def run():
# Should NOT see secret_a from previous action
secret_a = get_secret('secret_a')
secret_b = get_secret('secret_b')
return {
'secret_a_leaked': secret_a is not None,
'secret_b_present': secret_b == 'value_b'
}
"#
.to_string(),
),
entry_point: "inline".to_string(),
code: Some(code2.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::Json,
};
let result2 = runtime.execute(context2).await.unwrap();
assert!(result2.is_success());
assert_eq!(
result2.exit_code, 0,
"Second action should succeed. stderr: {}",
result2.stderr
);
let result_data = result2.result.unwrap();
let result_obj = result_data.get("result").unwrap();
let result_data = result2.result.expect("Should have parsed JSON result");
// Verify secrets don't leak between actions
assert_eq!(
result_obj.get("secret_a_leaked").unwrap(),
result_data.get("secret_a_leaked").unwrap(),
&serde_json::json!(false),
"Secret from previous action should not leak"
);
assert_eq!(
result_obj.get("secret_b_present").unwrap(),
result_data.get("secret_b_present").unwrap(),
&serde_json::json!(true),
"Current action's secret should be present"
);
@@ -270,43 +292,44 @@ def run():
#[tokio::test]
async fn test_python_empty_secrets() {
let runtime = PythonRuntime::new();
let tmp = TempDir::new().unwrap();
let runtime = make_python_process_runtime(tmp.path().to_path_buf());
// With no secrets, stdin should have nothing (or empty) — action should still work
let code = r#"
print("ok")
"#;
let context = ExecutionContext {
execution_id: 5,
action_ref: "security.no_secrets".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(), // No secrets
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "run".to_string(),
code: Some(
r#"
def run():
# get_secret should return None for non-existent secrets
result = get_secret('nonexistent')
return {'result': result}
"#
.to_string(),
),
entry_point: "inline".to_string(),
code: Some(code.to_string()),
code_path: None,
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
assert!(
result.is_success(),
"Should handle empty secrets gracefully"
assert_eq!(
result.exit_code, 0,
"Should handle empty secrets gracefully. stderr: {}",
result.stderr
);
assert!(
result.stdout.contains("ok"),
"Should produce expected output. stdout: {}",
result.stdout
);
let result_data = result.result.unwrap();
let result_obj = result_data.get("result").unwrap();
assert_eq!(result_obj.get("result").unwrap(), &serde_json::Value::Null);
}
#[tokio::test]
@@ -318,7 +341,7 @@ async fn test_shell_empty_secrets() {
action_ref: "security.no_secrets".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: HashMap::new(), // No secrets
secrets: HashMap::new(),
timeout: Some(10),
working_dir: None,
entry_point: "shell".to_string(),
@@ -341,89 +364,155 @@ fi
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::default(),
};
let result = runtime.execute(context).await.unwrap();
assert!(
result.is_success(),
"Should handle empty secrets gracefully"
"Should handle empty secrets gracefully. stderr: {}",
result.stderr
);
assert!(
result.stdout.contains("PASS"),
"Should pass. stdout: {}",
result.stdout
);
assert!(result.stdout.contains("PASS"));
}
#[tokio::test]
async fn test_python_special_characters_in_secrets() {
let runtime = PythonRuntime::new();
async fn test_process_runtime_secrets_not_in_environ() {
// Verify ProcessRuntime (used for all runtimes now) doesn't leak secrets to env
let tmp = TempDir::new().unwrap();
let pack_dir = tmp.path().join("testpack");
let actions_dir = pack_dir.join("actions");
std::fs::create_dir_all(&actions_dir).unwrap();
// Write a script that dumps environment
std::fs::write(
actions_dir.join("check_env.sh"),
r#"#!/bin/bash
if printenv | grep -q "SUPER_SECRET_VALUE"; then
echo "FAIL: Secret leaked to environment"
exit 1
fi
echo "PASS: No secrets in environment"
"#,
)
.unwrap();
let config = RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "/bin/bash".to_string(),
args: vec![],
file_extension: Some(".sh".to_string()),
},
environment: None,
dependencies: None,
};
let runtime = ProcessRuntime::new("shell".to_string(), config, tmp.path().to_path_buf(), tmp.path().join("runtime_envs"));
let context = ExecutionContext {
execution_id: 7,
action_ref: "security.special_chars".to_string(),
action_ref: "testpack.check_env".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: {
let mut s = HashMap::new();
s.insert("special_chars".to_string(), "test!@#$%^&*()".to_string());
s.insert("with_newline".to_string(), "line1\nline2".to_string());
s.insert("db_password".to_string(), "SUPER_SECRET_VALUE".to_string());
s
},
timeout: Some(10),
working_dir: None,
entry_point: "run".to_string(),
code: Some(
r#"
def run():
special = get_secret('special_chars')
newline = get_secret('with_newline')
entry_point: "check_env.sh".to_string(),
code: None,
code_path: Some(actions_dir.join("check_env.sh")),
runtime_name: Some("shell".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::default(),
};
newline_char = chr(10)
newline_parts = newline.split(newline_char) if newline else []
let result = runtime.execute(context).await.unwrap();
assert_eq!(
result.exit_code, 0,
"Check should pass. stdout: {}, stderr: {}",
result.stdout, result.stderr
);
assert!(
result.stdout.contains("PASS"),
"Should confirm no secrets in env. stdout: {}",
result.stdout
);
}
return {
'special_correct': special == 'test!@#$%^&*()',
'newline_has_two_parts': len(newline_parts) == 2,
'newline_first_part': newline_parts[0] if len(newline_parts) > 0 else '',
'newline_second_part': newline_parts[1] if len(newline_parts) > 1 else '',
'special_len': len(special) if special else 0
}
"#
.to_string(),
),
code_path: None,
#[tokio::test]
async fn test_python_process_runtime_secrets_not_in_environ() {
// Same check but via ProcessRuntime with Python interpreter
let tmp = TempDir::new().unwrap();
let pack_dir = tmp.path().join("testpack");
let actions_dir = pack_dir.join("actions");
std::fs::create_dir_all(&actions_dir).unwrap();
std::fs::write(
actions_dir.join("check_env.py"),
r#"
import os, json
env_dump = str(os.environ)
leaked = "TOP_SECRET_API_KEY" in env_dump
print(json.dumps({"leaked": leaked}))
"#,
)
.unwrap();
let config = RuntimeExecutionConfig {
interpreter: InterpreterConfig {
binary: "python3".to_string(),
args: vec!["-u".to_string()],
file_extension: Some(".py".to_string()),
},
environment: None,
dependencies: None,
};
let runtime = ProcessRuntime::new("python".to_string(), config, tmp.path().to_path_buf(), tmp.path().join("runtime_envs"));
let context = ExecutionContext {
execution_id: 8,
action_ref: "testpack.check_env".to_string(),
parameters: HashMap::new(),
env: HashMap::new(),
secrets: {
let mut s = HashMap::new();
s.insert("api_key".to_string(), "TOP_SECRET_API_KEY".to_string());
s
},
timeout: Some(10),
working_dir: None,
entry_point: "check_env.py".to_string(),
code: None,
code_path: Some(actions_dir.join("check_env.py")),
runtime_name: Some("python".to_string()),
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
parameter_delivery: attune_worker::runtime::ParameterDelivery::default(),
parameter_format: attune_worker::runtime::ParameterFormat::default(),
output_format: attune_worker::runtime::OutputFormat::Json,
};
let result = runtime.execute(context).await.unwrap();
assert!(
result.is_success(),
"Should handle special characters: {:?}",
result.error
assert_eq!(
result.exit_code, 0,
"Python env check should succeed. stderr: {}",
result.stderr
);
let result_data = result.result.unwrap();
let result_obj = result_data.get("result").unwrap();
let result_data = result.result.expect("Should have parsed JSON result");
assert_eq!(
result_obj.get("special_correct").unwrap(),
&serde_json::json!(true),
"Special characters should be preserved"
);
assert_eq!(
result_obj.get("newline_has_two_parts").unwrap(),
&serde_json::json!(true),
"Newline should split into two parts"
);
assert_eq!(
result_obj.get("newline_first_part").unwrap(),
&serde_json::json!("line1"),
"First part should be 'line1'"
);
assert_eq!(
result_obj.get("newline_second_part").unwrap(),
&serde_json::json!("line2"),
"Second part should be 'line2'"
result_data.get("leaked").unwrap(),
&serde_json::json!(false),
"SECURITY FAILURE: Secret leaked to Python process environment!"
);
}