Working on runtime executions

This commit is contained in:
2026-02-16 22:04:20 -06:00
parent f52320f889
commit 904ede04be
99 changed files with 6778 additions and 5929 deletions

View File

@@ -576,6 +576,12 @@ pub struct Config {
#[serde(default = "default_packs_base_dir")]
pub packs_base_dir: String,
/// Runtime environments directory (isolated envs like virtualenvs, node_modules).
/// Pattern: {runtime_envs_dir}/{pack_ref}/{runtime_name}
/// e.g., /opt/attune/runtime_envs/python_example/python
#[serde(default = "default_runtime_envs_dir")]
pub runtime_envs_dir: String,
/// Notifier configuration (optional, for notifier service)
pub notifier: Option<NotifierConfig>,
@@ -599,6 +605,10 @@ fn default_packs_base_dir() -> String {
"/opt/attune/packs".to_string()
}
/// Default base directory for isolated runtime environments
/// (parallel to the packs base dir under `/opt/attune`).
fn default_runtime_envs_dir() -> String {
    String::from("/opt/attune/runtime_envs")
}
impl Default for DatabaseConfig {
fn default() -> Self {
Self {
@@ -833,8 +843,10 @@ mod tests {
worker: None,
sensor: None,
packs_base_dir: default_packs_base_dir(),
runtime_envs_dir: default_runtime_envs_dir(),
notifier: None,
pack_registry: PackRegistryConfig::default(),
executor: None,
};
assert_eq!(config.service_name, "attune");
@@ -904,8 +916,10 @@ mod tests {
worker: None,
sensor: None,
packs_base_dir: default_packs_base_dir(),
runtime_envs_dir: default_runtime_envs_dir(),
notifier: None,
pack_registry: PackRegistryConfig::default(),
executor: None,
};
assert!(config.validate().is_ok());

View File

@@ -414,6 +414,324 @@ pub mod pack {
/// Runtime model
pub mod runtime {
use super::*;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tracing::{debug, warn};
/// Configuration for how a runtime executes actions.
///
/// Stored as JSONB in the `runtime.execution_config` column.
/// Uses template variables that are resolved at execution time:
/// - `{pack_dir}` — absolute path to the pack directory
/// - `{env_dir}` — resolved environment directory
/// When an external `env_dir` is provided (e.g., from `runtime_envs_dir`
/// config), that path is used directly. Otherwise falls back to
/// `pack_dir/dir_name` for backward compatibility.
/// - `{interpreter}` — resolved interpreter path
/// - `{action_file}` — absolute path to the action script file
/// - `{manifest_path}` — absolute path to the dependency manifest file
///
/// Every field carries `#[serde(default)]` and the struct derives `Default`,
/// so a missing or empty JSONB value deserializes to a usable config
/// (default interpreter, no environment, no dependency management).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct RuntimeExecutionConfig {
/// Interpreter configuration (how to invoke the action script)
#[serde(default)]
pub interpreter: InterpreterConfig,
/// Optional isolated environment configuration (venv, node_modules, etc.)
#[serde(default)]
pub environment: Option<EnvironmentConfig>,
/// Optional dependency management configuration
#[serde(default)]
pub dependencies: Option<DependencyConfig>,
}
/// Describes the interpreter binary and how it invokes action scripts.
///
/// Deserialized from the `interpreter` key of a runtime's
/// `execution_config`; when absent, `binary` defaults to `/bin/sh`
/// via `default_interpreter_binary`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InterpreterConfig {
/// Path or name of the interpreter binary (e.g., "python3", "/bin/bash").
#[serde(default = "default_interpreter_binary")]
pub binary: String,
/// Additional arguments inserted before the action file path
/// (e.g., `["-u"]` for unbuffered Python output).
#[serde(default)]
pub args: Vec<String>,
/// File extension this runtime handles (e.g., ".py", ".sh").
/// Used to match actions to runtimes when runtime_name is not explicit.
/// See `RuntimeExecutionConfig::matches_file_extension`.
#[serde(default)]
pub file_extension: Option<String>,
}
/// Serde default for `InterpreterConfig::binary`: the system shell.
fn default_interpreter_binary() -> String {
    String::from("/bin/sh")
}
impl Default for InterpreterConfig {
fn default() -> Self {
Self {
binary: default_interpreter_binary(),
args: Vec::new(),
file_extension: None,
}
}
}
/// Describes how to create and manage an isolated runtime environment
/// (e.g., Python virtualenv, Node.js node_modules).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvironmentConfig {
/// Type of environment: "virtualenv", "node_modules", "none".
pub env_type: String,
/// Fallback directory name relative to the pack directory (e.g., ".venv").
/// Only used when no external `env_dir` is provided (legacy/bare-metal).
/// In production, the env_dir is computed externally as
/// `{runtime_envs_dir}/{pack_ref}/{runtime_name}`.
// NOTE(review): the qualified `super::runtime::` path points back into this
// same module; a bare `default_env_dir_name` would resolve identically.
#[serde(default = "super::runtime::default_env_dir_name")]
pub dir_name: String,
/// Command(s) to create the environment.
/// Template variables: `{env_dir}`, `{pack_dir}`.
/// Example: `["python3", "-m", "venv", "{env_dir}"]`
#[serde(default)]
pub create_command: Vec<String>,
/// Path to the interpreter inside the environment.
/// When the environment exists, this overrides `interpreter.binary`.
/// Template variables: `{env_dir}`.
/// Example: `"{env_dir}/bin/python3"`
pub interpreter_path: Option<String>,
}
/// Describes how to detect and install dependencies for a pack.
///
/// Dependency detection is presence-based: see
/// `RuntimeExecutionConfig::has_dependencies`, which checks whether
/// `manifest_file` exists in the pack directory.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DependencyConfig {
/// Name of the manifest file to look for in the pack directory
/// (e.g., "requirements.txt", "package.json").
pub manifest_file: String,
/// Command to install dependencies.
/// Template variables: `{interpreter}`, `{env_dir}`, `{manifest_path}`, `{pack_dir}`.
/// Example: `["{interpreter}", "-m", "pip", "install", "-r", "{manifest_path}"]`
#[serde(default)]
pub install_command: Vec<String>,
}
/// Serde default for `EnvironmentConfig::dir_name`: the conventional
/// pack-relative virtualenv directory.
fn default_env_dir_name() -> String {
    String::from(".venv")
}
impl RuntimeExecutionConfig {
/// Resolve template variables in a single string.
///
/// Plain substring replacement of `{key}` for each entry in `vars`.
// NOTE(review): HashMap iteration order is arbitrary, so if a substituted
// value itself contains a `{placeholder}`, whether it expands depends on
// iteration order — verify callers never pass such values.
pub fn resolve_template(template: &str, vars: &HashMap<&str, String>) -> String {
let mut result = template.to_string();
for (key, value) in vars {
result = result.replace(&format!("{{{}}}", key), value);
}
result
}
/// Resolve the interpreter binary path using a pack-relative env_dir
/// (legacy fallback — prefers [`resolve_interpreter_with_env`]).
pub fn resolve_interpreter(&self, pack_dir: &Path) -> PathBuf {
// Legacy behavior: env lives at pack_dir/<dir_name> when configured.
let fallback_env_dir = self
.environment
.as_ref()
.map(|cfg| pack_dir.join(&cfg.dir_name));
self.resolve_interpreter_with_env(pack_dir, fallback_env_dir.as_deref())
}
/// Resolve the interpreter binary path for a given pack directory and
/// an explicit environment directory.
///
/// If `env_dir` is provided and exists on disk, returns the
/// environment's interpreter. Otherwise returns the system interpreter.
/// Every fallback path is logged (warn for misconfiguration, debug for
/// the expected "no environment" cases) before returning
/// `interpreter.binary`.
pub fn resolve_interpreter_with_env(
&self,
pack_dir: &Path,
env_dir: Option<&Path>,
) -> PathBuf {
if let Some(ref env_cfg) = self.environment {
if let Some(ref interp_path_template) = env_cfg.interpreter_path {
if let Some(env_dir) = env_dir {
if env_dir.exists() {
// Expand {env_dir}/{pack_dir} in the configured template.
let mut vars = HashMap::new();
vars.insert("env_dir", env_dir.to_string_lossy().to_string());
vars.insert("pack_dir", pack_dir.to_string_lossy().to_string());
let resolved = Self::resolve_template(interp_path_template, &vars);
let resolved_path = PathBuf::from(&resolved);
// Path::exists() follows symlinks — returns true only
// if the final target is reachable. A valid symlink to
// an existing executable passes this check just fine.
if resolved_path.exists() {
debug!(
"Using environment interpreter: {} (template: '{}', env_dir: {})",
resolved_path.display(),
interp_path_template,
env_dir.display(),
);
return resolved_path;
}
// exists() returned false — check whether the path is
// a broken symlink (symlink_metadata succeeds for the
// link itself even when its target is missing).
let is_broken_symlink = std::fs::symlink_metadata(&resolved_path)
.map(|m| m.file_type().is_symlink())
.unwrap_or(false);
if is_broken_symlink {
// Read the dangling target for the diagnostic
let target = std::fs::read_link(&resolved_path)
.map(|t| t.display().to_string())
.unwrap_or_else(|_| "<unreadable>".to_string());
warn!(
"Environment interpreter at '{}' is a broken symlink \
(target '{}' does not exist). This typically happens \
when the venv was created by a different container \
where python3 lives at a different path. \
Recreate the venv with `--copies` or delete '{}' \
and restart the worker. \
Falling back to system interpreter '{}'",
resolved_path.display(),
target,
env_dir.display(),
self.interpreter.binary,
);
} else {
warn!(
"Environment interpreter not found at resolved path '{}' \
(template: '{}', env_dir: {}). \
Falling back to system interpreter '{}'",
resolved_path.display(),
interp_path_template,
env_dir.display(),
self.interpreter.binary,
);
}
} else {
warn!(
"Environment directory does not exist: {}. \
Expected interpreter template '{}' cannot be resolved. \
Falling back to system interpreter '{}'",
env_dir.display(),
interp_path_template,
self.interpreter.binary,
);
}
} else {
debug!(
"No env_dir provided; skipping environment interpreter resolution. \
Using system interpreter '{}'",
self.interpreter.binary,
);
}
} else {
debug!(
"No interpreter_path configured in environment config. \
Using system interpreter '{}'",
self.interpreter.binary,
);
}
} else {
debug!(
"No environment config present. Using system interpreter '{}'",
self.interpreter.binary,
);
}
// All fallback branches land here: the configured system interpreter.
PathBuf::from(&self.interpreter.binary)
}
/// Resolve the working directory for action execution.
/// Returns the pack directory.
pub fn resolve_working_dir(&self, pack_dir: &Path) -> PathBuf {
pack_dir.to_path_buf()
}
/// Resolve the environment directory for a pack (legacy pack-relative
/// fallback — callers should prefer computing `env_dir` externally
/// from `runtime_envs_dir`).
///
/// Returns `None` when no environment is configured.
pub fn resolve_env_dir(&self, pack_dir: &Path) -> Option<PathBuf> {
self.environment
.as_ref()
.map(|env_cfg| pack_dir.join(&env_cfg.dir_name))
}
/// Check whether the pack directory has a dependency manifest file.
///
/// Returns `false` when no dependency config is present at all.
pub fn has_dependencies(&self, pack_dir: &Path) -> bool {
if let Some(ref dep_cfg) = self.dependencies {
pack_dir.join(&dep_cfg.manifest_file).exists()
} else {
false
}
}
/// Build template variables using a pack-relative env_dir
/// (legacy fallback — prefers [`build_template_vars_with_env`]).
pub fn build_template_vars(&self, pack_dir: &Path) -> HashMap<&'static str, String> {
let fallback_env_dir = self
.environment
.as_ref()
.map(|cfg| pack_dir.join(&cfg.dir_name));
self.build_template_vars_with_env(pack_dir, fallback_env_dir.as_deref())
}
/// Build template variables for a given pack directory and an explicit
/// environment directory.
///
/// The `env_dir` should be the external runtime environment path
/// (e.g., `/opt/attune/runtime_envs/{pack_ref}/{runtime_name}`).
/// If `None`, falls back to the pack-relative `dir_name`.
/// Keys produced: `pack_dir`, `interpreter`, and conditionally
/// `env_dir` and `manifest_path`.
pub fn build_template_vars_with_env(
&self,
pack_dir: &Path,
env_dir: Option<&Path>,
) -> HashMap<&'static str, String> {
let mut vars = HashMap::new();
vars.insert("pack_dir", pack_dir.to_string_lossy().to_string());
if let Some(env_dir) = env_dir {
vars.insert("env_dir", env_dir.to_string_lossy().to_string());
} else if let Some(ref env_cfg) = self.environment {
// No external env_dir: use the legacy pack-relative location.
let fallback = pack_dir.join(&env_cfg.dir_name);
vars.insert("env_dir", fallback.to_string_lossy().to_string());
}
let interpreter = self.resolve_interpreter_with_env(pack_dir, env_dir);
vars.insert("interpreter", interpreter.to_string_lossy().to_string());
if let Some(ref dep_cfg) = self.dependencies {
let manifest_path = pack_dir.join(&dep_cfg.manifest_file);
vars.insert("manifest_path", manifest_path.to_string_lossy().to_string());
}
vars
}
/// Resolve a command template (Vec<String>) with the given variables.
///
/// Each element is substituted independently via [`resolve_template`].
pub fn resolve_command(
cmd_template: &[String],
vars: &HashMap<&str, String>,
) -> Vec<String> {
cmd_template
.iter()
.map(|part| Self::resolve_template(part, vars))
.collect()
}
/// Check if this runtime can execute a file based on its extension.
///
/// Comparison is ASCII case-insensitive and tolerates a leading dot in
/// the configured extension (".py" and "py" are equivalent). Returns
/// `false` when no extension is configured or the file has none.
pub fn matches_file_extension(&self, file_path: &Path) -> bool {
if let Some(ref ext) = self.interpreter.file_extension {
let expected = ext.trim_start_matches('.');
file_path
.extension()
.and_then(|e| e.to_str())
.map(|e| e.eq_ignore_ascii_case(expected))
.unwrap_or(false)
} else {
false
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Runtime {
@@ -426,10 +744,18 @@ pub mod runtime {
pub distributions: JsonDict,
pub installation: Option<JsonDict>,
pub installers: JsonDict,
pub execution_config: JsonDict,
pub created: DateTime<Utc>,
pub updated: DateTime<Utc>,
}
impl Runtime {
/// Parse the `execution_config` JSONB into a typed `RuntimeExecutionConfig`.
pub fn parsed_execution_config(&self) -> RuntimeExecutionConfig {
serde_json::from_value(self.execution_config.clone()).unwrap_or_default()
}
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Worker {
pub id: Id,
@@ -552,9 +878,9 @@ pub mod rule {
pub pack_ref: String,
pub label: String,
pub description: String,
pub action: Id,
pub action: Option<Id>,
pub action_ref: String,
pub trigger: Id,
pub trigger: Option<Id>,
pub trigger_ref: String,
pub conditions: JsonValue,
pub action_params: JsonValue,

View File

@@ -459,6 +459,13 @@ impl Connection {
worker_id
);
let dlx = if config.rabbitmq.dead_letter.enabled {
Some(config.rabbitmq.dead_letter.exchange.as_str())
} else {
None
};
// --- Execution dispatch queue ---
let queue_name = format!("worker.{}.executions", worker_id);
let queue_config = QueueConfig {
name: queue_name.clone(),
@@ -467,12 +474,6 @@ impl Connection {
auto_delete: false,
};
let dlx = if config.rabbitmq.dead_letter.enabled {
Some(config.rabbitmq.dead_letter.exchange.as_str())
} else {
None
};
// Worker queues use TTL to expire unprocessed messages
let ttl_ms = Some(config.rabbitmq.worker_queue_ttl_ms);
@@ -487,6 +488,29 @@ impl Connection {
)
.await?;
// --- Pack registration queue ---
// Each worker gets its own queue for pack.registered events so that
// every worker instance can independently set up runtime environments
// (e.g., Python virtualenvs) when a new pack is registered.
let packs_queue_name = format!("worker.{}.packs", worker_id);
let packs_queue_config = QueueConfig {
name: packs_queue_name.clone(),
durable: true,
exclusive: false,
auto_delete: false,
};
self.declare_queue_with_optional_dlx(&packs_queue_config, dlx)
.await?;
// Bind to pack.registered routing key on the events exchange
self.bind_queue(
&packs_queue_name,
&config.rabbitmq.exchanges.events.name,
"pack.registered",
)
.await?;
info!(
"Worker infrastructure setup complete for worker ID {}",
worker_id

View File

@@ -65,6 +65,8 @@ pub enum MessageType {
RuleEnabled,
/// Rule disabled
RuleDisabled,
/// Pack registered or installed (triggers runtime environment setup in workers)
PackRegistered,
}
impl MessageType {
@@ -82,6 +84,7 @@ impl MessageType {
Self::RuleCreated => "rule.created".to_string(),
Self::RuleEnabled => "rule.enabled".to_string(),
Self::RuleDisabled => "rule.disabled".to_string(),
Self::PackRegistered => "pack.registered".to_string(),
}
}
@@ -98,6 +101,7 @@ impl MessageType {
Self::RuleCreated | Self::RuleEnabled | Self::RuleDisabled => {
"attune.events".to_string()
}
Self::PackRegistered => "attune.events".to_string(),
}
}
@@ -115,6 +119,7 @@ impl MessageType {
Self::RuleCreated => "RuleCreated",
Self::RuleEnabled => "RuleEnabled",
Self::RuleDisabled => "RuleDisabled",
Self::PackRegistered => "PackRegistered",
}
}
}
@@ -433,6 +438,23 @@ pub struct RuleDisabledPayload {
pub trigger_ref: String,
}
/// Payload for PackRegistered message
///
/// Published when a pack is registered or installed so that workers can
/// proactively create runtime environments (virtualenvs, node_modules, etc.)
/// instead of waiting until the first execution.
///
/// Consumed from the per-worker `worker.{id}.packs` queue bound to the
/// `pack.registered` routing key.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackRegisteredPayload {
/// Pack ID
pub pack_id: Id,
/// Pack reference (e.g., "python_example")
pub pack_ref: String,
/// Pack version
pub version: String,
/// Runtime names that require environment setup (lowercase, e.g., ["python"])
pub runtime_names: Vec<String>,
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -60,7 +60,7 @@ pub use messages::{
EnforcementCreatedPayload, EventCreatedPayload, ExecutionCompletedPayload,
ExecutionRequestedPayload, ExecutionStatusChangedPayload, InquiryCreatedPayload,
InquiryRespondedPayload, Message, MessageEnvelope, MessageType, NotificationCreatedPayload,
RuleCreatedPayload, RuleDisabledPayload, RuleEnabledPayload,
PackRegisteredPayload, RuleCreatedPayload, RuleDisabledPayload, RuleEnabledPayload,
};
pub use publisher::{Publisher, PublisherConfig};
@@ -220,6 +220,8 @@ pub mod routing_keys {
pub const INQUIRY_RESPONDED: &str = "inquiry.responded";
/// Notification created routing key
pub const NOTIFICATION_CREATED: &str = "notification.created";
/// Pack registered routing key
pub const PACK_REGISTERED: &str = "pack.registered";
}
#[cfg(test)]

View File

@@ -9,9 +9,12 @@
use crate::config::Config;
use crate::error::{Error, Result};
use crate::models::Runtime;
use crate::repositories::action::ActionRepository;
use crate::repositories::runtime::RuntimeRepository;
use crate::repositories::FindById as _;
use serde_json::Value as JsonValue;
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::process::Command;
use tokio::fs;
@@ -370,7 +373,8 @@ impl PackEnvironmentManager {
sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE id = $1
"#,
@@ -818,6 +822,53 @@ impl PackEnvironmentManager {
}
}
/// Collect the lowercase runtime names that require environment setup for a pack.
///
/// This queries the pack's actions, resolves their runtimes, and returns the names
/// of any runtimes that have environment or dependency configuration. It is used by
/// the API when publishing `PackRegistered` MQ events so that workers know which
/// runtimes to set up without re-querying the database.
pub async fn collect_runtime_names_for_pack(
db_pool: &PgPool,
pack_id: i64,
pack_path: &Path,
) -> Vec<String> {
let actions = match ActionRepository::find_by_pack(db_pool, pack_id).await {
Ok(a) => a,
Err(e) => {
warn!("Failed to load actions for pack ID {}: {}", pack_id, e);
return Vec::new();
}
};
let mut seen_runtime_ids = HashSet::new();
for action in &actions {
if let Some(runtime_id) = action.runtime {
seen_runtime_ids.insert(runtime_id);
}
}
let mut runtime_names = Vec::new();
for runtime_id in seen_runtime_ids {
match RuntimeRepository::find_by_id(db_pool, runtime_id).await {
Ok(Some(rt)) => {
let exec_config = rt.parsed_execution_config();
if exec_config.environment.is_some() || exec_config.has_dependencies(pack_path) {
runtime_names.push(rt.name.to_lowercase());
}
}
Ok(None) => {
debug!("Runtime ID {} not found, skipping", runtime_id);
}
Err(e) => {
warn!("Failed to load runtime {}: {}", runtime_id, e);
}
}
}
runtime_names
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -0,0 +1,776 @@
//! Pack Component Loader
//!
//! Reads runtime, action, trigger, and sensor YAML definitions from a pack directory
//! and registers them in the database. This is the Rust-native equivalent of
//! the Python `load_core_pack.py` script used during init-packs.
//!
//! Components are loaded in dependency order:
//! 1. Runtimes (no dependencies)
//! 2. Triggers (no dependencies)
//! 3. Actions (depend on runtime)
//! 4. Sensors (depend on triggers and runtime)
use std::collections::HashMap;
use std::path::Path;
use sqlx::PgPool;
use tracing::{info, warn};
use crate::error::{Error, Result};
use crate::models::Id;
use crate::repositories::action::ActionRepository;
use crate::repositories::runtime::{CreateRuntimeInput, RuntimeRepository};
use crate::repositories::trigger::{
CreateSensorInput, CreateTriggerInput, SensorRepository, TriggerRepository,
};
use crate::repositories::{Create, FindByRef};
/// Summary of a pack component load: per-kind created/skipped counters and
/// any non-fatal warnings gathered along the way.
#[derive(Debug, Default)]
pub struct PackLoadResult {
    /// Number of runtimes loaded
    pub runtimes_loaded: usize,
    /// Number of runtimes skipped (already exist)
    pub runtimes_skipped: usize,
    /// Number of triggers loaded
    pub triggers_loaded: usize,
    /// Number of triggers skipped (already exist)
    pub triggers_skipped: usize,
    /// Number of actions loaded
    pub actions_loaded: usize,
    /// Number of actions skipped (already exist)
    pub actions_skipped: usize,
    /// Number of sensors loaded
    pub sensors_loaded: usize,
    /// Number of sensors skipped (already exist)
    pub sensors_skipped: usize,
    /// Warnings encountered during loading
    pub warnings: Vec<String>,
}

impl PackLoadResult {
    /// Total components newly created, summed across all kinds.
    pub fn total_loaded(&self) -> usize {
        [
            self.runtimes_loaded,
            self.triggers_loaded,
            self.actions_loaded,
            self.sensors_loaded,
        ]
        .iter()
        .sum()
    }

    /// Total components skipped (already present), summed across all kinds.
    pub fn total_skipped(&self) -> usize {
        [
            self.runtimes_skipped,
            self.triggers_skipped,
            self.actions_skipped,
            self.sensors_skipped,
        ]
        .iter()
        .sum()
    }
}
/// Loads pack components (triggers, actions, sensors) from YAML files on disk
/// into the database.
pub struct PackComponentLoader<'a> {
// Database pool used for all repository lookups and inserts.
pool: &'a PgPool,
// ID of the pack whose components are being loaded.
pack_id: Id,
// Pack reference string (e.g., "python_example"), stamped onto components.
pack_ref: String,
}
impl<'a> PackComponentLoader<'a> {
pub fn new(pool: &'a PgPool, pack_id: Id, pack_ref: &str) -> Self {
Self {
pool,
pack_id,
pack_ref: pack_ref.to_string(),
}
}
/// Load every component type from the pack directory into the database.
///
/// Components are registered in dependency order — runtimes, then
/// triggers, then actions (which reference runtimes), then sensors
/// (which reference triggers and a runtime). Components whose ref
/// already exists are counted as skipped rather than re-created.
pub async fn load_all(&self, pack_dir: &Path) -> Result<PackLoadResult> {
    let mut summary = PackLoadResult::default();
    info!(
        "Loading components for pack '{}' from {}",
        self.pack_ref,
        pack_dir.display()
    );
    // Runtimes first: actions reference them.
    self.load_runtimes(pack_dir, &mut summary).await?;
    // Triggers next: keep their IDs so sensors can link to them.
    let trigger_index = self.load_triggers(pack_dir, &mut summary).await?;
    self.load_actions(pack_dir, &mut summary).await?;
    self.load_sensors(pack_dir, &trigger_index, &mut summary)
        .await?;
    info!(
        "Pack '{}' component loading complete: {} loaded, {} skipped, {} warnings",
        self.pack_ref,
        summary.total_loaded(),
        summary.total_skipped(),
        summary.warnings.len()
    );
    Ok(summary)
}
/// Load runtime definitions from `pack_dir/runtimes/*.yaml`.
///
/// Runtimes define how actions and sensors are executed (interpreter,
/// environment setup, dependency management). They are loaded first
/// since actions reference them.
///
/// Existing runtimes (matched by `ref`) are skipped, as are concurrent
/// creations detected via unique-constraint violations. Per-file
/// problems are recorded as warnings rather than aborting the load.
// Fix: removed the stray "Load trigger definitions" doc comment that was
// sitting above this function — rustdoc attached it here instead of to
// `load_triggers`, where it belongs.
async fn load_runtimes(&self, pack_dir: &Path, result: &mut PackLoadResult) -> Result<()> {
    let runtimes_dir = pack_dir.join("runtimes");
    if !runtimes_dir.exists() {
        info!("No runtimes directory found for pack '{}'", self.pack_ref);
        return Ok(());
    }
    let yaml_files = read_yaml_files(&runtimes_dir)?;
    info!(
        "Found {} runtime definition(s) for pack '{}'",
        yaml_files.len(),
        self.pack_ref
    );
    for (filename, content) in &yaml_files {
        let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
            Error::validation(format!("Failed to parse runtime YAML {}: {}", filename, e))
        })?;
        // `ref` is mandatory — without it the runtime cannot be identified.
        let runtime_ref = match data.get("ref").and_then(|v| v.as_str()) {
            Some(r) => r.to_string(),
            None => {
                let msg = format!(
                    "Runtime YAML {} missing 'ref' field, skipping",
                    filename
                );
                warn!("{}", msg);
                result.warnings.push(msg);
                continue;
            }
        };
        // Check if runtime already exists
        if let Some(existing) =
            RuntimeRepository::find_by_ref(self.pool, &runtime_ref).await?
        {
            info!(
                "Runtime '{}' already exists (ID: {}), skipping",
                runtime_ref, existing.id
            );
            result.runtimes_skipped += 1;
            continue;
        }
        // Fall back to a name derived from the ref when not set in YAML.
        let name = data
            .get("name")
            .and_then(|v| v.as_str())
            .map(|s| s.to_string())
            .unwrap_or_else(|| extract_name_from_ref(&runtime_ref));
        let description = data
            .get("description")
            .and_then(|v| v.as_str())
            .map(|s| s.to_string());
        let distributions = data
            .get("distributions")
            .and_then(|v| serde_json::to_value(v).ok())
            .unwrap_or_else(|| serde_json::json!({}));
        let installation = data
            .get("installation")
            .and_then(|v| serde_json::to_value(v).ok());
        // execution_config defaults to {} — deserializes to the default
        // RuntimeExecutionConfig downstream.
        let execution_config = data
            .get("execution_config")
            .and_then(|v| serde_json::to_value(v).ok())
            .unwrap_or_else(|| serde_json::json!({}));
        let input = CreateRuntimeInput {
            r#ref: runtime_ref.clone(),
            pack: Some(self.pack_id),
            pack_ref: Some(self.pack_ref.clone()),
            description,
            name,
            distributions,
            installation,
            execution_config,
        };
        match RuntimeRepository::create(self.pool, input).await {
            Ok(rt) => {
                info!(
                    "Created runtime '{}' (ID: {})",
                    runtime_ref, rt.id
                );
                result.runtimes_loaded += 1;
            }
            Err(e) => {
                // Check for unique constraint violation (race condition)
                if let Error::Database(ref db_err) = e {
                    if let sqlx::Error::Database(ref inner) = db_err {
                        if inner.is_unique_violation() {
                            info!(
                                "Runtime '{}' already exists (concurrent creation), skipping",
                                runtime_ref
                            );
                            result.runtimes_skipped += 1;
                            continue;
                        }
                    }
                }
                let msg = format!("Failed to create runtime '{}': {}", runtime_ref, e);
                warn!("{}", msg);
                result.warnings.push(msg);
            }
        }
    }
    Ok(())
}
async fn load_triggers(
&self,
pack_dir: &Path,
result: &mut PackLoadResult,
) -> Result<HashMap<String, Id>> {
let triggers_dir = pack_dir.join("triggers");
let mut trigger_ids = HashMap::new();
if !triggers_dir.exists() {
info!("No triggers directory found for pack '{}'", self.pack_ref);
return Ok(trigger_ids);
}
let yaml_files = read_yaml_files(&triggers_dir)?;
info!(
"Found {} trigger definition(s) for pack '{}'",
yaml_files.len(),
self.pack_ref
);
for (filename, content) in &yaml_files {
let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
Error::validation(format!("Failed to parse trigger YAML {}: {}", filename, e))
})?;
let trigger_ref = match data.get("ref").and_then(|v| v.as_str()) {
Some(r) => r.to_string(),
None => {
let msg = format!("Trigger YAML {} missing 'ref' field, skipping", filename);
warn!("{}", msg);
result.warnings.push(msg);
continue;
}
};
// Check if trigger already exists
if let Some(existing) = TriggerRepository::find_by_ref(self.pool, &trigger_ref).await? {
info!(
"Trigger '{}' already exists (ID: {}), skipping",
trigger_ref, existing.id
);
trigger_ids.insert(trigger_ref, existing.id);
result.triggers_skipped += 1;
continue;
}
let name = extract_name_from_ref(&trigger_ref);
let label = data
.get("label")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_else(|| generate_label(&name));
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let enabled = data
.get("enabled")
.and_then(|v| v.as_bool())
.unwrap_or(true);
let param_schema = data
.get("parameters")
.and_then(|v| serde_json::to_value(v).ok());
let out_schema = data
.get("output")
.and_then(|v| serde_json::to_value(v).ok());
let input = CreateTriggerInput {
r#ref: trigger_ref.clone(),
pack: Some(self.pack_id),
pack_ref: Some(self.pack_ref.clone()),
label,
description: Some(description),
enabled,
param_schema,
out_schema,
is_adhoc: false,
};
match TriggerRepository::create(self.pool, input).await {
Ok(trigger) => {
info!("Created trigger '{}' (ID: {})", trigger_ref, trigger.id);
trigger_ids.insert(trigger_ref, trigger.id);
result.triggers_loaded += 1;
}
Err(e) => {
let msg = format!("Failed to create trigger '{}': {}", trigger_ref, e);
warn!("{}", msg);
result.warnings.push(msg);
}
}
}
Ok(trigger_ids)
}
/// Load action definitions from `pack_dir/actions/*.yaml`.
///
/// Existing actions (matched by `ref`) are skipped; per-file problems
/// become warnings on `result` instead of aborting the load.
async fn load_actions(&self, pack_dir: &Path, result: &mut PackLoadResult) -> Result<()> {
let actions_dir = pack_dir.join("actions");
if !actions_dir.exists() {
info!("No actions directory found for pack '{}'", self.pack_ref);
return Ok(());
}
let yaml_files = read_yaml_files(&actions_dir)?;
info!(
"Found {} action definition(s) for pack '{}'",
yaml_files.len(),
self.pack_ref
);
for (filename, content) in &yaml_files {
let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
Error::validation(format!("Failed to parse action YAML {}: {}", filename, e))
})?;
// `ref` is mandatory — without it the action cannot be identified.
let action_ref = match data.get("ref").and_then(|v| v.as_str()) {
Some(r) => r.to_string(),
None => {
let msg = format!("Action YAML {} missing 'ref' field, skipping", filename);
warn!("{}", msg);
result.warnings.push(msg);
continue;
}
};
// Check if action already exists
if let Some(existing) = ActionRepository::find_by_ref(self.pool, &action_ref).await? {
info!(
"Action '{}' already exists (ID: {}), skipping",
action_ref, existing.id
);
result.actions_skipped += 1;
continue;
}
let name = extract_name_from_ref(&action_ref);
let label = data
.get("label")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_else(|| generate_label(&name));
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let entrypoint = data
.get("entry_point")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
// Resolve runtime ID from runner_type
// ("shell" is assumed as the implicit default when unspecified).
let runner_type = data
.get("runner_type")
.and_then(|v| v.as_str())
.unwrap_or("shell");
let runtime_id = self.resolve_runtime_id(runner_type).await?;
let param_schema = data
.get("parameters")
.and_then(|v| serde_json::to_value(v).ok());
let out_schema = data
.get("output")
.and_then(|v| serde_json::to_value(v).ok());
// Read optional fields for parameter delivery/format and output format.
// The database has defaults (stdin, json, text), so we only set these
// in the INSERT if the YAML specifies them.
let parameter_delivery = data
.get("parameter_delivery")
.and_then(|v| v.as_str())
.unwrap_or("stdin")
.to_lowercase();
let parameter_format = data
.get("parameter_format")
.and_then(|v| v.as_str())
.unwrap_or("json")
.to_lowercase();
let output_format = data
.get("output_format")
.and_then(|v| v.as_str())
.unwrap_or("text")
.to_lowercase();
// Use raw SQL to include parameter_delivery, parameter_format,
// output_format which are not in CreateActionInput
let create_result = sqlx::query_scalar::<_, i64>(
r#"
INSERT INTO action (
ref, pack, pack_ref, label, description, entrypoint,
runtime, param_schema, out_schema, is_adhoc,
parameter_delivery, parameter_format, output_format
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
RETURNING id
"#,
)
.bind(&action_ref)
.bind(self.pack_id)
.bind(&self.pack_ref)
.bind(&label)
.bind(&description)
.bind(&entrypoint)
.bind(runtime_id)
.bind(&param_schema)
.bind(&out_schema)
.bind(false) // is_adhoc
.bind(&parameter_delivery)
.bind(&parameter_format)
.bind(&output_format)
.fetch_one(self.pool)
.await;
match create_result {
Ok(id) => {
info!("Created action '{}' (ID: {})", action_ref, id);
result.actions_loaded += 1;
}
Err(e) => {
// Check for unique constraint violation (already exists race condition)
if let sqlx::Error::Database(ref db_err) = e {
if db_err.is_unique_violation() {
info!(
"Action '{}' already exists (concurrent creation), skipping",
action_ref
);
result.actions_skipped += 1;
continue;
}
}
let msg = format!("Failed to create action '{}': {}", action_ref, e);
warn!("{}", msg);
result.warnings.push(msg);
}
}
}
Ok(())
}
/// Load sensor definitions from `pack_dir/sensors/*.yaml`.
///
/// Sensors run on the builtin runtime and link to triggers previously
/// loaded by `load_triggers` (via `trigger_ids`). Existing sensors
/// (matched by `ref`) are skipped.
async fn load_sensors(
&self,
pack_dir: &Path,
trigger_ids: &HashMap<String, Id>,
result: &mut PackLoadResult,
) -> Result<()> {
let sensors_dir = pack_dir.join("sensors");
if !sensors_dir.exists() {
info!("No sensors directory found for pack '{}'", self.pack_ref);
return Ok(());
}
let yaml_files = read_yaml_files(&sensors_dir)?;
info!(
"Found {} sensor definition(s) for pack '{}'",
yaml_files.len(),
self.pack_ref
);
// Resolve sensor runtime
// (all sensors use the "builtin"/core.builtin runtime, resolved once).
let sensor_runtime_id = self.resolve_runtime_id("builtin").await?;
let sensor_runtime_ref = "core.builtin".to_string();
for (filename, content) in &yaml_files {
let data: serde_yaml_ng::Value = serde_yaml_ng::from_str(content).map_err(|e| {
Error::validation(format!("Failed to parse sensor YAML {}: {}", filename, e))
})?;
let sensor_ref = match data.get("ref").and_then(|v| v.as_str()) {
Some(r) => r.to_string(),
None => {
let msg = format!("Sensor YAML {} missing 'ref' field, skipping", filename);
warn!("{}", msg);
result.warnings.push(msg);
continue;
}
};
// Check if sensor already exists
if let Some(existing) = SensorRepository::find_by_ref(self.pool, &sensor_ref).await? {
info!(
"Sensor '{}' already exists (ID: {}), skipping",
sensor_ref, existing.id
);
result.sensors_skipped += 1;
continue;
}
let name = extract_name_from_ref(&sensor_ref);
let label = data
.get("label")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.unwrap_or_else(|| generate_label(&name));
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
let enabled = data
.get("enabled")
.and_then(|v| v.as_bool())
.unwrap_or(true);
let entrypoint = data
.get("entry_point")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
// Resolve trigger reference
let (trigger_id, trigger_ref) = self.resolve_sensor_trigger(&data, trigger_ids).await;
let param_schema = data
.get("parameters")
.and_then(|v| serde_json::to_value(v).ok());
let config = data
.get("config")
.and_then(|v| serde_json::to_value(v).ok())
.unwrap_or_else(|| serde_json::json!({}));
let input = CreateSensorInput {
r#ref: sensor_ref.clone(),
pack: Some(self.pack_id),
pack_ref: Some(self.pack_ref.clone()),
label,
description,
entrypoint,
// NOTE(review): 0 is used as a sentinel when the runtime or
// trigger could not be resolved — confirm the schema/FKs
// tolerate an ID of 0 here.
runtime: sensor_runtime_id.unwrap_or(0),
runtime_ref: sensor_runtime_ref.clone(),
trigger: trigger_id.unwrap_or(0),
trigger_ref: trigger_ref.unwrap_or_default(),
enabled,
param_schema,
config: Some(config),
};
match SensorRepository::create(self.pool, input).await {
Ok(sensor) => {
info!("Created sensor '{}' (ID: {})", sensor_ref, sensor.id);
result.sensors_loaded += 1;
}
Err(e) => {
let msg = format!("Failed to create sensor '{}': {}", sensor_ref, e);
warn!("{}", msg);
result.warnings.push(msg);
}
}
}
Ok(())
}
/// Resolve a runtime ID from a runner type string (e.g., "shell", "python", "builtin").
///
/// Looks up the runtime in the database by `core.{name}` ref pattern,
/// then falls back to a name-based lookup (case-insensitive).
///
/// Known aliases:
/// - "shell" / "bash" / "sh" -> "core.shell"
/// - "python" / "python3"    -> "core.python"
/// - "node" / "nodejs" / "node.js" -> "core.nodejs"
/// - "native"  -> "core.native"
/// - "builtin" -> "core.builtin"
///
/// Returns `Ok(None)` (with a warning logged) when no runtime matches;
/// the caller then creates the component without a runtime.
async fn resolve_runtime_id(&self, runner_type: &str) -> Result<Option<Id>> {
    let runner_lower = runner_type.to_lowercase();
    // Runtime refs use the format `{pack_ref}.{name}` (e.g., "core.python").
    let refs_to_try = match runner_lower.as_str() {
        "shell" | "bash" | "sh" => vec!["core.shell"],
        "python" | "python3" => vec!["core.python"],
        "node" | "nodejs" | "node.js" => vec!["core.nodejs"],
        "native" => vec!["core.native"],
        "builtin" => vec!["core.builtin"],
        // Anything unrecognized is tried verbatim as a ref.
        other => vec![other],
    };
    for runtime_ref in &refs_to_try {
        if let Some(runtime) = RuntimeRepository::find_by_ref(self.pool, runtime_ref).await? {
            return Ok(Some(runtime.id));
        }
    }
    // Fall back to name-based lookup (case-insensitive). `RuntimeRepository`
    // is already in scope (used above), so no local alias import is needed.
    if let Some(runtime) = RuntimeRepository::find_by_name(self.pool, &runner_lower).await? {
        return Ok(Some(runtime.id));
    }
    warn!(
        "Could not find runtime for runner_type '{}', action will have no runtime",
        runner_type
    );
    Ok(None)
}
/// Resolve the trigger reference and ID for a sensor.
///
/// Handles both `trigger_type` (singular) and `trigger_types` (array) fields;
/// the first element of the array takes precedence. Bare names (no dot) are
/// namespaced under the current pack. Returns `(None, None)` when no trigger
/// field is present, and `(None, Some(ref))` when the ref could not be
/// resolved to an ID.
async fn resolve_sensor_trigger(
    &self,
    data: &serde_yaml_ng::Value,
    trigger_ids: &HashMap<String, Id>,
) -> (Option<Id>, Option<String>) {
    // Prefer the first entry of `trigger_types`; fall back to `trigger_type`.
    let from_array = data
        .get("trigger_types")
        .and_then(|v| v.as_sequence())
        .and_then(|seq| seq.first())
        .and_then(|v| v.as_str());
    let raw = match from_array.or_else(|| data.get("trigger_type").and_then(|v| v.as_str())) {
        Some(t) => t,
        None => return (None, None),
    };
    // Qualify bare names with the current pack's ref.
    let trigger_ref = if raw.contains('.') {
        raw.to_string()
    } else {
        format!("{}.{}", self.pack_ref, raw)
    };
    // Triggers loaded earlier in this run take precedence over DB lookups.
    if let Some(&id) = trigger_ids.get(&trigger_ref) {
        return (Some(id), Some(trigger_ref));
    }
    // Fall back to the database; lookup errors are treated as "not found".
    if let Ok(Some(trigger)) = TriggerRepository::find_by_ref(self.pool, &trigger_ref).await {
        return (Some(trigger.id), Some(trigger_ref));
    }
    warn!("Could not resolve trigger ref '{}' for sensor", trigger_ref);
    (None, Some(trigger_ref))
}
}
/// Read all `.yaml` and `.yml` files from a directory, sorted by filename.
///
/// Returns a Vec of (filename, content) pairs. Unreadable directory entries
/// are silently skipped; a failure to open the directory itself or to read
/// a selected file returns an error.
fn read_yaml_files(dir: &Path) -> Result<Vec<(String, String)>> {
    let entries = std::fs::read_dir(dir)
        .map_err(|e| Error::io(format!("Failed to read directory {}: {}", dir.display(), e)))?;

    // Collect only regular files carrying a .yaml/.yml extension.
    let mut yaml_entries = Vec::new();
    for entry in entries.flatten() {
        let path = entry.path();
        if !path.is_file() {
            continue;
        }
        let is_yaml = path
            .extension()
            .and_then(|ext| ext.to_str())
            .map(|ext| ext == "yaml" || ext == "yml")
            .unwrap_or(false);
        if is_yaml {
            yaml_entries.push(entry);
        }
    }

    // Deterministic ordering: sort by filename.
    yaml_entries.sort_by_key(|e| e.file_name());

    let mut files = Vec::with_capacity(yaml_entries.len());
    for entry in yaml_entries {
        let path = entry.path();
        let content = std::fs::read_to_string(&path)
            .map_err(|e| Error::io(format!("Failed to read file {}: {}", path.display(), e)))?;
        files.push((entry.file_name().to_string_lossy().to_string(), content));
    }
    Ok(files)
}
/// Extract the short name from a dotted ref (e.g., "core.echo" -> "echo").
///
/// Inputs without a dot are returned unchanged.
fn extract_name_from_ref(r: &str) -> String {
    match r.rfind('.') {
        Some(idx) => r[idx + 1..].to_string(),
        None => r.to_string(),
    }
}
/// Generate a human-readable label from a snake_case name.
///
/// Each underscore-separated word is capitalized and the words are joined
/// with single spaces.
///
/// Examples:
/// - "echo" -> "Echo"
/// - "http_request" -> "Http Request"
/// - "datetime_timer" -> "Datetime Timer"
fn generate_label(name: &str) -> String {
    let mut label = String::with_capacity(name.len());
    for (i, word) in name.split('_').enumerate() {
        if i > 0 {
            label.push(' ');
        }
        let mut chars = word.chars();
        if let Some(first) = chars.next() {
            // `to_uppercase` can expand to multiple chars for some scripts.
            label.extend(first.to_uppercase());
            label.push_str(chars.as_str());
        }
    }
    label
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `extract_name_from_ref` keeps only the segment after the last dot.
    #[test]
    fn test_extract_name_from_ref() {
        assert_eq!(extract_name_from_ref("core.echo"), "echo");
        assert_eq!(extract_name_from_ref("python_example.greet"), "greet");
        assert_eq!(extract_name_from_ref("simple"), "simple");
        assert_eq!(extract_name_from_ref("a.b.c"), "c");
        // Edge case: empty input passes through unchanged.
        assert_eq!(extract_name_from_ref(""), "");
    }

    /// `generate_label` capitalizes each snake_case word and joins with spaces.
    #[test]
    fn test_generate_label() {
        assert_eq!(generate_label("echo"), "Echo");
        assert_eq!(generate_label("http_request"), "Http Request");
        assert_eq!(generate_label("datetime_timer"), "Datetime Timer");
        assert_eq!(generate_label("a_b_c"), "A B C");
        // Edge case: empty input yields an empty label.
        assert_eq!(generate_label(""), "");
    }
}

View File

@@ -9,17 +9,19 @@
pub mod client;
pub mod dependency;
pub mod installer;
pub mod loader;
pub mod storage;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
// Re-export client, installer, storage, and dependency utilities
// Re-export client, installer, loader, storage, and dependency utilities
pub use client::RegistryClient;
pub use dependency::{
DependencyValidation, DependencyValidator, PackDepValidation, RuntimeDepValidation,
};
pub use installer::{InstalledPack, PackInstaller, PackSource};
pub use loader::{PackComponentLoader, PackLoadResult};
pub use storage::{
calculate_directory_checksum, calculate_file_checksum, verify_checksum, PackStorage,
};
@@ -245,7 +247,10 @@ impl Checksum {
pub fn parse(s: &str) -> Result<Self, String> {
let parts: Vec<&str> = s.splitn(2, ':').collect();
if parts.len() != 2 {
return Err(format!("Invalid checksum format: {}. Expected 'algorithm:hash'", s));
return Err(format!(
"Invalid checksum format: {}. Expected 'algorithm:hash'",
s
));
}
let algorithm = parts[0].to_lowercase();
@@ -259,7 +264,10 @@ impl Checksum {
// Basic validation of hash format (hex string)
if !hash.chars().all(|c| c.is_ascii_hexdigit()) {
return Err(format!("Invalid hash format: {}. Must be hexadecimal", hash));
return Err(format!(
"Invalid hash format: {}. Must be hexadecimal",
hash
));
}
Ok(Self { algorithm, hash })

View File

@@ -33,6 +33,7 @@ pub struct CreateRuntimeInput {
pub name: String,
pub distributions: JsonDict,
pub installation: Option<JsonDict>,
pub execution_config: JsonDict,
}
/// Input for updating a runtime
@@ -42,6 +43,7 @@ pub struct UpdateRuntimeInput {
pub name: Option<String>,
pub distributions: Option<JsonDict>,
pub installation: Option<JsonDict>,
pub execution_config: Option<JsonDict>,
}
#[async_trait::async_trait]
@@ -53,7 +55,8 @@ impl FindById for RuntimeRepository {
let runtime = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE id = $1
"#,
@@ -75,7 +78,8 @@ impl FindByRef for RuntimeRepository {
let runtime = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE ref = $1
"#,
@@ -97,7 +101,8 @@ impl List for RuntimeRepository {
let runtimes = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
ORDER BY ref ASC
"#,
@@ -120,10 +125,11 @@ impl Create for RuntimeRepository {
let runtime = sqlx::query_as::<_, Runtime>(
r#"
INSERT INTO runtime (ref, pack, pack_ref, description, name,
distributions, installation, installers)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
distributions, installation, installers, execution_config)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
RETURNING id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
"#,
)
.bind(&input.r#ref)
@@ -134,6 +140,7 @@ impl Create for RuntimeRepository {
.bind(&input.distributions)
.bind(&input.installation)
.bind(serde_json::json!({}))
.bind(&input.execution_config)
.fetch_one(executor)
.await?;
@@ -187,6 +194,15 @@ impl Update for RuntimeRepository {
has_updates = true;
}
if let Some(execution_config) = &input.execution_config {
if has_updates {
query.push(", ");
}
query.push("execution_config = ");
query.push_bind(execution_config);
has_updates = true;
}
if !has_updates {
// No updates requested, fetch and return existing entity
return Self::get_by_id(executor, id).await;
@@ -194,7 +210,10 @@ impl Update for RuntimeRepository {
query.push(", updated = NOW() WHERE id = ");
query.push_bind(id);
query.push(" RETURNING id, ref, pack, pack_ref, description, name, distributions, installation, installers, created, updated");
query.push(
" RETURNING id, ref, pack, pack_ref, description, name, \
distributions, installation, installers, execution_config, created, updated",
);
let runtime = query
.build_query_as::<Runtime>()
@@ -229,7 +248,8 @@ impl RuntimeRepository {
let runtimes = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE pack = $1
ORDER BY ref ASC
@@ -241,6 +261,29 @@ impl RuntimeRepository {
Ok(runtimes)
}
/// Find a runtime by name (case-insensitive)
pub async fn find_by_name<'e, E>(executor: E, name: &str) -> Result<Option<Runtime>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let runtime = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE LOWER(name) = LOWER($1)
LIMIT 1
"#,
)
.bind(name)
.fetch_optional(executor)
.await?;
Ok(runtime)
}
}
// ============================================================================
@@ -338,7 +381,7 @@ impl Create for WorkerRepository {
INSERT INTO worker (name, worker_type, runtime, host, port, status,
capabilities, meta)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
RETURNING id, name, worker_type, runtime, host, port, status,
RETURNING id, name, worker_type, worker_role, runtime, host, port, status,
capabilities, meta, last_heartbeat, created, updated
"#,
)
@@ -428,7 +471,10 @@ impl Update for WorkerRepository {
query.push(", updated = NOW() WHERE id = ");
query.push_bind(id);
query.push(" RETURNING id, name, worker_type, worker_role, runtime, host, port, status, capabilities, meta, last_heartbeat, created, updated");
query.push(
" RETURNING id, name, worker_type, worker_role, runtime, host, port, status, \
capabilities, meta, last_heartbeat, created, updated",
);
let worker = query.build_query_as::<Worker>().fetch_one(executor).await?;

View File

@@ -109,13 +109,13 @@ impl RuntimeDetector {
pub async fn detect_from_database(&self) -> Result<Vec<String>> {
info!("Querying database for runtime definitions...");
// Query all runtimes from database (no longer filtered by type)
// Query all runtimes from database
let runtimes = sqlx::query_as::<_, Runtime>(
r#"
SELECT id, ref, pack, pack_ref, description, name,
distributions, installation, installers, created, updated
distributions, installation, installers, execution_config,
created, updated
FROM runtime
WHERE ref NOT LIKE '%.sensor.builtin'
ORDER BY ref
"#,
)

View File

@@ -174,24 +174,18 @@ impl RefValidator {
Ok(())
}
/// Validate pack.type.component format (e.g., "core.action.webhook")
/// Validate pack.name format (e.g., "core.python", "core.shell")
pub fn validate_runtime_ref(ref_str: &str) -> Result<()> {
let parts: Vec<&str> = ref_str.split('.').collect();
if parts.len() != 3 {
if parts.len() != 2 {
return Err(Error::validation(format!(
"Invalid runtime reference format: '{}'. Expected 'pack.type.component'",
"Invalid runtime reference format: '{}'. Expected 'pack.name' (e.g., 'core.python')",
ref_str
)));
}
Self::validate_identifier(parts[0])?;
if parts[1] != "action" && parts[1] != "sensor" {
return Err(Error::validation(format!(
"Invalid runtime type: '{}'. Must be 'action' or 'sensor'",
parts[1]
)));
}
Self::validate_identifier(parts[2])?;
Self::validate_identifier(parts[1])?;
Ok(())
}
@@ -267,13 +261,15 @@ mod tests {
#[test]
fn test_ref_validator_runtime() {
assert!(RefValidator::validate_runtime_ref("core.action.webhook").is_ok());
assert!(RefValidator::validate_runtime_ref("mypack.sensor.monitor").is_ok());
assert!(RefValidator::validate_runtime_ref("core.python").is_ok());
assert!(RefValidator::validate_runtime_ref("core.shell").is_ok());
assert!(RefValidator::validate_runtime_ref("mypack.nodejs").is_ok());
assert!(RefValidator::validate_runtime_ref("core.builtin").is_ok());
// Invalid formats
assert!(RefValidator::validate_runtime_ref("core.webhook").is_err());
assert!(RefValidator::validate_runtime_ref("core.invalid.webhook").is_err());
assert!(RefValidator::validate_runtime_ref("Core.action.webhook").is_err());
assert!(RefValidator::validate_runtime_ref("core.action.webhook").is_err()); // 3-part no longer valid
assert!(RefValidator::validate_runtime_ref("python").is_err()); // missing pack
assert!(RefValidator::validate_runtime_ref("Core.python").is_err()); // uppercase
}
#[test]

View File

@@ -54,12 +54,29 @@ impl TestExecutor {
Self { pack_base_dir }
}
/// Execute all tests for a pack
/// Execute all tests for a pack, looking up the pack directory from the base dir
pub async fn execute_pack_tests(
&self,
pack_ref: &str,
pack_version: &str,
test_config: &TestConfig,
) -> Result<PackTestResult> {
let pack_dir = self.pack_base_dir.join(pack_ref);
self.execute_pack_tests_at(&pack_dir, pack_ref, pack_version, test_config)
.await
}
/// Execute all tests for a pack at a specific directory path.
///
/// Use this when the pack files are not yet at the standard
/// `packs_base_dir/pack_ref` location (e.g., during installation
/// from a temp directory).
pub async fn execute_pack_tests_at(
&self,
pack_dir: &Path,
pack_ref: &str,
pack_version: &str,
test_config: &TestConfig,
) -> Result<PackTestResult> {
info!("Executing tests for pack: {} v{}", pack_ref, pack_version);
@@ -69,7 +86,6 @@ impl TestExecutor {
));
}
let pack_dir = self.pack_base_dir.join(pack_ref);
if !pack_dir.exists() {
return Err(Error::not_found(
"pack_directory",