formatting: reformat code (line-wrapping/whitespace only, no functional changes)

This commit is contained in:
2026-03-04 22:42:23 -06:00
parent 67a1c02543
commit 95765f50a8
30 changed files with 5172 additions and 336 deletions

View File

@@ -56,7 +56,10 @@ impl RegistryClient {
let http_client = reqwest::Client::builder()
.timeout(timeout)
.user_agent(format!("attune-registry-client/{}", env!("CARGO_PKG_VERSION")))
.user_agent(format!(
"attune-registry-client/{}",
env!("CARGO_PKG_VERSION")
))
.build()
.map_err(|e| Error::Internal(format!("Failed to create HTTP client: {}", e)))?;
@@ -69,7 +72,9 @@ impl RegistryClient {
/// Get all enabled registries sorted by priority (lower number = higher priority)
pub fn get_registries(&self) -> Vec<RegistryIndexConfig> {
let mut registries: Vec<_> = self.config.indices
let mut registries: Vec<_> = self
.config
.indices
.iter()
.filter(|r| r.enabled)
.cloned()
@@ -156,7 +161,8 @@ impl RegistryClient {
/// Fetch index from file:// URL
async fn fetch_index_from_file(&self, url: &str) -> Result<PackIndex> {
let path = url.strip_prefix("file://")
let path = url
.strip_prefix("file://")
.ok_or_else(|| Error::Configuration(format!("Invalid file URL: {}", url)))?;
let path = PathBuf::from(path);
@@ -209,11 +215,7 @@ impl RegistryClient {
}
}
Err(e) => {
tracing::warn!(
"Failed to fetch registry {}: {}",
registry.url,
e
);
tracing::warn!("Failed to fetch registry {}: {}", registry.url, e);
continue;
}
}
@@ -236,7 +238,10 @@ impl RegistryClient {
let matches = pack.pack_ref.to_lowercase().contains(&keyword_lower)
|| pack.label.to_lowercase().contains(&keyword_lower)
|| pack.description.to_lowercase().contains(&keyword_lower)
|| pack.keywords.iter().any(|k| k.to_lowercase().contains(&keyword_lower));
|| pack
.keywords
.iter()
.any(|k| k.to_lowercase().contains(&keyword_lower));
if matches {
results.push((pack, registry.url.clone()));
@@ -244,11 +249,7 @@ impl RegistryClient {
}
}
Err(e) => {
tracing::warn!(
"Failed to fetch registry {}: {}",
registry.url,
e
);
tracing::warn!("Failed to fetch registry {}: {}", registry.url, e);
continue;
}
}
@@ -264,7 +265,9 @@ impl RegistryClient {
registry_name: &str,
) -> Result<Option<PackIndexEntry>> {
// Find registry by name
let registry = self.config.indices
let registry = self
.config
.indices
.iter()
.find(|r| r.name.as_deref() == Some(registry_name))
.ok_or_else(|| Error::not_found("registry", "name", registry_name))?;

View File

@@ -23,15 +23,9 @@ pub type ProgressCallback = Arc<dyn Fn(ProgressEvent) + Send + Sync>;
#[derive(Debug, Clone)]
pub enum ProgressEvent {
/// Started a new step
StepStarted {
step: String,
message: String,
},
StepStarted { step: String, message: String },
/// Step completed
StepCompleted {
step: String,
message: String,
},
StepCompleted { step: String, message: String },
/// Download progress
Downloading {
url: String,
@@ -39,21 +33,13 @@ pub enum ProgressEvent {
total_bytes: Option<u64>,
},
/// Extraction progress
Extracting {
file: String,
},
Extracting { file: String },
/// Verification progress
Verifying {
message: String,
},
Verifying { message: String },
/// Warning message
Warning {
message: String,
},
Warning { message: String },
/// Info message
Info {
message: String,
},
Info { message: String },
}
/// Pack installer for handling various installation sources
@@ -151,12 +137,15 @@ impl PackInstaller {
/// Install a pack from the given source
pub async fn install(&self, source: PackSource) -> Result<InstalledPack> {
match source {
PackSource::Git { url, git_ref } => self.install_from_git(&url, git_ref.as_deref()).await,
PackSource::Git { url, git_ref } => {
self.install_from_git(&url, git_ref.as_deref()).await
}
PackSource::Archive { url } => self.install_from_archive_url(&url, None).await,
PackSource::LocalDirectory { path } => self.install_from_local_directory(&path).await,
PackSource::LocalArchive { path } => self.install_from_local_archive(&path).await,
PackSource::Registry { pack_ref, version } => {
self.install_from_registry(&pack_ref, version.as_deref()).await
self.install_from_registry(&pack_ref, version.as_deref())
.await
}
}
}
@@ -267,7 +256,11 @@ impl PackInstaller {
// Verify source exists and is a directory
if !source_path.exists() {
return Err(Error::not_found("directory", "path", source_path.display().to_string()));
return Err(Error::not_found(
"directory",
"path",
source_path.display().to_string(),
));
}
if !source_path.is_dir() {
@@ -301,7 +294,11 @@ impl PackInstaller {
// Verify file exists
if !archive_path.exists() {
return Err(Error::not_found("file", "path", archive_path.display().to_string()));
return Err(Error::not_found(
"file",
"path",
archive_path.display().to_string(),
));
}
if !archive_path.is_file() {
@@ -369,9 +366,7 @@ impl PackInstaller {
git_ref,
checksum,
} => {
let mut installed = self
.install_from_git(&url, git_ref.as_deref())
.await?;
let mut installed = self.install_from_git(&url, git_ref.as_deref()).await?;
installed.checksum = Some(checksum);
Ok(installed)
}
@@ -426,11 +421,7 @@ impl PackInstaller {
}
// Determine filename from URL
let filename = url
.split('/')
.last()
.unwrap_or("archive.zip")
.to_string();
let filename = url.split('/').last().unwrap_or("archive.zip").to_string();
let archive_path = self.temp_dir.join(&filename);
@@ -483,7 +474,10 @@ impl PackInstaller {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(Error::internal(format!("Failed to extract zip: {}", stderr)));
return Err(Error::internal(format!(
"Failed to extract zip: {}",
stderr
)));
}
Ok(())
@@ -502,22 +496,23 @@ impl PackInstaller {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(Error::internal(format!("Failed to extract tar.gz: {}", stderr)));
return Err(Error::internal(format!(
"Failed to extract tar.gz: {}",
stderr
)));
}
Ok(())
}
/// Verify archive checksum
async fn verify_archive_checksum(
&self,
archive_path: &Path,
checksum_str: &str,
) -> Result<()> {
async fn verify_archive_checksum(&self, archive_path: &Path, checksum_str: &str) -> Result<()> {
let checksum = Checksum::parse(checksum_str)
.map_err(|e| Error::validation(format!("Invalid checksum: {}", e)))?;
let computed = self.compute_checksum(archive_path, &checksum.algorithm).await?;
let computed = self
.compute_checksum(archive_path, &checksum.algorithm)
.await?;
if computed != checksum.hash {
return Err(Error::validation(format!(
@@ -553,7 +548,10 @@ impl PackInstaller {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(Error::internal(format!("Checksum computation failed: {}", stderr)));
return Err(Error::internal(format!(
"Checksum computation failed: {}",
stderr
)));
}
let stdout = String::from_utf8_lossy(&output.stdout);
@@ -611,9 +609,9 @@ impl PackInstaller {
use tokio::fs;
// Create destination directory if it doesn't exist
fs::create_dir_all(dst)
.await
.map_err(|e| Error::internal(format!("Failed to create destination directory: {}", e)))?;
fs::create_dir_all(dst).await.map_err(|e| {
Error::internal(format!("Failed to create destination directory: {}", e))
})?;
// Read source directory
let mut entries = fs::read_dir(src)

View File

@@ -145,7 +145,8 @@ impl PackStorage {
})?;
for entry in entries {
let entry = entry.map_err(|e| Error::io(format!("Failed to read directory entry: {}", e)))?;
let entry =
entry.map_err(|e| Error::io(format!("Failed to read directory entry: {}", e)))?;
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
@@ -209,13 +210,21 @@ pub fn calculate_directory_checksum<P: AsRef<Path>>(path: P) -> Result<String> {
// Hash file contents
let mut file = fs::File::open(&file_path).map_err(|e| {
Error::io(format!("Failed to open file {}: {}", file_path.display(), e))
Error::io(format!(
"Failed to open file {}: {}",
file_path.display(),
e
))
})?;
let mut buffer = [0u8; 8192];
loop {
let n = file.read(&mut buffer).map_err(|e| {
Error::io(format!("Failed to read file {}: {}", file_path.display(), e))
Error::io(format!(
"Failed to read file {}: {}",
file_path.display(),
e
))
})?;
if n == 0 {
break;
@@ -255,15 +264,14 @@ pub fn calculate_file_checksum<P: AsRef<Path>>(path: P) -> Result<String> {
}
let mut hasher = Sha256::new();
let mut file = fs::File::open(path).map_err(|e| {
Error::io(format!("Failed to open file {}: {}", path.display(), e))
})?;
let mut file = fs::File::open(path)
.map_err(|e| Error::io(format!("Failed to open file {}: {}", path.display(), e)))?;
let mut buffer = [0u8; 8192];
loop {
let n = file.read(&mut buffer).map_err(|e| {
Error::io(format!("Failed to read file {}: {}", path.display(), e))
})?;
let n = file
.read(&mut buffer)
.map_err(|e| Error::io(format!("Failed to read file {}: {}", path.display(), e)))?;
if n == 0 {
break;
}
@@ -291,7 +299,8 @@ fn copy_dir_all(src: &Path, dst: &Path) -> Result<()> {
e
))
})? {
let entry = entry.map_err(|e| Error::io(format!("Failed to read directory entry: {}", e)))?;
let entry =
entry.map_err(|e| Error::io(format!("Failed to read directory entry: {}", e)))?;
let path = entry.path();
let file_name = entry.file_name();
let dest_path = dst.join(&file_name);

View File

@@ -332,9 +332,8 @@ impl ExecutionRepository {
.collect::<Vec<_>>()
.join(", ");
let select_clause = format!(
"{prefixed_select}, enf.rule_ref AS rule_ref, enf.trigger_ref AS trigger_ref"
);
let select_clause =
format!("{prefixed_select}, enf.rule_ref AS rule_ref, enf.trigger_ref AS trigger_ref");
let from_clause = "FROM execution e LEFT JOIN enforcement enf ON e.enforcement = enf.id";
@@ -425,10 +424,7 @@ impl ExecutionRepository {
}
// ── COUNT query ──────────────────────────────────────────────────
let total: i64 = count_qb
.build_query_scalar()
.fetch_one(db)
.await?;
let total: i64 = count_qb.build_query_scalar().fetch_one(db).await?;
let total = total.max(0) as u64;
// ── Data query with ORDER BY + pagination ────────────────────────
@@ -438,10 +434,7 @@ impl ExecutionRepository {
qb.push(" OFFSET ");
qb.push_bind(filters.offset as i64);
let rows: Vec<ExecutionWithRefs> = qb
.build_query_as()
.fetch_all(db)
.await?;
let rows: Vec<ExecutionWithRefs> = qb.build_query_as().fetch_all(db).await?;
Ok(ExecutionSearchResult { rows, total })
}

View File

@@ -556,11 +556,7 @@ mod tests {
#[test]
fn test_context_without_event_metadata() {
// Context with only a payload — no id, trigger, or created
let context = TemplateContext::new(
json!({"service": "test"}),
json!({}),
json!({}),
);
let context = TemplateContext::new(json!({"service": "test"}), json!({}), json!({}));
let template = json!({
"service": "{{ event.payload.service }}",

View File

@@ -87,26 +87,14 @@ pub enum Expr {
},
/// Unary operation: `op operand`
UnaryOp {
op: UnaryOp,
operand: Box<Expr>,
},
UnaryOp { op: UnaryOp, operand: Box<Expr> },
/// Property access: `expr.field`
DotAccess {
object: Box<Expr>,
field: String,
},
DotAccess { object: Box<Expr>, field: String },
/// Index/bracket access: `expr[index_expr]`
IndexAccess {
object: Box<Expr>,
index: Box<Expr>,
},
IndexAccess { object: Box<Expr>, index: Box<Expr> },
/// Function call: `name(arg1, arg2, ...)`
FunctionCall {
name: String,
args: Vec<Expr>,
},
FunctionCall { name: String, args: Vec<Expr> },
}

View File

@@ -741,7 +741,9 @@ fn to_int(v: &JsonValue) -> EvalResult<JsonValue> {
} else if let Some(f) = n.as_f64() {
Ok(json!(f as i64))
} else {
Err(EvalError::TypeError("Cannot convert number to int".to_string()))
Err(EvalError::TypeError(
"Cannot convert number to int".to_string(),
))
}
}
JsonValue::String(s) => {
@@ -958,9 +960,7 @@ fn fn_join(arr: &JsonValue, sep: &JsonValue) -> EvalResult<JsonValue> {
))
})?;
let sep = require_string("join", sep)?;
let strings: Result<Vec<String>, _> = arr.iter().map(|v| {
Ok(value_to_string(v))
}).collect();
let strings: Result<Vec<String>, _> = arr.iter().map(|v| Ok(value_to_string(v))).collect();
Ok(json!(strings?.join(sep)))
}
@@ -986,8 +986,7 @@ fn fn_ends_with(s: &JsonValue, suffix: &JsonValue) -> EvalResult<JsonValue> {
fn fn_match(pattern: &JsonValue, s: &JsonValue) -> EvalResult<JsonValue> {
let pattern = require_string("match", pattern)?;
let s = require_string("match", s)?;
let re = Regex::new(pattern)
.map_err(|e| EvalError::RegexError(format!("{}", e)))?;
let re = Regex::new(pattern).map_err(|e| EvalError::RegexError(format!("{}", e)))?;
Ok(json!(re.is_match(s)))
}
@@ -1012,9 +1011,7 @@ fn fn_reversed(v: &JsonValue) -> EvalResult<JsonValue> {
rev.reverse();
Ok(JsonValue::Array(rev))
}
JsonValue::String(s) => {
Ok(json!(s.chars().rev().collect::<String>()))
}
JsonValue::String(s) => Ok(json!(s.chars().rev().collect::<String>())),
_ => Err(EvalError::TypeError(format!(
"reversed() requires array or string, got {}",
type_name(v)
@@ -1095,7 +1092,10 @@ fn fn_flat(v: &JsonValue) -> EvalResult<JsonValue> {
fn fn_zip(a: &JsonValue, b: &JsonValue) -> EvalResult<JsonValue> {
let a_arr = a.as_array().ok_or_else(|| {
EvalError::TypeError(format!("zip() first argument must be array, got {}", type_name(a)))
EvalError::TypeError(format!(
"zip() first argument must be array, got {}",
type_name(a)
))
})?;
let b_arr = b.as_array().ok_or_else(|| {
EvalError::TypeError(format!(
@@ -1114,37 +1114,38 @@ fn fn_zip(a: &JsonValue, b: &JsonValue) -> EvalResult<JsonValue> {
}
fn fn_range_1(end: &JsonValue) -> EvalResult<JsonValue> {
let n = end.as_i64().ok_or_else(|| {
EvalError::TypeError("range() requires integer argument".to_string())
})?;
let n = end
.as_i64()
.ok_or_else(|| EvalError::TypeError("range() requires integer argument".to_string()))?;
let arr: Vec<JsonValue> = (0..n).map(|i| json!(i)).collect();
Ok(JsonValue::Array(arr))
}
fn fn_range_2(start: &JsonValue, end: &JsonValue) -> EvalResult<JsonValue> {
let s = start.as_i64().ok_or_else(|| {
EvalError::TypeError("range() requires integer arguments".to_string())
})?;
let e = end.as_i64().ok_or_else(|| {
EvalError::TypeError("range() requires integer arguments".to_string())
})?;
let s = start
.as_i64()
.ok_or_else(|| EvalError::TypeError("range() requires integer arguments".to_string()))?;
let e = end
.as_i64()
.ok_or_else(|| EvalError::TypeError("range() requires integer arguments".to_string()))?;
let arr: Vec<JsonValue> = (s..e).map(|i| json!(i)).collect();
Ok(JsonValue::Array(arr))
}
fn fn_slice(v: &JsonValue, start: &JsonValue, end: &JsonValue) -> EvalResult<JsonValue> {
let s = start.as_i64().ok_or_else(|| {
EvalError::TypeError("slice() start must be integer".to_string())
})? as usize;
let s = start
.as_i64()
.ok_or_else(|| EvalError::TypeError("slice() start must be integer".to_string()))?
as usize;
match v {
JsonValue::Array(arr) => {
let e = if end.is_null() {
arr.len()
} else {
end.as_i64()
.ok_or_else(|| EvalError::TypeError("slice() end must be integer".to_string()))?
as usize
end.as_i64().ok_or_else(|| {
EvalError::TypeError("slice() end must be integer".to_string())
})? as usize
};
let e = e.min(arr.len());
let s = s.min(e);
@@ -1155,9 +1156,9 @@ fn fn_slice(v: &JsonValue, start: &JsonValue, end: &JsonValue) -> EvalResult<Jso
let e = if end.is_null() {
chars.len()
} else {
end.as_i64()
.ok_or_else(|| EvalError::TypeError("slice() end must be integer".to_string()))?
as usize
end.as_i64().ok_or_else(|| {
EvalError::TypeError("slice() end must be integer".to_string())
})? as usize
};
let e = e.min(chars.len());
let s = s.min(e);
@@ -1182,7 +1183,9 @@ fn fn_index_of(haystack: &JsonValue, needle: &JsonValue) -> EvalResult<JsonValue
}
JsonValue::String(s) => {
let needle = needle.as_str().ok_or_else(|| {
EvalError::TypeError("index_of() needle must be string for string search".to_string())
EvalError::TypeError(
"index_of() needle must be string for string search".to_string(),
)
})?;
match s.find(needle) {
Some(pos) => Ok(json!(pos as i64)),
@@ -1292,10 +1295,7 @@ mod tests {
&json!({"a": [1, 2], "b": {"c": 3}}),
&json!({"b": {"c": 3}, "a": [1, 2]})
));
assert!(!json_eq(
&json!({"a": [1, 2]}),
&json!({"a": [1, 3]})
));
assert!(!json_eq(&json!({"a": [1, 2]}), &json!({"a": [1, 3]})));
}
#[test]

View File

@@ -71,12 +71,12 @@ use serde_json::Value as JsonValue;
/// This is the main entry point for the expression engine. It tokenizes the
/// input, parses it into an AST, and evaluates it to produce a `JsonValue`.
pub fn eval_expression(input: &str, ctx: &dyn EvalContext) -> EvalResult<JsonValue> {
let tokens = Tokenizer::new(input).tokenize().map_err(|e| {
EvalError::ParseError(format!("{}", e))
})?;
let ast = Parser::new(&tokens).parse().map_err(|e| {
EvalError::ParseError(format!("{}", e))
})?;
let tokens = Tokenizer::new(input)
.tokenize()
.map_err(|e| EvalError::ParseError(format!("{}", e)))?;
let ast = Parser::new(&tokens)
.parse()
.map_err(|e| EvalError::ParseError(format!("{}", e)))?;
evaluator::eval(&ast, ctx)
}
@@ -84,9 +84,9 @@ pub fn eval_expression(input: &str, ctx: &dyn EvalContext) -> EvalResult<JsonVal
///
/// Useful for validation or inspection.
pub fn parse_expression(input: &str) -> Result<Expr, ParseError> {
let tokens = Tokenizer::new(input).tokenize().map_err(|e| {
ParseError::TokenError(format!("{}", e))
})?;
let tokens = Tokenizer::new(input)
.tokenize()
.map_err(|e| ParseError::TokenError(format!("{}", e)))?;
Parser::new(&tokens).parse()
}
@@ -149,7 +149,10 @@ mod tests {
fn test_float_arithmetic() {
let ctx = TestContext::new();
assert_eq!(eval_expression("2.5 + 1.5", &ctx).unwrap(), json!(4.0));
assert_eq!(eval_expression("10.0 / 3.0", &ctx).unwrap(), json!(10.0 / 3.0));
assert_eq!(
eval_expression("10.0 / 3.0", &ctx).unwrap(),
json!(10.0 / 3.0)
);
}
#[test]
@@ -214,9 +217,18 @@ mod tests {
#[test]
fn test_string_comparison() {
let ctx = TestContext::new();
assert_eq!(eval_expression("\"abc\" == \"abc\"", &ctx).unwrap(), json!(true));
assert_eq!(eval_expression("\"abc\" < \"abd\"", &ctx).unwrap(), json!(true));
assert_eq!(eval_expression("\"abc\" > \"abb\"", &ctx).unwrap(), json!(true));
assert_eq!(
eval_expression("\"abc\" == \"abc\"", &ctx).unwrap(),
json!(true)
);
assert_eq!(
eval_expression("\"abc\" < \"abd\"", &ctx).unwrap(),
json!(true)
);
assert_eq!(
eval_expression("\"abc\" > \"abb\"", &ctx).unwrap(),
json!(true)
);
}
#[test]
@@ -225,7 +237,10 @@ mod tests {
assert_eq!(eval_expression("null == null", &ctx).unwrap(), json!(true));
assert_eq!(eval_expression("null != null", &ctx).unwrap(), json!(false));
assert_eq!(eval_expression("null == 0", &ctx).unwrap(), json!(false));
assert_eq!(eval_expression("null == false", &ctx).unwrap(), json!(false));
assert_eq!(
eval_expression("null == false", &ctx).unwrap(),
json!(false)
);
}
#[test]
@@ -256,9 +271,15 @@ mod tests {
fn test_boolean_operators() {
let ctx = TestContext::new();
assert_eq!(eval_expression("true and true", &ctx).unwrap(), json!(true));
assert_eq!(eval_expression("true and false", &ctx).unwrap(), json!(false));
assert_eq!(
eval_expression("true and false", &ctx).unwrap(),
json!(false)
);
assert_eq!(eval_expression("false or true", &ctx).unwrap(), json!(true));
assert_eq!(eval_expression("false or false", &ctx).unwrap(), json!(false));
assert_eq!(
eval_expression("false or false", &ctx).unwrap(),
json!(false)
);
assert_eq!(eval_expression("not true", &ctx).unwrap(), json!(false));
assert_eq!(eval_expression("not false", &ctx).unwrap(), json!(true));
}
@@ -283,8 +304,7 @@ mod tests {
#[test]
fn test_dot_access() {
let ctx = TestContext::new()
.with_var("obj", json!({"a": {"b": 42}}));
let ctx = TestContext::new().with_var("obj", json!({"a": {"b": 42}}));
assert_eq!(eval_expression("obj.a.b", &ctx).unwrap(), json!(42));
}
@@ -294,7 +314,10 @@ mod tests {
.with_var("arr", json!([10, 20, 30]))
.with_var("obj", json!({"key": "value"}));
assert_eq!(eval_expression("arr[1]", &ctx).unwrap(), json!(20));
assert_eq!(eval_expression("obj[\"key\"]", &ctx).unwrap(), json!("value"));
assert_eq!(
eval_expression("obj[\"key\"]", &ctx).unwrap(),
json!("value")
);
}
#[test]
@@ -304,9 +327,18 @@ mod tests {
.with_var("obj", json!({"key": "val"}));
assert_eq!(eval_expression("2 in arr", &ctx).unwrap(), json!(true));
assert_eq!(eval_expression("5 in arr", &ctx).unwrap(), json!(false));
assert_eq!(eval_expression("\"key\" in obj", &ctx).unwrap(), json!(true));
assert_eq!(eval_expression("\"nope\" in obj", &ctx).unwrap(), json!(false));
assert_eq!(eval_expression("\"ell\" in \"hello\"", &ctx).unwrap(), json!(true));
assert_eq!(
eval_expression("\"key\" in obj", &ctx).unwrap(),
json!(true)
);
assert_eq!(
eval_expression("\"nope\" in obj", &ctx).unwrap(),
json!(false)
);
assert_eq!(
eval_expression("\"ell\" in \"hello\"", &ctx).unwrap(),
json!(true)
);
}
// ---------------------------------------------------------------
@@ -319,7 +351,10 @@ mod tests {
.with_var("arr", json!([1, 2, 3]))
.with_var("obj", json!({"a": 1, "b": 2}));
assert_eq!(eval_expression("length(arr)", &ctx).unwrap(), json!(3));
assert_eq!(eval_expression("length(\"hello\")", &ctx).unwrap(), json!(5));
assert_eq!(
eval_expression("length(\"hello\")", &ctx).unwrap(),
json!(5)
);
assert_eq!(eval_expression("length(obj)", &ctx).unwrap(), json!(2));
}
@@ -327,7 +362,10 @@ mod tests {
fn test_type_conversions() {
let ctx = TestContext::new();
assert_eq!(eval_expression("string(42)", &ctx).unwrap(), json!("42"));
assert_eq!(eval_expression("number(\"3.14\")", &ctx).unwrap(), json!(3.14));
assert_eq!(
eval_expression("number(\"3.14\")", &ctx).unwrap(),
json!(3.14)
);
assert_eq!(eval_expression("int(3.9)", &ctx).unwrap(), json!(3));
assert_eq!(eval_expression("int(\"42\")", &ctx).unwrap(), json!(42));
assert_eq!(eval_expression("bool(1)", &ctx).unwrap(), json!(true));
@@ -341,18 +379,35 @@ mod tests {
let ctx = TestContext::new()
.with_var("arr", json!([1]))
.with_var("obj", json!({}));
assert_eq!(eval_expression("type_of(42)", &ctx).unwrap(), json!("number"));
assert_eq!(eval_expression("type_of(\"hi\")", &ctx).unwrap(), json!("string"));
assert_eq!(eval_expression("type_of(true)", &ctx).unwrap(), json!("bool"));
assert_eq!(eval_expression("type_of(null)", &ctx).unwrap(), json!("null"));
assert_eq!(eval_expression("type_of(arr)", &ctx).unwrap(), json!("array"));
assert_eq!(eval_expression("type_of(obj)", &ctx).unwrap(), json!("object"));
assert_eq!(
eval_expression("type_of(42)", &ctx).unwrap(),
json!("number")
);
assert_eq!(
eval_expression("type_of(\"hi\")", &ctx).unwrap(),
json!("string")
);
assert_eq!(
eval_expression("type_of(true)", &ctx).unwrap(),
json!("bool")
);
assert_eq!(
eval_expression("type_of(null)", &ctx).unwrap(),
json!("null")
);
assert_eq!(
eval_expression("type_of(arr)", &ctx).unwrap(),
json!("array")
);
assert_eq!(
eval_expression("type_of(obj)", &ctx).unwrap(),
json!("object")
);
}
#[test]
fn test_keys_values() {
let ctx = TestContext::new()
.with_var("obj", json!({"b": 2, "a": 1}));
let ctx = TestContext::new().with_var("obj", json!({"b": 2, "a": 1}));
let keys = eval_expression("sort(keys(obj))", &ctx).unwrap();
assert_eq!(keys, json!(["a", "b"]));
let values = eval_expression("sort(values(obj))", &ctx).unwrap();
@@ -368,15 +423,27 @@ mod tests {
assert_eq!(eval_expression("round(3.5)", &ctx).unwrap(), json!(4));
assert_eq!(eval_expression("min(3, 7)", &ctx).unwrap(), json!(3));
assert_eq!(eval_expression("max(3, 7)", &ctx).unwrap(), json!(7));
assert_eq!(eval_expression("sum([1, 2, 3, 4])", &ctx).unwrap(), json!(10));
assert_eq!(
eval_expression("sum([1, 2, 3, 4])", &ctx).unwrap(),
json!(10)
);
}
#[test]
fn test_string_functions() {
let ctx = TestContext::new();
assert_eq!(eval_expression("lower(\"HELLO\")", &ctx).unwrap(), json!("hello"));
assert_eq!(eval_expression("upper(\"hello\")", &ctx).unwrap(), json!("HELLO"));
assert_eq!(eval_expression("trim(\" hi \")", &ctx).unwrap(), json!("hi"));
assert_eq!(
eval_expression("lower(\"HELLO\")", &ctx).unwrap(),
json!("hello")
);
assert_eq!(
eval_expression("upper(\"hello\")", &ctx).unwrap(),
json!("HELLO")
);
assert_eq!(
eval_expression("trim(\" hi \")", &ctx).unwrap(),
json!("hi")
);
assert_eq!(
eval_expression("replace(\"hello world\", \"world\", \"rust\")", &ctx).unwrap(),
json!("hello rust")
@@ -414,10 +481,15 @@ mod tests {
#[test]
fn test_collection_functions() {
let ctx = TestContext::new()
.with_var("arr", json!([3, 1, 2]));
assert_eq!(eval_expression("sort(arr)", &ctx).unwrap(), json!([1, 2, 3]));
assert_eq!(eval_expression("reversed(arr)", &ctx).unwrap(), json!([2, 1, 3]));
let ctx = TestContext::new().with_var("arr", json!([3, 1, 2]));
assert_eq!(
eval_expression("sort(arr)", &ctx).unwrap(),
json!([1, 2, 3])
);
assert_eq!(
eval_expression("reversed(arr)", &ctx).unwrap(),
json!([2, 1, 3])
);
assert_eq!(
eval_expression("unique([1, 2, 2, 3, 1])", &ctx).unwrap(),
json!([1, 2, 3])
@@ -435,14 +507,23 @@ mod tests {
#[test]
fn test_range() {
let ctx = TestContext::new();
assert_eq!(eval_expression("range(5)", &ctx).unwrap(), json!([0, 1, 2, 3, 4]));
assert_eq!(eval_expression("range(2, 5)", &ctx).unwrap(), json!([2, 3, 4]));
assert_eq!(
eval_expression("range(5)", &ctx).unwrap(),
json!([0, 1, 2, 3, 4])
);
assert_eq!(
eval_expression("range(2, 5)", &ctx).unwrap(),
json!([2, 3, 4])
);
}
#[test]
fn test_reversed_string() {
let ctx = TestContext::new();
assert_eq!(eval_expression("reversed(\"abc\")", &ctx).unwrap(), json!("cba"));
assert_eq!(
eval_expression("reversed(\"abc\")", &ctx).unwrap(),
json!("cba")
);
}
#[test]
@@ -464,8 +545,7 @@ mod tests {
#[test]
fn test_complex_expression() {
let ctx = TestContext::new()
.with_var("items", json!([1, 2, 3, 4, 5]));
let ctx = TestContext::new().with_var("items", json!([1, 2, 3, 4, 5]));
assert_eq!(
eval_expression("length(items) > 3 and 5 in items", &ctx).unwrap(),
json!(true)
@@ -474,8 +554,10 @@ mod tests {
#[test]
fn test_chained_access() {
let ctx = TestContext::new()
.with_var("data", json!({"users": [{"name": "Alice"}, {"name": "Bob"}]}));
let ctx = TestContext::new().with_var(
"data",
json!({"users": [{"name": "Alice"}, {"name": "Bob"}]}),
);
assert_eq!(
eval_expression("data.users[1].name", &ctx).unwrap(),
json!("Bob")
@@ -484,8 +566,7 @@ mod tests {
#[test]
fn test_ternary_via_boolean() {
let ctx = TestContext::new()
.with_var("x", json!(10));
let ctx = TestContext::new().with_var("x", json!(10));
// No ternary operator, but boolean expressions work for conditions
assert_eq!(
eval_expression("x > 5 and x < 20", &ctx).unwrap(),

View File

@@ -368,8 +368,8 @@ impl<'a> Parser<'a> {
#[cfg(test)]
mod tests {
use super::*;
use super::super::tokenizer::Tokenizer;
use super::*;
fn parse(input: &str) -> Expr {
let tokens = Tokenizer::new(input).tokenize().unwrap();

View File

@@ -320,14 +320,14 @@ impl Tokenizer {
}
if is_float {
let val: f64 = num_str.parse().map_err(|_| {
TokenError::InvalidNumber(start, num_str.clone())
})?;
let val: f64 = num_str
.parse()
.map_err(|_| TokenError::InvalidNumber(start, num_str.clone()))?;
Ok(Token::new(TokenKind::Float(val), start, self.pos))
} else {
let val: i64 = num_str.parse().map_err(|_| {
TokenError::InvalidNumber(start, num_str.clone())
})?;
let val: i64 = num_str
.parse()
.map_err(|_| TokenError::InvalidNumber(start, num_str.clone()))?;
Ok(Token::new(TokenKind::Integer(val), start, self.pos))
}
}
@@ -365,11 +365,7 @@ mod tests {
fn tokenize(input: &str) -> Vec<TokenKind> {
let mut t = Tokenizer::new(input);
t.tokenize()
.unwrap()
.into_iter()
.map(|t| t.kind)
.collect()
t.tokenize().unwrap().into_iter().map(|t| t.kind).collect()
}
#[test]

View File

@@ -307,9 +307,7 @@ impl WorkflowLoader {
// Strip `.workflow` suffix if present:
// "deploy.workflow.yaml" -> stem "deploy.workflow" -> name "deploy"
// "deploy.yaml" -> stem "deploy" -> name "deploy"
let name = raw_stem
.strip_suffix(".workflow")
.unwrap_or(raw_stem);
let name = raw_stem.strip_suffix(".workflow").unwrap_or(raw_stem);
let ref_name = format!("{}.{}", pack_name, name);
workflow_files.push(WorkflowFile {

View File

@@ -1501,7 +1501,10 @@ tasks:
let failure_transition = &task.next[1];
assert_eq!(failure_transition.publish.len(), 1);
if let PublishDirective::Simple(map) = &failure_transition.publish[0] {
assert_eq!(map.get("validation_passed"), Some(&serde_json::Value::Bool(false)));
assert_eq!(
map.get("validation_passed"),
Some(&serde_json::Value::Bool(false))
);
} else {
panic!("Expected Simple publish directive");
}

View File

@@ -43,7 +43,7 @@ async fn test_create_execution_basic() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let execution = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -76,7 +76,7 @@ async fn test_create_execution_without_action() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let execution = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -109,7 +109,7 @@ async fn test_create_execution_with_all_fields() {
executor: None, // Don't reference non-existent identity
status: ExecutionStatus::Scheduled,
result: Some(json!({"status": "ok"})),
workflow_task: None,
workflow_task: None,
};
let execution = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -144,7 +144,7 @@ async fn test_create_execution_with_parent() {
executor: None,
status: ExecutionStatus::Running,
result: None,
workflow_task: None,
workflow_task: None,
};
let parent = ExecutionRepository::create(&pool, parent_input)
@@ -162,7 +162,7 @@ async fn test_create_execution_with_parent() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let child = ExecutionRepository::create(&pool, child_input)
@@ -200,7 +200,7 @@ async fn test_find_execution_by_id() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -252,7 +252,7 @@ async fn test_list_executions() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
ExecutionRepository::create(&pool, input).await.unwrap();
@@ -297,7 +297,7 @@ async fn test_list_executions_ordered_by_created_desc() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let exec = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -347,7 +347,7 @@ async fn test_update_execution_status() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -391,7 +391,7 @@ async fn test_update_execution_result() {
executor: None,
status: ExecutionStatus::Running,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -436,7 +436,7 @@ async fn test_update_execution_executor() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -479,7 +479,7 @@ async fn test_update_execution_status_transitions() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let exec = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -569,7 +569,7 @@ async fn test_update_execution_failed_status() {
executor: None,
status: ExecutionStatus::Running,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -613,7 +613,7 @@ async fn test_update_execution_no_changes() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -656,7 +656,7 @@ async fn test_delete_execution() {
executor: None,
status: ExecutionStatus::Completed,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -721,7 +721,7 @@ async fn test_find_executions_by_status() {
executor: None,
status: *status,
result: None,
workflow_task: None,
workflow_task: None,
};
ExecutionRepository::create(&pool, input).await.unwrap();
@@ -767,7 +767,7 @@ async fn test_find_executions_by_enforcement() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let _exec1 = ExecutionRepository::create(&pool, exec1_input)
.await
@@ -785,7 +785,7 @@ async fn test_find_executions_by_enforcement() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
ExecutionRepository::create(&pool, input).await.unwrap();
@@ -828,7 +828,7 @@ async fn test_parent_child_execution_hierarchy() {
executor: None,
status: ExecutionStatus::Running,
result: None,
workflow_task: None,
workflow_task: None,
};
let parent = ExecutionRepository::create(&pool, parent_input)
@@ -848,7 +848,7 @@ async fn test_parent_child_execution_hierarchy() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let child = ExecutionRepository::create(&pool, child_input)
@@ -891,7 +891,7 @@ async fn test_nested_execution_hierarchy() {
executor: None,
status: ExecutionStatus::Running,
result: None,
workflow_task: None,
workflow_task: None,
};
let grandparent = ExecutionRepository::create(&pool, grandparent_input)
@@ -909,7 +909,7 @@ async fn test_nested_execution_hierarchy() {
executor: None,
status: ExecutionStatus::Running,
result: None,
workflow_task: None,
workflow_task: None,
};
let parent = ExecutionRepository::create(&pool, parent_input)
@@ -927,7 +927,7 @@ async fn test_nested_execution_hierarchy() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let child = ExecutionRepository::create(&pool, child_input)
@@ -968,7 +968,7 @@ async fn test_execution_timestamps() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -1038,7 +1038,7 @@ async fn test_execution_config_json() {
executor: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
};
let execution = ExecutionRepository::create(&pool, input).await.unwrap();
@@ -1070,7 +1070,7 @@ async fn test_execution_result_json() {
executor: None,
status: ExecutionStatus::Running,
result: None,
workflow_task: None,
workflow_task: None,
};
let created = ExecutionRepository::create(&pool, input).await.unwrap();

View File

@@ -50,7 +50,7 @@ async fn test_create_inquiry_minimal() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -109,7 +109,7 @@ async fn test_create_inquiry_with_response_schema() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -166,7 +166,7 @@ async fn test_create_inquiry_with_timeout() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -219,7 +219,7 @@ async fn test_create_inquiry_with_assigned_user() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -306,7 +306,7 @@ async fn test_find_inquiry_by_id() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -366,7 +366,7 @@ async fn test_get_inquiry_by_id() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -434,7 +434,7 @@ async fn test_list_inquiries() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -494,7 +494,7 @@ async fn test_update_inquiry_status() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -549,7 +549,7 @@ async fn test_update_inquiry_status_transitions() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -633,7 +633,7 @@ async fn test_update_inquiry_response() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -690,7 +690,7 @@ async fn test_update_inquiry_with_response_and_status() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -747,7 +747,7 @@ async fn test_update_inquiry_assignment() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -813,7 +813,7 @@ async fn test_update_inquiry_no_changes() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -888,7 +888,7 @@ async fn test_delete_inquiry() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -946,7 +946,7 @@ async fn test_delete_execution_cascades_to_inquiries() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -1012,7 +1012,7 @@ async fn test_find_inquiries_by_status() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -1090,7 +1090,7 @@ async fn test_find_inquiries_by_execution() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -1108,7 +1108,7 @@ async fn test_find_inquiries_by_execution() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -1171,7 +1171,7 @@ async fn test_inquiry_timestamps_auto_managed() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await
@@ -1237,7 +1237,7 @@ async fn test_inquiry_complex_response_schema() {
executor: None,
status: attune_common::models::enums::ExecutionStatus::Requested,
result: None,
workflow_task: None,
workflow_task: None,
},
)
.await

View File

@@ -482,7 +482,7 @@ async fn test_list_rules() {
action_params: json!({}),
trigger_params: json!({}),
enabled: true,
is_adhoc: false,
is_adhoc: false,
};
RuleRepository::create(&pool, input).await.unwrap();
@@ -535,7 +535,7 @@ async fn test_list_rules_ordered_by_ref() {
action_params: json!({}),
trigger_params: json!({}),
enabled: true,
is_adhoc: false,
is_adhoc: false,
};
RuleRepository::create(&pool, input).await.unwrap();
@@ -983,7 +983,7 @@ async fn test_find_rules_by_pack() {
action_params: json!({}),
trigger_params: json!({}),
enabled: true,
is_adhoc: false,
is_adhoc: false,
};
RuleRepository::create(&pool, input).await.unwrap();
@@ -1060,7 +1060,7 @@ async fn test_find_rules_by_action() {
action_params: json!({}),
trigger_params: json!({}),
enabled: true,
is_adhoc: false,
is_adhoc: false,
};
RuleRepository::create(&pool, input).await.unwrap();
@@ -1141,7 +1141,7 @@ async fn test_find_rules_by_trigger() {
action_params: json!({}),
trigger_params: json!({}),
enabled: true,
is_adhoc: false,
is_adhoc: false,
};
RuleRepository::create(&pool, input).await.unwrap();
@@ -1172,7 +1172,9 @@ async fn test_find_rules_by_trigger() {
.unwrap();
assert_eq!(trigger1_rules.len(), 2);
assert!(trigger1_rules.iter().all(|r| r.trigger == Some(trigger1.id)));
assert!(trigger1_rules
.iter()
.all(|r| r.trigger == Some(trigger1.id)));
let trigger2_rules = RuleRepository::find_by_trigger(&pool, trigger2.id)
.await
@@ -1217,7 +1219,7 @@ async fn test_find_enabled_rules() {
action_params: json!({}),
trigger_params: json!({}),
enabled: true,
is_adhoc: false,
is_adhoc: false,
};
RuleRepository::create(&pool, input).await.unwrap();
@@ -1239,7 +1241,7 @@ async fn test_find_enabled_rules() {
action_params: json!({}),
trigger_params: json!({}),
enabled: false,
is_adhoc: false,
is_adhoc: false,
};
RuleRepository::create(&pool, input).await.unwrap();