This commit is contained in:
2026-03-02 19:27:52 -06:00
parent 42a9f1d31a
commit 5da940639a
40 changed files with 3931 additions and 2785 deletions

View File

@@ -53,6 +53,9 @@ jsonschema = { workspace = true }
# OpenAPI
utoipa = { workspace = true }
# JWT
jsonwebtoken = { workspace = true }
# Encryption
argon2 = { workspace = true }
ring = { workspace = true }

View File

@@ -0,0 +1,460 @@
//! JWT token generation and validation
//!
//! Shared across all Attune services. Token types:
//! - **Access**: Standard user login tokens (1h default)
//! - **Refresh**: Long-lived refresh tokens (7d default)
//! - **Sensor**: Sensor service tokens with trigger type metadata (24h default)
//! - **Execution**: Short-lived tokens scoped to a single execution (matching execution timeout)
use chrono::{Duration, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};
use thiserror::Error;
/// Errors produced by JWT generation and validation.
#[derive(Debug, Error)]
pub enum JwtError {
    /// Signing or claim serialization failed while building a token.
    #[error("Failed to encode JWT: {0}")]
    EncodeError(String),
    /// The token could not be parsed, or its signature did not verify.
    #[error("Failed to decode JWT: {0}")]
    DecodeError(String),
    /// The token's `exp` claim is in the past (see `validate_token`).
    #[error("Token has expired")]
    Expired,
    /// Catch-all for otherwise-unacceptable tokens.
    /// NOTE(review): never constructed in this module — presumably reserved
    /// for callers; confirm before removing.
    #[error("Invalid token")]
    Invalid,
}
/// JWT Claims structure
///
/// Common payload for all four token types. `scope` and `metadata` are
/// omitted from the serialized token when `None` (access/refresh tokens).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Claims {
    /// Subject (identity ID, stringified)
    pub sub: String,
    /// Identity login (or descriptor like "execution:123")
    pub login: String,
    /// Issued at (Unix timestamp)
    pub iat: i64,
    /// Expiration time (Unix timestamp)
    pub exp: i64,
    /// Token type (access, refresh, sensor, or execution).
    /// Defaults to `Access` when the claim is absent, so tokens minted
    /// before this field existed still decode.
    #[serde(default)]
    pub token_type: TokenType,
    /// Optional scope (e.g., "sensor", "execution")
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
    /// Optional metadata (e.g., trigger_types for sensors, execution_id for execution tokens)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}
/// Discriminates the four token categories issued by Attune services.
///
/// Serialized in lowercase (e.g. `"access"`) inside the `token_type` claim.
/// `Access` is the default so tokens without an explicit `token_type`
/// deserialize as access tokens (see `#[serde(default)]` on `Claims`).
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum TokenType {
    /// Standard user login token.
    #[default]
    Access,
    /// Long-lived token used to mint new access tokens.
    Refresh,
    /// Sensor service token carrying trigger-type metadata.
    Sensor,
    /// Short-lived token scoped to a single execution.
    Execution,
}
/// Configuration for JWT tokens
///
/// One shared HMAC secret signs every token type; only the access/refresh
/// TTLs live here — sensor and execution TTLs are passed per call.
#[derive(Debug, Clone)]
pub struct JwtConfig {
    /// Secret key for signing tokens (HMAC, shared by all token types)
    pub secret: String,
    /// Access token expiration duration (in seconds)
    pub access_token_expiration: i64,
    /// Refresh token expiration duration (in seconds)
    pub refresh_token_expiration: i64,
}
impl Default for JwtConfig {
fn default() -> Self {
Self {
secret: "insecure_default_secret_change_in_production".to_string(),
access_token_expiration: 3600, // 1 hour
refresh_token_expiration: 604800, // 7 days
}
}
}
/// Generate a JWT access token for a user login session.
///
/// Thin wrapper over [`generate_token`] with [`TokenType::Access`]; the TTL
/// comes from `config.access_token_expiration`.
pub fn generate_access_token(
    identity_id: i64,
    login: &str,
    config: &JwtConfig,
) -> Result<String, JwtError> {
    generate_token(identity_id, login, config, TokenType::Access)
}
/// Generate a JWT refresh token.
///
/// Thin wrapper over [`generate_token`] with [`TokenType::Refresh`]; the TTL
/// comes from `config.refresh_token_expiration`.
pub fn generate_refresh_token(
    identity_id: i64,
    login: &str,
    config: &JwtConfig,
) -> Result<String, JwtError> {
    generate_token(identity_id, login, config, TokenType::Refresh)
}
/// Generate a signed JWT of the given type for `identity_id`/`login`.
///
/// The TTL is taken from `config` for access/refresh tokens. Sensor and
/// execution tokens have dedicated constructors with explicit TTLs; the
/// fallback values here (24h / 5min) only apply if this is called directly.
///
/// # Errors
/// Returns [`JwtError::EncodeError`] if signing fails.
pub fn generate_token(
    identity_id: i64,
    login: &str,
    config: &JwtConfig,
    token_type: TokenType,
) -> Result<String, JwtError> {
    let issued_at = Utc::now();
    let ttl_seconds = match token_type {
        TokenType::Access => config.access_token_expiration,
        TokenType::Refresh => config.refresh_token_expiration,
        // Normally produced via generate_sensor_token / generate_execution_token.
        TokenType::Sensor => 86400,
        TokenType::Execution => 300,
    };
    let claims = Claims {
        sub: identity_id.to_string(),
        login: login.to_string(),
        iat: issued_at.timestamp(),
        exp: (issued_at + Duration::seconds(ttl_seconds)).timestamp(),
        token_type,
        scope: None,
        metadata: None,
    };
    encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(config.secret.as_bytes()),
    )
    .map_err(|e| JwtError::EncodeError(e.to_string()))
}
/// Generate a sensor token carrying the trigger types this sensor may emit.
///
/// The claims get `scope = "sensor"` and a `trigger_types` array in metadata.
///
/// # Arguments
/// * `identity_id` - Identity ID for the sensor
/// * `sensor_ref` - Sensor reference used as the `login` claim (e.g. "sensor:core.timer")
/// * `trigger_types` - Trigger types this sensor can create events for
/// * `config` - JWT configuration (signing secret)
/// * `ttl_seconds` - Lifetime in seconds; `None` means 24 hours
pub fn generate_sensor_token(
    identity_id: i64,
    sensor_ref: &str,
    trigger_types: Vec<String>,
    config: &JwtConfig,
    ttl_seconds: Option<i64>,
) -> Result<String, JwtError> {
    let issued = Utc::now();
    let ttl = ttl_seconds.unwrap_or(86400); // fall back to 24 hours
    let claims = Claims {
        sub: identity_id.to_string(),
        login: sensor_ref.to_string(),
        iat: issued.timestamp(),
        exp: (issued + Duration::seconds(ttl)).timestamp(),
        token_type: TokenType::Sensor,
        scope: Some("sensor".to_string()),
        metadata: Some(serde_json::json!({
            "trigger_types": trigger_types,
        })),
    };
    encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(config.secret.as_bytes()),
    )
    .map_err(|e| JwtError::EncodeError(e.to_string()))
}
/// Generate an execution-scoped token.
///
/// A short-lived credential (matching the execution timeout) that lets
/// actions call back into the Attune API (e.g., to create artifacts or
/// update progress) without full user credentials. It simply expires when
/// its TTL elapses.
///
/// # Arguments
/// * `identity_id` - Identity that triggered the execution
/// * `execution_id` - Execution this token is scoped to (also embedded in the `login` claim)
/// * `action_ref` - Action reference recorded in metadata for audit/logging
/// * `config` - JWT configuration (same signing secret as all tokens)
/// * `ttl_seconds` - Lifetime in seconds; `None` means 360 (5 min timeout + 60s grace)
pub fn generate_execution_token(
    identity_id: i64,
    execution_id: i64,
    action_ref: &str,
    config: &JwtConfig,
    ttl_seconds: Option<i64>,
) -> Result<String, JwtError> {
    let issued = Utc::now();
    let ttl = ttl_seconds.unwrap_or(360); // 5-minute execution timeout + 60s grace
    let claims = Claims {
        sub: identity_id.to_string(),
        login: format!("execution:{}", execution_id),
        iat: issued.timestamp(),
        exp: (issued + Duration::seconds(ttl)).timestamp(),
        token_type: TokenType::Execution,
        scope: Some("execution".to_string()),
        metadata: Some(serde_json::json!({
            "execution_id": execution_id,
            "action_ref": action_ref,
        })),
    };
    encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(config.secret.as_bytes()),
    )
    .map_err(|e| JwtError::EncodeError(e.to_string()))
}
/// Validate and decode a JWT token
pub fn validate_token(token: &str, config: &JwtConfig) -> Result<Claims, JwtError> {
let validation = Validation::default();
decode::<Claims>(
token,
&DecodingKey::from_secret(config.secret.as_bytes()),
&validation,
)
.map(|data| data.claims)
.map_err(|e| {
if e.to_string().contains("ExpiredSignature") {
JwtError::Expired
} else {
JwtError::DecodeError(e.to_string())
}
})
}
/// Extract the bearer token from an `Authorization` header value.
///
/// Returns `None` unless the value uses the `Bearer ` scheme. Note that a
/// bare `"Bearer "` header yields `Some("")` — the (possibly empty) remainder
/// after the prefix — matching the existing behavior relied on by callers.
pub fn extract_token_from_header(auth_header: &str) -> Option<&str> {
    auth_header.strip_prefix("Bearer ")
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shared fixture: deterministic secret with the standard 1h/7d TTLs.
    fn test_config() -> JwtConfig {
        JwtConfig {
            secret: "test_secret_key_for_testing".to_string(),
            access_token_expiration: 3600,
            refresh_token_expiration: 604800,
        }
    }

    #[test]
    fn test_generate_and_validate_access_token() {
        let config = test_config();
        let token =
            generate_access_token(123, "testuser", &config).expect("Failed to generate token");
        let claims = validate_token(&token, &config).expect("Failed to validate token");
        assert_eq!(claims.sub, "123");
        assert_eq!(claims.login, "testuser");
        assert_eq!(claims.token_type, TokenType::Access);
    }

    #[test]
    fn test_generate_and_validate_refresh_token() {
        let config = test_config();
        let token =
            generate_refresh_token(456, "anotheruser", &config).expect("Failed to generate token");
        let claims = validate_token(&token, &config).expect("Failed to validate token");
        assert_eq!(claims.sub, "456");
        assert_eq!(claims.login, "anotheruser");
        assert_eq!(claims.token_type, TokenType::Refresh);
    }

    #[test]
    fn test_invalid_token() {
        let config = test_config();
        let result = validate_token("invalid.token.here", &config);
        assert!(result.is_err());
    }

    #[test]
    fn test_token_with_wrong_secret() {
        let config = test_config();
        let token = generate_access_token(789, "user", &config).expect("Failed to generate token");
        // Same claims, different signing key — validation must fail.
        let wrong_config = JwtConfig {
            secret: "different_secret".to_string(),
            ..config
        };
        let result = validate_token(&token, &wrong_config);
        assert!(result.is_err());
    }

    #[test]
    fn test_expired_token() {
        // Hand-roll a token whose exp is 30 minutes in the past.
        let now = Utc::now().timestamp();
        let expired_claims = Claims {
            sub: "999".to_string(),
            login: "expireduser".to_string(),
            iat: now - 3600,
            exp: now - 1800,
            token_type: TokenType::Access,
            scope: None,
            metadata: None,
        };
        let config = test_config();
        let expired_token = encode(
            &Header::default(),
            &expired_claims,
            &EncodingKey::from_secret(config.secret.as_bytes()),
        )
        .expect("Failed to encode token");
        let result = validate_token(&expired_token, &config);
        // Expiration must map to the dedicated Expired variant, not DecodeError.
        assert!(matches!(result, Err(JwtError::Expired)));
    }

    #[test]
    fn test_extract_token_from_header() {
        let header = "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9";
        let token = extract_token_from_header(header);
        assert_eq!(token, Some("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"));
        let invalid_header = "Token abc123";
        let token = extract_token_from_header(invalid_header);
        assert_eq!(token, None);
        // Documented quirk: a bare "Bearer " yields Some("") rather than None.
        let no_token = "Bearer ";
        let token = extract_token_from_header(no_token);
        assert_eq!(token, Some(""));
    }

    #[test]
    fn test_claims_serialization() {
        let claims = Claims {
            sub: "123".to_string(),
            login: "testuser".to_string(),
            iat: 1234567890,
            exp: 1234571490,
            token_type: TokenType::Access,
            scope: None,
            metadata: None,
        };
        let json = serde_json::to_string(&claims).expect("Failed to serialize");
        let deserialized: Claims = serde_json::from_str(&json).expect("Failed to deserialize");
        assert_eq!(claims.sub, deserialized.sub);
        assert_eq!(claims.login, deserialized.login);
        assert_eq!(claims.token_type, deserialized.token_type);
    }

    #[test]
    fn test_generate_sensor_token() {
        let config = test_config();
        let trigger_types = vec!["core.timer".to_string(), "core.webhook".to_string()];
        let token = generate_sensor_token(
            999,
            "sensor:core.timer",
            trigger_types.clone(),
            &config,
            Some(86400),
        )
        .expect("Failed to generate sensor token");
        let claims = validate_token(&token, &config).expect("Failed to validate token");
        assert_eq!(claims.sub, "999");
        assert_eq!(claims.login, "sensor:core.timer");
        assert_eq!(claims.token_type, TokenType::Sensor);
        assert_eq!(claims.scope, Some("sensor".to_string()));
        // The trigger types must round-trip through the metadata claim.
        let metadata = claims.metadata.expect("Metadata should be present");
        let trigger_types_from_token = metadata["trigger_types"]
            .as_array()
            .expect("trigger_types should be an array");
        assert_eq!(trigger_types_from_token.len(), 2);
    }

    #[test]
    fn test_generate_execution_token() {
        let config = test_config();
        let token =
            generate_execution_token(42, 12345, "python_example.artifact_demo", &config, None)
                .expect("Failed to generate execution token");
        let claims = validate_token(&token, &config).expect("Failed to validate token");
        assert_eq!(claims.sub, "42");
        assert_eq!(claims.login, "execution:12345");
        assert_eq!(claims.token_type, TokenType::Execution);
        assert_eq!(claims.scope, Some("execution".to_string()));
        let metadata = claims.metadata.expect("Metadata should be present");
        assert_eq!(metadata["execution_id"], 12345);
        assert_eq!(metadata["action_ref"], "python_example.artifact_demo");
    }

    #[test]
    fn test_execution_token_custom_ttl() {
        let config = test_config();
        let token = generate_execution_token(1, 100, "core.echo", &config, Some(600))
            .expect("Failed to generate execution token");
        let claims = validate_token(&token, &config).expect("Failed to validate token");
        // Should expire roughly 600 seconds from now (small slack for clock drift).
        let now = Utc::now().timestamp();
        let diff = claims.exp - now;
        assert!(
            diff > 590 && diff <= 600,
            "TTL should be ~600s, got {}s",
            diff
        );
    }

    #[test]
    fn test_token_type_serialization() {
        // Ensure all token types round-trip through JSON correctly
        for tt in [
            TokenType::Access,
            TokenType::Refresh,
            TokenType::Sensor,
            TokenType::Execution,
        ] {
            let json = serde_json::to_string(&tt).expect("Failed to serialize");
            let deserialized: TokenType =
                serde_json::from_str(&json).expect("Failed to deserialize");
            assert_eq!(tt, deserialized);
        }
    }
}

View File

@@ -0,0 +1,13 @@
//! Authentication primitives shared across Attune services.
//!
//! This module provides JWT token types, generation, and validation functions
//! that are used by the API (for all token types), the worker (for execution-scoped
//! tokens), and the sensor service (for sensor tokens).
pub mod jwt;
pub use jwt::{
extract_token_from_header, generate_access_token, generate_execution_token,
generate_refresh_token, generate_sensor_token, generate_token, validate_token, Claims,
JwtConfig, JwtError, TokenType,
};

View File

@@ -6,6 +6,7 @@
//! - Configuration
//! - Utilities
pub mod auth;
pub mod config;
pub mod crypto;
pub mod db;

View File

@@ -10,6 +10,8 @@ use sqlx::FromRow;
// Re-export common types
pub use action::*;
pub use artifact::Artifact;
pub use artifact_version::ArtifactVersion;
pub use entity_history::*;
pub use enums::*;
pub use event::*;
@@ -355,7 +357,7 @@ pub mod enums {
Url,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Type)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Type, ToSchema)]
#[sqlx(type_name = "artifact_retention_enum", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
pub enum RetentionPolicyType {
@@ -1268,9 +1270,66 @@ pub mod artifact {
pub r#type: ArtifactType,
pub retention_policy: RetentionPolicyType,
pub retention_limit: i32,
/// Human-readable name (e.g. "Build Log", "Test Results")
pub name: Option<String>,
/// Optional longer description
pub description: Option<String>,
/// MIME content type (e.g. "application/json", "text/plain")
pub content_type: Option<String>,
/// Size of the latest version's content in bytes
pub size_bytes: Option<i64>,
/// Execution that produced this artifact (no FK — execution is a hypertable)
pub execution: Option<Id>,
/// Structured JSONB data for progress artifacts or metadata
pub data: Option<serde_json::Value>,
pub created: DateTime<Utc>,
pub updated: DateTime<Utc>,
}
/// Select columns for Artifact queries (excludes DB-only columns if any arise).
/// Must be kept in sync with the Artifact struct field order.
///
/// NOTE(review): every repository query interpolates this constant, so a
/// column added to the struct without updating this list will fail at
/// runtime, not compile time.
pub const SELECT_COLUMNS: &str =
    "id, ref, scope, owner, type, retention_policy, retention_limit, \
     name, description, content_type, size_bytes, execution, data, \
     created, updated";
}
/// Artifact version model — immutable content snapshots
pub mod artifact_version {
    use super::*;

    /// One immutable snapshot of an artifact's content.
    #[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
    pub struct ArtifactVersion {
        pub id: Id,
        /// Parent artifact
        pub artifact: Id,
        /// Version number (1-based, monotonically increasing per artifact)
        pub version: i32,
        /// MIME content type for this version
        pub content_type: Option<String>,
        /// Size of content in bytes
        pub size_bytes: Option<i64>,
        /// Binary content (file data) — not included in default queries for performance
        #[serde(skip_serializing)]
        pub content: Option<Vec<u8>>,
        /// Structured JSON content
        pub content_json: Option<serde_json::Value>,
        /// Free-form metadata about this version
        pub meta: Option<serde_json::Value>,
        /// Who created this version
        pub created_by: Option<String>,
        pub created: DateTime<Utc>,
    }

    /// Select columns WITHOUT the potentially large `content` BYTEA column.
    /// `NULL::bytea AS content` keeps the row shape aligned with the struct
    /// (so `content` maps to `None`) while skipping the payload read.
    /// Use `SELECT_COLUMNS_WITH_CONTENT` when you need the binary payload.
    pub const SELECT_COLUMNS: &str = "id, artifact, version, content_type, size_bytes, \
        NULL::bytea AS content, content_json, meta, created_by, created";

    /// Select columns INCLUDING the binary `content` column.
    pub const SELECT_COLUMNS_WITH_CONTENT: &str =
        "id, artifact, version, content_type, size_bytes, \
         content, content_json, meta, created_by, created";
}
/// Workflow orchestration models

View File

@@ -1,7 +1,8 @@
//! Artifact repository for database operations
//! Artifact and ArtifactVersion repositories for database operations
use crate::models::{
artifact::*,
artifact_version::ArtifactVersion,
enums::{ArtifactType, OwnerType, RetentionPolicyType},
};
use crate::Result;
@@ -9,6 +10,10 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
// ============================================================================
// ArtifactRepository
// ============================================================================
pub struct ArtifactRepository;
impl Repository for ArtifactRepository {
@@ -26,6 +31,11 @@ pub struct CreateArtifactInput {
pub r#type: ArtifactType,
pub retention_policy: RetentionPolicyType,
pub retention_limit: i32,
pub name: Option<String>,
pub description: Option<String>,
pub content_type: Option<String>,
pub execution: Option<i64>,
pub data: Option<serde_json::Value>,
}
#[derive(Debug, Clone, Default)]
@@ -36,6 +46,29 @@ pub struct UpdateArtifactInput {
pub r#type: Option<ArtifactType>,
pub retention_policy: Option<RetentionPolicyType>,
pub retention_limit: Option<i32>,
pub name: Option<String>,
pub description: Option<String>,
pub content_type: Option<String>,
pub size_bytes: Option<i64>,
pub data: Option<serde_json::Value>,
}
/// Filters for searching artifacts
///
/// All filters are optional and combined with AND. `limit`/`offset` drive
/// pagination; `limit` is clamped to 1000 by `ArtifactRepository::search`.
#[derive(Debug, Clone, Default)]
pub struct ArtifactSearchFilters {
    /// Restrict to a scope (see `OwnerType`)
    pub scope: Option<OwnerType>,
    /// Exact owner match
    pub owner: Option<String>,
    /// Exact artifact type match
    pub r#type: Option<ArtifactType>,
    /// Artifacts produced by this execution
    pub execution: Option<i64>,
    /// Case-insensitive substring match on `name` (ILIKE)
    pub name_contains: Option<String>,
    /// Maximum rows to return (0 returns no rows)
    pub limit: u32,
    /// Rows to skip before the first returned row
    pub offset: u32,
}
/// Search result with total count
pub struct ArtifactSearchResult {
    /// The requested page of matching artifacts
    pub rows: Vec<Artifact>,
    /// Total matches ignoring limit/offset (for pagination UIs)
    pub total: i64,
}
#[async_trait::async_trait]
@@ -44,15 +77,12 @@ impl FindById for ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
WHERE id = $1",
)
.bind(id)
.fetch_optional(executor)
.await
.map_err(Into::into)
let query = format!("SELECT {} FROM artifact WHERE id = $1", SELECT_COLUMNS);
sqlx::query_as::<_, Artifact>(&query)
.bind(id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
}
@@ -62,15 +92,12 @@ impl FindByRef for ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
WHERE ref = $1",
)
.bind(ref_str)
.fetch_optional(executor)
.await
.map_err(Into::into)
let query = format!("SELECT {} FROM artifact WHERE ref = $1", SELECT_COLUMNS);
sqlx::query_as::<_, Artifact>(&query)
.bind(ref_str)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
}
@@ -80,15 +107,14 @@ impl List for ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
ORDER BY created DESC
LIMIT 1000",
)
.fetch_all(executor)
.await
.map_err(Into::into)
let query = format!(
"SELECT {} FROM artifact ORDER BY created DESC LIMIT 1000",
SELECT_COLUMNS
);
sqlx::query_as::<_, Artifact>(&query)
.fetch_all(executor)
.await
.map_err(Into::into)
}
}
@@ -100,20 +126,28 @@ impl Create for ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"INSERT INTO artifact (ref, scope, owner, type, retention_policy, retention_limit)
VALUES ($1, $2, $3, $4, $5, $6)
RETURNING id, ref, scope, owner, type, retention_policy, retention_limit, created, updated",
)
.bind(&input.r#ref)
.bind(input.scope)
.bind(&input.owner)
.bind(input.r#type)
.bind(input.retention_policy)
.bind(input.retention_limit)
.fetch_one(executor)
.await
.map_err(Into::into)
let query = format!(
"INSERT INTO artifact (ref, scope, owner, type, retention_policy, retention_limit, \
name, description, content_type, execution, data) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) \
RETURNING {}",
SELECT_COLUMNS
);
sqlx::query_as::<_, Artifact>(&query)
.bind(&input.r#ref)
.bind(input.scope)
.bind(&input.owner)
.bind(input.r#type)
.bind(input.retention_policy)
.bind(input.retention_limit)
.bind(&input.name)
.bind(&input.description)
.bind(&input.content_type)
.bind(input.execution)
.bind(&input.data)
.fetch_one(executor)
.await
.map_err(Into::into)
}
}
@@ -125,59 +159,40 @@ impl Update for ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
// Build update query dynamically
let mut query = QueryBuilder::new("UPDATE artifact SET ");
let mut has_updates = false;
if let Some(ref_value) = &input.r#ref {
query.push("ref = ").push_bind(ref_value);
has_updates = true;
}
if let Some(scope) = input.scope {
if has_updates {
query.push(", ");
}
query.push("scope = ").push_bind(scope);
has_updates = true;
}
if let Some(owner) = &input.owner {
if has_updates {
query.push(", ");
}
query.push("owner = ").push_bind(owner);
has_updates = true;
}
if let Some(artifact_type) = input.r#type {
if has_updates {
query.push(", ");
}
query.push("type = ").push_bind(artifact_type);
has_updates = true;
}
if let Some(retention_policy) = input.retention_policy {
if has_updates {
query.push(", ");
}
query
.push("retention_policy = ")
.push_bind(retention_policy);
has_updates = true;
}
if let Some(retention_limit) = input.retention_limit {
if has_updates {
query.push(", ");
}
query.push("retention_limit = ").push_bind(retention_limit);
has_updates = true;
macro_rules! push_field {
($field:expr, $col:expr) => {
if let Some(val) = $field {
if has_updates {
query.push(", ");
}
query.push(concat!($col, " = ")).push_bind(val);
has_updates = true;
}
};
}
push_field!(&input.r#ref, "ref");
push_field!(input.scope, "scope");
push_field!(&input.owner, "owner");
push_field!(input.r#type, "type");
push_field!(input.retention_policy, "retention_policy");
push_field!(input.retention_limit, "retention_limit");
push_field!(&input.name, "name");
push_field!(&input.description, "description");
push_field!(&input.content_type, "content_type");
push_field!(input.size_bytes, "size_bytes");
push_field!(&input.data, "data");
if !has_updates {
// No updates requested, fetch and return existing entity
return Self::get_by_id(executor, id).await;
}
query.push(", updated = NOW() WHERE id = ").push_bind(id);
query.push(" RETURNING id, ref, scope, owner, type, retention_policy, retention_limit, created, updated");
query.push(" RETURNING ");
query.push(SELECT_COLUMNS);
query
.build_query_as::<Artifact>()
@@ -202,21 +217,113 @@ impl Delete for ArtifactRepository {
}
impl ArtifactRepository {
/// Search artifacts with filters and pagination
///
/// Runs two queries against the same executor (hence the `Copy` bound): a
/// COUNT(*) for the unclamped total and a page query ordered by
/// `created DESC`. The WHERE clause is built dynamically, so the bind
/// order for BOTH queries must mirror the condition order exactly:
/// scope, owner, type, execution, name_contains.
pub async fn search<'e, E>(
    executor: E,
    filters: &ArtifactSearchFilters,
) -> Result<ArtifactSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Build WHERE clauses; param_idx tracks the next $n placeholder number.
    let mut conditions: Vec<String> = Vec::new();
    let mut param_idx: usize = 0;
    if filters.scope.is_some() {
        param_idx += 1;
        conditions.push(format!("scope = ${}", param_idx));
    }
    if filters.owner.is_some() {
        param_idx += 1;
        conditions.push(format!("owner = ${}", param_idx));
    }
    if filters.r#type.is_some() {
        param_idx += 1;
        conditions.push(format!("type = ${}", param_idx));
    }
    if filters.execution.is_some() {
        param_idx += 1;
        conditions.push(format!("execution = ${}", param_idx));
    }
    if filters.name_contains.is_some() {
        param_idx += 1;
        // Substring match; the pattern wildcards are concatenated in SQL so
        // the user-supplied fragment stays a bound parameter.
        conditions.push(format!("name ILIKE '%' || ${} || '%'", param_idx));
    }
    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    // Count query
    let count_sql = format!("SELECT COUNT(*) AS cnt FROM artifact {}", where_clause);
    let mut count_query = sqlx::query_scalar::<_, i64>(&count_sql);
    // Bind params for count — must match the condition order above.
    if let Some(scope) = filters.scope {
        count_query = count_query.bind(scope);
    }
    if let Some(ref owner) = filters.owner {
        count_query = count_query.bind(owner.clone());
    }
    if let Some(r#type) = filters.r#type {
        count_query = count_query.bind(r#type);
    }
    if let Some(execution) = filters.execution {
        count_query = count_query.bind(execution);
    }
    if let Some(ref name) = filters.name_contains {
        count_query = count_query.bind(name.clone());
    }
    let total = count_query.fetch_one(executor).await?;
    // Data query — LIMIT/OFFSET are interpolated directly, which is safe
    // because both are u32 (no injection surface); limit is capped at 1000.
    let limit = filters.limit.min(1000);
    let offset = filters.offset;
    let data_sql = format!(
        "SELECT {} FROM artifact {} ORDER BY created DESC LIMIT {} OFFSET {}",
        SELECT_COLUMNS, where_clause, limit, offset
    );
    let mut data_query = sqlx::query_as::<_, Artifact>(&data_sql);
    // Same bind order as the count query.
    if let Some(scope) = filters.scope {
        data_query = data_query.bind(scope);
    }
    if let Some(ref owner) = filters.owner {
        data_query = data_query.bind(owner.clone());
    }
    if let Some(r#type) = filters.r#type {
        data_query = data_query.bind(r#type);
    }
    if let Some(execution) = filters.execution {
        data_query = data_query.bind(execution);
    }
    if let Some(ref name) = filters.name_contains {
        data_query = data_query.bind(name.clone());
    }
    let rows = data_query.fetch_all(executor).await?;
    Ok(ArtifactSearchResult { rows, total })
}
/// Find artifacts by scope
pub async fn find_by_scope<'e, E>(executor: E, scope: OwnerType) -> Result<Vec<Artifact>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
WHERE scope = $1
ORDER BY created DESC",
)
.bind(scope)
.fetch_all(executor)
.await
.map_err(Into::into)
let query = format!(
"SELECT {} FROM artifact WHERE scope = $1 ORDER BY created DESC",
SELECT_COLUMNS
);
sqlx::query_as::<_, Artifact>(&query)
.bind(scope)
.fetch_all(executor)
.await
.map_err(Into::into)
}
/// Find artifacts by owner
@@ -224,16 +331,15 @@ impl ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
WHERE owner = $1
ORDER BY created DESC",
)
.bind(owner)
.fetch_all(executor)
.await
.map_err(Into::into)
let query = format!(
"SELECT {} FROM artifact WHERE owner = $1 ORDER BY created DESC",
SELECT_COLUMNS
);
sqlx::query_as::<_, Artifact>(&query)
.bind(owner)
.fetch_all(executor)
.await
.map_err(Into::into)
}
/// Find artifacts by type
@@ -244,19 +350,18 @@ impl ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
WHERE type = $1
ORDER BY created DESC",
)
.bind(artifact_type)
.fetch_all(executor)
.await
.map_err(Into::into)
let query = format!(
"SELECT {} FROM artifact WHERE type = $1 ORDER BY created DESC",
SELECT_COLUMNS
);
sqlx::query_as::<_, Artifact>(&query)
.bind(artifact_type)
.fetch_all(executor)
.await
.map_err(Into::into)
}
/// Find artifacts by scope and owner (common query pattern)
/// Find artifacts by scope and owner
pub async fn find_by_scope_and_owner<'e, E>(
executor: E,
scope: OwnerType,
@@ -265,17 +370,32 @@ impl ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
WHERE scope = $1 AND owner = $2
ORDER BY created DESC",
)
.bind(scope)
.bind(owner)
.fetch_all(executor)
.await
.map_err(Into::into)
let query = format!(
"SELECT {} FROM artifact WHERE scope = $1 AND owner = $2 ORDER BY created DESC",
SELECT_COLUMNS
);
sqlx::query_as::<_, Artifact>(&query)
.bind(scope)
.bind(owner)
.fetch_all(executor)
.await
.map_err(Into::into)
}
/// Return all artifacts produced by the given execution, newest first.
pub async fn find_by_execution<'e, E>(executor: E, execution_id: i64) -> Result<Vec<Artifact>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let sql = format!(
        "SELECT {} FROM artifact WHERE execution = $1 ORDER BY created DESC",
        SELECT_COLUMNS
    );
    let artifacts = sqlx::query_as::<_, Artifact>(&sql)
        .bind(execution_id)
        .fetch_all(executor)
        .await?;
    Ok(artifacts)
}
/// Find artifacts by retention policy
@@ -286,15 +406,297 @@ impl ArtifactRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Artifact>(
"SELECT id, ref, scope, owner, type, retention_policy, retention_limit, created, updated
FROM artifact
WHERE retention_policy = $1
ORDER BY created DESC",
)
.bind(retention_policy)
.fetch_all(executor)
.await
.map_err(Into::into)
let query = format!(
"SELECT {} FROM artifact WHERE retention_policy = $1 ORDER BY created DESC",
SELECT_COLUMNS
);
sqlx::query_as::<_, Artifact>(&query)
.bind(retention_policy)
.fetch_all(executor)
.await
.map_err(Into::into)
}
/// Append data to a progress-type artifact.
///
/// If `artifact.data` is currently NULL, it is initialized as a JSON array
/// containing the new entry. Otherwise the entry is appended to the existing
/// array. This is done atomically in a single SQL statement.
///
/// Because of `fetch_one`, this returns an error when `id` does not exist
/// or the artifact's type is not 'progress' (no row matches the UPDATE).
pub async fn append_progress<'e, E>(
    executor: E,
    id: i64,
    entry: &serde_json::Value,
) -> Result<Artifact>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    // jsonb_build_array wraps the entry so `||` appends one element rather
    // than concatenating the entry's own contents when it is itself an array.
    let query = format!(
        "UPDATE artifact \
         SET data = CASE \
             WHEN data IS NULL THEN jsonb_build_array($2::jsonb) \
             ELSE data || jsonb_build_array($2::jsonb) \
         END, \
         updated = NOW() \
         WHERE id = $1 AND type = 'progress' \
         RETURNING {}",
        SELECT_COLUMNS
    );
    sqlx::query_as::<_, Artifact>(&query)
        .bind(id)
        .bind(entry)
        .fetch_one(executor)
        .await
        .map_err(Into::into)
}
/// Replace the full data payload on a progress-type artifact (for "set" semantics).
pub async fn set_data<'e, E>(executor: E, id: i64, data: &serde_json::Value) -> Result<Artifact>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let sql = format!(
        "UPDATE artifact SET data = $2, updated = NOW() \
         WHERE id = $1 RETURNING {}",
        SELECT_COLUMNS
    );
    let updated = sqlx::query_as::<_, Artifact>(&sql)
        .bind(id)
        .bind(data)
        .fetch_one(executor)
        .await?;
    Ok(updated)
}
}
// ============================================================================
// ArtifactVersionRepository
// ============================================================================
use crate::models::artifact_version;
/// Repository for `artifact_version` rows (immutable content snapshots).
pub struct ArtifactVersionRepository;

impl Repository for ArtifactVersionRepository {
    type Entity = ArtifactVersion;

    fn table_name() -> &'static str {
        "artifact_version"
    }
}
/// Input for creating a new artifact version.
///
/// Note there is no `version` field — presumably the next version number is
/// assigned by the repository/DB at insert time (create fn not visible here;
/// TODO confirm).
#[derive(Debug, Clone)]
pub struct CreateArtifactVersionInput {
    /// Parent artifact ID
    pub artifact: i64,
    /// MIME content type of this version
    pub content_type: Option<String>,
    /// Binary payload (file data)
    pub content: Option<Vec<u8>>,
    /// Structured JSON payload
    pub content_json: Option<serde_json::Value>,
    /// Free-form metadata about this version
    pub meta: Option<serde_json::Value>,
    /// Who created this version
    pub created_by: Option<String>,
}
impl ArtifactVersionRepository {
/// Fetch a single version row by primary key, substituting NULL for the
/// binary `content` column to keep the query cheap.
pub async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<ArtifactVersion>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let sql = format!(
        "SELECT {} FROM artifact_version WHERE id = $1",
        artifact_version::SELECT_COLUMNS
    );
    let row = sqlx::query_as::<_, ArtifactVersion>(&sql)
        .bind(id)
        .fetch_optional(executor)
        .await?;
    Ok(row)
}
/// Find a version by ID including binary content
///
/// Prefer `find_by_id` unless the payload is actually needed — `content`
/// can be a large BYTEA value.
pub async fn find_by_id_with_content<'e, E>(
    executor: E,
    id: i64,
) -> Result<Option<ArtifactVersion>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let query = format!(
        "SELECT {} FROM artifact_version WHERE id = $1",
        artifact_version::SELECT_COLUMNS_WITH_CONTENT
    );
    sqlx::query_as::<_, ArtifactVersion>(&query)
        .bind(id)
        .fetch_optional(executor)
        .await
        .map_err(Into::into)
}
/// List all versions for an artifact (without binary content), newest first
pub async fn list_by_artifact<'e, E>(
    executor: E,
    artifact_id: i64,
) -> Result<Vec<ArtifactVersion>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    // Version numbers increase monotonically, so DESC yields newest first.
    let query = format!(
        "SELECT {} FROM artifact_version WHERE artifact = $1 ORDER BY version DESC",
        artifact_version::SELECT_COLUMNS
    );
    sqlx::query_as::<_, ArtifactVersion>(&query)
        .bind(artifact_id)
        .fetch_all(executor)
        .await
        .map_err(Into::into)
}
/// Get the latest version for an artifact (without binary content)
pub async fn find_latest<'e, E>(
executor: E,
artifact_id: i64,
) -> Result<Option<ArtifactVersion>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let query = format!(
"SELECT {} FROM artifact_version WHERE artifact = $1 ORDER BY version DESC LIMIT 1",
artifact_version::SELECT_COLUMNS
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(artifact_id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
/// Get the latest version for an artifact (with binary content)
pub async fn find_latest_with_content<'e, E>(
executor: E,
artifact_id: i64,
) -> Result<Option<ArtifactVersion>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let query = format!(
"SELECT {} FROM artifact_version WHERE artifact = $1 ORDER BY version DESC LIMIT 1",
artifact_version::SELECT_COLUMNS_WITH_CONTENT
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(artifact_id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
/// Get a specific version by artifact and version number (without binary content)
pub async fn find_by_version<'e, E>(
executor: E,
artifact_id: i64,
version: i32,
) -> Result<Option<ArtifactVersion>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let query = format!(
"SELECT {} FROM artifact_version WHERE artifact = $1 AND version = $2",
artifact_version::SELECT_COLUMNS
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(artifact_id)
.bind(version)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
/// Get a specific version by artifact and version number (with binary content)
pub async fn find_by_version_with_content<'e, E>(
executor: E,
artifact_id: i64,
version: i32,
) -> Result<Option<ArtifactVersion>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let query = format!(
"SELECT {} FROM artifact_version WHERE artifact = $1 AND version = $2",
artifact_version::SELECT_COLUMNS_WITH_CONTENT
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(artifact_id)
.bind(version)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
/// Create a new artifact version. The version number is auto-assigned
/// (MAX(version) + 1) and the retention trigger fires after insert.
pub async fn create<'e, E>(
executor: E,
input: CreateArtifactVersionInput,
) -> Result<ArtifactVersion>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let size_bytes = input.content.as_ref().map(|c| c.len() as i64).or_else(|| {
input
.content_json
.as_ref()
.map(|j| serde_json::to_string(j).unwrap_or_default().len() as i64)
});
let query = format!(
"INSERT INTO artifact_version \
(artifact, version, content_type, size_bytes, content, content_json, meta, created_by) \
VALUES ($1, next_artifact_version($1), $2, $3, $4, $5, $6, $7) \
RETURNING {}",
artifact_version::SELECT_COLUMNS_WITH_CONTENT
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(input.artifact)
.bind(&input.content_type)
.bind(size_bytes)
.bind(&input.content)
.bind(&input.content_json)
.bind(&input.meta)
.bind(&input.created_by)
.fetch_one(executor)
.await
.map_err(Into::into)
}
/// Delete a specific version by ID
pub async fn delete<'e, E>(executor: E, id: i64) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("DELETE FROM artifact_version WHERE id = $1")
.bind(id)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
}
/// Delete all versions for an artifact
pub async fn delete_all_for_artifact<'e, E>(executor: E, artifact_id: i64) -> Result<u64>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("DELETE FROM artifact_version WHERE artifact = $1")
.bind(artifact_id)
.execute(executor)
.await?;
Ok(result.rows_affected())
}
/// Count versions for an artifact
pub async fn count_by_artifact<'e, E>(executor: E, artifact_id: i64) -> Result<i64>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM artifact_version WHERE artifact = $1")
.bind(artifact_id)
.fetch_one(executor)
.await
.map_err(Into::into)
}
}

View File

@@ -49,7 +49,7 @@ pub mod workflow;
// Re-export repository types
pub use action::{ActionRepository, PolicyRepository};
pub use analytics::AnalyticsRepository;
pub use artifact::ArtifactRepository;
pub use artifact::{ArtifactRepository, ArtifactVersionRepository};
pub use entity_history::EntityHistoryRepository;
pub use event::{EnforcementRepository, EventRepository};
pub use execution::ExecutionRepository;

View File

@@ -67,6 +67,11 @@ impl ArtifactFixture {
r#type: ArtifactType::FileText,
retention_policy: RetentionPolicyType::Versions,
retention_limit: 5,
name: None,
description: None,
content_type: None,
execution: None,
data: None,
}
}
}
@@ -249,6 +254,11 @@ async fn test_update_artifact_all_fields() {
r#type: Some(ArtifactType::FileImage),
retention_policy: Some(RetentionPolicyType::Days),
retention_limit: Some(30),
name: Some("Updated Name".to_string()),
description: Some("Updated description".to_string()),
content_type: Some("image/png".to_string()),
size_bytes: Some(12345),
data: Some(serde_json::json!({"key": "value"})),
};
let updated = ArtifactRepository::update(&pool, created.id, update_input.clone())