artifacts!

2026-03-03 13:42:41 -06:00
parent 5da940639a
commit 8299e5efcb
50 changed files with 4779 additions and 341 deletions

View File

@@ -68,6 +68,13 @@ jsonschema = { workspace = true }
# HTTP client
reqwest = { workspace = true }
# Archive/compression
tar = { workspace = true }
flate2 = { workspace = true }
# Temp files (used for pack upload extraction)
tempfile = { workspace = true }
# Authentication
argon2 = { workspace = true }
rand = "0.9"

View File

@@ -5,7 +5,9 @@ use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use utoipa::{IntoParams, ToSchema};
use attune_common::models::enums::{
ArtifactType, ArtifactVisibility, OwnerType, RetentionPolicyType,
};
// ============================================================================
// Artifact DTOs
@@ -30,6 +32,10 @@ pub struct CreateArtifactRequest {
#[schema(example = "file_text")]
pub r#type: ArtifactType,
/// Visibility level (public = all users, private = scope/owner restricted).
/// If omitted, defaults to `public` for progress artifacts and `private` for all others.
pub visibility: Option<ArtifactVisibility>,
/// Retention policy type
#[serde(default = "default_retention_policy")]
#[schema(example = "versions")]
@@ -81,6 +87,9 @@ pub struct UpdateArtifactRequest {
/// Updated artifact type
pub r#type: Option<ArtifactType>,
/// Updated visibility
pub visibility: Option<ArtifactVisibility>,
/// Updated retention policy
pub retention_policy: Option<RetentionPolicyType>,
@@ -138,6 +147,9 @@ pub struct ArtifactResponse {
/// Artifact type
pub r#type: ArtifactType,
/// Visibility level
pub visibility: ArtifactVisibility,
/// Retention policy
pub retention_policy: RetentionPolicyType,
@@ -185,6 +197,9 @@ pub struct ArtifactSummary {
/// Artifact type
pub r#type: ArtifactType,
/// Visibility level
pub visibility: ArtifactVisibility,
/// Human-readable name
pub name: Option<String>,
@@ -222,6 +237,9 @@ pub struct ArtifactQueryParams {
/// Filter by artifact type
pub r#type: Option<ArtifactType>,
/// Filter by visibility
pub visibility: Option<ArtifactVisibility>,
/// Filter by execution ID
pub execution: Option<i64>,
@@ -279,6 +297,23 @@ pub struct CreateVersionJsonRequest {
pub created_by: Option<String>,
}
/// Request DTO for creating a new file-backed artifact version.
/// No file content is included — the caller writes the file directly to
/// `$ATTUNE_ARTIFACTS_DIR/{file_path}` after receiving the response.
#[derive(Debug, Clone, Deserialize, ToSchema)]
pub struct CreateFileVersionRequest {
/// MIME content type (e.g. "text/plain", "application/octet-stream")
#[schema(example = "text/plain")]
pub content_type: Option<String>,
/// Free-form metadata about this version
#[schema(value_type = Option<Object>)]
pub meta: Option<JsonValue>,
/// Who created this version (e.g. action ref, identity, "system")
pub created_by: Option<String>,
}
/// Response DTO for an artifact version (without binary content)
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ArtifactVersionResponse {
@@ -301,6 +336,11 @@ pub struct ArtifactVersionResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub content_json: Option<JsonValue>,
/// Relative file path for disk-backed versions (from artifacts_dir root).
/// When present, the file content lives on the shared volume, not in the DB.
#[serde(skip_serializing_if = "Option::is_none")]
pub file_path: Option<String>,
/// Free-form metadata
#[serde(skip_serializing_if = "Option::is_none")]
pub meta: Option<JsonValue>,
@@ -327,6 +367,10 @@ pub struct ArtifactVersionSummary {
/// Size of content in bytes
pub size_bytes: Option<i64>,
/// Relative file path for disk-backed versions
#[serde(skip_serializing_if = "Option::is_none")]
pub file_path: Option<String>,
/// Who created this version
pub created_by: Option<String>,
@@ -346,6 +390,7 @@ impl From<attune_common::models::artifact::Artifact> for ArtifactResponse {
scope: a.scope,
owner: a.owner,
r#type: a.r#type,
visibility: a.visibility,
retention_policy: a.retention_policy,
retention_limit: a.retention_limit,
name: a.name,
@@ -366,6 +411,7 @@ impl From<attune_common::models::artifact::Artifact> for ArtifactSummary {
id: a.id,
r#ref: a.r#ref,
r#type: a.r#type,
visibility: a.visibility,
name: a.name,
content_type: a.content_type,
size_bytes: a.size_bytes,
@@ -387,6 +433,7 @@ impl From<attune_common::models::artifact_version::ArtifactVersion> for Artifact
content_type: v.content_type,
size_bytes: v.size_bytes,
content_json: v.content_json,
file_path: v.file_path,
meta: v.meta,
created_by: v.created_by,
created: v.created,
@@ -401,6 +448,7 @@ impl From<attune_common::models::artifact_version::ArtifactVersion> for Artifact
version: v.version,
content_type: v.content_type,
size_bytes: v.size_bytes,
file_path: v.file_path,
created_by: v.created_by,
created: v.created,
}
@@ -419,6 +467,7 @@ mod tests {
assert_eq!(params.per_page, 20);
assert!(params.scope.is_none());
assert!(params.r#type.is_none());
assert!(params.visibility.is_none());
}
#[test]
@@ -427,6 +476,7 @@ mod tests {
scope: None,
owner: None,
r#type: None,
visibility: None,
execution: None,
name: None,
page: 3,
@@ -441,6 +491,7 @@ mod tests {
scope: None,
owner: None,
r#type: None,
visibility: None,
execution: None,
name: None,
page: 1,
@@ -460,6 +511,10 @@ mod tests {
let req: CreateArtifactRequest = serde_json::from_str(json).unwrap();
assert_eq!(req.retention_policy, RetentionPolicyType::Versions);
assert_eq!(req.retention_limit, 5);
assert!(
req.visibility.is_none(),
"Omitting visibility should deserialize as None (server applies type-aware default)"
);
}
#[test]

View File

@@ -33,6 +33,86 @@ struct Args {
port: Option<u16>,
}
/// Attempt to connect to RabbitMQ and create a publisher.
/// Returns the publisher on success.
async fn try_connect_publisher(mq_url: &str) -> Result<Publisher> {
let mq_connection = Connection::connect(mq_url).await?;
// Setup common message queue infrastructure (exchanges and DLX)
let mq_setup_config = attune_common::mq::MessageQueueConfig::default();
if let Err(e) = mq_connection
.setup_common_infrastructure(&mq_setup_config)
.await
{
warn!(
"Failed to setup common MQ infrastructure (may already exist): {}",
e
);
}
let publisher = Publisher::new(
&mq_connection,
PublisherConfig {
confirm_publish: true,
timeout_secs: 30,
exchange: "attune.executions".to_string(),
},
)
.await?;
Ok(publisher)
}
/// Background task that keeps trying to establish the MQ publisher connection.
/// Once connected it installs the publisher into `state`, then monitors the
/// connection health and reconnects if it drops.
async fn mq_reconnect_loop(state: Arc<AppState>, mq_url: String) {
// Retry delay sequence (seconds): 1, 2, 4, 8, 16, 30, 30, …
let delays: &[u64] = &[1, 2, 4, 8, 16, 30];
let mut attempt: usize = 0;
loop {
let delay = delays.get(attempt).copied().unwrap_or(30);
match try_connect_publisher(&mq_url).await {
Ok(publisher) => {
info!(
"Message queue publisher connected (attempt {})",
attempt + 1
);
state.set_publisher(Arc::new(publisher)).await;
attempt = 0; // reset backoff after a successful connect
// Poll liveness: the publisher will error on use when the
// underlying channel is gone. We do a lightweight wait here so
// we notice disconnections and attempt to reconnect.
loop {
tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
if state.get_publisher().await.is_none() {
// Something cleared the publisher externally; re-enter
// the outer connect loop.
break;
}
// TODO: add a real health-check ping when the lapin API
// exposes one (e.g. channel.basic_noop). For now a broken
// publisher will be detected on the first failed publish and
// can be cleared by the handler to trigger reconnection here.
}
}
Err(e) => {
warn!(
"Failed to connect to message queue (attempt {}, retrying in {}s): {}",
attempt + 1,
delay,
e
);
tokio::time::sleep(tokio::time::Duration::from_secs(delay)).await;
attempt = attempt.saturating_add(1);
}
}
}
}
#[tokio::main]
async fn main() -> Result<()> {
// Initialize tracing subscriber
@@ -66,59 +146,21 @@ async fn main() -> Result<()> {
let database = Database::new(&config.database).await?;
info!("Database connection established");
// Initialize application state (publisher starts as None)
let state = Arc::new(AppState::new(database.pool().clone(), config.clone()));
// Spawn background MQ reconnect loop if a message queue is configured.
// The loop will keep retrying until it connects, then install the publisher
// into the shared state so request handlers can use it immediately.
if let Some(ref mq_config) = config.message_queue {
    info!("Message queue configured; starting background connection loop...");
    let mq_url = mq_config.url.clone();
    let state_clone = state.clone();
    tokio::spawn(async move {
        mq_reconnect_loop(state_clone, mq_url).await;
    });
} else {
    warn!("Message queue not configured; executions will not be queued for processing");
}
info!(
@@ -143,7 +185,7 @@ async fn main() -> Result<()> {
info!("PostgreSQL notification listener started");
// Create and start server
let server = Server::new(state.clone());
info!("Attune API Service is ready");

View File

@@ -2,6 +2,7 @@
//!
//! Provides endpoints for:
//! - CRUD operations on artifacts (metadata + data)
//! - File-backed version creation (execution writes file to shared volume)
//! - File upload (binary) and download for file-type artifacts
//! - JSON content versioning for structured artifacts
//! - Progress append for progress-type artifacts (streaming updates)
@@ -17,8 +18,9 @@ use axum::{
Json, Router,
};
use std::sync::Arc;
use tracing::warn;
use attune_common::models::enums::{ArtifactType, ArtifactVisibility};
use attune_common::repositories::{
artifact::{
ArtifactRepository, ArtifactSearchFilters, ArtifactVersionRepository, CreateArtifactInput,
@@ -33,7 +35,8 @@ use crate::{
artifact::{
AppendProgressRequest, ArtifactQueryParams, ArtifactResponse, ArtifactSummary,
ArtifactVersionResponse, ArtifactVersionSummary, CreateArtifactRequest,
CreateFileVersionRequest, CreateVersionJsonRequest, SetDataRequest,
UpdateArtifactRequest,
},
common::{PaginatedResponse, PaginationParams},
ApiResponse, SuccessResponse,
@@ -66,6 +69,7 @@ pub async fn list_artifacts(
scope: query.scope,
owner: query.owner.clone(),
r#type: query.r#type,
visibility: query.visibility,
execution: query.execution,
name_contains: query.name.clone(),
limit: query.limit(),
@@ -175,11 +179,22 @@ pub async fn create_artifact(
)));
}
// Type-aware visibility default: progress artifacts are public by default
// (they're informational status indicators), everything else is private.
let visibility = request.visibility.unwrap_or_else(|| {
if request.r#type == ArtifactType::Progress {
ArtifactVisibility::Public
} else {
ArtifactVisibility::Private
}
});
let input = CreateArtifactInput {
r#ref: request.r#ref,
scope: request.scope,
owner: request.owner,
r#type: request.r#type,
visibility,
retention_policy: request.retention_policy,
retention_limit: request.retention_limit,
name: request.name,
@@ -229,6 +244,7 @@ pub async fn update_artifact(
scope: request.scope,
owner: request.owner,
r#type: request.r#type,
visibility: request.visibility,
retention_policy: request.retention_policy,
retention_limit: request.retention_limit,
name: request.name,
@@ -249,7 +265,7 @@ pub async fn update_artifact(
))
}
/// Delete an artifact (cascades to all versions, including disk files)
#[utoipa::path(
delete,
path = "/api/v1/artifacts/{id}",
@@ -266,6 +282,22 @@ pub async fn delete_artifact(
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// Before deleting DB rows, clean up any file-backed versions on disk
let file_versions =
ArtifactVersionRepository::find_file_versions_by_artifact(&state.db, id).await?;
if !file_versions.is_empty() {
let artifacts_dir = &state.config.artifacts_dir;
cleanup_version_files(artifacts_dir, &file_versions);
// Also try to remove the artifact's parent directory if it's now empty
let ref_dir = ref_to_dir_path(&artifact.r#ref);
let full_ref_dir = std::path::Path::new(artifacts_dir).join(&ref_dir);
cleanup_empty_parents(&full_ref_dir, artifacts_dir);
}
let deleted = ArtifactRepository::delete(&state.db, id).await?;
if !deleted {
return Err(ApiError::NotFound(format!(
@@ -527,6 +559,7 @@ pub async fn create_version_json(
),
content: None,
content_json: Some(request.content),
file_path: None,
meta: request.meta,
created_by: request.created_by,
};
@@ -542,6 +575,108 @@ pub async fn create_version_json(
))
}
/// Create a new file-backed version (no file content in request).
///
/// This endpoint allocates a version number and computes a `file_path` on the
/// shared artifact volume. The caller (execution process) is expected to write
/// the file content directly to `$ATTUNE_ARTIFACTS_DIR/{file_path}` after
/// receiving the response. The worker finalizes `size_bytes` after execution.
///
/// Only applicable to file-type artifacts (FileBinary, FileText, FileDataTable, FileImage).
#[utoipa::path(
post,
path = "/api/v1/artifacts/{id}/versions/file",
tag = "artifacts",
params(("id" = i64, Path, description = "Artifact ID")),
request_body = CreateFileVersionRequest,
responses(
(status = 201, description = "File version allocated", body = inline(ApiResponse<ArtifactVersionResponse>)),
(status = 400, description = "Artifact type is not file-based"),
(status = 404, description = "Artifact not found"),
),
security(("bearer_auth" = []))
)]
pub async fn create_version_file(
RequireAuth(_user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
Json(request): Json<CreateFileVersionRequest>,
) -> ApiResult<impl IntoResponse> {
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// Validate this is a file-type artifact
if !is_file_backed_type(artifact.r#type) {
return Err(ApiError::BadRequest(format!(
"Artifact '{}' is type {:?}, which does not support file-backed versions. \
Use POST /versions for JSON or POST /versions/upload for DB-stored files.",
artifact.r#ref, artifact.r#type,
)));
}
let content_type = request
.content_type
.unwrap_or_else(|| default_content_type_for_artifact(artifact.r#type));
// We need the version number to compute the file path. The DB function
// `next_artifact_version()` is called inside the INSERT, so we create the
// row first with file_path = NULL, then compute the path from the returned
// version number and update the row. This avoids a race condition where two
// concurrent requests could compute the same version number.
let input = CreateArtifactVersionInput {
artifact: id,
content_type: Some(content_type.clone()),
content: None,
content_json: None,
file_path: None, // Will be set in the update below
meta: request.meta,
created_by: request.created_by,
};
let version = ArtifactVersionRepository::create(&state.db, input).await?;
// Compute the file path from the artifact ref and version number
let file_path = compute_file_path(&artifact.r#ref, version.version, &content_type);
// Create the parent directory on disk
let artifacts_dir = &state.config.artifacts_dir;
let full_path = std::path::Path::new(artifacts_dir).join(&file_path);
if let Some(parent) = full_path.parent() {
tokio::fs::create_dir_all(parent).await.map_err(|e| {
ApiError::InternalServerError(format!(
"Failed to create artifact directory '{}': {}",
parent.display(),
e,
))
})?;
}
// Update the version row with the computed file_path
sqlx::query("UPDATE artifact_version SET file_path = $1 WHERE id = $2")
.bind(&file_path)
.execute(&state.db)
.await
.map_err(|e| {
ApiError::InternalServerError(format!(
"Failed to set file_path on version {}: {}",
version.id, e,
))
})?;
// Return the version with file_path populated
let mut response = ArtifactVersionResponse::from(version);
response.file_path = Some(file_path);
Ok((
StatusCode::CREATED,
Json(ApiResponse::with_message(
response,
"File version allocated — write content to $ATTUNE_ARTIFACTS_DIR/<file_path>",
)),
))
}
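To make the contract concrete, here is a minimal client-side sketch of the two-step flow (not part of this commit). The reqwest calls and the `data` envelope field are assumptions inferred from the ApiResponse wrapper above; treat it as a sketch, not the project's client code.

use serde::Deserialize;

#[derive(Deserialize)]
struct Envelope { data: AllocatedVersion }

#[derive(Deserialize)]
struct AllocatedVersion { version: i32, file_path: Option<String> }

async fn write_file_version(api: &str, token: &str, artifact_id: i64, bytes: &[u8]) -> anyhow::Result<()> {
    // Step 1: allocate a version number and file_path (no content in the request body).
    let resp: Envelope = reqwest::Client::new()
        .post(format!("{api}/api/v1/artifacts/{artifact_id}/versions/file"))
        .bearer_auth(token)
        .json(&serde_json::json!({ "content_type": "text/plain", "created_by": "example" }))
        .send().await?
        .error_for_status()?
        .json().await?;
    // Step 2: write the content directly under the shared artifact volume.
    // The API has already created the parent directory.
    let root = std::env::var("ATTUNE_ARTIFACTS_DIR")?;
    let file_path = resp.data.file_path.expect("file_path is set for file-backed versions");
    tokio::fs::write(std::path::Path::new(&root).join(file_path), bytes).await?;
    Ok(())
}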
/// Upload a binary file as a new version (multipart/form-data)
///
/// The file is sent as a multipart form field named `file`. Optional fields:
@@ -656,6 +791,7 @@ pub async fn upload_version(
content_type: Some(resolved_ct),
content: Some(file_bytes),
content_json: None,
file_path: None,
meta,
created_by,
};
@@ -671,7 +807,10 @@ pub async fn upload_version(
))
}
/// Download the binary content of a specific version.
///
/// For file-backed versions, reads from the shared artifact volume on disk.
/// For DB-stored versions, reads from the BYTEA/JSON content column.
#[utoipa::path(
get,
path = "/api/v1/artifacts/{id}/versions/{version}/download",
@@ -695,69 +834,33 @@ pub async fn download_version(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// File-backed version: read from disk
if let Some(ref file_path) = ver.file_path {
return serve_file_from_disk(
&state.config.artifacts_dir,
file_path,
&artifact.r#ref,
version,
ver.content_type.as_deref(),
)
.await;
}
// DB-stored version: need to fetch with content
let ver = ArtifactVersionRepository::find_by_version_with_content(&state.db, id, version)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
serve_db_content(&artifact.r#ref, version, &ver)
}
/// Download the latest version's content
@@ -781,72 +884,34 @@ pub async fn download_latest(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
    .await?
    .ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
let version = ver.version;
// File-backed version: read from disk
if let Some(ref file_path) = ver.file_path {
    return serve_file_from_disk(
        &state.config.artifacts_dir,
        file_path,
        &artifact.r#ref,
        version,
        ver.content_type.as_deref(),
    )
    .await;
}
// DB-stored version: need to fetch with content
let ver = ArtifactVersionRepository::find_latest_with_content(&state.db, id)
    .await?
    .ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
serve_db_content(&artifact.r#ref, ver.version, &ver)
}
/// Delete a specific version by version number (including disk file if file-backed)
#[utoipa::path(
delete,
path = "/api/v1/artifacts/{id}/versions/{version}",
@@ -867,7 +932,7 @@ pub async fn delete_version(
Path((id, version)): Path<(i64, i32)>,
) -> ApiResult<impl IntoResponse> {
// Verify artifact exists
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
@@ -878,6 +943,25 @@ pub async fn delete_version(
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// Clean up disk file if file-backed
if let Some(ref file_path) = ver.file_path {
let artifacts_dir = &state.config.artifacts_dir;
let full_path = std::path::Path::new(artifacts_dir).join(file_path);
if full_path.exists() {
if let Err(e) = tokio::fs::remove_file(&full_path).await {
warn!(
"Failed to delete artifact file '{}': {}. DB row will still be deleted.",
full_path.display(),
e
);
}
}
// Try to clean up empty parent directories
let ref_dir = ref_to_dir_path(&artifact.r#ref);
let full_ref_dir = std::path::Path::new(artifacts_dir).join(&ref_dir);
cleanup_empty_parents(&full_ref_dir, artifacts_dir);
}
ArtifactVersionRepository::delete(&state.db, ver.id).await?;
Ok((
@@ -890,6 +974,212 @@ pub async fn delete_version(
// Helpers
// ============================================================================
/// Returns true for artifact types that should use file-backed storage on disk.
fn is_file_backed_type(artifact_type: ArtifactType) -> bool {
matches!(
artifact_type,
ArtifactType::FileBinary
| ArtifactType::FileText
| ArtifactType::FileDataTable
| ArtifactType::FileImage
)
}
/// Convert an artifact ref to a directory path by replacing dots with path separators.
/// e.g., "mypack.build_log" -> "mypack/build_log"
fn ref_to_dir_path(artifact_ref: &str) -> String {
artifact_ref.replace('.', "/")
}
/// Compute the relative file path for a file-backed artifact version.
///
/// Pattern: `{ref_slug}/v{version}.{ext}`
/// e.g., `mypack/build_log/v1.txt`
pub fn compute_file_path(artifact_ref: &str, version: i32, content_type: &str) -> String {
let ref_path = ref_to_dir_path(artifact_ref);
let ext = extension_from_content_type(content_type);
format!("{}/v{}.{}", ref_path, version, ext)
}
/// Return a sensible default content type for a given artifact type.
fn default_content_type_for_artifact(artifact_type: ArtifactType) -> String {
match artifact_type {
ArtifactType::FileText => "text/plain".to_string(),
ArtifactType::FileDataTable => "text/csv".to_string(),
ArtifactType::FileImage => "image/png".to_string(),
ArtifactType::FileBinary => "application/octet-stream".to_string(),
_ => "application/octet-stream".to_string(),
}
}
/// Serve a file-backed artifact version from disk.
async fn serve_file_from_disk(
artifacts_dir: &str,
file_path: &str,
artifact_ref: &str,
version: i32,
content_type: Option<&str>,
) -> ApiResult<axum::response::Response> {
let full_path = std::path::Path::new(artifacts_dir).join(file_path);
if !full_path.exists() {
return Err(ApiError::NotFound(format!(
"File for version {} of artifact '{}' not found on disk (expected at '{}')",
version, artifact_ref, file_path,
)));
}
let bytes = tokio::fs::read(&full_path).await.map_err(|e| {
ApiError::InternalServerError(format!(
"Failed to read artifact file '{}': {}",
full_path.display(),
e,
))
})?;
let ct = content_type
.unwrap_or("application/octet-stream")
.to_string();
let filename = format!(
"{}_v{}.{}",
artifact_ref.replace('.', "_"),
version,
extension_from_content_type(&ct),
);
Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response())
}
/// Serve a DB-stored artifact version (BYTEA or JSON content).
fn serve_db_content(
artifact_ref: &str,
version: i32,
ver: &attune_common::models::artifact_version::ArtifactVersion,
) -> ApiResult<axum::response::Response> {
// For binary content
if let Some(ref bytes) = ver.content {
let ct = ver
.content_type
.clone()
.unwrap_or_else(|| "application/octet-stream".to_string());
let filename = format!(
"{}_v{}.{}",
artifact_ref.replace('.', "_"),
version,
extension_from_content_type(&ct),
);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes.clone()),
)
.into_response());
}
// For JSON content, serialize and return
if let Some(ref json) = ver.content_json {
let bytes = serde_json::to_vec_pretty(json).map_err(|e| {
ApiError::InternalServerError(format!("Failed to serialize JSON: {}", e))
})?;
let ct = ver
.content_type
.clone()
.unwrap_or_else(|| "application/json".to_string());
let filename = format!("{}_v{}.json", artifact_ref.replace('.', "_"), version);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response());
}
Err(ApiError::NotFound(format!(
"Version {} of artifact '{}' has no downloadable content",
version, artifact_ref,
)))
}
/// Delete disk files for a set of file-backed artifact versions.
/// Logs warnings on failure but does not propagate errors.
fn cleanup_version_files(
artifacts_dir: &str,
versions: &[attune_common::models::artifact_version::ArtifactVersion],
) {
for ver in versions {
if let Some(ref file_path) = ver.file_path {
let full_path = std::path::Path::new(artifacts_dir).join(file_path);
if full_path.exists() {
if let Err(e) = std::fs::remove_file(&full_path) {
warn!(
"Failed to delete artifact file '{}': {}",
full_path.display(),
e,
);
}
}
}
}
}
/// Attempt to remove empty parent directories up to (but not including) the
/// artifacts_dir root. This is best-effort cleanup.
fn cleanup_empty_parents(dir: &std::path::Path, stop_at: &str) {
let stop_path = std::path::Path::new(stop_at);
let mut current = dir.to_path_buf();
while current != stop_path && current.starts_with(stop_path) {
match std::fs::read_dir(&current) {
Ok(mut entries) => {
if entries.next().is_some() {
// Directory is not empty, stop climbing
break;
}
if let Err(e) = std::fs::remove_dir(&current) {
warn!(
"Failed to remove empty directory '{}': {}",
current.display(),
e,
);
break;
}
}
Err(_) => break,
}
match current.parent() {
Some(parent) => current = parent.to_path_buf(),
None => break,
}
}
}
/// Derive a simple file extension from a MIME content type
fn extension_from_content_type(ct: &str) -> &str {
match ct {
@@ -944,6 +1234,7 @@ pub fn routes() -> Router<Arc<AppState>> {
)
.route("/artifacts/{id}/versions/latest", get(get_latest_version))
.route("/artifacts/{id}/versions/upload", post(upload_version))
.route("/artifacts/{id}/versions/file", post(create_version_file))
.route(
"/artifacts/{id}/versions/{version}",
get(get_version).delete(delete_version),
@@ -975,4 +1266,61 @@ mod tests {
assert_eq!(extension_from_content_type("image/png"), "png");
assert_eq!(extension_from_content_type("unknown/type"), "bin");
}
#[test]
fn test_compute_file_path() {
assert_eq!(
compute_file_path("mypack.build_log", 1, "text/plain"),
"mypack/build_log/v1.txt"
);
assert_eq!(
compute_file_path("mypack.build_log", 3, "application/json"),
"mypack/build_log/v3.json"
);
assert_eq!(
compute_file_path("core.test.results", 2, "text/csv"),
"core/test/results/v2.csv"
);
assert_eq!(
compute_file_path("simple", 1, "application/octet-stream"),
"simple/v1.bin"
);
}
#[test]
fn test_ref_to_dir_path() {
assert_eq!(ref_to_dir_path("mypack.build_log"), "mypack/build_log");
assert_eq!(ref_to_dir_path("simple"), "simple");
assert_eq!(ref_to_dir_path("a.b.c.d"), "a/b/c/d");
}
#[test]
fn test_is_file_backed_type() {
assert!(is_file_backed_type(ArtifactType::FileBinary));
assert!(is_file_backed_type(ArtifactType::FileText));
assert!(is_file_backed_type(ArtifactType::FileDataTable));
assert!(is_file_backed_type(ArtifactType::FileImage));
assert!(!is_file_backed_type(ArtifactType::Progress));
assert!(!is_file_backed_type(ArtifactType::Url));
}
#[test]
fn test_default_content_type_for_artifact() {
assert_eq!(
default_content_type_for_artifact(ArtifactType::FileText),
"text/plain"
);
assert_eq!(
default_content_type_for_artifact(ArtifactType::FileDataTable),
"text/csv"
);
assert_eq!(
default_content_type_for_artifact(ArtifactType::FileImage),
"image/png"
);
assert_eq!(
default_content_type_for_artifact(ArtifactType::FileBinary),
"application/octet-stream"
);
}
}

View File

@@ -170,7 +170,7 @@ pub async fn create_event(
let event = EventRepository::create(&state.db, input).await?;
// Publish EventCreated message to message queue if publisher is available
if let Some(publisher) = state.get_publisher().await {
let message_payload = EventCreatedPayload {
event_id: event.id,
trigger_id: event.trigger,

View File

@@ -99,7 +99,7 @@ pub async fn create_execution(
.with_source("api-service")
.with_correlation_id(uuid::Uuid::new_v4());
if let Some(publisher) = state.get_publisher().await {
publisher.publish_envelope(&message).await.map_err(|e| {
ApiError::InternalServerError(format!("Failed to publish message: {}", e))
})?;

View File

@@ -403,7 +403,7 @@ pub async fn respond_to_inquiry(
let updated_inquiry = InquiryRepository::update(&state.db, id, update_input).await?;
// Publish InquiryResponded message if publisher is available
if let Some(publisher) = state.get_publisher().await {
let user_id = user
.0
.identity_id()

View File

@@ -1,7 +1,7 @@
//! Pack management API routes
use axum::{
extract::{Multipart, Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
@@ -448,6 +448,190 @@ async fn execute_and_store_pack_tests(
Some(Ok(result))
}
/// Upload and register a pack from a tar.gz archive (multipart/form-data)
///
/// The archive should be a gzipped tar containing the pack directory at its root
/// (i.e. the archive should unpack to files like `pack.yaml`, `actions/`, etc.).
/// The multipart field name must be `pack`.
///
/// Optional form fields:
/// - `force`: `"true"` to overwrite an existing pack with the same ref
/// - `skip_tests`: `"true"` to skip test execution after registration
#[utoipa::path(
post,
path = "/api/v1/packs/upload",
tag = "packs",
request_body(content = String, content_type = "multipart/form-data"),
responses(
(status = 201, description = "Pack uploaded and registered successfully", body = inline(ApiResponse<PackInstallResponse>)),
(status = 400, description = "Invalid archive or missing pack.yaml"),
(status = 409, description = "Pack already exists (use force=true to overwrite)"),
),
security(("bearer_auth" = []))
)]
pub async fn upload_pack(
State(state): State<Arc<AppState>>,
RequireAuth(user): RequireAuth,
mut multipart: Multipart,
) -> ApiResult<impl IntoResponse> {
use std::io::Cursor;
const MAX_PACK_SIZE: usize = 100 * 1024 * 1024; // 100 MB
let mut pack_bytes: Option<Vec<u8>> = None;
let mut force = false;
let mut skip_tests = false;
// Parse multipart fields
while let Some(field) = multipart
.next_field()
.await
.map_err(|e| ApiError::BadRequest(format!("Multipart error: {}", e)))?
{
match field.name() {
Some("pack") => {
let data = field.bytes().await.map_err(|e| {
ApiError::BadRequest(format!("Failed to read pack data: {}", e))
})?;
if data.len() > MAX_PACK_SIZE {
return Err(ApiError::BadRequest(format!(
"Pack archive too large: {} bytes (max {} bytes)",
data.len(),
MAX_PACK_SIZE
)));
}
pack_bytes = Some(data.to_vec());
}
Some("force") => {
let val = field.text().await.map_err(|e| {
ApiError::BadRequest(format!("Failed to read force field: {}", e))
})?;
force = val.trim().eq_ignore_ascii_case("true");
}
Some("skip_tests") => {
let val = field.text().await.map_err(|e| {
ApiError::BadRequest(format!("Failed to read skip_tests field: {}", e))
})?;
skip_tests = val.trim().eq_ignore_ascii_case("true");
}
_ => {
// Consume and ignore unknown fields
let _ = field.bytes().await;
}
}
}
let pack_data = pack_bytes.ok_or_else(|| {
ApiError::BadRequest("Missing required 'pack' field in multipart upload".to_string())
})?;
// Extract the tar.gz archive into a temporary directory
let temp_extract_dir = tempfile::tempdir().map_err(|e| {
ApiError::InternalServerError(format!("Failed to create temp directory: {}", e))
})?;
{
let cursor = Cursor::new(&pack_data[..]);
let gz = flate2::read::GzDecoder::new(cursor);
let mut archive = tar::Archive::new(gz);
archive.unpack(temp_extract_dir.path()).map_err(|e| {
ApiError::BadRequest(format!(
"Failed to extract pack archive (must be a valid .tar.gz): {}",
e
))
})?;
}
// Find pack.yaml — it may be at the root or inside a single subdirectory
// (e.g. when GitHub tarballs add a top-level directory)
let pack_root = find_pack_root(temp_extract_dir.path()).ok_or_else(|| {
ApiError::BadRequest(
"Could not find pack.yaml in the uploaded archive. \
Ensure the archive contains pack.yaml at its root or in a single top-level directory."
.to_string(),
)
})?;
// Read pack ref from pack.yaml to determine the final storage path
let pack_yaml_path = pack_root.join("pack.yaml");
let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)
.map_err(|e| ApiError::InternalServerError(format!("Failed to read pack.yaml: {}", e)))?;
let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)
.map_err(|e| ApiError::BadRequest(format!("Failed to parse pack.yaml: {}", e)))?;
let pack_ref = pack_yaml
.get("ref")
.and_then(|v| v.as_str())
.ok_or_else(|| ApiError::BadRequest("Missing 'ref' field in pack.yaml".to_string()))?
.to_string();
// Move pack to permanent storage
use attune_common::pack_registry::PackStorage;
let storage = PackStorage::new(&state.config.packs_base_dir);
let final_path = storage
.install_pack(&pack_root, &pack_ref, None)
.map_err(|e| {
ApiError::InternalServerError(format!("Failed to move pack to storage: {}", e))
})?;
tracing::info!(
"Pack '{}' uploaded and stored at {:?}",
pack_ref,
final_path
);
// Register the pack in the database
let pack_id = register_pack_internal(
state.clone(),
user.claims.sub,
final_path.to_string_lossy().to_string(),
force,
skip_tests,
)
.await
.map_err(|e| {
// Clean up permanent storage on failure
let _ = std::fs::remove_dir_all(&final_path);
e
})?;
// Fetch the registered pack
let pack = PackRepository::find_by_id(&state.db, pack_id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Pack with ID {} not found", pack_id)))?;
let response = ApiResponse::with_message(
PackInstallResponse {
pack: PackResponse::from(pack),
test_result: None,
tests_skipped: skip_tests,
},
"Pack uploaded and registered successfully",
);
Ok((StatusCode::CREATED, Json(response)))
}
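From the CLI side (added later in this commit), this endpoint is driven by `attune pack upload`. Assuming clap's usual kebab-case flag rendering, a typical invocation would look like:

attune pack upload ./mypack --force --skip-tests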
/// Walk the extracted directory and find the directory that contains `pack.yaml`.
/// Returns the path of the directory containing `pack.yaml`, or `None` if not found.
fn find_pack_root(base: &std::path::Path) -> Option<PathBuf> {
// Check root first
if base.join("pack.yaml").exists() {
return Some(base.to_path_buf());
}
// Check one level deep (e.g. GitHub tarballs: repo-main/pack.yaml)
if let Ok(entries) = std::fs::read_dir(base) {
for entry in entries.flatten() {
let path = entry.path();
if path.is_dir() && path.join("pack.yaml").exists() {
return Some(path);
}
}
}
None
}
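For reference, both of these extracted layouts resolve to a pack root (the second is the GitHub-tarball shape mentioned above):

pack.yaml, actions/, ...                      -> root is the extraction directory
repo-main/pack.yaml, repo-main/actions/, ...  -> root is repo-main/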
/// Register a pack from local filesystem
#[utoipa::path(
post,
@@ -1051,7 +1235,7 @@ async fn register_pack_internal(
// Publish pack.registered event so workers can proactively set up
// runtime environments (virtualenvs, node_modules, etc.).
if let Some(publisher) = state.get_publisher().await {
let runtime_names = attune_common::pack_environment::collect_runtime_names_for_pack(
&state.db, pack.id, &pack_path,
)
@@ -2241,6 +2425,7 @@ pub fn routes() -> Router<Arc<AppState>> {
axum::routing::post(register_packs_batch),
)
.route("/packs/install", axum::routing::post(install_pack))
.route("/packs/upload", axum::routing::post(upload_pack))
.route("/packs/download", axum::routing::post(download_packs))
.route(
"/packs/dependencies",

View File

@@ -341,7 +341,7 @@ pub async fn create_rule(
let rule = RuleRepository::create(&state.db, rule_input).await?;
// Publish RuleCreated message to notify sensor service
if let Some(publisher) = state.get_publisher().await {
let payload = RuleCreatedPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -440,7 +440,7 @@ pub async fn update_rule(
// If the rule is enabled and trigger params changed, publish RuleEnabled message
// to notify sensors to restart with new parameters
if rule.enabled && trigger_params_changed {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleEnabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -543,7 +543,7 @@ pub async fn enable_rule(
let rule = RuleRepository::update(&state.db, existing_rule.id, update_input).await?;
// Publish RuleEnabled message to notify sensor service
if let Some(publisher) = state.get_publisher().await {
let payload = RuleEnabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -606,7 +606,7 @@ pub async fn disable_rule(
let rule = RuleRepository::update(&state.db, existing_rule.id, update_input).await?;
// Publish RuleDisabled message to notify sensor service
if let Some(publisher) = state.get_publisher().await {
let payload = RuleDisabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),

View File

@@ -650,7 +650,7 @@ pub async fn receive_webhook(
"Webhook event {} created, attempting to publish EventCreated message",
event.id
);
if let Some(publisher) = state.get_publisher().await {
let message_payload = EventCreatedPayload {
event_id: event.id,
trigger_id: event.trigger,

View File

@@ -2,7 +2,7 @@
use sqlx::PgPool;
use std::sync::Arc;
use tokio::sync::{broadcast, RwLock};
use crate::auth::jwt::JwtConfig;
use attune_common::{config::Config, mq::Publisher};
@@ -18,8 +18,8 @@ pub struct AppState {
pub cors_origins: Vec<String>,
/// Application configuration
pub config: Arc<Config>,
/// Optional message queue publisher (shared, swappable after reconnection)
pub publisher: Arc<RwLock<Option<Arc<Publisher>>>>,
/// Broadcast channel for SSE notifications
pub broadcast_tx: broadcast::Sender<String>,
}
@@ -50,15 +50,20 @@ impl AppState {
jwt_config: Arc::new(jwt_config),
cors_origins,
config: Arc::new(config),
publisher: Arc::new(RwLock::new(None)),
broadcast_tx,
}
}
/// Set the message queue publisher (called once at startup or after reconnection)
pub async fn set_publisher(&self, publisher: Arc<Publisher>) {
let mut guard = self.publisher.write().await;
*guard = Some(publisher);
}
/// Get a clone of the current publisher, if available
pub async fn get_publisher(&self) -> Option<Arc<Publisher>> {
self.publisher.read().await.clone()
}
}
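A natural companion to this set/get pair (not included in this diff) is a way for request handlers to drop a publisher that fails at publish time, so the reconnect loop's `get_publisher().await.is_none()` check fires and triggers a reconnect. A hypothetical sketch:

impl AppState {
    /// Hypothetical helper: clear a broken publisher so mq_reconnect_loop
    /// notices the None and re-establishes the connection.
    pub async fn clear_publisher(&self) {
        *self.publisher.write().await = None;
    }
}

A handler would pair it with a failed publish, along these lines (publish_envelope is the method used elsewhere in this commit):

if let Some(publisher) = state.get_publisher().await {
    if let Err(e) = publisher.publish_envelope(&message).await {
        tracing::warn!("Publish failed, clearing publisher to trigger reconnect: {}", e);
        state.clear_publisher().await;
    }
}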

View File

@@ -16,12 +16,13 @@ attune-common = { path = "../common" }
# Async runtime
tokio = { workspace = true }
futures = { workspace = true }
# CLI framework
clap = { workspace = true, features = ["derive", "env", "string"] }
# HTTP client
reqwest = { workspace = true, features = ["multipart", "stream"] }
# Serialization
serde = { workspace = true }
@@ -41,6 +42,14 @@ dirs = "5.0"
# URL encoding
urlencoding = "2.1"
url = { workspace = true }
# Archive/compression
tar = { workspace = true }
flate2 = { workspace = true }
# WebSocket client (for notifier integration)
tokio-tungstenite = { workspace = true }
# Terminal UI
colored = "2.1"

View File

@@ -1,5 +1,5 @@
use anyhow::{Context, Result};
use reqwest::{multipart, Client as HttpClient, Method, RequestBuilder, Response, StatusCode};
use serde::{de::DeserializeOwned, Serialize};
use std::path::PathBuf;
use std::time::Duration;
@@ -39,7 +39,7 @@ impl ApiClient {
Self {
client: HttpClient::builder()
.timeout(Duration::from_secs(300)) // longer timeout for uploads
.build()
.expect("Failed to build HTTP client"),
base_url,
@@ -50,10 +50,15 @@ impl ApiClient {
}
/// Create a new API client
/// Return the base URL this client is configured to talk to.
pub fn base_url(&self) -> &str {
&self.base_url
}
#[cfg(test)]
pub fn new(base_url: String, auth_token: Option<String>) -> Self {
let client = HttpClient::builder()
.timeout(Duration::from_secs(300))
.build()
.expect("Failed to build HTTP client");
@@ -296,6 +301,55 @@ impl ApiClient {
anyhow::bail!("API error ({}): {}", status, error_text);
}
}
/// POST a multipart/form-data request with a file field and optional text fields.
///
/// - `file_field_name`: the multipart field name for the file
/// - `file_bytes`: raw bytes of the file content
/// - `file_name`: filename hint sent in the Content-Disposition header
/// - `mime_type`: MIME type of the file (e.g. `"application/gzip"`)
/// - `extra_fields`: additional text key/value fields to include in the form
pub async fn multipart_post<T: DeserializeOwned>(
&mut self,
path: &str,
file_field_name: &str,
file_bytes: Vec<u8>,
file_name: &str,
mime_type: &str,
extra_fields: Vec<(&str, String)>,
) -> Result<T> {
let url = format!("{}/api/v1{}", self.base_url, path);
let file_part = multipart::Part::bytes(file_bytes)
.file_name(file_name.to_string())
.mime_str(mime_type)
.context("Invalid MIME type")?;
let mut form = multipart::Form::new().part(file_field_name.to_string(), file_part);
for (key, value) in extra_fields {
form = form.text(key.to_string(), value);
}
let mut req = self.client.post(&url).multipart(form);
if let Some(token) = &self.auth_token {
req = req.bearer_auth(token);
}
let response = req.send().await.context("Failed to send multipart request to API")?;
// Handle 401 + refresh (same pattern as execute())
if response.status() == StatusCode::UNAUTHORIZED && self.refresh_token.is_some() {
if self.refresh_auth_token().await? {
return Err(anyhow::anyhow!(
"Token expired and was refreshed. Please retry your command."
));
}
}
self.handle_response(response).await
}
}
#[cfg(test)]

View File

@@ -6,6 +6,7 @@ use std::collections::HashMap;
use crate::client::ApiClient;
use crate::config::CliConfig;
use crate::output::{self, OutputFormat};
use crate::wait::{wait_for_execution, WaitOptions};
#[derive(Subcommand)]
pub enum ActionCommands {
@@ -74,6 +75,11 @@ pub enum ActionCommands {
/// Timeout in seconds when waiting (default: 300)
#[arg(long, default_value = "300", requires = "wait")]
timeout: u64,
/// Notifier WebSocket base URL (e.g. ws://localhost:8081).
/// Derived from --api-url automatically when not set.
#[arg(long, requires = "wait")]
notifier_url: Option<String>,
},
}
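The wait module referenced above is not shown in this hunk; a plausible sketch of the "derived from --api-url" fallback follows (the scheme swap is straightforward, while the port convention is purely an assumption):

/// Hypothetical derivation: http(s)://host:8080 -> ws(s)://host:8081
fn derive_notifier_url(api_url: &str) -> String {
    api_url
        .replacen("https://", "wss://", 1)
        .replacen("http://", "ws://", 1)
        // Assumed convention: the notifier listens one port above the API.
        .replace(":8080", ":8081")
}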
@@ -182,6 +188,7 @@ pub async fn handle_action_command(
params_json,
wait,
timeout,
notifier_url,
} => {
handle_execute(
action_ref,
@@ -191,6 +198,7 @@ pub async fn handle_action_command(
api_url,
wait,
timeout,
notifier_url,
output_format,
)
.await
@@ -415,6 +423,7 @@ async fn handle_execute(
api_url: &Option<String>,
wait: bool,
timeout: u64,
notifier_url: Option<String>,
output_format: OutputFormat,
) -> Result<()> {
let config = CliConfig::load_with_profile(profile.as_deref())?;
@@ -453,62 +462,61 @@ async fn handle_execute(
}
let path = "/executions/execute".to_string();
let execution: Execution = client.post(&path, &request).await?;
if !wait {
    match output_format {
        OutputFormat::Json | OutputFormat::Yaml => {
            output::print_output(&execution, output_format)?;
        }
        OutputFormat::Table => {
            output::print_success(&format!("Execution {} started", execution.id));
            output::print_key_value_table(vec![
                ("Execution ID", execution.id.to_string()),
                ("Action", execution.action_ref.clone()),
                ("Status", output::format_status(&execution.status)),
            ]);
        }
        _ => {}
    }
    return Ok(());
}
match output_format {
    OutputFormat::Table => {
        output::print_info(&format!(
            "Waiting for execution {} to complete...",
            execution.id
        ));
    }
    _ => {}
}
let verbose = matches!(output_format, OutputFormat::Table);
let summary = wait_for_execution(WaitOptions {
    execution_id: execution.id,
    timeout_secs: timeout,
    api_client: &mut client,
    notifier_ws_url: notifier_url,
    verbose,
})
.await?;
match output_format {
    OutputFormat::Json | OutputFormat::Yaml => {
        output::print_output(&summary, output_format)?;
    }
    OutputFormat::Table => {
        output::print_success(&format!("Execution {} completed", summary.id));
        output::print_section("Execution Details");
        output::print_key_value_table(vec![
            ("Execution ID", summary.id.to_string()),
            ("Action", summary.action_ref.clone()),
            ("Status", output::format_status(&summary.status)),
            ("Created", output::format_timestamp(&summary.created)),
            ("Updated", output::format_timestamp(&summary.updated)),
        ]);
        if let Some(result) = summary.result {
if !result.is_null() {
output::print_section("Result");
println!("{}", serde_json::to_string_pretty(&result)?);

View File

@@ -17,6 +17,14 @@ pub enum AuthCommands {
/// Password (will prompt if not provided)
#[arg(long)]
password: Option<String>,
/// API URL to log in to (saved into the profile for future use)
#[arg(long)]
url: Option<String>,
/// Save credentials into a named profile (creates it if it doesn't exist)
#[arg(long)]
save_profile: Option<String>,
},
/// Log out and clear authentication tokens
Logout,
@@ -53,8 +61,22 @@ pub async fn handle_auth_command(
output_format: OutputFormat,
) -> Result<()> {
match command {
AuthCommands::Login {
username,
password,
url,
save_profile,
} => {
// --url is a convenient alias for --api-url at login time
let effective_api_url = url.or_else(|| api_url.clone());
handle_login(
username,
password,
save_profile.as_ref().or(profile.as_ref()),
&effective_api_url,
output_format,
)
.await
}
AuthCommands::Logout => handle_logout(profile, output_format).await,
AuthCommands::Whoami => handle_whoami(profile, api_url, output_format).await,
@@ -65,11 +87,44 @@ pub async fn handle_auth_command(
async fn handle_login(
username: String,
password: Option<String>,
profile: Option<&String>,
api_url: &Option<String>,
output_format: OutputFormat,
) -> Result<()> {
// Determine which profile name will own these credentials.
// If --save-profile / --profile was given, use that; otherwise use the
// currently-active profile.
let mut config = CliConfig::load()?;
let target_profile_name = profile
.cloned()
.unwrap_or_else(|| config.current_profile.clone());
// If a URL was provided and the target profile doesn't exist yet, create it.
if !config.profiles.contains_key(&target_profile_name) {
let url = api_url.clone().unwrap_or_else(|| "http://localhost:8080".to_string());
use crate::config::Profile;
config.set_profile(
target_profile_name.clone(),
Profile {
api_url: url,
auth_token: None,
refresh_token: None,
output_format: None,
description: None,
},
)?;
} else if let Some(url) = api_url {
// Profile exists — update its api_url if an explicit URL was provided.
if let Some(p) = config.profiles.get_mut(&target_profile_name) {
p.api_url = url.clone();
}
config.save()?;
}
// Build a temporary config view that points at the target profile so
// ApiClient uses the right base URL.
let mut login_config = CliConfig::load()?;
login_config.current_profile = target_profile_name.clone();
// Prompt for password if not provided
let password = match password {
@@ -82,7 +137,7 @@ async fn handle_login(
}
};
let mut client = ApiClient::from_config(&login_config, api_url);
let login_req = LoginRequest {
login: username,
@@ -91,12 +146,17 @@ async fn handle_login(
let response: LoginResponse = client.post("/auth/login", &login_req).await?;
// Persist tokens into the target profile.
let mut config = CliConfig::load()?;
// Ensure the profile exists (it may have just been created above and saved).
if let Some(p) = config.profiles.get_mut(&target_profile_name) {
p.auth_token = Some(response.access_token.clone());
p.refresh_token = Some(response.refresh_token.clone());
config.save()?;
} else {
// Fallback: set_auth writes to the current profile.
config.set_auth(response.access_token.clone(), response.refresh_token.clone())?;
}
match output_format {
OutputFormat::Json | OutputFormat::Yaml => {
@@ -105,6 +165,12 @@ async fn handle_login(
OutputFormat::Table => {
output::print_success("Successfully logged in");
output::print_info(&format!("Token expires in {} seconds", response.expires_in));
if target_profile_name != config.current_profile {
output::print_info(&format!(
"Credentials saved to profile '{}'",
target_profile_name
));
}
}
}
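Putting the new flags together, a first-time login against a fresh environment might look like this (assuming the `username` argument keeps its positional form, which this hunk does not show):

attune auth login admin --url http://localhost:8080 --save-profile staging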

View File

@@ -1,5 +1,6 @@
use anyhow::{Context, Result};
use clap::Subcommand;
use flate2::{write::GzEncoder, Compression};
use serde::{Deserialize, Serialize};
use std::path::Path;
@@ -77,9 +78,9 @@ pub enum PackCommands {
#[arg(short = 'y', long)]
yes: bool,
},
/// Register a pack from a local directory (path must be accessible by the API server)
Register {
/// Path to pack directory (must be a path the API server can access)
path: String,
/// Force re-registration if pack already exists
@@ -90,6 +91,22 @@ pub enum PackCommands {
#[arg(long)]
skip_tests: bool,
},
/// Upload a local pack directory to the API server and register it
///
/// This command tarballs the local directory and streams it to the API,
/// so it works regardless of whether the API is local or running in Docker.
Upload {
/// Path to the local pack directory (must contain pack.yaml)
path: String,
/// Force re-registration if a pack with the same ref already exists
#[arg(short, long)]
force: bool,
/// Skip running pack tests after upload
#[arg(long)]
skip_tests: bool,
},
/// Test a pack's test suite
Test {
/// Pack reference (name) or path to pack directory
@@ -256,6 +273,15 @@ struct RegisterPackRequest {
skip_tests: bool,
}
#[derive(Debug, Serialize, Deserialize)]
struct UploadPackResponse {
pack: Pack,
#[serde(default)]
test_result: Option<serde_json::Value>,
#[serde(default)]
tests_skipped: bool,
}
pub async fn handle_pack_command(
profile: &Option<String>,
command: PackCommands,
@@ -296,6 +322,11 @@ pub async fn handle_pack_command(
force,
skip_tests,
} => handle_register(profile, path, force, skip_tests, api_url, output_format).await,
PackCommands::Upload {
path,
force,
skip_tests,
} => handle_upload(profile, path, force, skip_tests, api_url, output_format).await,
PackCommands::Test {
pack,
verbose,
@@ -593,6 +624,160 @@ async fn handle_uninstall(
Ok(())
}
async fn handle_upload(
profile: &Option<String>,
path: String,
force: bool,
skip_tests: bool,
api_url: &Option<String>,
output_format: OutputFormat,
) -> Result<()> {
let pack_dir = Path::new(&path);
// Validate the directory exists and contains pack.yaml
if !pack_dir.exists() {
anyhow::bail!("Path does not exist: {}", path);
}
if !pack_dir.is_dir() {
anyhow::bail!("Path is not a directory: {}", path);
}
let pack_yaml_path = pack_dir.join("pack.yaml");
if !pack_yaml_path.exists() {
anyhow::bail!("No pack.yaml found in: {}", path);
}
// Read pack ref from pack.yaml so we can display it
let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)
.context("Failed to read pack.yaml")?;
let pack_yaml: serde_yaml_ng::Value =
serde_yaml_ng::from_str(&pack_yaml_content).context("Failed to parse pack.yaml")?;
let pack_ref = pack_yaml
.get("ref")
.and_then(|v| v.as_str())
.unwrap_or("unknown");
match output_format {
OutputFormat::Table => {
output::print_info(&format!(
"Uploading pack '{}' from: {}",
pack_ref, path
));
output::print_info("Creating archive...");
}
_ => {}
}
// Build an in-memory tar.gz of the pack directory
let tar_gz_bytes = {
let buf = Vec::new();
let enc = GzEncoder::new(buf, Compression::default());
let mut tar = tar::Builder::new(enc);
// Walk the directory and add files to the archive
// We strip the leading path so the archive root is the pack directory contents
let abs_pack_dir = pack_dir
.canonicalize()
.context("Failed to resolve pack directory path")?;
append_dir_to_tar(&mut tar, &abs_pack_dir, &abs_pack_dir)?;
let encoder = tar.into_inner().context("Failed to finalise tar archive")?;
encoder.finish().context("Failed to flush gzip stream")?
};
let archive_size_kb = tar_gz_bytes.len() / 1024;
match output_format {
OutputFormat::Table => {
output::print_info(&format!(
"Archive ready ({} KB), uploading...",
archive_size_kb
));
}
_ => {}
}
let config = CliConfig::load_with_profile(profile.as_deref())?;
let mut client = ApiClient::from_config(&config, api_url);
let mut extra_fields = Vec::new();
if force {
extra_fields.push(("force", "true".to_string()));
}
if skip_tests {
extra_fields.push(("skip_tests", "true".to_string()));
}
let archive_name = format!("{}.tar.gz", pack_ref);
let response: UploadPackResponse = client
.multipart_post(
"/packs/upload",
"pack",
tar_gz_bytes,
&archive_name,
"application/gzip",
extra_fields,
)
.await?;
match output_format {
OutputFormat::Json | OutputFormat::Yaml => {
output::print_output(&response, output_format)?;
}
OutputFormat::Table => {
println!();
output::print_success(&format!(
"✓ Pack '{}' uploaded and registered successfully",
response.pack.pack_ref
));
output::print_info(&format!(" Version: {}", response.pack.version));
output::print_info(&format!(" ID: {}", response.pack.id));
if response.tests_skipped {
output::print_info(" ⚠ Tests were skipped");
} else if let Some(test_result) = &response.test_result {
if let Some(status) = test_result.get("status").and_then(|s| s.as_str()) {
if status == "passed" {
output::print_success(" ✓ All tests passed");
} else if status == "failed" {
output::print_error(" ✗ Some tests failed");
}
}
}
}
}
Ok(())
}
/// Recursively append a directory's contents to a tar archive.
/// `base` is the root directory being archived; `dir` is the current directory
/// being walked. Files are stored with paths relative to `base`.
fn append_dir_to_tar<W: std::io::Write>(
tar: &mut tar::Builder<W>,
base: &Path,
dir: &Path,
) -> Result<()> {
for entry in std::fs::read_dir(dir).context("Failed to read directory")? {
let entry = entry.context("Failed to read directory entry")?;
let entry_path = entry.path();
let relative_path = entry_path
.strip_prefix(base)
.context("Failed to compute relative path")?;
if entry_path.is_dir() {
append_dir_to_tar(tar, base, &entry_path)?;
} else if entry_path.is_file() {
tar.append_path_with_name(&entry_path, relative_path)
.with_context(|| {
format!("Failed to add {} to archive", entry_path.display())
})?;
}
// symlinks are intentionally skipped
}
Ok(())
}
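// A minimal test sketch for `append_dir_to_tar` (illustrative, not part of
// this commit): archives a temp directory and asserts that entries are stored
// relative to the base. Assumes `tempfile` is available as a dev-dependency
// of the CLI crate.
#[cfg(test)]
mod tar_tests {
use super::*;
#[test]
fn archives_files_relative_to_base() -> Result<()> {
let dir = tempfile::tempdir()?;
std::fs::create_dir(dir.path().join("actions"))?;
std::fs::write(dir.path().join("pack.yaml"), "ref: demo")?;
std::fs::write(dir.path().join("actions/run.sh"), "echo hi")?;
let mut builder = tar::Builder::new(Vec::new());
let base = dir.path().canonicalize()?;
append_dir_to_tar(&mut builder, &base, &base)?;
let bytes = builder.into_inner()?;
// Read the archive back and collect the stored entry paths.
let mut archive = tar::Archive::new(bytes.as_slice());
let mut names = Vec::new();
for entry in archive.entries()? {
let entry = entry?;
names.push(entry.path()?.to_string_lossy().into_owned());
}
names.sort();
assert_eq!(names, vec!["actions/run.sh", "pack.yaml"]);
Ok(())
}
}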
async fn handle_register(
profile: &Option<String>,
path: String,
@@ -604,19 +789,39 @@ async fn handle_register(
let config = CliConfig::load_with_profile(profile.as_deref())?;
let mut client = ApiClient::from_config(&config, api_url);
// Warn if the path looks like a local filesystem path that the API server
// probably can't see (i.e. not a known container mount point).
let looks_local = !path.starts_with("/opt/attune/")
&& !path.starts_with("/app/")
&& !path.starts_with("/packs");
if looks_local {
match output_format {
OutputFormat::Table => {
output::print_info(&format!("Registering pack from: {}", path));
eprintln!(
"⚠ Warning: '{}' looks like a local path. If the API is running in \
Docker it may not be able to access this path.\n \
Use `attune pack upload {}` instead to upload the pack directly.",
path, path
);
}
_ => {}
}
} else {
match output_format {
OutputFormat::Table => {
output::print_info(&format!("Registering pack from: {}", path));
}
_ => {}
}
}
let request = RegisterPackRequest {
path: path.clone(),
force,
skip_tests,
};
let response: PackInstallResponse = client.post("/packs/register", &request).await?;
match output_format {

View File

@@ -5,6 +5,7 @@ mod client;
mod commands;
mod config;
mod output;
mod wait;
use commands::{
action::{handle_action_command, ActionCommands},
@@ -112,6 +113,11 @@ enum Commands {
/// Timeout in seconds when waiting (default: 300)
#[arg(long, default_value = "300", requires = "wait")]
timeout: u64,
/// Notifier WebSocket base URL (e.g. ws://localhost:8081).
/// Derived from --api-url automatically when not set.
#[arg(long, requires = "wait")]
notifier_url: Option<String>,
},
}
@@ -193,6 +199,7 @@ async fn main() {
params_json,
wait,
timeout,
notifier_url,
} => {
// Delegate to action execute command
handle_action_command(
@@ -203,6 +210,7 @@ async fn main() {
params_json,
wait,
timeout,
notifier_url,
},
&cli.api_url,
output_format,

556
crates/cli/src/wait.rs Normal file
View File

@@ -0,0 +1,556 @@
//! Waiting for execution completion.
//!
//! Tries to connect to the notifier WebSocket first so the CLI reacts
//! *immediately* when the execution reaches a terminal state. If the
//! notifier is unreachable (not configured, different port, Docker network
//! boundary, etc.) it transparently falls back to REST polling.
//!
//! Public surface:
//! - [`WaitOptions`] caller-supplied parameters
//! - [`wait_for_execution`] the single entry point
use anyhow::Result;
use futures::{SinkExt, StreamExt};
use serde::{Deserialize, Serialize};
use std::time::{Duration, Instant};
use tokio_tungstenite::{connect_async, tungstenite::Message};
use crate::client::ApiClient;
// ── terminal status helpers ───────────────────────────────────────────────────
fn is_terminal(status: &str) -> bool {
matches!(
status,
"completed" | "succeeded" | "failed" | "canceled" | "cancelled" | "timeout" | "timed_out"
)
}
// ── public types ─────────────────────────────────────────────────────────────
/// Result returned when the wait completes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutionSummary {
pub id: i64,
pub status: String,
pub action_ref: String,
pub result: Option<serde_json::Value>,
pub created: String,
pub updated: String,
}
/// Parameters that control how we wait.
pub struct WaitOptions<'a> {
/// Execution ID to watch.
pub execution_id: i64,
/// Overall wall-clock limit in seconds.
pub timeout_secs: u64,
/// REST API client (already authenticated).
pub api_client: &'a mut ApiClient,
/// Base URL of the *notifier* WebSocket service, e.g. `ws://localhost:8081`.
/// Derived from the API URL when not explicitly set.
pub notifier_ws_url: Option<String>,
/// If `true`, print progress lines to stderr.
pub verbose: bool,
}
// ── notifier WebSocket messages (mirrors websocket_server.rs) ────────────────
#[derive(Debug, Serialize)]
#[serde(tag = "type")]
enum ClientMsg {
#[serde(rename = "subscribe")]
Subscribe { filter: String },
#[serde(rename = "ping")]
Ping,
}
#[derive(Debug, Deserialize)]
#[serde(tag = "type")]
enum ServerMsg {
#[serde(rename = "welcome")]
Welcome {
client_id: String,
#[allow(dead_code)]
message: String,
},
#[serde(rename = "notification")]
Notification(NotifierNotification),
#[serde(rename = "error")]
Error { message: String },
#[serde(other)]
Unknown,
}
#[derive(Debug, Deserialize)]
struct NotifierNotification {
pub notification_type: String,
pub entity_type: String,
pub entity_id: i64,
pub payload: serde_json::Value,
}
// ── REST execution shape ──────────────────────────────────────────────────────
#[derive(Debug, Deserialize)]
struct RestExecution {
id: i64,
action_ref: String,
status: String,
result: Option<serde_json::Value>,
created: String,
updated: String,
}
impl From<RestExecution> for ExecutionSummary {
fn from(e: RestExecution) -> Self {
Self {
id: e.id,
status: e.status,
action_ref: e.action_ref,
result: e.result,
created: e.created,
updated: e.updated,
}
}
}
// ── entry point ───────────────────────────────────────────────────────────────
/// Wait for `execution_id` to reach a terminal status.
///
/// 1. Attempts a WebSocket connection to the notifier and subscribes to the
/// specific execution with the filter `entity:execution:<id>`.
/// 2. If the connection fails (or the notifier URL can't be derived) it falls
/// back to polling `GET /executions/<id>`, starting at 500 ms and backing off to 2 s.
/// 3. In both cases, an overall `timeout_secs` wall-clock limit is enforced.
///
/// Returns the final [`ExecutionSummary`] on success or an error if the
/// timeout is exceeded or a fatal error occurs.
pub async fn wait_for_execution(opts: WaitOptions<'_>) -> Result<ExecutionSummary> {
let overall_deadline = Instant::now() + Duration::from_secs(opts.timeout_secs);
// Reserve at least this long for polling after WebSocket gives up.
// This ensures the polling fallback always gets a fair chance even when
// the WS path consumes most of the timeout budget.
const MIN_POLL_BUDGET: Duration = Duration::from_secs(10);
// Try WebSocket path first; fall through to polling on any connection error.
if let Some(ws_url) = resolve_ws_url(&opts) {
// Give WS at most (timeout - MIN_POLL_BUDGET) so polling always has headroom.
let ws_deadline = if overall_deadline > Instant::now() + MIN_POLL_BUDGET {
overall_deadline - MIN_POLL_BUDGET
} else {
// Timeout is very short; let WS use the whole budget (polling still runs if WS fails quickly).
overall_deadline
};
match wait_via_websocket(
&ws_url,
opts.execution_id,
ws_deadline,
opts.verbose,
opts.api_client,
)
.await
{
Ok(summary) => return Ok(summary),
Err(ws_err) => {
if opts.verbose {
eprintln!(" [notifier: {}] falling back to polling", ws_err);
}
// Fall through to polling below.
}
}
} else if opts.verbose {
eprintln!(" [notifier URL not configured] using polling");
}
// Polling always uses the full overall deadline, so at minimum MIN_POLL_BUDGET
// remains (and often the full timeout if WS failed at connect time).
wait_via_polling(
opts.api_client,
opts.execution_id,
overall_deadline,
opts.verbose,
)
.await
}
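// Hypothetical call site (illustrative sketch, not part of this commit):
// how the `execute --wait` flow might drive this helper. The execution ID
// and timeout values here are assumptions.
#[allow(dead_code)]
async fn example_wait(client: &mut ApiClient) -> Result<ExecutionSummary> {
wait_for_execution(WaitOptions {
execution_id: 42,
timeout_secs: 300,
api_client: client,
notifier_ws_url: None, // derived from the API base URL when unset
verbose: true,
})
.await
}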
// ── WebSocket path ────────────────────────────────────────────────────────────
async fn wait_via_websocket(
ws_base_url: &str,
execution_id: i64,
deadline: Instant,
verbose: bool,
api_client: &mut ApiClient,
) -> Result<ExecutionSummary> {
// Build the full WS endpoint URL.
let ws_url = format!("{}/ws", ws_base_url.trim_end_matches('/'));
let connect_timeout = Duration::from_secs(5);
let remaining = deadline.saturating_duration_since(Instant::now());
if remaining.is_zero() {
anyhow::bail!("WS budget exhausted before connect");
}
let effective_connect_timeout = connect_timeout.min(remaining);
let connect_result =
tokio::time::timeout(effective_connect_timeout, connect_async(&ws_url)).await;
let (ws_stream, _response) = match connect_result {
Ok(Ok(pair)) => pair,
Ok(Err(e)) => anyhow::bail!("WebSocket connect failed: {}", e),
Err(_) => anyhow::bail!("WebSocket connect timed out"),
};
if verbose {
eprintln!(" [notifier] connected to {}", ws_url);
}
let (mut write, mut read) = ws_stream.split();
// Wait for the welcome message before subscribing.
tokio::time::timeout(Duration::from_secs(5), async {
while let Some(msg) = read.next().await {
if let Ok(Message::Text(txt)) = msg {
if let Ok(ServerMsg::Welcome { client_id, .. }) =
serde_json::from_str::<ServerMsg>(&txt)
{
if verbose {
eprintln!(" [notifier] session id {}", client_id);
}
return Ok(());
}
}
}
anyhow::bail!("connection closed before welcome")
})
.await
.map_err(|_| anyhow::anyhow!("timed out waiting for welcome message"))??;
// Subscribe to this specific execution.
let subscribe_msg = ClientMsg::Subscribe {
filter: format!("entity:execution:{}", execution_id),
};
let subscribe_json = serde_json::to_string(&subscribe_msg)?;
SinkExt::send(&mut write, Message::Text(subscribe_json.into())).await?;
if verbose {
eprintln!(
" [notifier] subscribed to entity:execution:{}",
execution_id
);
}
// ── Race-condition guard ──────────────────────────────────────────────
// The execution may have already completed in the window between the
// initial POST and when the WS subscription became active. Check once
// with the REST API *after* subscribing so there is no gap: either the
// notification arrives after this check (and we'll catch it in the loop
// below) or we catch the terminal state here.
{
let path = format!("/executions/{}", execution_id);
if let Ok(exec) = api_client.get::<RestExecution>(&path).await {
if is_terminal(&exec.status) {
if verbose {
eprintln!(
" [notifier] execution {} already terminal ('{}') — caught by post-subscribe check",
execution_id, exec.status
);
}
return Ok(exec.into());
}
}
}
// Periodically ping to keep the connection alive and check the deadline.
let ping_interval = Duration::from_secs(15);
let mut next_ping = Instant::now() + ping_interval;
loop {
let remaining = deadline.saturating_duration_since(Instant::now());
if remaining.is_zero() {
anyhow::bail!("timed out waiting for execution {}", execution_id);
}
// Wait up to the earlier of: next ping time or deadline.
let wait_for = remaining.min(next_ping.saturating_duration_since(Instant::now()));
let msg_result = tokio::time::timeout(wait_for, read.next()).await;
match msg_result {
// Received a message within the window.
Ok(Some(Ok(Message::Text(txt)))) => {
match serde_json::from_str::<ServerMsg>(&txt) {
Ok(ServerMsg::Notification(n)) => {
if n.entity_type == "execution" && n.entity_id == execution_id {
if verbose {
eprintln!(
" [notifier] {} for execution {} — status={:?}",
n.notification_type,
execution_id,
n.payload.get("status").and_then(|s| s.as_str()),
);
}
// Extract status from the notification payload.
// The notifier broadcasts the full execution row in
// `payload`, so we can read the status directly.
if let Some(status) = n.payload.get("status").and_then(|s| s.as_str()) {
if is_terminal(status) {
// Build a summary from the payload, filling
// any missing fields with sensible defaults.
return build_summary_from_payload(execution_id, &n.payload);
}
}
}
// Not our execution or not yet terminal — keep waiting.
}
Ok(ServerMsg::Error { message }) => {
anyhow::bail!("notifier error: {}", message);
}
Ok(ServerMsg::Welcome { .. } | ServerMsg::Unknown) => {
// Ignore unexpected / unrecognised messages.
}
Err(e) => {
// Surface parse failures only in verbose mode; they can happen
// if the server sends a message format we don't recognise yet.
if verbose {
eprintln!(" [notifier] ignoring unrecognised message: {}", e);
}
}
}
}
// Connection closed cleanly.
Ok(Some(Ok(Message::Close(_)))) | Ok(None) => {
anyhow::bail!("notifier WebSocket closed unexpectedly");
}
// Ping/pong, binary, and raw frames: ignore.
Ok(Some(Ok(
Message::Ping(_) | Message::Pong(_) | Message::Binary(_) | Message::Frame(_),
))) => {}
// WebSocket transport error.
Ok(Some(Err(e))) => {
anyhow::bail!("WebSocket error: {}", e);
}
// Timeout waiting for a message — time to ping.
Err(_timeout) => {
let now = Instant::now();
if now >= next_ping {
let _ = SinkExt::send(
&mut write,
Message::Text(serde_json::to_string(&ClientMsg::Ping)?.into()),
)
.await;
next_ping = now + ping_interval;
}
}
}
}
}
/// Build an [`ExecutionSummary`] from the notification payload.
/// The notifier payload matches the REST execution shape closely enough that
/// we can deserialize it directly.
fn build_summary_from_payload(
execution_id: i64,
payload: &serde_json::Value,
) -> Result<ExecutionSummary> {
// Try a full deserialize first.
if let Ok(exec) = serde_json::from_value::<RestExecution>(payload.clone()) {
return Ok(exec.into());
}
// Partial payload — assemble what we can.
Ok(ExecutionSummary {
id: execution_id,
status: payload
.get("status")
.and_then(|s| s.as_str())
.unwrap_or("unknown")
.to_string(),
action_ref: payload
.get("action_ref")
.and_then(|s| s.as_str())
.unwrap_or("")
.to_string(),
result: payload.get("result").cloned(),
created: payload
.get("created")
.and_then(|s| s.as_str())
.unwrap_or("")
.to_string(),
updated: payload
.get("updated")
.and_then(|s| s.as_str())
.unwrap_or("")
.to_string(),
})
}
// ── polling fallback ──────────────────────────────────────────────────────────
const POLL_INTERVAL: Duration = Duration::from_millis(500);
const POLL_INTERVAL_MAX: Duration = Duration::from_secs(2);
/// How quickly the poll interval grows on each successive check.
const POLL_BACKOFF_FACTOR: f64 = 1.5;
async fn wait_via_polling(
client: &mut ApiClient,
execution_id: i64,
deadline: Instant,
verbose: bool,
) -> Result<ExecutionSummary> {
if verbose {
eprintln!(" [poll] watching execution {}", execution_id);
}
let mut interval = POLL_INTERVAL;
loop {
// Poll immediately first, before sleeping — catches the case where the
// execution already finished while we were connecting to the notifier.
let path = format!("/executions/{}", execution_id);
match client.get::<RestExecution>(&path).await {
Ok(exec) => {
if is_terminal(&exec.status) {
if verbose {
eprintln!(" [poll] execution {} is {}", execution_id, exec.status);
}
return Ok(exec.into());
}
if verbose {
eprintln!(
" [poll] status = {} — checking again in {:.1}s",
exec.status,
interval.as_secs_f64()
);
}
}
Err(e) => {
if verbose {
eprintln!(" [poll] request failed ({}), retrying…", e);
}
}
}
// Check deadline *after* the poll attempt so we always do at least one check.
if Instant::now() >= deadline {
anyhow::bail!("timed out waiting for execution {}", execution_id);
}
// Sleep, but wake up if we'd overshoot the deadline.
let sleep_for = interval.min(deadline.saturating_duration_since(Instant::now()));
tokio::time::sleep(sleep_for).await;
// Exponential back-off up to the cap.
interval = Duration::from_secs_f64(
(interval.as_secs_f64() * POLL_BACKOFF_FACTOR).min(POLL_INTERVAL_MAX.as_secs_f64()),
);
}
}
// ── URL resolution ────────────────────────────────────────────────────────────
/// Derive the notifier WebSocket base URL.
///
/// Priority:
/// 1. Explicit `notifier_ws_url` in [`WaitOptions`].
/// 2. Replace the API base URL scheme (`http` → `ws`) and port (`8080` → `8081`).
/// This covers the standard single-host layout where both services share the
/// same hostname.
fn resolve_ws_url(opts: &WaitOptions<'_>) -> Option<String> {
if let Some(url) = &opts.notifier_ws_url {
return Some(url.clone());
}
// Derive the notifier URL from the API client's base URL.
let api_url = opts.api_client.base_url();
// Transform http(s)://host:PORT/... → ws(s)://host:8081
let ws_url = derive_notifier_url(&api_url)?;
Some(ws_url)
}
/// Convert an HTTP API base URL into the expected notifier WebSocket URL.
///
/// - `http://localhost:8080` → `ws://localhost:8081`
/// - `https://api.example.com` → `wss://api.example.com:8081`
/// - `http://api.example.com:9000` → `ws://api.example.com:8081`
fn derive_notifier_url(api_url: &str) -> Option<String> {
let url = url::Url::parse(api_url).ok()?;
let ws_scheme = match url.scheme() {
"https" => "wss",
_ => "ws",
};
let host = url.host_str()?;
Some(format!("{}://{}:8081", ws_scheme, host))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_terminal() {
assert!(is_terminal("completed"));
assert!(is_terminal("succeeded"));
assert!(is_terminal("failed"));
assert!(is_terminal("canceled"));
assert!(is_terminal("cancelled"));
assert!(is_terminal("timeout"));
assert!(is_terminal("timed_out"));
assert!(!is_terminal("requested"));
assert!(!is_terminal("scheduled"));
assert!(!is_terminal("running"));
}
#[test]
fn test_derive_notifier_url() {
assert_eq!(
derive_notifier_url("http://localhost:8080"),
Some("ws://localhost:8081".to_string())
);
assert_eq!(
derive_notifier_url("https://api.example.com"),
Some("wss://api.example.com:8081".to_string())
);
assert_eq!(
derive_notifier_url("http://api.example.com:9000"),
Some("ws://api.example.com:8081".to_string())
);
assert_eq!(
derive_notifier_url("http://10.0.0.5:8080"),
Some("ws://10.0.0.5:8081".to_string())
);
}
#[test]
fn test_build_summary_from_full_payload() {
let payload = serde_json::json!({
"id": 42,
"action_ref": "core.echo",
"status": "completed",
"result": { "stdout": "hi" },
"created": "2026-01-01T00:00:00Z",
"updated": "2026-01-01T00:00:01Z"
});
let summary = build_summary_from_payload(42, &payload).unwrap();
assert_eq!(summary.id, 42);
assert_eq!(summary.status, "completed");
assert_eq!(summary.action_ref, "core.echo");
}
#[test]
fn test_build_summary_from_partial_payload() {
let payload = serde_json::json!({ "status": "failed" });
let summary = build_summary_from_payload(7, &payload).unwrap();
assert_eq!(summary.id, 7);
assert_eq!(summary.status, "failed");
assert_eq!(summary.action_ref, "");
}
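#[test]
fn test_parse_notification_frame() {
// Illustrative wire-format check (added as a sketch, not part of this
// commit): a hand-written frame matching the ServerMsg shape above.
// Field values are assumptions, not captured from a live notifier.
let frame = r#"{"type":"notification","notification_type":"updated","entity_type":"execution","entity_id":42,"payload":{"status":"completed"}}"#;
match serde_json::from_str::<ServerMsg>(frame).unwrap() {
ServerMsg::Notification(n) => {
assert_eq!(n.entity_type, "execution");
assert_eq!(n.entity_id, 42);
assert_eq!(
n.payload.get("status").and_then(|s| s.as_str()),
Some("completed")
);
}
other => panic!("unexpected variant: {:?}", other),
}
}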
}

View File

@@ -582,6 +582,13 @@ pub struct Config {
#[serde(default = "default_runtime_envs_dir")]
pub runtime_envs_dir: String,
/// Artifacts directory (shared volume for file-based artifact storage).
/// File-type artifacts (FileBinary, FileDatatable, FileText, Log) are stored
/// on disk at this location rather than in the database.
/// Pattern: {artifacts_dir}/{ref_slug}/v{version}.{ext}
#[serde(default = "default_artifacts_dir")]
pub artifacts_dir: String,
/// Notifier configuration (optional, for notifier service)
pub notifier: Option<NotifierConfig>,
@@ -609,6 +616,10 @@ fn default_runtime_envs_dir() -> String {
"/opt/attune/runtime_envs".to_string()
}
fn default_artifacts_dir() -> String {
"/opt/attune/artifacts".to_string()
}
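// Illustrative sketch (not part of this commit): assembling a version's
// on-disk location from the documented pattern. `ref_slug` and `ext` are
// hypothetical inputs; the API layer owns the real naming.
#[allow(dead_code)]
fn example_artifact_file_path(
artifacts_dir: &str,
ref_slug: &str,
version: i32,
ext: &str,
) -> std::path::PathBuf {
std::path::Path::new(artifacts_dir)
.join(ref_slug)
.join(format!("v{}.{}", version, ext))
}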
impl Default for DatabaseConfig {
fn default() -> Self {
Self {
@@ -844,6 +855,7 @@ mod tests {
sensor: None,
packs_base_dir: default_packs_base_dir(),
runtime_envs_dir: default_runtime_envs_dir(),
artifacts_dir: default_artifacts_dir(),
notifier: None,
pack_registry: PackRegistryConfig::default(),
executor: None,
@@ -917,6 +929,7 @@ mod tests {
sensor: None,
packs_base_dir: default_packs_base_dir(),
runtime_envs_dir: default_runtime_envs_dir(),
artifacts_dir: default_artifacts_dir(),
notifier: None,
pack_registry: PackRegistryConfig::default(),
executor: None,

View File

@@ -367,6 +367,24 @@ pub mod enums {
Minutes,
}
/// Visibility level for artifacts.
/// - `Public`: viewable by all authenticated users on the platform.
/// - `Private`: restricted based on the artifact's `scope` and `owner` fields.
/// Full RBAC enforcement is deferred; for now the field enables filtering.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Type, ToSchema)]
#[sqlx(type_name = "artifact_visibility_enum", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
pub enum ArtifactVisibility {
Public,
Private,
}
impl Default for ArtifactVisibility {
fn default() -> Self {
Self::Private
}
}
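// Spot-check sketch (illustrative, not part of this commit): with
// `rename_all = "lowercase"`, visibility travels over the wire as
// "public"/"private", and the default is private.
#[cfg(test)]
mod visibility_tests {
use super::*;
#[test]
fn lowercase_wire_format_and_default() {
assert_eq!(
serde_json::to_string(&ArtifactVisibility::Public).unwrap(),
"\"public\""
);
assert_eq!(ArtifactVisibility::default(), ArtifactVisibility::Private);
}
}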
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Type, ToSchema)]
#[sqlx(type_name = "workflow_task_status_enum", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
@@ -1268,6 +1286,7 @@ pub mod artifact {
pub scope: OwnerType,
pub owner: String,
pub r#type: ArtifactType,
pub visibility: ArtifactVisibility,
pub retention_policy: RetentionPolicyType,
pub retention_limit: i32,
/// Human-readable name (e.g. "Build Log", "Test Results")
@@ -1289,7 +1308,7 @@ pub mod artifact {
/// Select columns for Artifact queries (excludes DB-only columns if any arise).
/// Must be kept in sync with the Artifact struct field order.
pub const SELECT_COLUMNS: &str =
"id, ref, scope, owner, type, retention_policy, retention_limit, \
"id, ref, scope, owner, type, visibility, retention_policy, retention_limit, \
name, description, content_type, size_bytes, execution, data, \
created, updated";
}
@@ -1314,6 +1333,10 @@ pub mod artifact_version {
pub content: Option<Vec<u8>>,
/// Structured JSON content
pub content_json: Option<serde_json::Value>,
/// Relative path from `artifacts_dir` root for disk-stored content.
/// When set, `content` BYTEA is NULL — the file lives on a shared volume.
/// Pattern: `{ref_slug}/v{version}.{ext}`
pub file_path: Option<String>,
/// Free-form metadata about this version
pub meta: Option<serde_json::Value>,
/// Who created this version
@@ -1324,12 +1347,12 @@ pub mod artifact_version {
/// Select columns WITHOUT the potentially large `content` BYTEA column.
/// Use `SELECT_COLUMNS_WITH_CONTENT` when you need the binary payload.
pub const SELECT_COLUMNS: &str = "id, artifact, version, content_type, size_bytes, \
NULL::bytea AS content, content_json, file_path, meta, created_by, created";
/// Select columns INCLUDING the binary `content` column.
pub const SELECT_COLUMNS_WITH_CONTENT: &str =
"id, artifact, version, content_type, size_bytes, \
content, content_json, file_path, meta, created_by, created";
}
/// Workflow orchestration models

View File

@@ -5,7 +5,7 @@
//! with headers and payload.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::Value as JsonValue;
use uuid::Uuid;
@@ -124,6 +124,17 @@ impl MessageType {
}
}
/// Deserialize a UUID, substituting a freshly-generated one when the value is
/// null or absent. This keeps envelope parsing tolerant of messages that were
/// hand-crafted or produced by older tooling.
fn deserialize_uuid_default<'de, D>(deserializer: D) -> Result<Uuid, D::Error>
where
D: Deserializer<'de>,
{
let opt: Option<Uuid> = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_else(Uuid::new_v4))
}
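// Quick check (illustrative sketch, not part of this commit): a null or
// absent id deserializes to a freshly generated UUID instead of failing.
#[cfg(test)]
mod envelope_default_tests {
use super::*;
#[derive(Deserialize)]
struct Probe {
#[serde(default = "Uuid::new_v4", deserialize_with = "deserialize_uuid_default")]
id: Uuid,
}
#[test]
fn null_or_missing_id_gets_fresh_uuid() {
let from_null: Probe = serde_json::from_str(r#"{"id":null}"#).unwrap();
let from_missing: Probe = serde_json::from_str("{}").unwrap();
assert_ne!(from_null.id, Uuid::nil());
assert_ne!(from_missing.id, Uuid::nil());
}
}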
/// Message envelope that wraps all messages with metadata
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MessageEnvelope<T>
@@ -131,9 +142,17 @@ where
T: Clone,
{
/// Unique message identifier
#[serde(
default = "Uuid::new_v4",
deserialize_with = "deserialize_uuid_default"
)]
pub message_id: Uuid,
/// Correlation ID for tracing related messages
#[serde(
default = "Uuid::new_v4",
deserialize_with = "deserialize_uuid_default"
)]
pub correlation_id: Uuid,
/// Message type

View File

@@ -3,7 +3,7 @@
use crate::models::{
artifact::*,
artifact_version::ArtifactVersion,
enums::{ArtifactType, ArtifactVisibility, OwnerType, RetentionPolicyType},
};
use crate::Result;
use sqlx::{Executor, Postgres, QueryBuilder};
@@ -29,6 +29,7 @@ pub struct CreateArtifactInput {
pub scope: OwnerType,
pub owner: String,
pub r#type: ArtifactType,
pub visibility: ArtifactVisibility,
pub retention_policy: RetentionPolicyType,
pub retention_limit: i32,
pub name: Option<String>,
@@ -44,6 +45,7 @@ pub struct UpdateArtifactInput {
pub scope: Option<OwnerType>,
pub owner: Option<String>,
pub r#type: Option<ArtifactType>,
pub visibility: Option<ArtifactVisibility>,
pub retention_policy: Option<RetentionPolicyType>,
pub retention_limit: Option<i32>,
pub name: Option<String>,
@@ -59,6 +61,7 @@ pub struct ArtifactSearchFilters {
pub scope: Option<OwnerType>,
pub owner: Option<String>,
pub r#type: Option<ArtifactType>,
pub visibility: Option<ArtifactVisibility>,
pub execution: Option<i64>,
pub name_contains: Option<String>,
pub limit: u32,
@@ -127,9 +130,9 @@ impl Create for ArtifactRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
let query = format!(
"INSERT INTO artifact (ref, scope, owner, type, retention_policy, retention_limit, \
"INSERT INTO artifact (ref, scope, owner, type, visibility, retention_policy, retention_limit, \
name, description, content_type, execution, data) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) \
RETURNING {}",
SELECT_COLUMNS
);
@@ -138,6 +141,7 @@ impl Create for ArtifactRepository {
.bind(input.scope)
.bind(&input.owner)
.bind(input.r#type)
.bind(input.visibility)
.bind(input.retention_policy)
.bind(input.retention_limit)
.bind(&input.name)
@@ -178,6 +182,7 @@ impl Update for ArtifactRepository {
push_field!(input.scope, "scope");
push_field!(&input.owner, "owner");
push_field!(input.r#type, "type");
push_field!(input.visibility, "visibility");
push_field!(input.retention_policy, "retention_policy");
push_field!(input.retention_limit, "retention_limit");
push_field!(&input.name, "name");
@@ -241,6 +246,10 @@ impl ArtifactRepository {
param_idx += 1;
conditions.push(format!("type = ${}", param_idx));
}
if filters.visibility.is_some() {
param_idx += 1;
conditions.push(format!("visibility = ${}", param_idx));
}
if filters.execution.is_some() {
param_idx += 1;
conditions.push(format!("execution = ${}", param_idx));
@@ -270,6 +279,9 @@ impl ArtifactRepository {
if let Some(r#type) = filters.r#type {
count_query = count_query.bind(r#type);
}
if let Some(visibility) = filters.visibility {
count_query = count_query.bind(visibility);
}
if let Some(execution) = filters.execution {
count_query = count_query.bind(execution);
}
@@ -298,6 +310,9 @@ impl ArtifactRepository {
if let Some(r#type) = filters.r#type {
data_query = data_query.bind(r#type);
}
if let Some(visibility) = filters.visibility {
data_query = data_query.bind(visibility);
}
if let Some(execution) = filters.execution {
data_query = data_query.bind(execution);
}
@@ -466,6 +481,21 @@ impl ArtifactRepository {
.await
.map_err(Into::into)
}
/// Update the size_bytes of an artifact (used by worker finalization to sync
/// the parent artifact's size with the latest file-based version).
pub async fn update_size_bytes<'e, E>(executor: E, id: i64, size_bytes: i64) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result =
sqlx::query("UPDATE artifact SET size_bytes = $1, updated = NOW() WHERE id = $2")
.bind(size_bytes)
.bind(id)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
}
}
// ============================================================================
@@ -489,6 +519,7 @@ pub struct CreateArtifactVersionInput {
pub content_type: Option<String>,
pub content: Option<Vec<u8>>,
pub content_json: Option<serde_json::Value>,
pub file_path: Option<String>,
pub meta: Option<serde_json::Value>,
pub created_by: Option<String>,
}
@@ -646,8 +677,8 @@ impl ArtifactVersionRepository {
let query = format!(
"INSERT INTO artifact_version \
(artifact, version, content_type, size_bytes, content, content_json, file_path, meta, created_by) \
VALUES ($1, next_artifact_version($1), $2, $3, $4, $5, $6, $7, $8) \
RETURNING {}",
artifact_version::SELECT_COLUMNS_WITH_CONTENT
);
@@ -657,6 +688,7 @@ impl ArtifactVersionRepository {
.bind(size_bytes)
.bind(&input.content)
.bind(&input.content_json)
.bind(&input.file_path)
.bind(&input.meta)
.bind(&input.created_by)
.fetch_one(executor)
@@ -699,4 +731,67 @@ impl ArtifactVersionRepository {
.await
.map_err(Into::into)
}
/// Update the size_bytes of a specific artifact version (used by worker finalization).
pub async fn update_size_bytes<'e, E>(
executor: E,
version_id: i64,
size_bytes: i64,
) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("UPDATE artifact_version SET size_bytes = $1 WHERE id = $2")
.bind(size_bytes)
.bind(version_id)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
}
/// Find all file-backed versions linked to an execution.
/// Joins artifact_version → artifact on artifact.execution to find all
/// file-based versions produced by a given execution.
pub async fn find_file_versions_by_execution<'e, E>(
executor: E,
execution_id: i64,
) -> Result<Vec<ArtifactVersion>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
// Prefix each column with the `av.` alias, except the `NULL::bytea AS content`
// pseudo-column, which must not be table-qualified.
let columns = artifact_version::SELECT_COLUMNS
.split(", ")
.map(|col| {
if col.starts_with("NULL") {
col.to_string()
} else {
format!("av.{}", col)
}
})
.collect::<Vec<_>>()
.join(", ");
let query = format!(
"SELECT {} FROM artifact_version av \
JOIN artifact a ON av.artifact = a.id \
WHERE a.execution = $1 AND av.file_path IS NOT NULL",
columns
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(execution_id)
.fetch_all(executor)
.await
.map_err(Into::into)
}
/// Find all file-backed versions for a specific artifact (used for disk cleanup on delete).
pub async fn find_file_versions_by_artifact<'e, E>(
executor: E,
artifact_id: i64,
) -> Result<Vec<ArtifactVersion>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let query = format!(
"SELECT {} FROM artifact_version WHERE artifact = $1 AND file_path IS NOT NULL",
artifact_version::SELECT_COLUMNS
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(artifact_id)
.fetch_all(executor)
.await
.map_err(Into::into)
}
}

View File

@@ -3,7 +3,9 @@
//! Tests cover CRUD operations, specialized queries, constraints,
//! enum handling, timestamps, and edge cases.
use attune_common::models::enums::{
ArtifactType, ArtifactVisibility, OwnerType, RetentionPolicyType,
};
use attune_common::repositories::artifact::{
ArtifactRepository, CreateArtifactInput, UpdateArtifactInput,
};
@@ -65,6 +67,7 @@ impl ArtifactFixture {
scope: OwnerType::System,
owner: self.unique_owner("system"),
r#type: ArtifactType::FileText,
visibility: ArtifactVisibility::default(),
retention_policy: RetentionPolicyType::Versions,
retention_limit: 5,
name: None,
@@ -252,6 +255,7 @@ async fn test_update_artifact_all_fields() {
scope: Some(OwnerType::Identity),
owner: Some(fixture.unique_owner("identity")),
r#type: Some(ArtifactType::FileImage),
visibility: Some(ArtifactVisibility::Public),
retention_policy: Some(RetentionPolicyType::Days),
retention_limit: Some(30),
name: Some("Updated Name".to_string()),

View File

@@ -2,8 +2,9 @@
use anyhow::{Context, Result};
use sqlx::postgres::PgListener;
use std::time::Duration;
use tokio::sync::broadcast;
use tracing::{debug, error, info, trace, warn};
use crate::service::Notification;
@@ -18,6 +19,8 @@ const NOTIFICATION_CHANNELS: &[&str] = &[
"enforcement_status_changed",
"event_created",
"workflow_execution_status_changed",
"artifact_created",
"artifact_updated",
];
/// PostgreSQL listener that receives NOTIFY events and broadcasts them
@@ -46,70 +49,111 @@ impl PostgresListener {
);
// Create a dedicated listener connection
let mut listener = self.create_listener().await?;
info!("PostgreSQL listener ready — entering recv loop");
// Periodic heartbeat so we can confirm the task is alive even when idle.
let heartbeat_interval = Duration::from_secs(60);
let mut next_heartbeat = tokio::time::Instant::now() + heartbeat_interval;
// Process notifications in a loop
loop {
// Log a heartbeat if no notification has arrived for a while.
let now = tokio::time::Instant::now();
if now >= next_heartbeat {
info!("PostgreSQL listener heartbeat — still waiting for notifications");
next_heartbeat = now + heartbeat_interval;
}
trace!("Calling listener.recv() — waiting for next notification");
// Use a timeout so the heartbeat fires even during long idle periods.
match tokio::time::timeout(heartbeat_interval, listener.recv()).await {
// Timed out waiting — loop back and log the heartbeat above.
Err(_timeout) => {
trace!("listener.recv() timed out — re-entering loop");
continue;
}
Ok(recv_result) => match recv_result {
Ok(pg_notification) => {
let channel = pg_notification.channel();
let payload = pg_notification.payload();
debug!(
"Received PostgreSQL notification: channel={}, payload_len={}",
channel,
payload.len()
);
debug!("Notification payload: {}", payload);
// Parse and broadcast notification
if let Err(e) = self.process_notification(channel, payload) {
error!(
"Failed to process notification from channel '{}': {}",
channel, e
);
}
}
Err(e) => {
error!("Error receiving PostgreSQL notification: {}", e);
// Sleep briefly before retrying to avoid tight loop on persistent errors
tokio::time::sleep(Duration::from_secs(1)).await;
// Try to reconnect
warn!("Attempting to reconnect PostgreSQL listener...");
match self.create_listener().await {
Ok(new_listener) => {
listener = new_listener;
next_heartbeat = tokio::time::Instant::now() + heartbeat_interval;
info!("PostgreSQL listener reconnected successfully");
}
Err(e) => {
error!("Failed to reconnect PostgreSQL listener: {}", e);
tokio::time::sleep(Duration::from_secs(5)).await;
}
}
}
}, // end Ok(recv_result)
} // end timeout match
}
}
/// Create a fresh [`PgListener`] subscribed to all notification channels.
async fn create_listener(&self) -> Result<PgListener> {
info!("Connecting PostgreSQL LISTEN connection to {}", {
// Mask the password for logging
let url = &self.database_url;
if let Some(at) = url.rfind('@') {
if let Some(colon) = url[..at].rfind(':') {
format!("{}:****{}", &url[..colon], &url[at..])
} else {
url.clone()
}
} else {
url.clone()
}
});
let mut listener = PgListener::connect(&self.database_url)
.await
.context("Failed to connect PostgreSQL listener")?;
info!("PostgreSQL LISTEN connection established — subscribing to channels");
// Use listen_all for a single round-trip instead of N separate commands
listener
.listen_all(NOTIFICATION_CHANNELS.iter().copied())
.await
.context("Failed to LISTEN on notification channels")?;
info!(
"Subscribed to {} PostgreSQL channels: {:?}",
NOTIFICATION_CHANNELS.len(),
NOTIFICATION_CHANNELS
);
Ok(listener)
}
/// Process a PostgreSQL notification and broadcast it to WebSocket clients
@@ -171,6 +215,8 @@ mod tests {
assert!(NOTIFICATION_CHANNELS.contains(&"enforcement_created"));
assert!(NOTIFICATION_CHANNELS.contains(&"enforcement_status_changed"));
assert!(NOTIFICATION_CHANNELS.contains(&"inquiry_created"));
assert!(NOTIFICATION_CHANNELS.contains(&"artifact_created"));
assert!(NOTIFICATION_CHANNELS.contains(&"artifact_updated"));
}
#[test]

View File

@@ -3,7 +3,7 @@
use anyhow::Result;
use std::sync::Arc;
use tokio::sync::broadcast;
use tracing::{debug, error, info};
use attune_common::config::Config;
@@ -108,8 +108,25 @@ impl NotifierService {
tokio::spawn(async move {
loop {
tokio::select! {
recv_result = notification_rx.recv() => {
match recv_result {
Ok(notification) => {
debug!(
"Broadcasting notification: type={}, entity_type={}, entity_id={}",
notification.notification_type,
notification.entity_type,
notification.entity_id,
);
subscriber_manager.broadcast(notification);
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => {
error!("Notification broadcaster lagged — dropped {} messages", n);
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
error!("Notification broadcast channel closed — broadcaster exiting");
break;
}
}
}
_ = shutdown_rx.recv() => {
info!("Notification broadcaster shutting down");

View File

@@ -180,6 +180,7 @@ impl SubscriberManager {
// Channel closed, client disconnected
failed_count += 1;
to_remove.push(client_id.clone());
debug!("Client {} disconnected — removing", client_id);
}
}
}
@@ -191,8 +192,12 @@ impl SubscriberManager {
if sent_count > 0 {
debug!(
"Broadcast notification: sent={}, failed={}, type={}",
sent_count, failed_count, notification.notification_type
"Broadcast notification: sent={}, failed={}, type={}, entity_type={}, entity_id={}",
sent_count,
failed_count,
notification.notification_type,
notification.entity_type,
notification.entity_id,
);
}
}

View File

@@ -157,8 +157,10 @@ async fn handle_websocket(socket: WebSocket, state: Arc<AppState>) {
let subscriber_manager_clone = state.subscriber_manager.clone();
let outgoing_task = tokio::spawn(async move {
while let Some(notification) = rx.recv().await {
// Wrap in the tagged ClientMessage envelope so the client sees
// {"type":"notification", "notification_type":..., "entity_type":..., ...}
let envelope = ClientMessage::Notification(notification);
match serde_json::to_string(&envelope) {
Ok(json) => {
if let Err(e) = ws_sender.send(Message::Text(json.into())).await {
error!("Failed to send notification to {}: {}", client_id_clone, e);

View File

@@ -17,6 +17,7 @@ use attune_common::auth::jwt::{generate_execution_token, JwtConfig};
use attune_common::error::{Error, Result};
use attune_common::models::runtime::RuntimeExecutionConfig;
use attune_common::models::{runtime::Runtime as RuntimeModel, Action, Execution, ExecutionStatus};
use attune_common::repositories::artifact::{ArtifactRepository, ArtifactVersionRepository};
use attune_common::repositories::execution::{ExecutionRepository, UpdateExecutionInput};
use attune_common::repositories::runtime_version::RuntimeVersionRepository;
use attune_common::repositories::{FindById, Update};
@@ -42,6 +43,7 @@ pub struct ActionExecutor {
max_stdout_bytes: usize,
max_stderr_bytes: usize,
packs_base_dir: PathBuf,
artifacts_dir: PathBuf,
api_url: String,
jwt_config: JwtConfig,
}
@@ -67,6 +69,7 @@ impl ActionExecutor {
max_stdout_bytes: usize,
max_stderr_bytes: usize,
packs_base_dir: PathBuf,
artifacts_dir: PathBuf,
api_url: String,
jwt_config: JwtConfig,
) -> Self {
@@ -79,6 +82,7 @@ impl ActionExecutor {
max_stdout_bytes,
max_stderr_bytes,
packs_base_dir,
artifacts_dir,
api_url,
jwt_config,
}
@@ -142,6 +146,15 @@ impl ActionExecutor {
// Don't fail the execution just because artifact storage failed
}
// Finalize file-backed artifacts (stat files on disk and update size_bytes)
if let Err(e) = self.finalize_file_artifacts(execution_id).await {
warn!(
"Failed to finalize file-backed artifacts for execution {}: {}",
execution_id, e
);
// Don't fail the execution just because artifact finalization failed
}
// Update execution with result
let is_success = result.is_success();
debug!(
@@ -291,6 +304,10 @@ impl ActionExecutor {
env.insert("ATTUNE_EXEC_ID".to_string(), execution.id.to_string());
env.insert("ATTUNE_ACTION".to_string(), execution.action_ref.clone());
env.insert("ATTUNE_API_URL".to_string(), self.api_url.clone());
env.insert(
"ATTUNE_ARTIFACTS_DIR".to_string(),
self.artifacts_dir.to_string_lossy().to_string(),
);
// Generate execution-scoped API token.
// The identity that triggered the execution is derived from the `sub` claim
@@ -657,6 +674,95 @@ impl ActionExecutor {
Ok(())
}
/// Finalize file-backed artifacts after execution completes.
///
/// Scans all artifact versions linked to this execution that have a `file_path`,
/// stats each file on disk, and updates `size_bytes` on both the version row
/// and the parent artifact row.
async fn finalize_file_artifacts(&self, execution_id: i64) -> Result<()> {
let versions =
ArtifactVersionRepository::find_file_versions_by_execution(&self.pool, execution_id)
.await?;
if versions.is_empty() {
return Ok(());
}
info!(
"Finalizing {} file-backed artifact version(s) for execution {}",
versions.len(),
execution_id,
);
// Track the latest version per artifact so we can update parent size_bytes
let mut latest_size_per_artifact: HashMap<i64, (i32, i64)> = HashMap::new();
for ver in &versions {
let file_path = match &ver.file_path {
Some(fp) => fp,
None => continue,
};
let full_path = self.artifacts_dir.join(file_path);
let size_bytes = match tokio::fs::metadata(&full_path).await {
Ok(metadata) => metadata.len() as i64,
Err(e) => {
warn!(
"Could not stat artifact file '{}' for version {}: {}. Setting size_bytes=0.",
full_path.display(),
ver.id,
e,
);
0
}
};
// Update the version row
if let Err(e) =
ArtifactVersionRepository::update_size_bytes(&self.pool, ver.id, size_bytes).await
{
warn!(
"Failed to update size_bytes for artifact version {}: {}",
ver.id, e,
);
}
// Track the highest version number per artifact for parent update
let entry = latest_size_per_artifact
.entry(ver.artifact)
.or_insert((ver.version, size_bytes));
if ver.version > entry.0 {
*entry = (ver.version, size_bytes);
}
debug!(
"Finalized artifact version {} (artifact {}): file='{}', size={}",
ver.id, ver.artifact, file_path, size_bytes,
);
}
// Update parent artifact size_bytes to reflect the latest version's size
for (artifact_id, (_version, size_bytes)) in &latest_size_per_artifact {
if let Err(e) =
ArtifactRepository::update_size_bytes(&self.pool, *artifact_id, *size_bytes).await
{
warn!(
"Failed to update size_bytes for artifact {}: {}",
artifact_id, e,
);
}
}
info!(
"Finalized file-backed artifacts for execution {}: {} version(s), {} artifact(s)",
execution_id,
versions.len(),
latest_size_per_artifact.len(),
);
Ok(())
}
/// Handle successful execution
async fn handle_execution_success(
&self,

View File

@@ -136,7 +136,7 @@ impl WorkerService {
// Initialize worker registration
let registration = Arc::new(RwLock::new(WorkerRegistration::new(pool.clone(), &config)));
// Initialize artifact manager (legacy, for stdout/stderr log storage)
let artifact_base_dir = std::path::PathBuf::from(
config
.worker
@@ -148,6 +148,22 @@ impl WorkerService {
let artifact_manager = ArtifactManager::new(artifact_base_dir);
artifact_manager.initialize().await?;
// Initialize artifacts directory for file-backed artifact storage (shared volume).
// Execution processes write artifact files here; the API serves them from the same path.
let artifacts_dir = std::path::PathBuf::from(&config.artifacts_dir);
if let Err(e) = tokio::fs::create_dir_all(&artifacts_dir).await {
warn!(
"Failed to create artifacts directory '{}': {}. File-backed artifacts may not work.",
artifacts_dir.display(),
e,
);
} else {
info!(
"Artifacts directory initialized at: {}",
artifacts_dir.display()
);
}
let packs_base_dir = std::path::PathBuf::from(&config.packs_base_dir);
let runtime_envs_dir = std::path::PathBuf::from(&config.runtime_envs_dir);
@@ -304,6 +320,7 @@ impl WorkerService {
max_stdout_bytes,
max_stderr_bytes,
packs_base_dir.clone(),
artifacts_dir,
api_url,
jwt_config,
));