artifacts!

This commit is contained in:
2026-03-03 13:42:41 -06:00
parent 5da940639a
commit 8299e5efcb
50 changed files with 4779 additions and 341 deletions

View File

@@ -68,6 +68,13 @@ jsonschema = { workspace = true }
# HTTP client
reqwest = { workspace = true }
# Archive/compression
tar = { workspace = true }
flate2 = { workspace = true }
# Temp files (used for pack upload extraction)
tempfile = { workspace = true }
# Authentication
argon2 = { workspace = true }
rand = "0.9"

View File

@@ -5,7 +5,9 @@ use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use utoipa::{IntoParams, ToSchema};
use attune_common::models::enums::{ArtifactType, OwnerType, RetentionPolicyType};
use attune_common::models::enums::{
ArtifactType, ArtifactVisibility, OwnerType, RetentionPolicyType,
};
// ============================================================================
// Artifact DTOs
@@ -30,6 +32,10 @@ pub struct CreateArtifactRequest {
#[schema(example = "file_text")]
pub r#type: ArtifactType,
/// Visibility level (public = all users, private = scope/owner restricted).
/// If omitted, defaults to `public` for progress artifacts and `private` for all others.
pub visibility: Option<ArtifactVisibility>,
/// Retention policy type
#[serde(default = "default_retention_policy")]
#[schema(example = "versions")]
@@ -81,6 +87,9 @@ pub struct UpdateArtifactRequest {
/// Updated artifact type
pub r#type: Option<ArtifactType>,
/// Updated visibility
pub visibility: Option<ArtifactVisibility>,
/// Updated retention policy
pub retention_policy: Option<RetentionPolicyType>,
@@ -138,6 +147,9 @@ pub struct ArtifactResponse {
/// Artifact type
pub r#type: ArtifactType,
/// Visibility level
pub visibility: ArtifactVisibility,
/// Retention policy
pub retention_policy: RetentionPolicyType,
@@ -185,6 +197,9 @@ pub struct ArtifactSummary {
/// Artifact type
pub r#type: ArtifactType,
/// Visibility level
pub visibility: ArtifactVisibility,
/// Human-readable name
pub name: Option<String>,
@@ -222,6 +237,9 @@ pub struct ArtifactQueryParams {
/// Filter by artifact type
pub r#type: Option<ArtifactType>,
/// Filter by visibility
pub visibility: Option<ArtifactVisibility>,
/// Filter by execution ID
pub execution: Option<i64>,
@@ -279,6 +297,23 @@ pub struct CreateVersionJsonRequest {
pub created_by: Option<String>,
}
/// Request DTO for creating a new file-backed artifact version.
/// No file content is included — the caller writes the file directly to
/// `$ATTUNE_ARTIFACTS_DIR/{file_path}` after receiving the response.
#[derive(Debug, Clone, Deserialize, ToSchema)]
pub struct CreateFileVersionRequest {
/// MIME content type (e.g. "text/plain", "application/octet-stream")
#[schema(example = "text/plain")]
pub content_type: Option<String>,
/// Free-form metadata about this version
#[schema(value_type = Option<Object>)]
pub meta: Option<JsonValue>,
/// Who created this version (e.g. action ref, identity, "system")
pub created_by: Option<String>,
}
/// Response DTO for an artifact version (without binary content)
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ArtifactVersionResponse {
@@ -301,6 +336,11 @@ pub struct ArtifactVersionResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub content_json: Option<JsonValue>,
/// Relative file path for disk-backed versions (from artifacts_dir root).
/// When present, the file content lives on the shared volume, not in the DB.
#[serde(skip_serializing_if = "Option::is_none")]
pub file_path: Option<String>,
/// Free-form metadata
#[serde(skip_serializing_if = "Option::is_none")]
pub meta: Option<JsonValue>,
@@ -327,6 +367,10 @@ pub struct ArtifactVersionSummary {
/// Size of content in bytes
pub size_bytes: Option<i64>,
/// Relative file path for disk-backed versions
#[serde(skip_serializing_if = "Option::is_none")]
pub file_path: Option<String>,
/// Who created this version
pub created_by: Option<String>,
@@ -346,6 +390,7 @@ impl From<attune_common::models::artifact::Artifact> for ArtifactResponse {
scope: a.scope,
owner: a.owner,
r#type: a.r#type,
visibility: a.visibility,
retention_policy: a.retention_policy,
retention_limit: a.retention_limit,
name: a.name,
@@ -366,6 +411,7 @@ impl From<attune_common::models::artifact::Artifact> for ArtifactSummary {
id: a.id,
r#ref: a.r#ref,
r#type: a.r#type,
visibility: a.visibility,
name: a.name,
content_type: a.content_type,
size_bytes: a.size_bytes,
@@ -387,6 +433,7 @@ impl From<attune_common::models::artifact_version::ArtifactVersion> for Artifact
content_type: v.content_type,
size_bytes: v.size_bytes,
content_json: v.content_json,
file_path: v.file_path,
meta: v.meta,
created_by: v.created_by,
created: v.created,
@@ -401,6 +448,7 @@ impl From<attune_common::models::artifact_version::ArtifactVersion> for Artifact
version: v.version,
content_type: v.content_type,
size_bytes: v.size_bytes,
file_path: v.file_path,
created_by: v.created_by,
created: v.created,
}
@@ -419,6 +467,7 @@ mod tests {
assert_eq!(params.per_page, 20);
assert!(params.scope.is_none());
assert!(params.r#type.is_none());
assert!(params.visibility.is_none());
}
#[test]
@@ -427,6 +476,7 @@ mod tests {
scope: None,
owner: None,
r#type: None,
visibility: None,
execution: None,
name: None,
page: 3,
@@ -441,6 +491,7 @@ mod tests {
scope: None,
owner: None,
r#type: None,
visibility: None,
execution: None,
name: None,
page: 1,
@@ -460,6 +511,10 @@ mod tests {
let req: CreateArtifactRequest = serde_json::from_str(json).unwrap();
assert_eq!(req.retention_policy, RetentionPolicyType::Versions);
assert_eq!(req.retention_limit, 5);
assert!(
req.visibility.is_none(),
"Omitting visibility should deserialize as None (server applies type-aware default)"
);
}
#[test]

View File

@@ -33,6 +33,86 @@ struct Args {
port: Option<u16>,
}
/// Attempt to connect to RabbitMQ and create a publisher.
/// Returns the publisher on success.
async fn try_connect_publisher(mq_url: &str) -> Result<Publisher> {
    let connection = Connection::connect(mq_url).await?;

    // Best-effort setup of the shared exchanges/DLX; a failure here usually
    // just means the infrastructure already exists, so log and continue.
    let setup_cfg = attune_common::mq::MessageQueueConfig::default();
    if let Err(e) = connection.setup_common_infrastructure(&setup_cfg).await {
        warn!(
            "Failed to setup common MQ infrastructure (may already exist): {}",
            e
        );
    }

    let publisher_cfg = PublisherConfig {
        confirm_publish: true,
        timeout_secs: 30,
        exchange: "attune.executions".to_string(),
    };
    Ok(Publisher::new(&connection, publisher_cfg).await?)
}
/// Background task that keeps trying to establish the MQ publisher connection.
/// Once connected it installs the publisher into `state`, then monitors the
/// connection health and reconnects if it drops.
async fn mq_reconnect_loop(state: Arc<AppState>, mq_url: String) {
    // Retry delay sequence (seconds): 1, 2, 4, 8, 16, 30, 30, …
    const DELAYS: [u64; 6] = [1, 2, 4, 8, 16, 30];
    let mut attempt: usize = 0;

    loop {
        match try_connect_publisher(&mq_url).await {
            Ok(publisher) => {
                info!(
                    "Message queue publisher connected (attempt {})",
                    attempt + 1
                );
                state.set_publisher(Arc::new(publisher)).await;
                // Reset backoff after a successful connect.
                attempt = 0;

                // Liveness poll: wake every 10s and check whether something
                // (e.g. a handler that hit a publish failure) cleared the
                // publisher from state; if so, fall back to the connect loop.
                //
                // TODO: add a real health-check ping when the lapin API
                // exposes one (e.g. channel.basic_noop). For now a broken
                // publisher will be detected on the first failed publish and
                // can be cleared by the handler to trigger reconnection here.
                while state.get_publisher().await.is_some() {
                    tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
                }
            }
            Err(e) => {
                // Exponential-ish backoff capped at 30s.
                let delay = DELAYS.get(attempt).copied().unwrap_or(30);
                warn!(
                    "Failed to connect to message queue (attempt {}, retrying in {}s): {}",
                    attempt + 1,
                    delay,
                    e
                );
                tokio::time::sleep(tokio::time::Duration::from_secs(delay)).await;
                attempt = attempt.saturating_add(1);
            }
        }
    }
}
#[tokio::main]
async fn main() -> Result<()> {
// Initialize tracing subscriber
@@ -66,59 +146,21 @@ async fn main() -> Result<()> {
let database = Database::new(&config.database).await?;
info!("Database connection established");
// Initialize message queue connection and publisher (optional)
let mut state = AppState::new(database.pool().clone(), config.clone());
// Initialize application state (publisher starts as None)
let state = Arc::new(AppState::new(database.pool().clone(), config.clone()));
// Spawn background MQ reconnect loop if a message queue is configured.
// The loop will keep retrying until it connects, then install the publisher
// into the shared state so request handlers can use it immediately.
if let Some(ref mq_config) = config.message_queue {
info!("Connecting to message queue...");
match Connection::connect(&mq_config.url).await {
Ok(mq_connection) => {
info!("Message queue connection established");
// Setup common message queue infrastructure (exchanges and DLX)
let mq_setup_config = attune_common::mq::MessageQueueConfig::default();
match mq_connection
.setup_common_infrastructure(&mq_setup_config)
.await
{
Ok(_) => info!("Common message queue infrastructure setup completed"),
Err(e) => {
warn!(
"Failed to setup common MQ infrastructure (may already exist): {}",
e
);
}
}
// Create publisher
match Publisher::new(
&mq_connection,
PublisherConfig {
confirm_publish: true,
timeout_secs: 30,
exchange: "attune.executions".to_string(),
},
)
.await
{
Ok(publisher) => {
info!("Message queue publisher initialized");
state = state.with_publisher(Arc::new(publisher));
}
Err(e) => {
warn!("Failed to create publisher: {}", e);
warn!("Executions will not be queued for processing");
}
}
}
Err(e) => {
warn!("Failed to connect to message queue: {}", e);
warn!("Executions will not be queued for processing");
}
}
info!("Message queue configured starting background connection loop...");
let mq_url = mq_config.url.clone();
let state_clone = state.clone();
tokio::spawn(async move {
mq_reconnect_loop(state_clone, mq_url).await;
});
} else {
warn!("Message queue not configured");
warn!("Executions will not be queued for processing");
warn!("Message queue not configured executions will not be queued for processing");
}
info!(
@@ -143,7 +185,7 @@ async fn main() -> Result<()> {
info!("PostgreSQL notification listener started");
// Create and start server
let server = Server::new(std::sync::Arc::new(state));
let server = Server::new(state.clone());
info!("Attune API Service is ready");

View File

@@ -2,6 +2,7 @@
//!
//! Provides endpoints for:
//! - CRUD operations on artifacts (metadata + data)
//! - File-backed version creation (execution writes file to shared volume)
//! - File upload (binary) and download for file-type artifacts
//! - JSON content versioning for structured artifacts
//! - Progress append for progress-type artifacts (streaming updates)
@@ -17,8 +18,9 @@ use axum::{
Json, Router,
};
use std::sync::Arc;
use tracing::warn;
use attune_common::models::enums::ArtifactType;
use attune_common::models::enums::{ArtifactType, ArtifactVisibility};
use attune_common::repositories::{
artifact::{
ArtifactRepository, ArtifactSearchFilters, ArtifactVersionRepository, CreateArtifactInput,
@@ -33,7 +35,8 @@ use crate::{
artifact::{
AppendProgressRequest, ArtifactQueryParams, ArtifactResponse, ArtifactSummary,
ArtifactVersionResponse, ArtifactVersionSummary, CreateArtifactRequest,
CreateVersionJsonRequest, SetDataRequest, UpdateArtifactRequest,
CreateFileVersionRequest, CreateVersionJsonRequest, SetDataRequest,
UpdateArtifactRequest,
},
common::{PaginatedResponse, PaginationParams},
ApiResponse, SuccessResponse,
@@ -66,6 +69,7 @@ pub async fn list_artifacts(
scope: query.scope,
owner: query.owner.clone(),
r#type: query.r#type,
visibility: query.visibility,
execution: query.execution,
name_contains: query.name.clone(),
limit: query.limit(),
@@ -175,11 +179,22 @@ pub async fn create_artifact(
)));
}
// Type-aware visibility default: progress artifacts are public by default
// (they're informational status indicators), everything else is private.
let visibility = request.visibility.unwrap_or_else(|| {
if request.r#type == ArtifactType::Progress {
ArtifactVisibility::Public
} else {
ArtifactVisibility::Private
}
});
let input = CreateArtifactInput {
r#ref: request.r#ref,
scope: request.scope,
owner: request.owner,
r#type: request.r#type,
visibility,
retention_policy: request.retention_policy,
retention_limit: request.retention_limit,
name: request.name,
@@ -229,6 +244,7 @@ pub async fn update_artifact(
scope: request.scope,
owner: request.owner,
r#type: request.r#type,
visibility: request.visibility,
retention_policy: request.retention_policy,
retention_limit: request.retention_limit,
name: request.name,
@@ -249,7 +265,7 @@ pub async fn update_artifact(
))
}
/// Delete an artifact (cascades to all versions)
/// Delete an artifact (cascades to all versions, including disk files)
#[utoipa::path(
delete,
path = "/api/v1/artifacts/{id}",
@@ -266,6 +282,22 @@ pub async fn delete_artifact(
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// Before deleting DB rows, clean up any file-backed versions on disk
let file_versions =
ArtifactVersionRepository::find_file_versions_by_artifact(&state.db, id).await?;
if !file_versions.is_empty() {
let artifacts_dir = &state.config.artifacts_dir;
cleanup_version_files(artifacts_dir, &file_versions);
// Also try to remove the artifact's parent directory if it's now empty
let ref_dir = ref_to_dir_path(&artifact.r#ref);
let full_ref_dir = std::path::Path::new(artifacts_dir).join(&ref_dir);
cleanup_empty_parents(&full_ref_dir, artifacts_dir);
}
let deleted = ArtifactRepository::delete(&state.db, id).await?;
if !deleted {
return Err(ApiError::NotFound(format!(
@@ -527,6 +559,7 @@ pub async fn create_version_json(
),
content: None,
content_json: Some(request.content),
file_path: None,
meta: request.meta,
created_by: request.created_by,
};
@@ -542,6 +575,108 @@ pub async fn create_version_json(
))
}
/// Create a new file-backed version (no file content in request).
///
/// This endpoint allocates a version number and computes a `file_path` on the
/// shared artifact volume. The caller (execution process) is expected to write
/// the file content directly to `$ATTUNE_ARTIFACTS_DIR/{file_path}` after
/// receiving the response. The worker finalizes `size_bytes` after execution.
///
/// Only applicable to file-type artifacts (FileBinary, FileDatatable, FileText, Log).
#[utoipa::path(
    post,
    path = "/api/v1/artifacts/{id}/versions/file",
    tag = "artifacts",
    params(("id" = i64, Path, description = "Artifact ID")),
    request_body = CreateFileVersionRequest,
    responses(
        (status = 201, description = "File version allocated", body = inline(ApiResponse<ArtifactVersionResponse>)),
        (status = 400, description = "Artifact type is not file-based"),
        (status = 404, description = "Artifact not found"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn create_version_file(
    RequireAuth(_user): RequireAuth,
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
    Json(request): Json<CreateFileVersionRequest>,
) -> ApiResult<impl IntoResponse> {
    // 404 if the parent artifact doesn't exist.
    let artifact = ArtifactRepository::find_by_id(&state.db, id)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;

    // Validate this is a file-type artifact; all other types must use the
    // JSON or DB-stored upload endpoints instead.
    if !is_file_backed_type(artifact.r#type) {
        return Err(ApiError::BadRequest(format!(
            "Artifact '{}' is type {:?}, which does not support file-backed versions. \
            Use POST /versions for JSON or POST /versions/upload for DB-stored files.",
            artifact.r#ref, artifact.r#type,
        )));
    }

    // Fall back to a type-appropriate MIME type when the caller omits one.
    let content_type = request
        .content_type
        .unwrap_or_else(|| default_content_type_for_artifact(artifact.r#type));

    // We need the version number to compute the file path. The DB function
    // `next_artifact_version()` is called inside the INSERT, so we create the
    // row first with file_path = NULL, then compute the path from the returned
    // version number and update the row. This avoids a race condition where two
    // concurrent requests could compute the same version number.
    let input = CreateArtifactVersionInput {
        artifact: id,
        content_type: Some(content_type.clone()),
        content: None,
        content_json: None,
        file_path: None, // Will be set in the update below
        meta: request.meta,
        created_by: request.created_by,
    };
    let version = ArtifactVersionRepository::create(&state.db, input).await?;

    // Compute the file path from the artifact ref and version number
    let file_path = compute_file_path(&artifact.r#ref, version.version, &content_type);

    // Create the parent directory on disk so the caller can write the file
    // immediately after receiving the response.
    let artifacts_dir = &state.config.artifacts_dir;
    let full_path = std::path::Path::new(artifacts_dir).join(&file_path);
    if let Some(parent) = full_path.parent() {
        tokio::fs::create_dir_all(parent).await.map_err(|e| {
            ApiError::InternalServerError(format!(
                "Failed to create artifact directory '{}': {}",
                parent.display(),
                e,
            ))
        })?;
    }

    // Update the version row with the computed file_path.
    // NOTE(review): if this UPDATE fails, the version row created above is
    // left behind with file_path = NULL — an orphan the caller cannot write
    // to. Consider deleting the row on failure; confirm desired semantics.
    sqlx::query("UPDATE artifact_version SET file_path = $1 WHERE id = $2")
        .bind(&file_path)
        .execute(&state.db)
        .await
        .map_err(|e| {
            ApiError::InternalServerError(format!(
                "Failed to set file_path on version {}: {}",
                version.id, e,
            ))
        })?;

    // Return the version with file_path populated. The `version` value we got
    // back from `create` predates the UPDATE above, so patch the path in
    // manually rather than re-querying.
    let mut response = ArtifactVersionResponse::from(version);
    response.file_path = Some(file_path);

    Ok((
        StatusCode::CREATED,
        Json(ApiResponse::with_message(
            response,
            "File version allocated — write content to $ATTUNE_ARTIFACTS_DIR/<file_path>",
        )),
    ))
}
/// Upload a binary file as a new version (multipart/form-data)
///
/// The file is sent as a multipart form field named `file`. Optional fields:
@@ -656,6 +791,7 @@ pub async fn upload_version(
content_type: Some(resolved_ct),
content: Some(file_bytes),
content_json: None,
file_path: None,
meta,
created_by,
};
@@ -671,7 +807,10 @@ pub async fn upload_version(
))
}
/// Download the binary content of a specific version
/// Download the binary content of a specific version.
///
/// For file-backed versions, reads from the shared artifact volume on disk.
/// For DB-stored versions, reads from the BYTEA/JSON content column.
#[utoipa::path(
get,
path = "/api/v1/artifacts/{id}/versions/{version}/download",
@@ -695,69 +834,33 @@ pub async fn download_version(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// File-backed version: read from disk
if let Some(ref file_path) = ver.file_path {
return serve_file_from_disk(
&state.config.artifacts_dir,
file_path,
&artifact.r#ref,
version,
ver.content_type.as_deref(),
)
.await;
}
// DB-stored version: need to fetch with content
let ver = ArtifactVersionRepository::find_by_version_with_content(&state.db, id, version)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// For binary content
if let Some(bytes) = ver.content {
let ct = ver
.content_type
.unwrap_or_else(|| "application/octet-stream".to_string());
let filename = format!(
"{}_v{}.{}",
artifact.r#ref.replace('.', "_"),
version,
extension_from_content_type(&ct)
);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response());
}
// For JSON content, serialize and return
if let Some(json) = ver.content_json {
let bytes = serde_json::to_vec_pretty(&json).map_err(|e| {
ApiError::InternalServerError(format!("Failed to serialize JSON: {}", e))
})?;
let ct = ver
.content_type
.unwrap_or_else(|| "application/json".to_string());
let filename = format!("{}_v{}.json", artifact.r#ref.replace('.', "_"), version,);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response());
}
Err(ApiError::NotFound(format!(
"Version {} of artifact {} has no downloadable content",
version, id
)))
serve_db_content(&artifact.r#ref, version, &ver)
}
/// Download the latest version's content
@@ -781,72 +884,34 @@ pub async fn download_latest(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
let ver = ArtifactVersionRepository::find_latest_with_content(&state.db, id)
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
let version = ver.version;
// For binary content
if let Some(bytes) = ver.content {
let ct = ver
.content_type
.unwrap_or_else(|| "application/octet-stream".to_string());
let filename = format!(
"{}_v{}.{}",
artifact.r#ref.replace('.', "_"),
// File-backed version: read from disk
if let Some(ref file_path) = ver.file_path {
return serve_file_from_disk(
&state.config.artifacts_dir,
file_path,
&artifact.r#ref,
version,
extension_from_content_type(&ct)
);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
ver.content_type.as_deref(),
)
.into_response());
.await;
}
// For JSON content
if let Some(json) = ver.content_json {
let bytes = serde_json::to_vec_pretty(&json).map_err(|e| {
ApiError::InternalServerError(format!("Failed to serialize JSON: {}", e))
})?;
// DB-stored version: need to fetch with content
let ver = ArtifactVersionRepository::find_latest_with_content(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
let ct = ver
.content_type
.unwrap_or_else(|| "application/json".to_string());
let filename = format!("{}_v{}.json", artifact.r#ref.replace('.', "_"), version,);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response());
}
Err(ApiError::NotFound(format!(
"Latest version of artifact {} has no downloadable content",
id
)))
serve_db_content(&artifact.r#ref, ver.version, &ver)
}
/// Delete a specific version by version number
/// Delete a specific version by version number (including disk file if file-backed)
#[utoipa::path(
delete,
path = "/api/v1/artifacts/{id}/versions/{version}",
@@ -867,7 +932,7 @@ pub async fn delete_version(
Path((id, version)): Path<(i64, i32)>,
) -> ApiResult<impl IntoResponse> {
// Verify artifact exists
ArtifactRepository::find_by_id(&state.db, id)
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
@@ -878,6 +943,25 @@ pub async fn delete_version(
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// Clean up disk file if file-backed
if let Some(ref file_path) = ver.file_path {
let artifacts_dir = &state.config.artifacts_dir;
let full_path = std::path::Path::new(artifacts_dir).join(file_path);
if full_path.exists() {
if let Err(e) = tokio::fs::remove_file(&full_path).await {
warn!(
"Failed to delete artifact file '{}': {}. DB row will still be deleted.",
full_path.display(),
e
);
}
}
// Try to clean up empty parent directories
let ref_dir = ref_to_dir_path(&artifact.r#ref);
let full_ref_dir = std::path::Path::new(artifacts_dir).join(&ref_dir);
cleanup_empty_parents(&full_ref_dir, artifacts_dir);
}
ArtifactVersionRepository::delete(&state.db, ver.id).await?;
Ok((
@@ -890,6 +974,212 @@ pub async fn delete_version(
// Helpers
// ============================================================================
/// Returns true for artifact types that should use file-backed storage on disk.
fn is_file_backed_type(artifact_type: ArtifactType) -> bool {
    match artifact_type {
        // The four file-* types store their content on the shared volume.
        ArtifactType::FileBinary
        | ArtifactType::FileText
        | ArtifactType::FileDataTable
        | ArtifactType::FileImage => true,
        // Everything else (progress, url, JSON-backed, …) stays in the DB.
        _ => false,
    }
}
/// Convert an artifact ref to a directory path by replacing dots with path separators.
/// e.g., "mypack.build_log" -> "mypack/build_log"
fn ref_to_dir_path(artifact_ref: &str) -> String {
    // Split on each dot and re-join the segments with '/': equivalent to a
    // character replacement, but makes the segment structure explicit.
    artifact_ref.split('.').collect::<Vec<_>>().join("/")
}
/// Compute the relative file path for a file-backed artifact version.
///
/// Pattern: `{ref_slug}/v{version}.{ext}`
/// e.g., `mypack/build_log/v1.txt`
pub fn compute_file_path(artifact_ref: &str, version: i32, content_type: &str) -> String {
    // Directory comes from the dotted ref; extension from the MIME type.
    format!(
        "{}/v{}.{}",
        ref_to_dir_path(artifact_ref),
        version,
        extension_from_content_type(content_type)
    )
}
/// Return a sensible default content type for a given artifact type.
fn default_content_type_for_artifact(artifact_type: ArtifactType) -> String {
    let mime = match artifact_type {
        ArtifactType::FileText => "text/plain",
        ArtifactType::FileDataTable => "text/csv",
        ArtifactType::FileImage => "image/png",
        // FileBinary and any non-file type fall back to the generic binary
        // MIME type (both produced "application/octet-stream" before).
        _ => "application/octet-stream",
    };
    mime.to_string()
}
/// Serve a file-backed artifact version from disk.
///
/// Reads `{artifacts_dir}/{file_path}` and returns it as an attachment
/// download with a filename derived from the artifact ref, version, and
/// content type.
///
/// # Errors
/// - `NotFound` when the file is missing on disk (the DB row exists but the
///   caller never wrote the content, or it was removed out-of-band).
/// - `InternalServerError` for any other I/O failure while reading.
async fn serve_file_from_disk(
    artifacts_dir: &str,
    file_path: &str,
    artifact_ref: &str,
    version: i32,
    content_type: Option<&str>,
) -> ApiResult<axum::response::Response> {
    let full_path = std::path::Path::new(artifacts_dir).join(file_path);

    // Read directly and classify the error instead of probing with
    // `exists()` first: the probe was a TOCTOU race (the file could vanish
    // between check and read) and a blocking syscall on the async executor.
    let bytes = match tokio::fs::read(&full_path).await {
        Ok(bytes) => bytes,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            return Err(ApiError::NotFound(format!(
                "File for version {} of artifact '{}' not found on disk (expected at '{}')",
                version, artifact_ref, file_path,
            )));
        }
        Err(e) => {
            return Err(ApiError::InternalServerError(format!(
                "Failed to read artifact file '{}': {}",
                full_path.display(),
                e,
            )));
        }
    };

    let ct = content_type
        .unwrap_or("application/octet-stream")
        .to_string();
    let filename = format!(
        "{}_v{}.{}",
        artifact_ref.replace('.', "_"),
        version,
        extension_from_content_type(&ct),
    );

    Ok((
        StatusCode::OK,
        [
            (header::CONTENT_TYPE, ct),
            (
                header::CONTENT_DISPOSITION,
                format!("attachment; filename=\"{}\"", filename),
            ),
        ],
        Body::from(bytes),
    )
        .into_response())
}
/// Serve a DB-stored artifact version (BYTEA or JSON content).
fn serve_db_content(
    artifact_ref: &str,
    version: i32,
    ver: &attune_common::models::artifact_version::ArtifactVersion,
) -> ApiResult<axum::response::Response> {
    // Filename prefix: dotted ref flattened with underscores.
    let slug = artifact_ref.replace('.', "_");

    // Binary content takes precedence over JSON content.
    if let Some(ref bytes) = ver.content {
        let ct = ver
            .content_type
            .clone()
            .unwrap_or_else(|| "application/octet-stream".to_string());
        let filename = format!("{}_v{}.{}", slug, version, extension_from_content_type(&ct));
        let response = (
            StatusCode::OK,
            [
                (header::CONTENT_TYPE, ct),
                (
                    header::CONTENT_DISPOSITION,
                    format!("attachment; filename=\"{}\"", filename),
                ),
            ],
            Body::from(bytes.clone()),
        )
            .into_response();
        return Ok(response);
    }

    // JSON content is pretty-printed and served as a `.json` attachment.
    if let Some(ref json) = ver.content_json {
        let bytes = serde_json::to_vec_pretty(json).map_err(|e| {
            ApiError::InternalServerError(format!("Failed to serialize JSON: {}", e))
        })?;
        let ct = ver
            .content_type
            .clone()
            .unwrap_or_else(|| "application/json".to_string());
        let filename = format!("{}_v{}.json", slug, version);
        let response = (
            StatusCode::OK,
            [
                (header::CONTENT_TYPE, ct),
                (
                    header::CONTENT_DISPOSITION,
                    format!("attachment; filename=\"{}\"", filename),
                ),
            ],
            Body::from(bytes),
        )
            .into_response();
        return Ok(response);
    }

    // Neither column populated: nothing to download.
    Err(ApiError::NotFound(format!(
        "Version {} of artifact '{}' has no downloadable content",
        version, artifact_ref,
    )))
}
/// Delete disk files for a set of file-backed artifact versions.
/// Logs warnings on failure but does not propagate errors (best-effort
/// cleanup before the DB rows are deleted).
fn cleanup_version_files(
    artifacts_dir: &str,
    versions: &[attune_common::models::artifact_version::ArtifactVersion],
) {
    for ver in versions {
        // Versions without a file_path are DB-stored; nothing on disk.
        let Some(ref file_path) = ver.file_path else {
            continue;
        };
        let full_path = std::path::Path::new(artifacts_dir).join(file_path);
        // Remove unconditionally and treat NotFound as success: probing with
        // `exists()` first was a TOCTOU race, and a missing file simply means
        // there is nothing left to clean up.
        if let Err(e) = std::fs::remove_file(&full_path) {
            if e.kind() != std::io::ErrorKind::NotFound {
                warn!(
                    "Failed to delete artifact file '{}': {}",
                    full_path.display(),
                    e,
                );
            }
        }
    }
}
/// Attempt to remove empty parent directories up to (but not including) the
/// artifacts_dir root. This is best-effort cleanup.
fn cleanup_empty_parents(dir: &std::path::Path, stop_at: &str) {
    let root = std::path::Path::new(stop_at);
    let mut cursor = dir.to_path_buf();

    // Climb toward the root, deleting each directory that turns out to be
    // empty. Stop at the first non-empty directory, the first I/O error, or
    // before touching the artifacts root itself.
    while cursor != root && cursor.starts_with(root) {
        let is_empty = match std::fs::read_dir(&cursor) {
            Ok(mut entries) => entries.next().is_none(),
            Err(_) => break,
        };
        if !is_empty {
            break;
        }
        if let Err(e) = std::fs::remove_dir(&cursor) {
            warn!(
                "Failed to remove empty directory '{}': {}",
                cursor.display(),
                e,
            );
            break;
        }
        match cursor.parent() {
            Some(parent) => cursor = parent.to_path_buf(),
            None => break,
        }
    }
}
/// Derive a simple file extension from a MIME content type
fn extension_from_content_type(ct: &str) -> &str {
match ct {
@@ -944,6 +1234,7 @@ pub fn routes() -> Router<Arc<AppState>> {
)
.route("/artifacts/{id}/versions/latest", get(get_latest_version))
.route("/artifacts/{id}/versions/upload", post(upload_version))
.route("/artifacts/{id}/versions/file", post(create_version_file))
.route(
"/artifacts/{id}/versions/{version}",
get(get_version).delete(delete_version),
@@ -975,4 +1266,61 @@ mod tests {
assert_eq!(extension_from_content_type("image/png"), "png");
assert_eq!(extension_from_content_type("unknown/type"), "bin");
}
#[test]
fn test_compute_file_path() {
    // Dots in the ref become directory separators; the extension is derived
    // from the MIME type (txt/json/csv/bin).
    assert_eq!(
        compute_file_path("mypack.build_log", 1, "text/plain"),
        "mypack/build_log/v1.txt"
    );
    assert_eq!(
        compute_file_path("mypack.build_log", 3, "application/json"),
        "mypack/build_log/v3.json"
    );
    // Multi-segment refs nest one directory per segment.
    assert_eq!(
        compute_file_path("core.test.results", 2, "text/csv"),
        "core/test/results/v2.csv"
    );
    // Unrecognized/binary types fall back to the .bin extension.
    assert_eq!(
        compute_file_path("simple", 1, "application/octet-stream"),
        "simple/v1.bin"
    );
}
#[test]
fn test_ref_to_dir_path() {
    // Each dot maps to one path separator; refs without dots pass through.
    assert_eq!(ref_to_dir_path("mypack.build_log"), "mypack/build_log");
    assert_eq!(ref_to_dir_path("simple"), "simple");
    assert_eq!(ref_to_dir_path("a.b.c.d"), "a/b/c/d");
}
#[test]
fn test_is_file_backed_type() {
    // The four file-* types use disk-backed storage…
    assert!(is_file_backed_type(ArtifactType::FileBinary));
    assert!(is_file_backed_type(ArtifactType::FileText));
    assert!(is_file_backed_type(ArtifactType::FileDataTable));
    assert!(is_file_backed_type(ArtifactType::FileImage));
    // …while non-file types stay in the database.
    assert!(!is_file_backed_type(ArtifactType::Progress));
    assert!(!is_file_backed_type(ArtifactType::Url));
}
#[test]
fn test_default_content_type_for_artifact() {
    // Each file-backed type has a type-appropriate default MIME type;
    // FileBinary (and anything else) defaults to the generic binary type.
    assert_eq!(
        default_content_type_for_artifact(ArtifactType::FileText),
        "text/plain"
    );
    assert_eq!(
        default_content_type_for_artifact(ArtifactType::FileDataTable),
        "text/csv"
    );
    assert_eq!(
        default_content_type_for_artifact(ArtifactType::FileImage),
        "image/png"
    );
    assert_eq!(
        default_content_type_for_artifact(ArtifactType::FileBinary),
        "application/octet-stream"
    );
}
}

View File

@@ -170,7 +170,7 @@ pub async fn create_event(
let event = EventRepository::create(&state.db, input).await?;
// Publish EventCreated message to message queue if publisher is available
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let message_payload = EventCreatedPayload {
event_id: event.id,
trigger_id: event.trigger,

View File

@@ -99,7 +99,7 @@ pub async fn create_execution(
.with_source("api-service")
.with_correlation_id(uuid::Uuid::new_v4());
if let Some(publisher) = &state.publisher {
if let Some(publisher) = state.get_publisher().await {
publisher.publish_envelope(&message).await.map_err(|e| {
ApiError::InternalServerError(format!("Failed to publish message: {}", e))
})?;

View File

@@ -403,7 +403,7 @@ pub async fn respond_to_inquiry(
let updated_inquiry = InquiryRepository::update(&state.db, id, update_input).await?;
// Publish InquiryResponded message if publisher is available
if let Some(publisher) = &state.publisher {
if let Some(publisher) = state.get_publisher().await {
let user_id = user
.0
.identity_id()

View File

@@ -1,7 +1,7 @@
//! Pack management API routes
use axum::{
extract::{Path, Query, State},
extract::{Multipart, Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
@@ -448,6 +448,190 @@ async fn execute_and_store_pack_tests(
Some(Ok(result))
}
/// Upload and register a pack from a tar.gz archive (multipart/form-data)
///
/// The archive should be a gzipped tar containing the pack directory at its root
/// (i.e. the archive should unpack to files like `pack.yaml`, `actions/`, etc.).
/// The multipart field name must be `pack`.
///
/// Optional form fields:
/// - `force`: `"true"` to overwrite an existing pack with the same ref
/// - `skip_tests`: `"true"` to skip test execution after registration
///
/// Flow: buffer the upload → extract to a temp dir → locate `pack.yaml` →
/// read its `ref` → move to permanent storage → register in the database.
/// On registration failure the permanently-stored copy is removed again.
#[utoipa::path(
    post,
    path = "/api/v1/packs/upload",
    tag = "packs",
    request_body(content = String, content_type = "multipart/form-data"),
    responses(
        (status = 201, description = "Pack uploaded and registered successfully", body = inline(ApiResponse<PackInstallResponse>)),
        (status = 400, description = "Invalid archive or missing pack.yaml"),
        (status = 409, description = "Pack already exists (use force=true to overwrite)"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn upload_pack(
    State(state): State<Arc<AppState>>,
    RequireAuth(user): RequireAuth,
    mut multipart: Multipart,
) -> ApiResult<impl IntoResponse> {
    use std::io::Cursor;
    // Hard cap on stored archive size. NOTE(review): the check runs after
    // `field.bytes()` has buffered the whole field in memory, so it limits
    // what we keep, not peak memory — confirm axum's request body limit
    // provides the in-flight cap.
    const MAX_PACK_SIZE: usize = 100 * 1024 * 1024; // 100 MB
    let mut pack_bytes: Option<Vec<u8>> = None;
    let mut force = false;
    let mut skip_tests = false;
    // Parse multipart fields
    while let Some(field) = multipart
        .next_field()
        .await
        .map_err(|e| ApiError::BadRequest(format!("Multipart error: {}", e)))?
    {
        match field.name() {
            Some("pack") => {
                let data = field.bytes().await.map_err(|e| {
                    ApiError::BadRequest(format!("Failed to read pack data: {}", e))
                })?;
                if data.len() > MAX_PACK_SIZE {
                    return Err(ApiError::BadRequest(format!(
                        "Pack archive too large: {} bytes (max {} bytes)",
                        data.len(),
                        MAX_PACK_SIZE
                    )));
                }
                pack_bytes = Some(data.to_vec());
            }
            // Boolean flags arrive as text; anything other than (case-insensitive)
            // "true" is treated as false.
            Some("force") => {
                let val = field.text().await.map_err(|e| {
                    ApiError::BadRequest(format!("Failed to read force field: {}", e))
                })?;
                force = val.trim().eq_ignore_ascii_case("true");
            }
            Some("skip_tests") => {
                let val = field.text().await.map_err(|e| {
                    ApiError::BadRequest(format!("Failed to read skip_tests field: {}", e))
                })?;
                skip_tests = val.trim().eq_ignore_ascii_case("true");
            }
            _ => {
                // Consume and ignore unknown fields
                // (draining keeps the multipart stream readable for the next field)
                let _ = field.bytes().await;
            }
        }
    }
    let pack_data = pack_bytes.ok_or_else(|| {
        ApiError::BadRequest("Missing required 'pack' field in multipart upload".to_string())
    })?;
    // Extract the tar.gz archive into a temporary directory.
    // The TempDir guard deletes the extraction dir when it drops, on both
    // the success and error paths.
    let temp_extract_dir = tempfile::tempdir().map_err(|e| {
        ApiError::InternalServerError(format!("Failed to create temp directory: {}", e))
    })?;
    {
        // Scoped so the decoder/archive (and their borrow of pack_data) end here.
        let cursor = Cursor::new(&pack_data[..]);
        let gz = flate2::read::GzDecoder::new(cursor);
        let mut archive = tar::Archive::new(gz);
        // Per the tar crate's `unpack` docs, entries whose paths would escape
        // the destination directory are skipped, which guards against path
        // traversal in hostile archives — TODO confirm crate version behavior.
        archive.unpack(temp_extract_dir.path()).map_err(|e| {
            ApiError::BadRequest(format!(
                "Failed to extract pack archive (must be a valid .tar.gz): {}",
                e
            ))
        })?;
    }
    // Find pack.yaml — it may be at the root or inside a single subdirectory
    // (e.g. when GitHub tarballs add a top-level directory)
    let pack_root = find_pack_root(temp_extract_dir.path()).ok_or_else(|| {
        ApiError::BadRequest(
            "Could not find pack.yaml in the uploaded archive. \
             Ensure the archive contains pack.yaml at its root or in a single top-level directory."
                .to_string(),
        )
    })?;
    // Read pack ref from pack.yaml to determine the final storage path
    let pack_yaml_path = pack_root.join("pack.yaml");
    let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)
        .map_err(|e| ApiError::InternalServerError(format!("Failed to read pack.yaml: {}", e)))?;
    let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)
        .map_err(|e| ApiError::BadRequest(format!("Failed to parse pack.yaml: {}", e)))?;
    let pack_ref = pack_yaml
        .get("ref")
        .and_then(|v| v.as_str())
        .ok_or_else(|| ApiError::BadRequest("Missing 'ref' field in pack.yaml".to_string()))?
        .to_string();
    // Move pack to permanent storage
    use attune_common::pack_registry::PackStorage;
    let storage = PackStorage::new(&state.config.packs_base_dir);
    let final_path = storage
        .install_pack(&pack_root, &pack_ref, None)
        .map_err(|e| {
            ApiError::InternalServerError(format!("Failed to move pack to storage: {}", e))
        })?;
    tracing::info!(
        "Pack '{}' uploaded and stored at {:?}",
        pack_ref,
        final_path
    );
    // Register the pack in the database
    let pack_id = register_pack_internal(
        state.clone(),
        user.claims.sub,
        final_path.to_string_lossy().to_string(),
        force,
        skip_tests,
    )
    .await
    .map_err(|e| {
        // Clean up permanent storage on failure.
        // NOTE(review): if `force` overwrote an existing pack, this removal
        // also deletes the previous version — confirm that is acceptable.
        let _ = std::fs::remove_dir_all(&final_path);
        e
    })?;
    // Fetch the registered pack
    let pack = PackRepository::find_by_id(&state.db, pack_id)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Pack with ID {} not found", pack_id)))?;
    let response = ApiResponse::with_message(
        PackInstallResponse {
            pack: PackResponse::from(pack),
            // Any tests run inside register_pack_internal; their result is
            // not surfaced in the upload response.
            test_result: None,
            tests_skipped: skip_tests,
        },
        "Pack uploaded and registered successfully",
    );
    Ok((StatusCode::CREATED, Json(response)))
}
/// Walk the extracted directory and find the directory that contains `pack.yaml`.
/// Returns the path of the directory containing `pack.yaml`, or `None` if not found.
fn find_pack_root(base: &std::path::Path) -> Option<PathBuf> {
    // Fast path: the archive unpacked with pack.yaml directly at the root.
    if base.join("pack.yaml").exists() {
        return Some(base.to_path_buf());
    }
    // Otherwise scan one level of subdirectories (GitHub-style tarballs wrap
    // the pack in a `repo-branch/` directory). An unreadable base yields None.
    std::fs::read_dir(base)
        .ok()?
        .flatten()
        .map(|entry| entry.path())
        .find(|candidate| candidate.is_dir() && candidate.join("pack.yaml").exists())
}
/// Register a pack from local filesystem
#[utoipa::path(
post,
@@ -1051,7 +1235,7 @@ async fn register_pack_internal(
// Publish pack.registered event so workers can proactively set up
// runtime environments (virtualenvs, node_modules, etc.).
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let runtime_names = attune_common::pack_environment::collect_runtime_names_for_pack(
&state.db, pack.id, &pack_path,
)
@@ -2241,6 +2425,7 @@ pub fn routes() -> Router<Arc<AppState>> {
axum::routing::post(register_packs_batch),
)
.route("/packs/install", axum::routing::post(install_pack))
.route("/packs/upload", axum::routing::post(upload_pack))
.route("/packs/download", axum::routing::post(download_packs))
.route(
"/packs/dependencies",

View File

@@ -341,7 +341,7 @@ pub async fn create_rule(
let rule = RuleRepository::create(&state.db, rule_input).await?;
// Publish RuleCreated message to notify sensor service
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleCreatedPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -440,7 +440,7 @@ pub async fn update_rule(
// If the rule is enabled and trigger params changed, publish RuleEnabled message
// to notify sensors to restart with new parameters
if rule.enabled && trigger_params_changed {
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleEnabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -543,7 +543,7 @@ pub async fn enable_rule(
let rule = RuleRepository::update(&state.db, existing_rule.id, update_input).await?;
// Publish RuleEnabled message to notify sensor service
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleEnabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -606,7 +606,7 @@ pub async fn disable_rule(
let rule = RuleRepository::update(&state.db, existing_rule.id, update_input).await?;
// Publish RuleDisabled message to notify sensor service
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleDisabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),

View File

@@ -650,7 +650,7 @@ pub async fn receive_webhook(
"Webhook event {} created, attempting to publish EventCreated message",
event.id
);
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let message_payload = EventCreatedPayload {
event_id: event.id,
trigger_id: event.trigger,

View File

@@ -2,7 +2,7 @@
use sqlx::PgPool;
use std::sync::Arc;
use tokio::sync::broadcast;
use tokio::sync::{broadcast, RwLock};
use crate::auth::jwt::JwtConfig;
use attune_common::{config::Config, mq::Publisher};
@@ -18,8 +18,8 @@ pub struct AppState {
pub cors_origins: Vec<String>,
/// Application configuration
pub config: Arc<Config>,
/// Optional message queue publisher
pub publisher: Option<Arc<Publisher>>,
/// Optional message queue publisher (shared, swappable after reconnection)
pub publisher: Arc<RwLock<Option<Arc<Publisher>>>>,
/// Broadcast channel for SSE notifications
pub broadcast_tx: broadcast::Sender<String>,
}
@@ -50,15 +50,20 @@ impl AppState {
jwt_config: Arc::new(jwt_config),
cors_origins,
config: Arc::new(config),
publisher: None,
publisher: Arc::new(RwLock::new(None)),
broadcast_tx,
}
}
/// Set the message queue publisher
pub fn with_publisher(mut self, publisher: Arc<Publisher>) -> Self {
self.publisher = Some(publisher);
self
/// Set the message queue publisher (called once at startup or after reconnection)
///
/// Holds the write lock only long enough to swap the `Option` in place, so
/// concurrent `get_publisher` readers are blocked only briefly.
pub async fn set_publisher(&self, publisher: Arc<Publisher>) {
    let mut guard = self.publisher.write().await;
    *guard = Some(publisher);
}
/// Get a clone of the current publisher, if available
///
/// The clone is a cheap `Arc` refcount bump; the read lock is released when
/// this method returns, so callers never hold the lock across their own
/// `.await` points while publishing.
pub async fn get_publisher(&self) -> Option<Arc<Publisher>> {
    self.publisher.read().await.clone()
}
}