artifacts!

This commit is contained in:
2026-03-03 13:42:41 -06:00
parent 5da940639a
commit 8299e5efcb
50 changed files with 4779 additions and 341 deletions

View File

@@ -2,6 +2,7 @@
//!
//! Provides endpoints for:
//! - CRUD operations on artifacts (metadata + data)
//! - File-backed version creation (execution writes file to shared volume)
//! - File upload (binary) and download for file-type artifacts
//! - JSON content versioning for structured artifacts
//! - Progress append for progress-type artifacts (streaming updates)
@@ -17,8 +18,9 @@ use axum::{
Json, Router,
};
use std::sync::Arc;
use tracing::warn;
use attune_common::models::enums::ArtifactType;
use attune_common::models::enums::{ArtifactType, ArtifactVisibility};
use attune_common::repositories::{
artifact::{
ArtifactRepository, ArtifactSearchFilters, ArtifactVersionRepository, CreateArtifactInput,
@@ -33,7 +35,8 @@ use crate::{
artifact::{
AppendProgressRequest, ArtifactQueryParams, ArtifactResponse, ArtifactSummary,
ArtifactVersionResponse, ArtifactVersionSummary, CreateArtifactRequest,
CreateVersionJsonRequest, SetDataRequest, UpdateArtifactRequest,
CreateFileVersionRequest, CreateVersionJsonRequest, SetDataRequest,
UpdateArtifactRequest,
},
common::{PaginatedResponse, PaginationParams},
ApiResponse, SuccessResponse,
@@ -66,6 +69,7 @@ pub async fn list_artifacts(
scope: query.scope,
owner: query.owner.clone(),
r#type: query.r#type,
visibility: query.visibility,
execution: query.execution,
name_contains: query.name.clone(),
limit: query.limit(),
@@ -175,11 +179,22 @@ pub async fn create_artifact(
)));
}
// Type-aware visibility default: progress artifacts are public by default
// (they're informational status indicators), everything else is private.
let visibility = request.visibility.unwrap_or_else(|| {
if request.r#type == ArtifactType::Progress {
ArtifactVisibility::Public
} else {
ArtifactVisibility::Private
}
});
let input = CreateArtifactInput {
r#ref: request.r#ref,
scope: request.scope,
owner: request.owner,
r#type: request.r#type,
visibility,
retention_policy: request.retention_policy,
retention_limit: request.retention_limit,
name: request.name,
@@ -229,6 +244,7 @@ pub async fn update_artifact(
scope: request.scope,
owner: request.owner,
r#type: request.r#type,
visibility: request.visibility,
retention_policy: request.retention_policy,
retention_limit: request.retention_limit,
name: request.name,
@@ -249,7 +265,7 @@ pub async fn update_artifact(
))
}
/// Delete an artifact (cascades to all versions)
/// Delete an artifact (cascades to all versions, including disk files)
#[utoipa::path(
delete,
path = "/api/v1/artifacts/{id}",
@@ -266,6 +282,22 @@ pub async fn delete_artifact(
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// Before deleting DB rows, clean up any file-backed versions on disk
let file_versions =
ArtifactVersionRepository::find_file_versions_by_artifact(&state.db, id).await?;
if !file_versions.is_empty() {
let artifacts_dir = &state.config.artifacts_dir;
cleanup_version_files(artifacts_dir, &file_versions);
// Also try to remove the artifact's parent directory if it's now empty
let ref_dir = ref_to_dir_path(&artifact.r#ref);
let full_ref_dir = std::path::Path::new(artifacts_dir).join(&ref_dir);
cleanup_empty_parents(&full_ref_dir, artifacts_dir);
}
let deleted = ArtifactRepository::delete(&state.db, id).await?;
if !deleted {
return Err(ApiError::NotFound(format!(
@@ -527,6 +559,7 @@ pub async fn create_version_json(
),
content: None,
content_json: Some(request.content),
file_path: None,
meta: request.meta,
created_by: request.created_by,
};
@@ -542,6 +575,108 @@ pub async fn create_version_json(
))
}
/// Create a new file-backed version (no file content in request).
///
/// This endpoint allocates a version number and computes a `file_path` on the
/// shared artifact volume. The caller (execution process) is expected to write
/// the file content directly to `$ATTUNE_ARTIFACTS_DIR/{file_path}` after
/// receiving the response. The worker finalizes `size_bytes` after execution.
///
/// Only applicable to the artifact types accepted by `is_file_backed_type`
/// (FileBinary, FileText, FileDataTable, FileImage).
#[utoipa::path(
    post,
    path = "/api/v1/artifacts/{id}/versions/file",
    tag = "artifacts",
    params(("id" = i64, Path, description = "Artifact ID")),
    request_body = CreateFileVersionRequest,
    responses(
        (status = 201, description = "File version allocated", body = inline(ApiResponse<ArtifactVersionResponse>)),
        (status = 400, description = "Artifact type is not file-based"),
        (status = 404, description = "Artifact not found"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn create_version_file(
    RequireAuth(_user): RequireAuth,
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
    Json(request): Json<CreateFileVersionRequest>,
) -> ApiResult<impl IntoResponse> {
    // 404 before anything else: the artifact row drives both the type check
    // and the file-path computation below.
    let artifact = ArtifactRepository::find_by_id(&state.db, id)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;

    // Validate this is a file-type artifact
    if !is_file_backed_type(artifact.r#type) {
        return Err(ApiError::BadRequest(format!(
            "Artifact '{}' is type {:?}, which does not support file-backed versions. \
             Use POST /versions for JSON or POST /versions/upload for DB-stored files.",
            artifact.r#ref, artifact.r#type,
        )));
    }

    // Caller may pin an explicit MIME type; otherwise derive one from the
    // artifact type (e.g. FileText -> text/plain).
    let content_type = request
        .content_type
        .unwrap_or_else(|| default_content_type_for_artifact(artifact.r#type));

    // We need the version number to compute the file path. The DB function
    // `next_artifact_version()` is called inside the INSERT, so we create the
    // row first with file_path = NULL, then compute the path from the returned
    // version number and update the row. This avoids a race condition where two
    // concurrent requests could compute the same version number.
    let input = CreateArtifactVersionInput {
        artifact: id,
        content_type: Some(content_type.clone()),
        content: None,
        content_json: None,
        file_path: None, // Will be set in the update below
        meta: request.meta,
        created_by: request.created_by,
    };
    let version = ArtifactVersionRepository::create(&state.db, input).await?;

    // Compute the file path from the artifact ref and version number
    let file_path = compute_file_path(&artifact.r#ref, version.version, &content_type);

    // Create the parent directory on disk
    let artifacts_dir = &state.config.artifacts_dir;
    let full_path = std::path::Path::new(artifacts_dir).join(&file_path);
    if let Some(parent) = full_path.parent() {
        tokio::fs::create_dir_all(parent).await.map_err(|e| {
            ApiError::InternalServerError(format!(
                "Failed to create artifact directory '{}': {}",
                parent.display(),
                e,
            ))
        })?;
    }

    // Update the version row with the computed file_path
    // NOTE(review): if the directory creation above or this UPDATE fails, the
    // version row persists with file_path = NULL — confirm whether such
    // orphaned rows need cleanup somewhere.
    sqlx::query("UPDATE artifact_version SET file_path = $1 WHERE id = $2")
        .bind(&file_path)
        .execute(&state.db)
        .await
        .map_err(|e| {
            ApiError::InternalServerError(format!(
                "Failed to set file_path on version {}: {}",
                version.id, e,
            ))
        })?;

    // Return the version with file_path populated
    let mut response = ArtifactVersionResponse::from(version);
    response.file_path = Some(file_path);

    Ok((
        StatusCode::CREATED,
        Json(ApiResponse::with_message(
            response,
            "File version allocated — write content to $ATTUNE_ARTIFACTS_DIR/<file_path>",
        )),
    ))
}
/// Upload a binary file as a new version (multipart/form-data)
///
/// The file is sent as a multipart form field named `file`. Optional fields:
@@ -656,6 +791,7 @@ pub async fn upload_version(
content_type: Some(resolved_ct),
content: Some(file_bytes),
content_json: None,
file_path: None,
meta,
created_by,
};
@@ -671,7 +807,10 @@ pub async fn upload_version(
))
}
/// Download the binary content of a specific version
/// Download the binary content of a specific version.
///
/// For file-backed versions, reads from the shared artifact volume on disk.
/// For DB-stored versions, reads from the BYTEA/JSON content column.
#[utoipa::path(
get,
path = "/api/v1/artifacts/{id}/versions/{version}/download",
@@ -695,69 +834,33 @@ pub async fn download_version(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// File-backed version: read from disk
if let Some(ref file_path) = ver.file_path {
return serve_file_from_disk(
&state.config.artifacts_dir,
file_path,
&artifact.r#ref,
version,
ver.content_type.as_deref(),
)
.await;
}
// DB-stored version: need to fetch with content
let ver = ArtifactVersionRepository::find_by_version_with_content(&state.db, id, version)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// For binary content
if let Some(bytes) = ver.content {
let ct = ver
.content_type
.unwrap_or_else(|| "application/octet-stream".to_string());
let filename = format!(
"{}_v{}.{}",
artifact.r#ref.replace('.', "_"),
version,
extension_from_content_type(&ct)
);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response());
}
// For JSON content, serialize and return
if let Some(json) = ver.content_json {
let bytes = serde_json::to_vec_pretty(&json).map_err(|e| {
ApiError::InternalServerError(format!("Failed to serialize JSON: {}", e))
})?;
let ct = ver
.content_type
.unwrap_or_else(|| "application/json".to_string());
let filename = format!("{}_v{}.json", artifact.r#ref.replace('.', "_"), version,);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response());
}
Err(ApiError::NotFound(format!(
"Version {} of artifact {} has no downloadable content",
version, id
)))
serve_db_content(&artifact.r#ref, version, &ver)
}
/// Download the latest version's content
@@ -781,72 +884,34 @@ pub async fn download_latest(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
let ver = ArtifactVersionRepository::find_latest_with_content(&state.db, id)
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
let version = ver.version;
// For binary content
if let Some(bytes) = ver.content {
let ct = ver
.content_type
.unwrap_or_else(|| "application/octet-stream".to_string());
let filename = format!(
"{}_v{}.{}",
artifact.r#ref.replace('.', "_"),
// File-backed version: read from disk
if let Some(ref file_path) = ver.file_path {
return serve_file_from_disk(
&state.config.artifacts_dir,
file_path,
&artifact.r#ref,
version,
extension_from_content_type(&ct)
);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
ver.content_type.as_deref(),
)
.into_response());
.await;
}
// For JSON content
if let Some(json) = ver.content_json {
let bytes = serde_json::to_vec_pretty(&json).map_err(|e| {
ApiError::InternalServerError(format!("Failed to serialize JSON: {}", e))
})?;
// DB-stored version: need to fetch with content
let ver = ArtifactVersionRepository::find_latest_with_content(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
let ct = ver
.content_type
.unwrap_or_else(|| "application/json".to_string());
let filename = format!("{}_v{}.json", artifact.r#ref.replace('.', "_"), version,);
return Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, ct),
(
header::CONTENT_DISPOSITION,
format!("attachment; filename=\"{}\"", filename),
),
],
Body::from(bytes),
)
.into_response());
}
Err(ApiError::NotFound(format!(
"Latest version of artifact {} has no downloadable content",
id
)))
serve_db_content(&artifact.r#ref, ver.version, &ver)
}
/// Delete a specific version by version number
/// Delete a specific version by version number (including disk file if file-backed)
#[utoipa::path(
delete,
path = "/api/v1/artifacts/{id}/versions/{version}",
@@ -867,7 +932,7 @@ pub async fn delete_version(
Path((id, version)): Path<(i64, i32)>,
) -> ApiResult<impl IntoResponse> {
// Verify artifact exists
ArtifactRepository::find_by_id(&state.db, id)
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
@@ -878,6 +943,25 @@ pub async fn delete_version(
ApiError::NotFound(format!("Version {} not found for artifact {}", version, id))
})?;
// Clean up disk file if file-backed
if let Some(ref file_path) = ver.file_path {
let artifacts_dir = &state.config.artifacts_dir;
let full_path = std::path::Path::new(artifacts_dir).join(file_path);
if full_path.exists() {
if let Err(e) = tokio::fs::remove_file(&full_path).await {
warn!(
"Failed to delete artifact file '{}': {}. DB row will still be deleted.",
full_path.display(),
e
);
}
}
// Try to clean up empty parent directories
let ref_dir = ref_to_dir_path(&artifact.r#ref);
let full_ref_dir = std::path::Path::new(artifacts_dir).join(&ref_dir);
cleanup_empty_parents(&full_ref_dir, artifacts_dir);
}
ArtifactVersionRepository::delete(&state.db, ver.id).await?;
Ok((
@@ -890,6 +974,212 @@ pub async fn delete_version(
// Helpers
// ============================================================================
/// Returns true for artifact types that should use file-backed storage on disk.
fn is_file_backed_type(artifact_type: ArtifactType) -> bool {
matches!(
artifact_type,
ArtifactType::FileBinary
| ArtifactType::FileText
| ArtifactType::FileDataTable
| ArtifactType::FileImage
)
}
/// Convert an artifact ref into a relative directory path: each dot-separated
/// segment becomes one path component.
/// e.g., "mypack.build_log" -> "mypack/build_log"
fn ref_to_dir_path(artifact_ref: &str) -> String {
    artifact_ref.split('.').collect::<Vec<_>>().join("/")
}
/// Compute the relative file path for a file-backed artifact version.
///
/// Pattern: `{ref_slug}/v{version}.{ext}` where the slug comes from
/// `ref_to_dir_path` and the extension from `extension_from_content_type`.
/// e.g., `mypack/build_log/v1.txt`
pub fn compute_file_path(artifact_ref: &str, version: i32, content_type: &str) -> String {
    format!(
        "{dir}/v{ver}.{ext}",
        dir = ref_to_dir_path(artifact_ref),
        ver = version,
        ext = extension_from_content_type(content_type),
    )
}
/// Return a sensible default content type for a given artifact type.
///
/// Anything that isn't one of the recognized file types falls back to a
/// generic binary stream.
fn default_content_type_for_artifact(artifact_type: ArtifactType) -> String {
    let ct = match artifact_type {
        ArtifactType::FileText => "text/plain",
        ArtifactType::FileDataTable => "text/csv",
        ArtifactType::FileImage => "image/png",
        // FileBinary and every non-file type share the generic default.
        _ => "application/octet-stream",
    };
    ct.to_string()
}
/// Serve a file-backed artifact version from disk.
///
/// Reads `{artifacts_dir}/{file_path}` and returns it as an attachment,
/// using the stored content type (falling back to
/// `application/octet-stream`).
///
/// Errors:
/// - `NotFound` when the file is missing on disk (the DB row exists but the
///   writer never produced the file, or it was removed out-of-band).
/// - `InternalServerError` for any other I/O failure.
async fn serve_file_from_disk(
    artifacts_dir: &str,
    file_path: &str,
    artifact_ref: &str,
    version: i32,
    content_type: Option<&str>,
) -> ApiResult<axum::response::Response> {
    let full_path = std::path::Path::new(artifacts_dir).join(file_path);
    // Read directly instead of an `exists()` pre-check: the pre-check was a
    // blocking syscall inside an async fn and left a TOCTOU window between
    // check and read. Distinguishing NotFound from other errors on the read
    // itself gives the same 404 behavior without the race.
    let bytes = match tokio::fs::read(&full_path).await {
        Ok(bytes) => bytes,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            return Err(ApiError::NotFound(format!(
                "File for version {} of artifact '{}' not found on disk (expected at '{}')",
                version, artifact_ref, file_path,
            )));
        }
        Err(e) => {
            return Err(ApiError::InternalServerError(format!(
                "Failed to read artifact file '{}': {}",
                full_path.display(),
                e,
            )));
        }
    };
    // Default MIME type when the version row recorded none.
    let ct = content_type
        .unwrap_or("application/octet-stream")
        .to_string();
    // Download filename: dots in the ref become underscores so the extension
    // derived from the MIME type stays unambiguous.
    let filename = format!(
        "{}_v{}.{}",
        artifact_ref.replace('.', "_"),
        version,
        extension_from_content_type(&ct),
    );
    Ok((
        StatusCode::OK,
        [
            (header::CONTENT_TYPE, ct),
            (
                header::CONTENT_DISPOSITION,
                format!("attachment; filename=\"{}\"", filename),
            ),
        ],
        Body::from(bytes),
    )
    .into_response())
}
/// Serve a DB-stored artifact version (BYTEA or JSON content).
///
/// Binary content takes precedence when both columns are populated; JSON
/// content is pretty-printed before download. Returns `NotFound` when
/// neither column holds anything.
fn serve_db_content(
    artifact_ref: &str,
    version: i32,
    ver: &attune_common::models::artifact_version::ArtifactVersion,
) -> ApiResult<axum::response::Response> {
    // Dots in the ref become underscores in the download filename.
    let slug = artifact_ref.replace('.', "_");

    // Binary (BYTEA) content first.
    if let Some(ref bytes) = ver.content {
        let ct = match ver.content_type {
            Some(ref ct) => ct.clone(),
            None => "application/octet-stream".to_string(),
        };
        let filename = format!("{}_v{}.{}", slug, version, extension_from_content_type(&ct));
        let headers = [
            (header::CONTENT_TYPE, ct),
            (
                header::CONTENT_DISPOSITION,
                format!("attachment; filename=\"{}\"", filename),
            ),
        ];
        return Ok((StatusCode::OK, headers, Body::from(bytes.clone())).into_response());
    }

    // JSON content: pretty-print into a downloadable .json document.
    if let Some(ref json) = ver.content_json {
        let bytes = serde_json::to_vec_pretty(json).map_err(|e| {
            ApiError::InternalServerError(format!("Failed to serialize JSON: {}", e))
        })?;
        let ct = match ver.content_type {
            Some(ref ct) => ct.clone(),
            None => "application/json".to_string(),
        };
        let filename = format!("{}_v{}.json", slug, version);
        let headers = [
            (header::CONTENT_TYPE, ct),
            (
                header::CONTENT_DISPOSITION,
                format!("attachment; filename=\"{}\"", filename),
            ),
        ];
        return Ok((StatusCode::OK, headers, Body::from(bytes)).into_response());
    }

    // Neither column populated — nothing to download.
    Err(ApiError::NotFound(format!(
        "Version {} of artifact '{}' has no downloadable content",
        version, artifact_ref,
    )))
}
/// Delete disk files for a set of file-backed artifact versions.
///
/// Best-effort: logs a warning on failure but never propagates errors, so a
/// missing or unreadable file cannot block deletion of the DB rows.
fn cleanup_version_files(
    artifacts_dir: &str,
    versions: &[attune_common::models::artifact_version::ArtifactVersion],
) {
    for ver in versions {
        // Versions without a file_path are DB-stored; nothing on disk.
        if let Some(ref file_path) = ver.file_path {
            let full_path = std::path::Path::new(artifacts_dir).join(file_path);
            // Remove directly rather than `exists()` + remove: the pre-check
            // raced with concurrent deletes (TOCTOU). NotFound means the file
            // is already gone — the desired end state — so only other errors
            // are worth logging.
            if let Err(e) = std::fs::remove_file(&full_path) {
                if e.kind() != std::io::ErrorKind::NotFound {
                    warn!(
                        "Failed to delete artifact file '{}': {}",
                        full_path.display(),
                        e,
                    );
                }
            }
        }
    }
}
/// Attempt to remove empty parent directories up to (but not including) the
/// artifacts_dir root. This is best-effort cleanup: any failure simply stops
/// the climb without reporting an error to the caller.
fn cleanup_empty_parents(dir: &std::path::Path, stop_at: &str) {
    let root = std::path::Path::new(stop_at);
    let mut candidate = dir.to_path_buf();
    // Climb only while we are strictly below the artifacts root.
    while candidate != root && candidate.starts_with(root) {
        // A directory we cannot list, or one that still has entries, ends
        // the cleanup.
        let is_empty = match std::fs::read_dir(&candidate) {
            Ok(mut entries) => entries.next().is_none(),
            Err(_) => return,
        };
        if !is_empty {
            return;
        }
        if let Err(e) = std::fs::remove_dir(&candidate) {
            warn!(
                "Failed to remove empty directory '{}': {}",
                candidate.display(),
                e,
            );
            return;
        }
        // Move one level up and repeat.
        match candidate.parent() {
            Some(parent) => candidate = parent.to_path_buf(),
            None => return,
        }
    }
}
/// Derive a simple file extension from a MIME content type
fn extension_from_content_type(ct: &str) -> &str {
match ct {
@@ -944,6 +1234,7 @@ pub fn routes() -> Router<Arc<AppState>> {
)
.route("/artifacts/{id}/versions/latest", get(get_latest_version))
.route("/artifacts/{id}/versions/upload", post(upload_version))
.route("/artifacts/{id}/versions/file", post(create_version_file))
.route(
"/artifacts/{id}/versions/{version}",
get(get_version).delete(delete_version),
@@ -975,4 +1266,61 @@ mod tests {
assert_eq!(extension_from_content_type("image/png"), "png");
assert_eq!(extension_from_content_type("unknown/type"), "bin");
}
#[test]
fn test_compute_file_path() {
    // (ref, version, content_type) -> expected relative path
    let cases = [
        ("mypack.build_log", 1, "text/plain", "mypack/build_log/v1.txt"),
        (
            "mypack.build_log",
            3,
            "application/json",
            "mypack/build_log/v3.json",
        ),
        ("core.test.results", 2, "text/csv", "core/test/results/v2.csv"),
        ("simple", 1, "application/octet-stream", "simple/v1.bin"),
    ];
    for (artifact_ref, version, content_type, expected) in cases {
        assert_eq!(compute_file_path(artifact_ref, version, content_type), expected);
    }
}
#[test]
fn test_ref_to_dir_path() {
    // Each dot-separated segment becomes one path component.
    for (input, expected) in [
        ("mypack.build_log", "mypack/build_log"),
        ("simple", "simple"),
        ("a.b.c.d", "a/b/c/d"),
    ] {
        assert_eq!(ref_to_dir_path(input), expected);
    }
}
#[test]
fn test_is_file_backed_type() {
    // All four File* variants are disk-backed.
    for t in [
        ArtifactType::FileBinary,
        ArtifactType::FileText,
        ArtifactType::FileDataTable,
        ArtifactType::FileImage,
    ] {
        assert!(is_file_backed_type(t));
    }
    // Non-file types are stored in the DB.
    for t in [ArtifactType::Progress, ArtifactType::Url] {
        assert!(!is_file_backed_type(t));
    }
}
#[test]
fn test_default_content_type_for_artifact() {
    // Each file type maps to its conventional MIME default.
    for (artifact_type, expected) in [
        (ArtifactType::FileText, "text/plain"),
        (ArtifactType::FileDataTable, "text/csv"),
        (ArtifactType::FileImage, "image/png"),
        (ArtifactType::FileBinary, "application/octet-stream"),
    ] {
        assert_eq!(default_content_type_for_artifact(artifact_type), expected);
    }
}
}

View File

@@ -170,7 +170,7 @@ pub async fn create_event(
let event = EventRepository::create(&state.db, input).await?;
// Publish EventCreated message to message queue if publisher is available
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let message_payload = EventCreatedPayload {
event_id: event.id,
trigger_id: event.trigger,

View File

@@ -99,7 +99,7 @@ pub async fn create_execution(
.with_source("api-service")
.with_correlation_id(uuid::Uuid::new_v4());
if let Some(publisher) = &state.publisher {
if let Some(publisher) = state.get_publisher().await {
publisher.publish_envelope(&message).await.map_err(|e| {
ApiError::InternalServerError(format!("Failed to publish message: {}", e))
})?;

View File

@@ -403,7 +403,7 @@ pub async fn respond_to_inquiry(
let updated_inquiry = InquiryRepository::update(&state.db, id, update_input).await?;
// Publish InquiryResponded message if publisher is available
if let Some(publisher) = &state.publisher {
if let Some(publisher) = state.get_publisher().await {
let user_id = user
.0
.identity_id()

View File

@@ -1,7 +1,7 @@
//! Pack management API routes
use axum::{
extract::{Path, Query, State},
extract::{Multipart, Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
@@ -448,6 +448,190 @@ async fn execute_and_store_pack_tests(
Some(Ok(result))
}
/// Upload and register a pack from a tar.gz archive (multipart/form-data)
///
/// The archive should be a gzipped tar containing the pack directory at its root
/// (i.e. the archive should unpack to files like `pack.yaml`, `actions/`, etc.).
/// The multipart field name must be `pack`.
///
/// Optional form fields:
/// - `force`: `"true"` to overwrite an existing pack with the same ref
/// - `skip_tests`: `"true"` to skip test execution after registration
#[utoipa::path(
    post,
    path = "/api/v1/packs/upload",
    tag = "packs",
    request_body(content = String, content_type = "multipart/form-data"),
    responses(
        (status = 201, description = "Pack uploaded and registered successfully", body = inline(ApiResponse<PackInstallResponse>)),
        (status = 400, description = "Invalid archive or missing pack.yaml"),
        (status = 409, description = "Pack already exists (use force=true to overwrite)"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn upload_pack(
    State(state): State<Arc<AppState>>,
    RequireAuth(user): RequireAuth,
    mut multipart: Multipart,
) -> ApiResult<impl IntoResponse> {
    use std::io::Cursor;

    // Hard cap on the archive size; the whole upload is buffered in memory.
    const MAX_PACK_SIZE: usize = 100 * 1024 * 1024; // 100 MB

    let mut pack_bytes: Option<Vec<u8>> = None;
    let mut force = false;
    let mut skip_tests = false;

    // Parse multipart fields
    while let Some(field) = multipart
        .next_field()
        .await
        .map_err(|e| ApiError::BadRequest(format!("Multipart error: {}", e)))?
    {
        match field.name() {
            Some("pack") => {
                // The archive bytes are fully buffered before the size check,
                // so the effective limit is also bounded by any body-size
                // limit configured on the router.
                let data = field.bytes().await.map_err(|e| {
                    ApiError::BadRequest(format!("Failed to read pack data: {}", e))
                })?;
                if data.len() > MAX_PACK_SIZE {
                    return Err(ApiError::BadRequest(format!(
                        "Pack archive too large: {} bytes (max {} bytes)",
                        data.len(),
                        MAX_PACK_SIZE
                    )));
                }
                pack_bytes = Some(data.to_vec());
            }
            Some("force") => {
                let val = field.text().await.map_err(|e| {
                    ApiError::BadRequest(format!("Failed to read force field: {}", e))
                })?;
                force = val.trim().eq_ignore_ascii_case("true");
            }
            Some("skip_tests") => {
                let val = field.text().await.map_err(|e| {
                    ApiError::BadRequest(format!("Failed to read skip_tests field: {}", e))
                })?;
                skip_tests = val.trim().eq_ignore_ascii_case("true");
            }
            _ => {
                // Consume and ignore unknown fields
                let _ = field.bytes().await;
            }
        }
    }

    let pack_data = pack_bytes.ok_or_else(|| {
        ApiError::BadRequest("Missing required 'pack' field in multipart upload".to_string())
    })?;

    // Extract the tar.gz archive into a temporary directory
    // NOTE(review): this relies on tar's `unpack` sanitizing entry paths so
    // archive entries cannot escape the temp dir — confirm the crate version
    // in use provides that protection.
    let temp_extract_dir = tempfile::tempdir().map_err(|e| {
        ApiError::InternalServerError(format!("Failed to create temp directory: {}", e))
    })?;
    {
        let cursor = Cursor::new(&pack_data[..]);
        let gz = flate2::read::GzDecoder::new(cursor);
        let mut archive = tar::Archive::new(gz);
        archive.unpack(temp_extract_dir.path()).map_err(|e| {
            ApiError::BadRequest(format!(
                "Failed to extract pack archive (must be a valid .tar.gz): {}",
                e
            ))
        })?;
    }

    // Find pack.yaml — it may be at the root or inside a single subdirectory
    // (e.g. when GitHub tarballs add a top-level directory)
    let pack_root = find_pack_root(temp_extract_dir.path()).ok_or_else(|| {
        ApiError::BadRequest(
            "Could not find pack.yaml in the uploaded archive. \
             Ensure the archive contains pack.yaml at its root or in a single top-level directory."
                .to_string(),
        )
    })?;

    // Read pack ref from pack.yaml to determine the final storage path
    let pack_yaml_path = pack_root.join("pack.yaml");
    let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)
        .map_err(|e| ApiError::InternalServerError(format!("Failed to read pack.yaml: {}", e)))?;
    let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)
        .map_err(|e| ApiError::BadRequest(format!("Failed to parse pack.yaml: {}", e)))?;
    let pack_ref = pack_yaml
        .get("ref")
        .and_then(|v| v.as_str())
        .ok_or_else(|| ApiError::BadRequest("Missing 'ref' field in pack.yaml".to_string()))?
        .to_string();

    // Move pack to permanent storage
    use attune_common::pack_registry::PackStorage;
    let storage = PackStorage::new(&state.config.packs_base_dir);
    let final_path = storage
        .install_pack(&pack_root, &pack_ref, None)
        .map_err(|e| {
            ApiError::InternalServerError(format!("Failed to move pack to storage: {}", e))
        })?;

    tracing::info!(
        "Pack '{}' uploaded and stored at {:?}",
        pack_ref,
        final_path
    );

    // Register the pack in the database
    let pack_id = register_pack_internal(
        state.clone(),
        user.claims.sub,
        final_path.to_string_lossy().to_string(),
        force,
        skip_tests,
    )
    .await
    .map_err(|e| {
        // Clean up permanent storage on failure
        // NOTE(review): with force=true a pre-existing pack at final_path has
        // already been overwritten by install_pack; removing the directory
        // here discards that earlier install too — confirm this failure mode
        // is intended.
        let _ = std::fs::remove_dir_all(&final_path);
        e
    })?;

    // Fetch the registered pack
    let pack = PackRepository::find_by_id(&state.db, pack_id)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Pack with ID {} not found", pack_id)))?;

    let response = ApiResponse::with_message(
        PackInstallResponse {
            pack: PackResponse::from(pack),
            test_result: None,
            tests_skipped: skip_tests,
        },
        "Pack uploaded and registered successfully",
    );

    Ok((StatusCode::CREATED, Json(response)))
}
/// Walk the extracted directory and find the directory that contains `pack.yaml`.
/// Returns the path of the directory containing `pack.yaml`, or `None` if not found.
fn find_pack_root(base: &std::path::Path) -> Option<PathBuf> {
    // Fast path: manifest sits directly at the extraction root.
    if base.join("pack.yaml").exists() {
        return Some(base.to_path_buf());
    }
    // Otherwise look exactly one level down, which handles archives that wrap
    // the pack in a single top-level directory (e.g. GitHub tarballs:
    // repo-main/pack.yaml). An unreadable base directory yields None.
    std::fs::read_dir(base)
        .ok()?
        .flatten()
        .map(|entry| entry.path())
        .find(|path| path.is_dir() && path.join("pack.yaml").exists())
}
/// Register a pack from local filesystem
#[utoipa::path(
post,
@@ -1051,7 +1235,7 @@ async fn register_pack_internal(
// Publish pack.registered event so workers can proactively set up
// runtime environments (virtualenvs, node_modules, etc.).
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let runtime_names = attune_common::pack_environment::collect_runtime_names_for_pack(
&state.db, pack.id, &pack_path,
)
@@ -2241,6 +2425,7 @@ pub fn routes() -> Router<Arc<AppState>> {
axum::routing::post(register_packs_batch),
)
.route("/packs/install", axum::routing::post(install_pack))
.route("/packs/upload", axum::routing::post(upload_pack))
.route("/packs/download", axum::routing::post(download_packs))
.route(
"/packs/dependencies",

View File

@@ -341,7 +341,7 @@ pub async fn create_rule(
let rule = RuleRepository::create(&state.db, rule_input).await?;
// Publish RuleCreated message to notify sensor service
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleCreatedPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -440,7 +440,7 @@ pub async fn update_rule(
// If the rule is enabled and trigger params changed, publish RuleEnabled message
// to notify sensors to restart with new parameters
if rule.enabled && trigger_params_changed {
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleEnabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -543,7 +543,7 @@ pub async fn enable_rule(
let rule = RuleRepository::update(&state.db, existing_rule.id, update_input).await?;
// Publish RuleEnabled message to notify sensor service
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleEnabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),
@@ -606,7 +606,7 @@ pub async fn disable_rule(
let rule = RuleRepository::update(&state.db, existing_rule.id, update_input).await?;
// Publish RuleDisabled message to notify sensor service
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let payload = RuleDisabledPayload {
rule_id: rule.id,
rule_ref: rule.r#ref.clone(),

View File

@@ -650,7 +650,7 @@ pub async fn receive_webhook(
"Webhook event {} created, attempting to publish EventCreated message",
event.id
);
if let Some(ref publisher) = state.publisher {
if let Some(publisher) = state.get_publisher().await {
let message_payload = EventCreatedPayload {
event_id: event.id,
trigger_id: event.trigger,