change capture

This commit is contained in:
2026-02-26 14:34:02 -06:00
parent 7ee3604eb1
commit b43495b26d
47 changed files with 5785 additions and 1525 deletions

View File

@@ -0,0 +1,358 @@
//! Analytics DTOs for API requests and responses
//!
//! These types represent the API-facing view of analytics data derived from
//! TimescaleDB continuous aggregates over entity history hypertables.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use utoipa::{IntoParams, ToSchema};
use attune_common::repositories::analytics::{
AnalyticsTimeRange, EnforcementVolumeBucket, EventVolumeBucket, ExecutionStatusBucket,
ExecutionThroughputBucket, FailureRateSummary, WorkerStatusBucket,
};
// ---------------------------------------------------------------------------
// Query parameters
// ---------------------------------------------------------------------------
/// Common query parameters for analytics endpoints.
/// Common query parameters for analytics endpoints.
///
/// Precedence (see `to_time_range`): an explicit `since` always wins and
/// `hours` is then ignored; otherwise `hours` (default 24, clamped to
/// 1..=8760) is counted back from `until`, or from now when `until` is
/// also absent.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct AnalyticsQueryParams {
    /// Start of time range (ISO 8601). Defaults to 24 hours ago.
    #[param(example = "2026-02-25T00:00:00Z")]
    pub since: Option<DateTime<Utc>>,
    /// End of time range (ISO 8601). Defaults to now.
    #[param(example = "2026-02-26T00:00:00Z")]
    pub until: Option<DateTime<Utc>>,
    /// Number of hours to look back from now (alternative to since/until).
    /// Ignored if `since` is provided.
    #[param(example = 24, minimum = 1, maximum = 8760)]
    pub hours: Option<i64>,
}
impl AnalyticsQueryParams {
    /// Resolve these query parameters into a concrete repository time range.
    ///
    /// An explicit `since` takes priority (paired with `until`, or now);
    /// otherwise the clamped `hours` window is anchored to `until`, or to
    /// the current time when no bounds were given at all.
    pub fn to_time_range(&self) -> AnalyticsTimeRange {
        // Effective look-back window; only consulted when `since` is absent.
        let window_hours = self.hours.unwrap_or(24).clamp(1, 8760);
        if let Some(since) = self.since {
            // Explicit start: end at the explicit `until`, or right now.
            AnalyticsTimeRange {
                since,
                until: self.until.unwrap_or_else(Utc::now),
            }
        } else if let Some(until) = self.until {
            // Explicit end only: look back `window_hours` from it.
            AnalyticsTimeRange {
                since: until - chrono::Duration::hours(window_hours),
                until,
            }
        } else {
            // No bounds at all: rolling window ending now.
            AnalyticsTimeRange::last_hours(window_hours)
        }
    }
}
/// Path parameter for filtering analytics by a specific entity ref.
///
/// NOTE(review): none of the analytics route handlers visible in this change
/// currently extract this type — presumably reserved for per-ref filtering;
/// confirm before removing.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct AnalyticsRefParam {
    /// Optional entity ref filter (action_ref, trigger_ref, rule_ref, or worker name)
    #[param(example = "core.http_request")]
    pub entity_ref: Option<String>,
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------
/// A single data point in an hourly time series.
///
/// The meaning of `label` depends on the series it came from: the `From`
/// impls below fill it with a status name, action ref, trigger ref, or
/// rule ref respectively.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct TimeSeriesPoint {
    /// Start of the 1-hour bucket (ISO 8601)
    #[schema(example = "2026-02-26T10:00:00Z")]
    pub bucket: DateTime<Utc>,
    /// The series label (e.g., status name, action ref). Null for aggregate totals.
    #[schema(example = "completed")]
    pub label: Option<String>,
    /// The count value for this bucket
    #[schema(example = 42)]
    pub value: i64,
}
/// Response for execution status transitions over time.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ExecutionStatusTimeSeriesResponse {
    /// Time range start (resolved from the query parameters)
    pub since: DateTime<Utc>,
    /// Time range end (resolved from the query parameters)
    pub until: DateTime<Utc>,
    /// Data points: one per (bucket, status) pair; `label` carries the new status
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for execution throughput over time.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ExecutionThroughputResponse {
    /// Time range start (resolved from the query parameters)
    pub since: DateTime<Utc>,
    /// Time range end (resolved from the query parameters)
    pub until: DateTime<Utc>,
    /// Data points: one per bucket (total executions created); `label` carries the action ref
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for event volume over time.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct EventVolumeResponse {
    /// Time range start (resolved from the query parameters)
    pub since: DateTime<Utc>,
    /// Time range end (resolved from the query parameters)
    pub until: DateTime<Utc>,
    /// Data points: one per bucket (total events created); `label` carries the trigger ref
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for worker status transitions over time.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct WorkerStatusTimeSeriesResponse {
    /// Time range start (resolved from the query parameters)
    pub since: DateTime<Utc>,
    /// Time range end (resolved from the query parameters)
    pub until: DateTime<Utc>,
    /// Data points: one per (bucket, status) pair; `label` carries the new status
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for enforcement volume over time.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct EnforcementVolumeResponse {
    /// Time range start (resolved from the query parameters)
    pub since: DateTime<Utc>,
    /// Time range end (resolved from the query parameters)
    pub until: DateTime<Utc>,
    /// Data points: one per bucket (total enforcements created); `label` carries the rule ref
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for the execution failure rate summary.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct FailureRateResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Total executions reaching a terminal state in the window
    #[schema(example = 100)]
    pub total_terminal: i64,
    /// Number of failed executions
    #[schema(example = 12)]
    pub failed_count: i64,
    /// Number of timed-out executions
    #[schema(example = 3)]
    pub timeout_count: i64,
    /// Number of completed executions
    #[schema(example = 85)]
    pub completed_count: i64,
    /// Failure rate as a percentage (0.0–100.0)
    #[schema(example = 15.0)]
    pub failure_rate_pct: f64,
}
/// Combined dashboard analytics response.
///
/// Returns all key metrics in a single response for the dashboard page,
/// avoiding multiple round-trips. Each field mirrors the payload of the
/// corresponding standalone `/analytics/*` endpoint.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct DashboardAnalyticsResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Execution throughput per hour
    pub execution_throughput: Vec<TimeSeriesPoint>,
    /// Execution status transitions per hour
    pub execution_status: Vec<TimeSeriesPoint>,
    /// Event volume per hour
    pub event_volume: Vec<TimeSeriesPoint>,
    /// Enforcement volume per hour
    pub enforcement_volume: Vec<TimeSeriesPoint>,
    /// Worker status transitions per hour
    pub worker_status: Vec<TimeSeriesPoint>,
    /// Execution failure rate summary
    pub failure_rate: FailureRateResponse,
}
// ---------------------------------------------------------------------------
// Conversion helpers
// ---------------------------------------------------------------------------
impl From<ExecutionStatusBucket> for TimeSeriesPoint {
fn from(b: ExecutionStatusBucket) -> Self {
Self {
bucket: b.bucket,
label: b.new_status,
value: b.transition_count,
}
}
}
impl From<ExecutionThroughputBucket> for TimeSeriesPoint {
fn from(b: ExecutionThroughputBucket) -> Self {
Self {
bucket: b.bucket,
label: b.action_ref,
value: b.execution_count,
}
}
}
impl From<EventVolumeBucket> for TimeSeriesPoint {
fn from(b: EventVolumeBucket) -> Self {
Self {
bucket: b.bucket,
label: b.trigger_ref,
value: b.event_count,
}
}
}
impl From<WorkerStatusBucket> for TimeSeriesPoint {
fn from(b: WorkerStatusBucket) -> Self {
Self {
bucket: b.bucket,
label: b.new_status,
value: b.transition_count,
}
}
}
impl From<EnforcementVolumeBucket> for TimeSeriesPoint {
fn from(b: EnforcementVolumeBucket) -> Self {
Self {
bucket: b.bucket,
label: b.rule_ref,
value: b.enforcement_count,
}
}
}
impl FailureRateResponse {
/// Create from the repository summary plus the query time range.
pub fn from_summary(summary: FailureRateSummary, range: &AnalyticsTimeRange) -> Self {
Self {
since: range.since,
until: range.until,
total_terminal: summary.total_terminal,
failed_count: summary.failed_count,
timeout_count: summary.timeout_count,
completed_count: summary.completed_count,
failure_rate_pct: summary.failure_rate_pct,
}
}
}
// Unit tests: time-range resolution and bucket -> TimeSeriesPoint conversions.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_query_params_defaults() {
        let params = AnalyticsQueryParams {
            since: None,
            until: None,
            hours: None,
        };
        let range = params.to_time_range();
        let diff = range.until - range.since;
        // ±1h tolerance absorbs any clock movement between Utc::now() calls.
        assert!((diff.num_hours() - 24).abs() <= 1);
    }
    #[test]
    fn test_query_params_custom_hours() {
        let params = AnalyticsQueryParams {
            since: None,
            until: None,
            hours: Some(6),
        };
        let range = params.to_time_range();
        let diff = range.until - range.since;
        assert!((diff.num_hours() - 6).abs() <= 1);
    }
    #[test]
    fn test_query_params_hours_clamped() {
        let params = AnalyticsQueryParams {
            since: None,
            until: None,
            hours: Some(99999),
        };
        let range = params.to_time_range();
        let diff = range.until - range.since;
        // Clamped to 8760 hours (1 year)
        assert!((diff.num_hours() - 8760).abs() <= 1);
    }
    #[test]
    fn test_query_params_explicit_range() {
        let since = Utc::now() - chrono::Duration::hours(48);
        let until = Utc::now();
        let params = AnalyticsQueryParams {
            since: Some(since),
            until: Some(until),
            hours: Some(6), // ignored when since is provided
        };
        let range = params.to_time_range();
        assert_eq!(range.since, since);
        assert_eq!(range.until, until);
    }
    #[test]
    fn test_failure_rate_response_from_summary() {
        let summary = FailureRateSummary {
            total_terminal: 100,
            failed_count: 12,
            timeout_count: 3,
            completed_count: 85,
            failure_rate_pct: 15.0,
        };
        let range = AnalyticsTimeRange::last_hours(24);
        let response = FailureRateResponse::from_summary(summary, &range);
        assert_eq!(response.total_terminal, 100);
        assert_eq!(response.failed_count, 12);
        assert_eq!(response.failure_rate_pct, 15.0);
    }
    #[test]
    fn test_time_series_point_from_execution_status_bucket() {
        let bucket = ExecutionStatusBucket {
            bucket: Utc::now(),
            action_ref: Some("core.http".into()),
            new_status: Some("completed".into()),
            transition_count: 10,
        };
        let point: TimeSeriesPoint = bucket.into();
        // The status (not the action_ref) becomes the point label.
        assert_eq!(point.label.as_deref(), Some("completed"));
        assert_eq!(point.value, 10);
    }
    #[test]
    fn test_time_series_point_from_event_volume_bucket() {
        let bucket = EventVolumeBucket {
            bucket: Utc::now(),
            trigger_ref: Some("core.timer".into()),
            event_count: 25,
        };
        let point: TimeSeriesPoint = bucket.into();
        assert_eq!(point.label.as_deref(), Some("core.timer"));
        assert_eq!(point.value, 25);
    }
}

View File

@@ -0,0 +1,211 @@
//! History DTOs for API requests and responses
//!
//! These types represent the API-facing view of entity history records
//! stored in TimescaleDB hypertables.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use utoipa::{IntoParams, ToSchema};
use attune_common::models::entity_history::HistoryEntityType;
/// Response DTO for a single entity history record.
///
/// Mirrors `EntityHistoryRecord` field-for-field (see the `From` impl below);
/// no values are transformed on the way out.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct HistoryRecordResponse {
    /// When the change occurred
    #[schema(example = "2026-02-26T10:30:00Z")]
    pub time: DateTime<Utc>,
    /// The operation: `INSERT`, `UPDATE`, or `DELETE`
    #[schema(example = "UPDATE")]
    pub operation: String,
    /// The primary key of the changed entity
    #[schema(example = 42)]
    pub entity_id: i64,
    /// Denormalized human-readable identifier (e.g., action_ref, worker name)
    #[schema(example = "core.http_request")]
    pub entity_ref: Option<String>,
    /// Names of fields that changed (empty for INSERT/DELETE)
    #[schema(example = json!(["status", "result"]))]
    pub changed_fields: Vec<String>,
    /// Previous values of changed fields (null for INSERT)
    #[schema(value_type = Object, example = json!({"status": "requested"}))]
    pub old_values: Option<JsonValue>,
    /// New values of changed fields (null for DELETE)
    #[schema(value_type = Object, example = json!({"status": "running"}))]
    pub new_values: Option<JsonValue>,
}
impl From<attune_common::models::entity_history::EntityHistoryRecord> for HistoryRecordResponse {
fn from(record: attune_common::models::entity_history::EntityHistoryRecord) -> Self {
Self {
time: record.time,
operation: record.operation,
entity_id: record.entity_id,
entity_ref: record.entity_ref,
changed_fields: record.changed_fields,
old_values: record.old_values,
new_values: record.new_values,
}
}
}
/// Query parameters for filtering history records.
///
/// Pagination is 1-based; `to_repo_params` caps `page_size` at 1000 on the
/// server regardless of what the client sends.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct HistoryQueryParams {
    /// Filter by entity ID
    #[param(example = 42)]
    pub entity_id: Option<i64>,
    /// Filter by entity ref (e.g., action_ref, worker name)
    #[param(example = "core.http_request")]
    pub entity_ref: Option<String>,
    /// Filter by operation type: `INSERT`, `UPDATE`, or `DELETE`
    #[param(example = "UPDATE")]
    pub operation: Option<String>,
    /// Only include records where this field was changed
    #[param(example = "status")]
    pub changed_field: Option<String>,
    /// Only include records at or after this time (ISO 8601)
    #[param(example = "2026-02-01T00:00:00Z")]
    pub since: Option<DateTime<Utc>>,
    /// Only include records at or before this time (ISO 8601)
    #[param(example = "2026-02-28T23:59:59Z")]
    pub until: Option<DateTime<Utc>>,
    /// Page number (1-based)
    #[serde(default = "default_page")]
    #[param(example = 1, minimum = 1)]
    pub page: u32,
    /// Number of items per page
    #[serde(default = "default_page_size")]
    #[param(example = 50, minimum = 1, maximum = 1000)]
    pub page_size: u32,
}
/// Serde default for `page`: start on the first page.
fn default_page() -> u32 {
    1
}
/// Serde default for `page_size`: 50 records per page.
fn default_page_size() -> u32 {
    50
}
impl HistoryQueryParams {
    /// Convert to the repository-level query params.
    ///
    /// Pagination is translated to limit/offset: `page_size` is clamped to
    /// 1..=1000 and `page` is 1-based (page 1 => offset 0). Filter fields
    /// are passed through unchanged.
    pub fn to_repo_params(
        &self,
    ) -> attune_common::repositories::entity_history::HistoryQueryParams {
        // `clamp` replaces the previous `min(1000).max(1)` chain — identical
        // result, but the bounds read in order and it is clippy-clean.
        // `i64::from(u32)` is a lossless widening, unlike an `as` cast.
        let limit = i64::from(self.page_size.clamp(1, 1000));
        // 1-based page -> 0-based offset; saturating_sub guards page == 0.
        let offset = i64::from(self.page.saturating_sub(1)) * limit;
        attune_common::repositories::entity_history::HistoryQueryParams {
            entity_id: self.entity_id,
            entity_ref: self.entity_ref.clone(),
            operation: self.operation.clone(),
            changed_field: self.changed_field.clone(),
            since: self.since,
            until: self.until,
            limit: Some(limit),
            offset: Some(offset),
        }
    }
}
/// Path parameter for the entity type segment.
///
/// Validated via [`HistoryEntityTypePath::parse`] rather than deserializing
/// directly into the enum, so handlers can map bad input to a 400.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct HistoryEntityTypePath {
    /// Entity type: `execution`, `worker`, `enforcement`, or `event`
    pub entity_type: String,
}
impl HistoryEntityTypePath {
    /// Parse the entity type string, returning a typed enum or an error message.
    pub fn parse(&self) -> Result<HistoryEntityType, String> {
        // The target type is pinned by the signature, so no turbofish needed.
        self.entity_type.parse()
    }
}
/// Path parameters for entity-specific history (e.g., `/executions/42/history`).
///
/// NOTE(review): the history route handlers in this change extract
/// `Path<i64>` directly instead of this struct — confirm whether this type
/// is used elsewhere (e.g., OpenAPI docs) before relying on it.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct EntityIdPath {
    /// The entity's primary key
    pub id: i64,
}
// Unit tests: serde defaults, pagination translation, and entity-type parsing.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_query_params_defaults() {
        // An empty JSON object exercises the serde `default = …` attributes.
        let json = r#"{}"#;
        let params: HistoryQueryParams = serde_json::from_str(json).unwrap();
        assert_eq!(params.page, 1);
        assert_eq!(params.page_size, 50);
        assert!(params.entity_id.is_none());
        assert!(params.operation.is_none());
    }
    #[test]
    fn test_query_params_to_repo_params() {
        let params = HistoryQueryParams {
            entity_id: Some(42),
            entity_ref: None,
            operation: Some("UPDATE".to_string()),
            changed_field: Some("status".to_string()),
            since: None,
            until: None,
            page: 3,
            page_size: 20,
        };
        let repo = params.to_repo_params();
        assert_eq!(repo.entity_id, Some(42));
        assert_eq!(repo.operation, Some("UPDATE".to_string()));
        assert_eq!(repo.changed_field, Some("status".to_string()));
        assert_eq!(repo.limit, Some(20));
        assert_eq!(repo.offset, Some(40)); // (3-1) * 20
    }
    #[test]
    fn test_query_params_page_size_cap() {
        // Oversized page_size must be capped server-side at 1000.
        let params = HistoryQueryParams {
            entity_id: None,
            entity_ref: None,
            operation: None,
            changed_field: None,
            since: None,
            until: None,
            page: 1,
            page_size: 5000,
        };
        let repo = params.to_repo_params();
        assert_eq!(repo.limit, Some(1000));
    }
    #[test]
    fn test_entity_type_path_parse() {
        let path = HistoryEntityTypePath {
            entity_type: "execution".to_string(),
        };
        assert_eq!(path.parse().unwrap(), HistoryEntityType::Execution);
        let path = HistoryEntityTypePath {
            entity_type: "unknown".to_string(),
        };
        assert!(path.parse().is_err());
    }
}

View File

@@ -1,10 +1,12 @@
//! Data Transfer Objects (DTOs) for API requests and responses
pub mod action;
pub mod analytics;
pub mod auth;
pub mod common;
pub mod event;
pub mod execution;
pub mod history;
pub mod inquiry;
pub mod key;
pub mod pack;
@@ -14,6 +16,11 @@ pub mod webhook;
pub mod workflow;
pub use action::{ActionResponse, ActionSummary, CreateActionRequest, UpdateActionRequest};
pub use analytics::{
AnalyticsQueryParams, DashboardAnalyticsResponse, EventVolumeResponse,
ExecutionStatusTimeSeriesResponse, ExecutionThroughputResponse, FailureRateResponse,
TimeSeriesPoint,
};
pub use auth::{
ChangePasswordRequest, CurrentUserResponse, LoginRequest, RefreshTokenRequest, RegisterRequest,
TokenResponse,
@@ -25,7 +32,10 @@ pub use event::{
EnforcementQueryParams, EnforcementResponse, EnforcementSummary, EventQueryParams,
EventResponse, EventSummary,
};
pub use execution::{CreateExecutionRequest, ExecutionQueryParams, ExecutionResponse, ExecutionSummary};
pub use execution::{
CreateExecutionRequest, ExecutionQueryParams, ExecutionResponse, ExecutionSummary,
};
pub use history::{HistoryEntityTypePath, HistoryQueryParams, HistoryRecordResponse};
pub use inquiry::{
CreateInquiryRequest, InquiryQueryParams, InquiryRespondRequest, InquiryResponse,
InquirySummary, UpdateInquiryRequest,

View File

@@ -0,0 +1,304 @@
//! Analytics API routes
//!
//! Provides read-only access to TimescaleDB continuous aggregates for dashboard
//! widgets and time-series analytics. All data is pre-computed by TimescaleDB
//! continuous aggregate policies — these endpoints simply query the materialized views.
use axum::{
extract::{Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
Json, Router,
};
use std::sync::Arc;
use attune_common::repositories::analytics::AnalyticsRepository;
use crate::{
auth::middleware::RequireAuth,
dto::{
analytics::{
AnalyticsQueryParams, DashboardAnalyticsResponse, EnforcementVolumeResponse,
EventVolumeResponse, ExecutionStatusTimeSeriesResponse, ExecutionThroughputResponse,
FailureRateResponse, TimeSeriesPoint, WorkerStatusTimeSeriesResponse,
},
common::ApiResponse,
},
middleware::ApiResult,
state::AppState,
};
/// Get a combined dashboard analytics payload.
///
/// Bundles every dashboard metric into one response so the dashboard page
/// needs a single request: throughput, status transitions, event volume,
/// enforcement volume, worker status, and the failure-rate summary.
#[utoipa::path(
    get,
    path = "/api/v1/analytics/dashboard",
    tag = "analytics",
    params(AnalyticsQueryParams),
    responses(
        (status = 200, description = "Dashboard analytics", body = inline(ApiResponse<DashboardAnalyticsResponse>)),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_dashboard_analytics(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
    let time_range = query.to_time_range();
    // Fan out all six aggregate queries at once; fail fast on the first error.
    let (throughput_rows, status_rows, event_rows, enforcement_rows, worker_rows, failure_summary) =
        tokio::try_join!(
            AnalyticsRepository::execution_throughput_hourly(&state.db, &time_range),
            AnalyticsRepository::execution_status_hourly(&state.db, &time_range),
            AnalyticsRepository::event_volume_hourly(&state.db, &time_range),
            AnalyticsRepository::enforcement_volume_hourly(&state.db, &time_range),
            AnalyticsRepository::worker_status_hourly(&state.db, &time_range),
            AnalyticsRepository::execution_failure_rate(&state.db, &time_range),
        )?;
    let payload = DashboardAnalyticsResponse {
        since: time_range.since,
        until: time_range.until,
        execution_throughput: throughput_rows.into_iter().map(Into::into).collect(),
        execution_status: status_rows.into_iter().map(Into::into).collect(),
        event_volume: event_rows.into_iter().map(Into::into).collect(),
        enforcement_volume: enforcement_rows.into_iter().map(Into::into).collect(),
        worker_status: worker_rows.into_iter().map(Into::into).collect(),
        failure_rate: FailureRateResponse::from_summary(failure_summary, &time_range),
    };
    Ok((StatusCode::OK, Json(ApiResponse::new(payload))))
}
/// Get execution status transitions over time.
///
/// Returns hourly buckets of execution status transitions (e.g., how many
/// executions moved to "completed", "failed", "running" per hour).
#[utoipa::path(
get,
path = "/api/v1/analytics/executions/status",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Execution status transitions", body = inline(ApiResponse<ExecutionStatusTimeSeriesResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_execution_status_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::execution_status_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = ExecutionStatusTimeSeriesResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get execution throughput over time.
///
/// Returns hourly buckets of execution creation counts.
#[utoipa::path(
get,
path = "/api/v1/analytics/executions/throughput",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Execution throughput", body = inline(ApiResponse<ExecutionThroughputResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_execution_throughput_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::execution_throughput_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = ExecutionThroughputResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get the execution failure rate summary.
///
/// Produces aggregate failed/timed-out/completed counts plus the failure
/// percentage for the requested window — a single summary, not a series.
#[utoipa::path(
    get,
    path = "/api/v1/analytics/executions/failure-rate",
    tag = "analytics",
    params(AnalyticsQueryParams),
    responses(
        (status = 200, description = "Failure rate summary", body = inline(ApiResponse<FailureRateResponse>)),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_failure_rate_analytics(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
    let range = query.to_time_range();
    let body = FailureRateResponse::from_summary(
        AnalyticsRepository::execution_failure_rate(&state.db, &range).await?,
        &range,
    );
    Ok((StatusCode::OK, Json(ApiResponse::new(body))))
}
/// Get event volume over time.
///
/// Returns hourly buckets of event creation counts, aggregated across all triggers.
#[utoipa::path(
get,
path = "/api/v1/analytics/events/volume",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Event volume", body = inline(ApiResponse<EventVolumeResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_event_volume_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::event_volume_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = EventVolumeResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get worker status transitions over time.
///
/// Returns hourly buckets of worker status changes (online/offline/draining).
#[utoipa::path(
get,
path = "/api/v1/analytics/workers/status",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Worker status transitions", body = inline(ApiResponse<WorkerStatusTimeSeriesResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_worker_status_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::worker_status_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = WorkerStatusTimeSeriesResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get enforcement volume over time.
///
/// Returns hourly buckets of enforcement creation counts, aggregated across all rules.
#[utoipa::path(
get,
path = "/api/v1/analytics/enforcements/volume",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Enforcement volume", body = inline(ApiResponse<EnforcementVolumeResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_enforcement_volume_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::enforcement_volume_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = EnforcementVolumeResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
// ---------------------------------------------------------------------------
// Router
// ---------------------------------------------------------------------------
/// Build the analytics routes.
///
/// Mounts:
/// - `GET /analytics/dashboard` — combined dashboard payload
/// - `GET /analytics/executions/status` — execution status transitions
/// - `GET /analytics/executions/throughput` — execution creation throughput
/// - `GET /analytics/executions/failure-rate` — failure rate summary
/// - `GET /analytics/events/volume` — event creation volume
/// - `GET /analytics/workers/status` — worker status transitions
/// - `GET /analytics/enforcements/volume` — enforcement creation volume
pub fn routes() -> Router<Arc<AppState>> {
    // All analytics endpoints are plain GETs; registration order is not
    // significant since every path is distinct.
    Router::new()
        .route("/analytics/dashboard", get(get_dashboard_analytics))
        .route("/analytics/executions/status", get(get_execution_status_analytics))
        .route("/analytics/executions/throughput", get(get_execution_throughput_analytics))
        .route("/analytics/executions/failure-rate", get(get_failure_rate_analytics))
        .route("/analytics/events/volume", get(get_event_volume_analytics))
        .route("/analytics/workers/status", get(get_worker_status_analytics))
        .route("/analytics/enforcements/volume", get(get_enforcement_volume_analytics))
}

View File

@@ -0,0 +1,245 @@
//! Entity history API routes
//!
//! Provides read-only access to the TimescaleDB entity history hypertables.
//! History records are written by PostgreSQL triggers — these endpoints only query them.
use axum::{
extract::{Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
Json, Router,
};
use std::sync::Arc;
use attune_common::models::entity_history::HistoryEntityType;
use attune_common::repositories::entity_history::EntityHistoryRepository;
use crate::{
auth::middleware::RequireAuth,
dto::{
common::{PaginatedResponse, PaginationMeta, PaginationParams},
history::{HistoryQueryParams, HistoryRecordResponse},
},
middleware::{ApiError, ApiResult},
state::AppState,
};
/// List history records for a given entity type.
///
/// Supported entity types: `execution`, `worker`, `enforcement`, `event`.
/// Returns a paginated list of change records ordered by time descending.
#[utoipa::path(
get,
path = "/api/v1/history/{entity_type}",
tag = "history",
params(
("entity_type" = String, Path, description = "Entity type: execution, worker, enforcement, or event"),
HistoryQueryParams,
),
responses(
(status = 200, description = "Paginated list of history records", body = PaginatedResponse<HistoryRecordResponse>),
(status = 400, description = "Invalid entity type"),
),
security(("bearer_auth" = []))
)]
pub async fn list_entity_history(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Path(entity_type_str): Path<String>,
Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
let entity_type = parse_entity_type(&entity_type_str)?;
let repo_params = query.to_repo_params();
let (records, total) = tokio::try_join!(
EntityHistoryRepository::query(&state.db, entity_type, &repo_params),
EntityHistoryRepository::count(&state.db, entity_type, &repo_params),
)?;
let data: Vec<HistoryRecordResponse> = records.into_iter().map(Into::into).collect();
let pagination_params = PaginationParams {
page: query.page,
page_size: query.page_size,
};
let response = PaginatedResponse {
data,
pagination: PaginationMeta::new(
pagination_params.page,
pagination_params.page_size,
total as u64,
),
};
Ok((StatusCode::OK, Json(response)))
}
/// Get history for a specific execution by ID.
///
/// Thin wrapper over the shared per-entity history helper, fixing the
/// entity type to `Execution` and taking the ID from the URL path.
#[utoipa::path(
    get,
    path = "/api/v1/executions/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Execution ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the execution", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_execution_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    get_entity_history_by_id(state.as_ref(), HistoryEntityType::Execution, id, query).await
}
/// Get history for a specific worker by ID.
///
/// Thin wrapper over the shared per-entity history helper, fixing the
/// entity type to `Worker` and taking the ID from the URL path.
#[utoipa::path(
    get,
    path = "/api/v1/workers/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Worker ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the worker", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_worker_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    get_entity_history_by_id(state.as_ref(), HistoryEntityType::Worker, id, query).await
}
/// Get history for a specific enforcement by ID.
///
/// Thin wrapper over the shared per-entity history helper, fixing the
/// entity type to `Enforcement` and taking the ID from the URL path.
#[utoipa::path(
    get,
    path = "/api/v1/enforcements/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Enforcement ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the enforcement", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_enforcement_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    get_entity_history_by_id(state.as_ref(), HistoryEntityType::Enforcement, id, query).await
}
/// Get history for a specific event by ID.
///
/// Thin wrapper over the shared per-entity history helper, fixing the
/// entity type to `Event` and taking the ID from the URL path.
#[utoipa::path(
    get,
    path = "/api/v1/events/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Event ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the event", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_event_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    get_entity_history_by_id(state.as_ref(), HistoryEntityType::Event, id, query).await
}
// ---------------------------------------------------------------------------
// Shared helpers
// ---------------------------------------------------------------------------
/// Parse and validate the entity type path parameter.
///
/// The `FromStr` error message is carried into `ApiError::BadRequest` so the
/// client sees which value was rejected.
fn parse_entity_type(s: &str) -> Result<HistoryEntityType, ApiError> {
    match s.parse::<HistoryEntityType>() {
        Ok(entity_type) => Ok(entity_type),
        Err(message) => Err(ApiError::BadRequest(message)),
    }
}
/// Shared implementation for `GET /<entities>/:id/history` endpoints.
async fn get_entity_history_by_id(
state: &AppState,
entity_type: HistoryEntityType,
entity_id: i64,
query: HistoryQueryParams,
) -> ApiResult<impl IntoResponse> {
// Override entity_id from the path — ignore any entity_id in query params
let mut repo_params = query.to_repo_params();
repo_params.entity_id = Some(entity_id);
let (records, total) = tokio::try_join!(
EntityHistoryRepository::query(&state.db, entity_type, &repo_params),
EntityHistoryRepository::count(&state.db, entity_type, &repo_params),
)?;
let data: Vec<HistoryRecordResponse> = records.into_iter().map(Into::into).collect();
let pagination_params = PaginationParams {
page: query.page,
page_size: query.page_size,
};
let response = PaginatedResponse {
data,
pagination: PaginationMeta::new(
pagination_params.page,
pagination_params.page_size,
total as u64,
),
};
Ok((StatusCode::OK, Json(response)))
}
// ---------------------------------------------------------------------------
// Router
// ---------------------------------------------------------------------------
/// Build the history routes.
///
/// Mounts:
/// - `GET /history/:entity_type` — generic history query
/// - `GET /executions/:id/history` — execution-specific history
/// - `GET /workers/:id/history` — worker-specific history (note: currently no /workers base route exists)
/// - `GET /enforcements/:id/history` — enforcement-specific history
/// - `GET /events/:id/history` — event-specific history
pub fn routes() -> Router<Arc<AppState>> {
    // The generic endpoint covers every entity type; the per-entity routes
    // below are convenience aliases over the same repository queries.
    let generic = Router::new().route("/history/{entity_type}", get(list_entity_history));
    generic
        .route("/executions/{id}/history", get(get_execution_history))
        .route("/workers/{id}/history", get(get_worker_history))
        .route("/enforcements/{id}/history", get(get_enforcement_history))
        .route("/events/{id}/history", get(get_event_history))
}

View File

@@ -1,10 +1,12 @@
//! API route modules
pub mod actions;
pub mod analytics;
pub mod auth;
pub mod events;
pub mod executions;
pub mod health;
pub mod history;
pub mod inquiries;
pub mod keys;
pub mod packs;
@@ -14,10 +16,12 @@ pub mod webhooks;
pub mod workflows;
pub use actions::routes as action_routes;
pub use analytics::routes as analytics_routes;
pub use auth::routes as auth_routes;
pub use events::routes as event_routes;
pub use executions::routes as execution_routes;
pub use health::routes as health_routes;
pub use history::routes as history_routes;
pub use inquiries::routes as inquiry_routes;
pub use keys::routes as key_routes;
pub use packs::routes as pack_routes;

View File

@@ -55,6 +55,8 @@ impl Server {
.merge(routes::key_routes())
.merge(routes::workflow_routes())
.merge(routes::webhook_routes())
.merge(routes::history_routes())
.merge(routes::analytics_routes())
// TODO: Add more route modules here
// etc.
.with_state(self.state.clone());