change capture

This commit is contained in:
2026-02-26 14:34:02 -06:00
parent 7ee3604eb1
commit b43495b26d
47 changed files with 5785 additions and 1525 deletions

View File

@@ -0,0 +1,358 @@
//! Analytics DTOs for API requests and responses
//!
//! These types represent the API-facing view of analytics data derived from
//! TimescaleDB continuous aggregates over entity history hypertables.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use utoipa::{IntoParams, ToSchema};
use attune_common::repositories::analytics::{
AnalyticsTimeRange, EnforcementVolumeBucket, EventVolumeBucket, ExecutionStatusBucket,
ExecutionThroughputBucket, FailureRateSummary, WorkerStatusBucket,
};
// ---------------------------------------------------------------------------
// Query parameters
// ---------------------------------------------------------------------------
/// Common query parameters for analytics endpoints.
///
/// Range resolution (see [`AnalyticsQueryParams::to_time_range`]): explicit
/// `since`/`until` take precedence; otherwise `hours` sets a lookback window.
/// With no parameters at all, the range defaults to the last 24 hours.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct AnalyticsQueryParams {
    /// Start of time range (ISO 8601). Defaults to 24 hours ago.
    #[param(example = "2026-02-25T00:00:00Z")]
    pub since: Option<DateTime<Utc>>,
    /// End of time range (ISO 8601). Defaults to now.
    #[param(example = "2026-02-26T00:00:00Z")]
    pub until: Option<DateTime<Utc>>,
    /// Number of hours to look back from now (alternative to since/until).
    /// Ignored if `since` is provided. Clamped to 1..=8760 (one year) when used.
    #[param(example = 24, minimum = 1, maximum = 8760)]
    pub hours: Option<i64>,
}
impl AnalyticsQueryParams {
    /// Convert to the repository-level time range.
    ///
    /// Explicit `since`/`until` win; when one or both are absent, `hours`
    /// (defaulting to 24 and clamped to 1..=8760) supplies the lookback,
    /// anchored at `until` if given, otherwise at "now".
    pub fn to_time_range(&self) -> AnalyticsTimeRange {
        // Resolve the lookback once; it is only consulted by the branches
        // that lack an explicit `since`.
        let hours = self.hours.unwrap_or(24).clamp(1, 8760);
        let lookback = chrono::Duration::hours(hours);
        // `DateTime<Utc>` is `Copy`, so matching by value is free.
        match (self.since, self.until) {
            (Some(since), Some(until)) => AnalyticsTimeRange { since, until },
            (Some(since), None) => AnalyticsTimeRange {
                since,
                until: Utc::now(),
            },
            (None, Some(until)) => AnalyticsTimeRange {
                since: until - lookback,
                until,
            },
            (None, None) => AnalyticsTimeRange::last_hours(hours),
        }
    }
}
/// Path parameter for filtering analytics by a specific entity ref.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct AnalyticsRefParam {
    /// Optional entity ref filter (action_ref, trigger_ref, rule_ref, or worker name)
    // NOTE(review): no handler in this chunk consumes this param yet — confirm it
    // is wired up where per-entity filtering is supported.
    #[param(example = "core.http_request")]
    pub entity_ref: Option<String>,
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------
/// A single data point in an hourly time series.
///
/// Every repository bucket type converts into this shape via the `From`
/// impls in this module, so all endpoints share one series representation.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct TimeSeriesPoint {
    /// Start of the 1-hour bucket (ISO 8601)
    #[schema(example = "2026-02-26T10:00:00Z")]
    pub bucket: DateTime<Utc>,
    /// The series label (e.g., status name, action ref). Null for aggregate totals.
    #[schema(example = "completed")]
    pub label: Option<String>,
    /// The count value for this bucket
    #[schema(example = 42)]
    pub value: i64,
}
/// Response for execution status transitions over time.
///
/// `since`/`until` echo the resolved query range used for the data.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ExecutionStatusTimeSeriesResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Data points: one per (bucket, status) pair
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for execution throughput over time.
///
/// `since`/`until` echo the resolved query range used for the data.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct ExecutionThroughputResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Data points: one per bucket (total executions created)
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for event volume over time.
///
/// `since`/`until` echo the resolved query range used for the data.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct EventVolumeResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Data points: one per bucket (total events created)
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for worker status transitions over time.
///
/// `since`/`until` echo the resolved query range used for the data.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct WorkerStatusTimeSeriesResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Data points: one per (bucket, status) pair
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for enforcement volume over time.
///
/// `since`/`until` echo the resolved query range used for the data.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct EnforcementVolumeResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Data points: one per bucket (total enforcements created)
    pub data: Vec<TimeSeriesPoint>,
}
/// Response for the execution failure rate summary.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct FailureRateResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Total executions reaching a terminal state in the window
    // presumably failed + timeout + completed, as in the module tests —
    // confirm against the repository query
    #[schema(example = 100)]
    pub total_terminal: i64,
    /// Number of failed executions
    #[schema(example = 12)]
    pub failed_count: i64,
    /// Number of timed-out executions
    #[schema(example = 3)]
    pub timeout_count: i64,
    /// Number of completed executions
    #[schema(example = 85)]
    pub completed_count: i64,
    /// Failure rate as a percentage (0.0 to 100.0)
    #[schema(example = 15.0)]
    pub failure_rate_pct: f64,
}
/// Combined dashboard analytics response.
///
/// Returns all key metrics in a single response for the dashboard page,
/// avoiding multiple round-trips.
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct DashboardAnalyticsResponse {
    /// Time range start
    pub since: DateTime<Utc>,
    /// Time range end
    pub until: DateTime<Utc>,
    /// Execution throughput per hour
    pub execution_throughput: Vec<TimeSeriesPoint>,
    /// Execution status transitions per hour
    pub execution_status: Vec<TimeSeriesPoint>,
    /// Event volume per hour
    pub event_volume: Vec<TimeSeriesPoint>,
    /// Enforcement volume per hour
    pub enforcement_volume: Vec<TimeSeriesPoint>,
    /// Worker status transitions per hour
    pub worker_status: Vec<TimeSeriesPoint>,
    /// Execution failure rate summary
    pub failure_rate: FailureRateResponse,
}
// ---------------------------------------------------------------------------
// Conversion helpers
// ---------------------------------------------------------------------------
impl From<ExecutionStatusBucket> for TimeSeriesPoint {
fn from(b: ExecutionStatusBucket) -> Self {
Self {
bucket: b.bucket,
label: b.new_status,
value: b.transition_count,
}
}
}
impl From<ExecutionThroughputBucket> for TimeSeriesPoint {
fn from(b: ExecutionThroughputBucket) -> Self {
Self {
bucket: b.bucket,
label: b.action_ref,
value: b.execution_count,
}
}
}
impl From<EventVolumeBucket> for TimeSeriesPoint {
fn from(b: EventVolumeBucket) -> Self {
Self {
bucket: b.bucket,
label: b.trigger_ref,
value: b.event_count,
}
}
}
impl From<WorkerStatusBucket> for TimeSeriesPoint {
fn from(b: WorkerStatusBucket) -> Self {
Self {
bucket: b.bucket,
label: b.new_status,
value: b.transition_count,
}
}
}
impl From<EnforcementVolumeBucket> for TimeSeriesPoint {
fn from(b: EnforcementVolumeBucket) -> Self {
Self {
bucket: b.bucket,
label: b.rule_ref,
value: b.enforcement_count,
}
}
}
impl FailureRateResponse {
/// Create from the repository summary plus the query time range.
pub fn from_summary(summary: FailureRateSummary, range: &AnalyticsTimeRange) -> Self {
Self {
since: range.since,
until: range.until,
total_terminal: summary.total_terminal,
failed_count: summary.failed_count,
timeout_count: summary.timeout_count,
completed_count: summary.completed_count,
failure_rate_pct: summary.failure_rate_pct,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// With no parameters, the range defaults to the last 24 hours.
    #[test]
    fn test_query_params_defaults() {
        let params = AnalyticsQueryParams {
            since: None,
            until: None,
            hours: None,
        };
        let range = params.to_time_range();
        let diff = range.until - range.since;
        // 1h slack: `Utc::now()` is sampled inside the call.
        assert!((diff.num_hours() - 24).abs() <= 1);
    }

    /// `hours` alone widens/narrows the lookback window.
    #[test]
    fn test_query_params_custom_hours() {
        let params = AnalyticsQueryParams {
            since: None,
            until: None,
            hours: Some(6),
        };
        let range = params.to_time_range();
        let diff = range.until - range.since;
        assert!((diff.num_hours() - 6).abs() <= 1);
    }

    /// Out-of-range `hours` values are clamped rather than rejected.
    #[test]
    fn test_query_params_hours_clamped() {
        let params = AnalyticsQueryParams {
            since: None,
            until: None,
            hours: Some(99999),
        };
        let range = params.to_time_range();
        let diff = range.until - range.since;
        // Clamped to 8760 hours (1 year)
        assert!((diff.num_hours() - 8760).abs() <= 1);
    }

    /// Explicit since/until is used verbatim; `hours` is ignored.
    #[test]
    fn test_query_params_explicit_range() {
        let since = Utc::now() - chrono::Duration::hours(48);
        let until = Utc::now();
        let params = AnalyticsQueryParams {
            since: Some(since),
            until: Some(until),
            hours: Some(6), // ignored when since is provided
        };
        let range = params.to_time_range();
        assert_eq!(range.since, since);
        assert_eq!(range.until, until);
    }

    /// The summary's counts pass through to the response unchanged.
    #[test]
    fn test_failure_rate_response_from_summary() {
        let summary = FailureRateSummary {
            total_terminal: 100,
            failed_count: 12,
            timeout_count: 3,
            completed_count: 85,
            failure_rate_pct: 15.0,
        };
        let range = AnalyticsTimeRange::last_hours(24);
        let response = FailureRateResponse::from_summary(summary, &range);
        assert_eq!(response.total_terminal, 100);
        assert_eq!(response.failed_count, 12);
        assert_eq!(response.failure_rate_pct, 15.0);
    }

    /// Status buckets label points with the new status, not the action ref.
    #[test]
    fn test_time_series_point_from_execution_status_bucket() {
        let bucket = ExecutionStatusBucket {
            bucket: Utc::now(),
            action_ref: Some("core.http".into()),
            new_status: Some("completed".into()),
            transition_count: 10,
        };
        let point: TimeSeriesPoint = bucket.into();
        assert_eq!(point.label.as_deref(), Some("completed"));
        assert_eq!(point.value, 10);
    }

    /// Event buckets label points with the trigger ref.
    #[test]
    fn test_time_series_point_from_event_volume_bucket() {
        let bucket = EventVolumeBucket {
            bucket: Utc::now(),
            trigger_ref: Some("core.timer".into()),
            event_count: 25,
        };
        let point: TimeSeriesPoint = bucket.into();
        assert_eq!(point.label.as_deref(), Some("core.timer"));
        assert_eq!(point.value, 25);
    }
}

View File

@@ -0,0 +1,211 @@
//! History DTOs for API requests and responses
//!
//! These types represent the API-facing view of entity history records
//! stored in TimescaleDB hypertables.
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use utoipa::{IntoParams, ToSchema};
use attune_common::models::entity_history::HistoryEntityType;
/// Response DTO for a single entity history record.
///
/// Mirrors `EntityHistoryRecord` from the storage layer field-for-field
/// (see the `From` impl below).
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct HistoryRecordResponse {
    /// When the change occurred
    #[schema(example = "2026-02-26T10:30:00Z")]
    pub time: DateTime<Utc>,
    /// The operation: `INSERT`, `UPDATE`, or `DELETE`
    #[schema(example = "UPDATE")]
    pub operation: String,
    /// The primary key of the changed entity
    #[schema(example = 42)]
    pub entity_id: i64,
    /// Denormalized human-readable identifier (e.g., action_ref, worker name)
    #[schema(example = "core.http_request")]
    pub entity_ref: Option<String>,
    /// Names of fields that changed (empty for INSERT/DELETE)
    #[schema(example = json!(["status", "result"]))]
    pub changed_fields: Vec<String>,
    /// Previous values of changed fields (null for INSERT)
    #[schema(value_type = Object, example = json!({"status": "requested"}))]
    pub old_values: Option<JsonValue>,
    /// New values of changed fields (null for DELETE)
    #[schema(value_type = Object, example = json!({"status": "running"}))]
    pub new_values: Option<JsonValue>,
}
impl From<attune_common::models::entity_history::EntityHistoryRecord> for HistoryRecordResponse {
fn from(record: attune_common::models::entity_history::EntityHistoryRecord) -> Self {
Self {
time: record.time,
operation: record.operation,
entity_id: record.entity_id,
entity_ref: record.entity_ref,
changed_fields: record.changed_fields,
old_values: record.old_values,
new_values: record.new_values,
}
}
}
/// Query parameters for filtering history records.
///
/// All filters are optional and combined; pagination is 1-based and
/// translated into limit/offset by [`HistoryQueryParams::to_repo_params`].
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct HistoryQueryParams {
    /// Filter by entity ID
    #[param(example = 42)]
    pub entity_id: Option<i64>,
    /// Filter by entity ref (e.g., action_ref, worker name)
    #[param(example = "core.http_request")]
    pub entity_ref: Option<String>,
    /// Filter by operation type: `INSERT`, `UPDATE`, or `DELETE`
    #[param(example = "UPDATE")]
    pub operation: Option<String>,
    /// Only include records where this field was changed
    #[param(example = "status")]
    pub changed_field: Option<String>,
    /// Only include records at or after this time (ISO 8601)
    #[param(example = "2026-02-01T00:00:00Z")]
    pub since: Option<DateTime<Utc>>,
    /// Only include records at or before this time (ISO 8601)
    #[param(example = "2026-02-28T23:59:59Z")]
    pub until: Option<DateTime<Utc>>,
    /// Page number (1-based)
    #[serde(default = "default_page")]
    #[param(example = 1, minimum = 1)]
    pub page: u32,
    /// Number of items per page
    #[serde(default = "default_page_size")]
    #[param(example = 50, minimum = 1, maximum = 1000)]
    pub page_size: u32,
}
/// Serde default for `HistoryQueryParams::page`: the first page.
fn default_page() -> u32 {
    1
}

/// Serde default for `HistoryQueryParams::page_size`: 50 items per page.
fn default_page_size() -> u32 {
    50
}
impl HistoryQueryParams {
    /// Convert to the repository-level query params.
    ///
    /// Translates the 1-based `page`/`page_size` pagination into a
    /// `limit`/`offset` pair. `page_size` is clamped to 1..=1000 (mirroring
    /// the `#[param]` bounds on the struct) and a `page` of 0 is treated as
    /// page 1 via saturating subtraction. Filter fields are passed through
    /// (strings cloned, since `self` is borrowed).
    pub fn to_repo_params(
        &self,
    ) -> attune_common::repositories::entity_history::HistoryQueryParams {
        // `clamp(1, 1000)` replaces the equivalent but non-idiomatic
        // `min(1000).max(1)` chain (clippy::manual_clamp); `i64::from`
        // makes the u32 -> i64 widening explicitly lossless.
        let limit = i64::from(self.page_size.clamp(1, 1000));
        let offset = i64::from(self.page.saturating_sub(1)) * limit;
        attune_common::repositories::entity_history::HistoryQueryParams {
            entity_id: self.entity_id,
            entity_ref: self.entity_ref.clone(),
            operation: self.operation.clone(),
            changed_field: self.changed_field.clone(),
            since: self.since,
            until: self.until,
            limit: Some(limit),
            offset: Some(offset),
        }
    }
}
/// Path parameter for the entity type segment.
///
/// Kept as a raw `String` so an invalid value can be turned into a 400
/// (via [`HistoryEntityTypePath::parse`]) instead of a routing failure.
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct HistoryEntityTypePath {
    /// Entity type: `execution`, `worker`, `enforcement`, or `event`
    pub entity_type: String,
}
impl HistoryEntityTypePath {
    /// Parse the entity type string, returning a typed enum or an error message.
    ///
    /// Delegates to `HistoryEntityType`'s `FromStr` impl; the error type is
    /// whatever that impl produces (a `String` message, per the signature).
    pub fn parse(&self) -> Result<HistoryEntityType, String> {
        self.entity_type.parse::<HistoryEntityType>()
    }
}
/// Path parameters for entity-specific history (e.g., `/executions/42/history`).
#[derive(Debug, Clone, Deserialize, IntoParams)]
pub struct EntityIdPath {
    /// The entity's primary key
    pub id: i64,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// An empty query string deserializes with serde defaults applied.
    #[test]
    fn test_query_params_defaults() {
        let json = r#"{}"#;
        let params: HistoryQueryParams = serde_json::from_str(json).unwrap();
        assert_eq!(params.page, 1);
        assert_eq!(params.page_size, 50);
        assert!(params.entity_id.is_none());
        assert!(params.operation.is_none());
    }

    /// Page/page_size translate to the expected limit/offset pair.
    #[test]
    fn test_query_params_to_repo_params() {
        let params = HistoryQueryParams {
            entity_id: Some(42),
            entity_ref: None,
            operation: Some("UPDATE".to_string()),
            changed_field: Some("status".to_string()),
            since: None,
            until: None,
            page: 3,
            page_size: 20,
        };
        let repo = params.to_repo_params();
        assert_eq!(repo.entity_id, Some(42));
        assert_eq!(repo.operation, Some("UPDATE".to_string()));
        assert_eq!(repo.changed_field, Some("status".to_string()));
        assert_eq!(repo.limit, Some(20));
        assert_eq!(repo.offset, Some(40)); // (3-1) * 20
    }

    /// Oversized page_size is capped at the documented maximum of 1000.
    #[test]
    fn test_query_params_page_size_cap() {
        let params = HistoryQueryParams {
            entity_id: None,
            entity_ref: None,
            operation: None,
            changed_field: None,
            since: None,
            until: None,
            page: 1,
            page_size: 5000,
        };
        let repo = params.to_repo_params();
        assert_eq!(repo.limit, Some(1000));
    }

    /// Known entity types parse; unknown strings are rejected.
    #[test]
    fn test_entity_type_path_parse() {
        let path = HistoryEntityTypePath {
            entity_type: "execution".to_string(),
        };
        assert_eq!(path.parse().unwrap(), HistoryEntityType::Execution);
        let path = HistoryEntityTypePath {
            entity_type: "unknown".to_string(),
        };
        assert!(path.parse().is_err());
    }
}

View File

@@ -1,10 +1,12 @@
//! Data Transfer Objects (DTOs) for API requests and responses
pub mod action;
pub mod analytics;
pub mod auth;
pub mod common;
pub mod event;
pub mod execution;
pub mod history;
pub mod inquiry;
pub mod key;
pub mod pack;
@@ -14,6 +16,11 @@ pub mod webhook;
pub mod workflow;
pub use action::{ActionResponse, ActionSummary, CreateActionRequest, UpdateActionRequest};
pub use analytics::{
AnalyticsQueryParams, DashboardAnalyticsResponse, EventVolumeResponse,
ExecutionStatusTimeSeriesResponse, ExecutionThroughputResponse, FailureRateResponse,
TimeSeriesPoint,
};
pub use auth::{
ChangePasswordRequest, CurrentUserResponse, LoginRequest, RefreshTokenRequest, RegisterRequest,
TokenResponse,
@@ -25,7 +32,10 @@ pub use event::{
EnforcementQueryParams, EnforcementResponse, EnforcementSummary, EventQueryParams,
EventResponse, EventSummary,
};
pub use execution::{CreateExecutionRequest, ExecutionQueryParams, ExecutionResponse, ExecutionSummary};
pub use execution::{
CreateExecutionRequest, ExecutionQueryParams, ExecutionResponse, ExecutionSummary,
};
pub use history::{HistoryEntityTypePath, HistoryQueryParams, HistoryRecordResponse};
pub use inquiry::{
CreateInquiryRequest, InquiryQueryParams, InquiryRespondRequest, InquiryResponse,
InquirySummary, UpdateInquiryRequest,

View File

@@ -0,0 +1,304 @@
//! Analytics API routes
//!
//! Provides read-only access to TimescaleDB continuous aggregates for dashboard
//! widgets and time-series analytics. All data is pre-computed by TimescaleDB
//! continuous aggregate policies — these endpoints simply query the materialized views.
use axum::{
extract::{Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
Json, Router,
};
use std::sync::Arc;
use attune_common::repositories::analytics::AnalyticsRepository;
use crate::{
auth::middleware::RequireAuth,
dto::{
analytics::{
AnalyticsQueryParams, DashboardAnalyticsResponse, EnforcementVolumeResponse,
EventVolumeResponse, ExecutionStatusTimeSeriesResponse, ExecutionThroughputResponse,
FailureRateResponse, TimeSeriesPoint, WorkerStatusTimeSeriesResponse,
},
common::ApiResponse,
},
middleware::ApiResult,
state::AppState,
};
/// Get a combined dashboard analytics payload.
///
/// Returns all key metrics in a single response to avoid multiple round-trips
/// from the dashboard page. Includes execution throughput, status transitions,
/// event volume, enforcement volume, worker status, and failure rate.
#[utoipa::path(
get,
path = "/api/v1/analytics/dashboard",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Dashboard analytics", body = inline(ApiResponse<DashboardAnalyticsResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_dashboard_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
// Run all aggregate queries concurrently
let (throughput, status, events, enforcements, workers, failure_rate) = tokio::try_join!(
AnalyticsRepository::execution_throughput_hourly(&state.db, &range),
AnalyticsRepository::execution_status_hourly(&state.db, &range),
AnalyticsRepository::event_volume_hourly(&state.db, &range),
AnalyticsRepository::enforcement_volume_hourly(&state.db, &range),
AnalyticsRepository::worker_status_hourly(&state.db, &range),
AnalyticsRepository::execution_failure_rate(&state.db, &range),
)?;
let response = DashboardAnalyticsResponse {
since: range.since,
until: range.until,
execution_throughput: throughput.into_iter().map(Into::into).collect(),
execution_status: status.into_iter().map(Into::into).collect(),
event_volume: events.into_iter().map(Into::into).collect(),
enforcement_volume: enforcements.into_iter().map(Into::into).collect(),
worker_status: workers.into_iter().map(Into::into).collect(),
failure_rate: FailureRateResponse::from_summary(failure_rate, &range),
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get execution status transitions over time.
///
/// Returns hourly buckets of execution status transitions (e.g., how many
/// executions moved to "completed", "failed", "running" per hour).
#[utoipa::path(
get,
path = "/api/v1/analytics/executions/status",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Execution status transitions", body = inline(ApiResponse<ExecutionStatusTimeSeriesResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_execution_status_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::execution_status_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = ExecutionStatusTimeSeriesResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get execution throughput over time.
///
/// Returns hourly buckets of execution creation counts.
#[utoipa::path(
get,
path = "/api/v1/analytics/executions/throughput",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Execution throughput", body = inline(ApiResponse<ExecutionThroughputResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_execution_throughput_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::execution_throughput_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = ExecutionThroughputResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get the execution failure rate summary.
///
/// Returns aggregate failure/timeout/completion counts and the failure rate
/// percentage over the requested time range.
#[utoipa::path(
    get,
    path = "/api/v1/analytics/executions/failure-rate",
    tag = "analytics",
    params(AnalyticsQueryParams),
    responses(
        (status = 200, description = "Failure rate summary", body = inline(ApiResponse<FailureRateResponse>)),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_failure_rate_analytics(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
    let range = query.to_time_range();
    let summary = AnalyticsRepository::execution_failure_rate(&state.db, &range).await?;
    // Single-row summary: wrap it with the echoed time range and return.
    Ok((
        StatusCode::OK,
        Json(ApiResponse::new(FailureRateResponse::from_summary(summary, &range))),
    ))
}
/// Get event volume over time.
///
/// Returns hourly buckets of event creation counts, aggregated across all triggers.
#[utoipa::path(
get,
path = "/api/v1/analytics/events/volume",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Event volume", body = inline(ApiResponse<EventVolumeResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_event_volume_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::event_volume_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = EventVolumeResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get worker status transitions over time.
///
/// Returns hourly buckets of worker status changes (online/offline/draining).
#[utoipa::path(
get,
path = "/api/v1/analytics/workers/status",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Worker status transitions", body = inline(ApiResponse<WorkerStatusTimeSeriesResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_worker_status_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::worker_status_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = WorkerStatusTimeSeriesResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
/// Get enforcement volume over time.
///
/// Returns hourly buckets of enforcement creation counts, aggregated across all rules.
#[utoipa::path(
get,
path = "/api/v1/analytics/enforcements/volume",
tag = "analytics",
params(AnalyticsQueryParams),
responses(
(status = 200, description = "Enforcement volume", body = inline(ApiResponse<EnforcementVolumeResponse>)),
),
security(("bearer_auth" = []))
)]
pub async fn get_enforcement_volume_analytics(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
Query(query): Query<AnalyticsQueryParams>,
) -> ApiResult<impl IntoResponse> {
let range = query.to_time_range();
let rows = AnalyticsRepository::enforcement_volume_hourly(&state.db, &range).await?;
let data: Vec<TimeSeriesPoint> = rows.into_iter().map(Into::into).collect();
let response = EnforcementVolumeResponse {
since: range.since,
until: range.until,
data,
};
Ok((StatusCode::OK, Json(ApiResponse::new(response))))
}
// ---------------------------------------------------------------------------
// Router
// ---------------------------------------------------------------------------
/// Build the analytics routes.
///
/// All routes are GET-only; the analytics surface is strictly read-only.
///
/// Mounts:
/// - `GET /analytics/dashboard` — combined dashboard payload
/// - `GET /analytics/executions/status` — execution status transitions
/// - `GET /analytics/executions/throughput` — execution creation throughput
/// - `GET /analytics/executions/failure-rate` — failure rate summary
/// - `GET /analytics/events/volume` — event creation volume
/// - `GET /analytics/workers/status` — worker status transitions
/// - `GET /analytics/enforcements/volume` — enforcement creation volume
pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
        .route("/analytics/dashboard", get(get_dashboard_analytics))
        .route(
            "/analytics/executions/status",
            get(get_execution_status_analytics),
        )
        .route(
            "/analytics/executions/throughput",
            get(get_execution_throughput_analytics),
        )
        .route(
            "/analytics/executions/failure-rate",
            get(get_failure_rate_analytics),
        )
        .route("/analytics/events/volume", get(get_event_volume_analytics))
        .route(
            "/analytics/workers/status",
            get(get_worker_status_analytics),
        )
        .route(
            "/analytics/enforcements/volume",
            get(get_enforcement_volume_analytics),
        )
}

View File

@@ -0,0 +1,245 @@
//! Entity history API routes
//!
//! Provides read-only access to the TimescaleDB entity history hypertables.
//! History records are written by PostgreSQL triggers — these endpoints only query them.
use axum::{
extract::{Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::get,
Json, Router,
};
use std::sync::Arc;
use attune_common::models::entity_history::HistoryEntityType;
use attune_common::repositories::entity_history::EntityHistoryRepository;
use crate::{
auth::middleware::RequireAuth,
dto::{
common::{PaginatedResponse, PaginationMeta, PaginationParams},
history::{HistoryQueryParams, HistoryRecordResponse},
},
middleware::{ApiError, ApiResult},
state::AppState,
};
/// List history records for a given entity type.
///
/// Supported entity types: `execution`, `worker`, `enforcement`, `event`.
/// Returns a paginated list of change records ordered by time descending.
#[utoipa::path(
    get,
    path = "/api/v1/history/{entity_type}",
    tag = "history",
    params(
        ("entity_type" = String, Path, description = "Entity type: execution, worker, enforcement, or event"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "Paginated list of history records", body = PaginatedResponse<HistoryRecordResponse>),
        (status = 400, description = "Invalid entity type"),
    ),
    security(("bearer_auth" = []))
)]
pub async fn list_entity_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(entity_type_str): Path<String>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    let entity_type = parse_entity_type(&entity_type_str)?;
    let repo_params = query.to_repo_params();
    // Fetch the page of records and the total count concurrently.
    let (records, total) = tokio::try_join!(
        EntityHistoryRepository::query(&state.db, entity_type, &repo_params),
        EntityHistoryRepository::count(&state.db, entity_type, &repo_params),
    )?;
    let data: Vec<HistoryRecordResponse> = records.into_iter().map(Into::into).collect();
    let response = PaginatedResponse {
        data,
        // Build the meta directly from the query's pagination fields — the
        // previous intermediate `PaginationParams` struct only echoed them.
        // A row count is never negative, but clamp before the lossy i64->u64
        // cast so a surprise negative can't wrap to a huge total.
        pagination: PaginationMeta::new(query.page, query.page_size, total.max(0) as u64),
    };
    Ok((StatusCode::OK, Json(response)))
}
/// Get history for a specific execution by ID.
///
/// Returns all change records for the given execution, ordered by time descending.
#[utoipa::path(
    get,
    path = "/api/v1/executions/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Execution ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the execution", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_execution_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    // Thin wrapper: delegate to the shared helper with a fixed entity type.
    get_entity_history_by_id(&state, HistoryEntityType::Execution, id, query).await
}
/// Get history for a specific worker by ID.
///
/// Returns all change records for the given worker, ordered by time descending.
#[utoipa::path(
    get,
    path = "/api/v1/workers/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Worker ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the worker", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_worker_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    // Thin wrapper: delegate to the shared helper with a fixed entity type.
    get_entity_history_by_id(&state, HistoryEntityType::Worker, id, query).await
}
/// Get history for a specific enforcement by ID.
///
/// Returns all change records for the given enforcement, ordered by time descending.
#[utoipa::path(
    get,
    path = "/api/v1/enforcements/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Enforcement ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the enforcement", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_enforcement_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    // Thin wrapper: delegate to the shared helper with a fixed entity type.
    get_entity_history_by_id(&state, HistoryEntityType::Enforcement, id, query).await
}
/// Get history for a specific event by ID.
///
/// Returns all change records for the given event, ordered by time descending.
#[utoipa::path(
    get,
    path = "/api/v1/events/{id}/history",
    tag = "history",
    params(
        ("id" = i64, Path, description = "Event ID"),
        HistoryQueryParams,
    ),
    responses(
        (status = 200, description = "History records for the event", body = PaginatedResponse<HistoryRecordResponse>),
    ),
    security(("bearer_auth" = []))
)]
pub async fn get_event_history(
    State(state): State<Arc<AppState>>,
    RequireAuth(_user): RequireAuth,
    Path(id): Path<i64>,
    Query(query): Query<HistoryQueryParams>,
) -> ApiResult<impl IntoResponse> {
    // Thin wrapper: delegate to the shared helper with a fixed entity type.
    get_entity_history_by_id(&state, HistoryEntityType::Event, id, query).await
}
// ---------------------------------------------------------------------------
// Shared helpers
// ---------------------------------------------------------------------------
/// Parse and validate the entity type path parameter.
///
/// Delegates to `HistoryEntityType`'s `FromStr` impl; the parse error
/// message is wrapped in `ApiError::BadRequest` so the caller surfaces it
/// to the client.
fn parse_entity_type(s: &str) -> Result<HistoryEntityType, ApiError> {
    s.parse::<HistoryEntityType>().map_err(ApiError::BadRequest)
}
/// Shared implementation for `GET /<entities>/:id/history` endpoints.
async fn get_entity_history_by_id(
    state: &AppState,
    entity_type: HistoryEntityType,
    entity_id: i64,
    query: HistoryQueryParams,
) -> ApiResult<impl IntoResponse> {
    // The path parameter is authoritative: any entity_id supplied in the
    // query string is discarded in favor of it.
    let mut filters = query.to_repo_params();
    filters.entity_id = Some(entity_id);

    // Fetch the requested page and the total count concurrently.
    let (records, total) = tokio::try_join!(
        EntityHistoryRepository::query(&state.db, entity_type, &filters),
        EntityHistoryRepository::count(&state.db, entity_type, &filters),
    )?;

    let paging = PaginationParams {
        page: query.page,
        page_size: query.page_size,
    };
    let body = PaginatedResponse {
        data: records
            .into_iter()
            .map(Into::into)
            .collect::<Vec<HistoryRecordResponse>>(),
        pagination: PaginationMeta::new(paging.page, paging.page_size, total as u64),
    };
    Ok((StatusCode::OK, Json(body)))
}
// ---------------------------------------------------------------------------
// Router
// ---------------------------------------------------------------------------
/// Build the history routes.
///
/// Mounts:
/// - `GET /history/:entity_type` — generic history query
/// - `GET /executions/:id/history` — execution-specific history
/// - `GET /workers/:id/history` — worker-specific history (note: currently no /workers base route exists)
/// - `GET /enforcements/:id/history` — enforcement-specific history
/// - `GET /events/:id/history` — event-specific history
pub fn routes() -> Router<Arc<AppState>> {
    // Generic endpoint addressed by entity type.
    let generic = Router::new().route("/history/{entity_type}", get(list_entity_history));

    // Convenience endpoints nested under each entity's collection path.
    let per_entity = Router::new()
        .route("/executions/{id}/history", get(get_execution_history))
        .route("/workers/{id}/history", get(get_worker_history))
        .route("/enforcements/{id}/history", get(get_enforcement_history))
        .route("/events/{id}/history", get(get_event_history));

    generic.merge(per_entity)
}

View File

@@ -1,10 +1,12 @@
//! API route modules
pub mod actions;
pub mod analytics;
pub mod auth;
pub mod events;
pub mod executions;
pub mod health;
pub mod history;
pub mod inquiries;
pub mod keys;
pub mod packs;
@@ -14,10 +16,12 @@ pub mod webhooks;
pub mod workflows;
pub use actions::routes as action_routes;
pub use analytics::routes as analytics_routes;
pub use auth::routes as auth_routes;
pub use events::routes as event_routes;
pub use executions::routes as execution_routes;
pub use health::routes as health_routes;
pub use history::routes as history_routes;
pub use inquiries::routes as inquiry_routes;
pub use keys::routes as key_routes;
pub use packs::routes as pack_routes;

View File

@@ -55,6 +55,8 @@ impl Server {
.merge(routes::key_routes())
.merge(routes::workflow_routes())
.merge(routes::webhook_routes())
.merge(routes::history_routes())
.merge(routes::analytics_routes())
// TODO: Add more route modules here
// etc.
.with_state(self.state.clone());

View File

@@ -10,6 +10,7 @@ use sqlx::FromRow;
// Re-export common types
pub use action::*;
pub use entity_history::*;
pub use enums::*;
pub use event::*;
pub use execution::*;
@@ -1439,3 +1440,91 @@ pub mod pack_test {
pub last_test_passed: Option<bool>,
}
}
/// Entity history tracking models (TimescaleDB hypertables)
///
/// These models represent rows in the `<entity>_history` append-only hypertables
/// that track field-level changes to operational tables via PostgreSQL triggers.
pub mod entity_history {
    use super::*;

    /// One append-only history row describing a field-level change to an entity.
    ///
    /// Rows are written exclusively by PostgreSQL triggers — application code
    /// only ever reads them and never inserts or updates them.
    #[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
    pub struct EntityHistoryRecord {
        /// When the change occurred (hypertable partitioning dimension)
        pub time: DateTime<Utc>,
        /// The operation that produced this record: `INSERT`, `UPDATE`, or `DELETE`
        pub operation: String,
        /// The primary key of the changed row in the source table
        pub entity_id: Id,
        /// Denormalized human-readable identifier (e.g., `action_ref`, `worker.name`, `rule_ref`, `trigger_ref`)
        pub entity_ref: Option<String>,
        /// Names of fields that changed in this operation (empty for INSERT/DELETE)
        pub changed_fields: Vec<String>,
        /// Previous values of the changed fields (NULL for INSERT)
        pub old_values: Option<JsonValue>,
        /// New values of the changed fields (NULL for DELETE)
        pub new_values: Option<JsonValue>,
    }

    /// Entity types that have history tracking.
    ///
    /// Each variant maps to a `<name>_history` hypertable in the database.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
    #[serde(rename_all = "lowercase")]
    pub enum HistoryEntityType {
        Execution,
        Worker,
        Enforcement,
        Event,
    }

    impl HistoryEntityType {
        /// The lowercase name of this entity type, shared by `Display`.
        fn as_str(self) -> &'static str {
            match self {
                HistoryEntityType::Execution => "execution",
                HistoryEntityType::Worker => "worker",
                HistoryEntityType::Enforcement => "enforcement",
                HistoryEntityType::Event => "event",
            }
        }

        /// Returns the history table name for this entity type.
        pub fn table_name(&self) -> &'static str {
            match self {
                HistoryEntityType::Execution => "execution_history",
                HistoryEntityType::Worker => "worker_history",
                HistoryEntityType::Enforcement => "enforcement_history",
                HistoryEntityType::Event => "event_history",
            }
        }
    }

    impl std::fmt::Display for HistoryEntityType {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.write_str(self.as_str())
        }
    }

    impl std::str::FromStr for HistoryEntityType {
        type Err = String;

        /// Case-insensitive parse of the lowercase variant names.
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            match s.to_lowercase().as_str() {
                "execution" => Ok(Self::Execution),
                "worker" => Ok(Self::Worker),
                "enforcement" => Ok(Self::Enforcement),
                "event" => Ok(Self::Event),
                other => Err(format!(
                    "unknown history entity type '{}'; expected one of: execution, worker, enforcement, event",
                    other
                )),
            }
        }
    }
}

View File

@@ -191,9 +191,13 @@ impl RabbitMqConfig {
/// Queue configurations
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueuesConfig {
/// Events queue configuration
/// Events queue configuration (sensor catch-all, bound with `#`)
pub events: QueueConfig,
/// Executor events queue configuration (bound only to `event.created`)
#[serde(default = "default_executor_events_queue")]
pub executor_events: QueueConfig,
/// Executions queue configuration (legacy - to be deprecated)
pub executions: QueueConfig,
@@ -216,6 +220,15 @@ pub struct QueuesConfig {
pub notifications: QueueConfig,
}
/// Fallback for the `executor_events` queue, invoked by serde when the field
/// is absent from an existing configuration file.
fn default_executor_events_queue() -> QueueConfig {
    QueueConfig {
        name: "attune.executor.events.queue".to_owned(),
        auto_delete: false,
        exclusive: false,
        durable: true,
    }
}
impl Default for QueuesConfig {
fn default() -> Self {
Self {
@@ -225,6 +238,12 @@ impl Default for QueuesConfig {
exclusive: false,
auto_delete: false,
},
executor_events: QueueConfig {
name: "attune.executor.events.queue".to_string(),
durable: true,
exclusive: false,
auto_delete: false,
},
executions: QueueConfig {
name: "attune.executions.queue".to_string(),
durable: true,
@@ -567,6 +586,7 @@ mod tests {
fn test_default_queues() {
let queues = QueuesConfig::default();
assert_eq!(queues.events.name, "attune.events.queue");
assert_eq!(queues.executor_events.name, "attune.executor.events.queue");
assert_eq!(queues.executions.name, "attune.executions.queue");
assert_eq!(
queues.execution_completed.name,

View File

@@ -396,6 +396,11 @@ impl Connection {
None
};
// Declare executor-specific events queue (only receives event.created messages,
// unlike the sensor's catch-all events queue which is bound with `#`)
self.declare_queue_with_optional_dlx(&config.rabbitmq.queues.executor_events, dlx)
.await?;
// Declare executor queues
self.declare_queue_with_optional_dlx(&config.rabbitmq.queues.enforcements, dlx)
.await?;
@@ -444,6 +449,15 @@ impl Connection {
)
.await?;
// Bind executor events queue to only event.created routing key
// (the sensor's attune.events.queue uses `#` and gets all message types)
self.bind_queue(
&config.rabbitmq.queues.executor_events.name,
&config.rabbitmq.exchanges.events.name,
"event.created",
)
.await?;
info!("Executor infrastructure setup complete");
Ok(())
}

View File

@@ -190,8 +190,10 @@ pub mod exchanges {
/// Well-known queue names
pub mod queues {
/// Event processing queue
/// Event processing queue (sensor catch-all, bound with `#`)
pub const EVENTS: &str = "attune.events.queue";
/// Executor event processing queue (bound only to `event.created`)
pub const EXECUTOR_EVENTS: &str = "attune.executor.events.queue";
/// Execution request queue
pub const EXECUTIONS: &str = "attune.executions.queue";
/// Notification delivery queue

View File

@@ -0,0 +1,565 @@
//! Analytics repository for querying TimescaleDB continuous aggregates
//!
//! This module provides read-only query methods for the continuous aggregate
//! materialized views created in migration 000009_timescaledb_history. These views are
//! auto-refreshed by TimescaleDB policies and provide pre-computed hourly
//! rollups for dashboard widgets.
use chrono::{DateTime, Utc};
use serde::Serialize;
use sqlx::{Executor, FromRow, Postgres};
use crate::Result;
/// Repository for querying analytics continuous aggregates.
///
/// All methods are read-only. The underlying materialized views are
/// auto-refreshed by TimescaleDB continuous aggregate policies.
///
/// Stateless unit struct: each method receives its database executor as an
/// argument rather than holding a connection.
pub struct AnalyticsRepository;
// ---------------------------------------------------------------------------
// Row types returned by aggregate queries
// ---------------------------------------------------------------------------
/// A single hourly bucket of execution status transitions.
#[derive(Debug, Clone, Serialize, FromRow)]
pub struct ExecutionStatusBucket {
    /// Start of the 1-hour bucket
    pub bucket: DateTime<Utc>,
    /// Action ref (e.g., "core.http_request"); NULL when grouped across all actions
    pub action_ref: Option<String>,
    /// The status that was transitioned to (e.g., "completed", "failed")
    pub new_status: Option<String>,
    /// Number of transitions in this bucket
    pub transition_count: i64,
}
/// A single hourly bucket of execution throughput (creations).
#[derive(Debug, Clone, Serialize, FromRow)]
pub struct ExecutionThroughputBucket {
    /// Start of the 1-hour bucket
    pub bucket: DateTime<Utc>,
    /// Action ref; NULL when grouped across all actions
    pub action_ref: Option<String>,
    /// Number of executions created in this bucket
    pub execution_count: i64,
}
/// A single hourly bucket of event volume.
#[derive(Debug, Clone, Serialize, FromRow)]
pub struct EventVolumeBucket {
    /// Start of the 1-hour bucket
    pub bucket: DateTime<Utc>,
    /// Trigger ref; NULL when grouped across all triggers
    pub trigger_ref: Option<String>,
    /// Number of events created in this bucket
    pub event_count: i64,
}
/// A single hourly bucket of worker status transitions.
#[derive(Debug, Clone, Serialize, FromRow)]
pub struct WorkerStatusBucket {
    /// Start of the 1-hour bucket
    pub bucket: DateTime<Utc>,
    /// Worker name; NULL when grouped across all workers
    pub worker_name: Option<String>,
    /// The status transitioned to (e.g., "online", "offline")
    pub new_status: Option<String>,
    /// Number of transitions in this bucket
    pub transition_count: i64,
}
/// A single hourly bucket of enforcement volume.
#[derive(Debug, Clone, Serialize, FromRow)]
pub struct EnforcementVolumeBucket {
    /// Start of the 1-hour bucket
    pub bucket: DateTime<Utc>,
    /// Rule ref; NULL when grouped across all rules
    pub rule_ref: Option<String>,
    /// Number of enforcements created in this bucket
    pub enforcement_count: i64,
}
/// Aggregated failure rate over a time range.
///
/// Unlike the bucket types above, this is computed in Rust from aggregate
/// rows rather than mapped directly from a query (hence no `FromRow`).
#[derive(Debug, Clone, Serialize)]
pub struct FailureRateSummary {
    /// Total status transitions to terminal states in the window
    pub total_terminal: i64,
    /// Number of transitions to "failed" status
    pub failed_count: i64,
    /// Number of transitions to "timeout" status
    pub timeout_count: i64,
    /// Number of transitions to "completed" status
    pub completed_count: i64,
    /// Failure rate as a percentage (0.0–100.0)
    pub failure_rate_pct: f64,
}
// ---------------------------------------------------------------------------
// Query parameters
// ---------------------------------------------------------------------------
/// Common time-range parameters for analytics queries.
#[derive(Debug, Clone)]
pub struct AnalyticsTimeRange {
    /// Start of the query window (inclusive). Defaults to 24 hours ago.
    pub since: DateTime<Utc>,
    /// End of the query window (inclusive). Defaults to now.
    pub until: DateTime<Utc>,
}

impl Default for AnalyticsTimeRange {
    /// The default window is the trailing 24 hours.
    fn default() -> Self {
        Self::last_hours(24)
    }
}

impl AnalyticsTimeRange {
    /// Create a range covering the last N hours from now.
    pub fn last_hours(hours: i64) -> Self {
        let until = Utc::now();
        Self {
            since: until - chrono::Duration::hours(hours),
            until,
        }
    }

    /// Create a range covering the last N days from now.
    pub fn last_days(days: i64) -> Self {
        let until = Utc::now();
        Self {
            since: until - chrono::Duration::days(days),
            until,
        }
    }
}
// ---------------------------------------------------------------------------
// Repository implementation
// ---------------------------------------------------------------------------
impl AnalyticsRepository {
    // =======================================================================
    // Execution status transitions
    // =======================================================================
    /// Get execution status transitions per hour, aggregated across all actions.
    ///
    /// Returns one row per (bucket, new_status) pair, ordered by bucket ascending.
    pub async fn execution_status_hourly<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
    ) -> Result<Vec<ExecutionStatusBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        // NULL::text keeps the projection shape identical to the per-action
        // variant so both map onto ExecutionStatusBucket; SUM(...) is cast
        // back to bigint because SUM over bigint widens to numeric.
        let rows = sqlx::query_as::<_, ExecutionStatusBucket>(
            r#"
            SELECT
                bucket,
                NULL::text AS action_ref,
                new_status,
                SUM(transition_count)::bigint AS transition_count
            FROM execution_status_hourly
            WHERE bucket >= $1 AND bucket <= $2
            GROUP BY bucket, new_status
            ORDER BY bucket ASC, new_status
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    /// Get execution status transitions per hour for a specific action.
    pub async fn execution_status_hourly_by_action<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
        action_ref: &str,
    ) -> Result<Vec<ExecutionStatusBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        // View rows are already keyed per action, so no re-aggregation needed.
        let rows = sqlx::query_as::<_, ExecutionStatusBucket>(
            r#"
            SELECT
                bucket,
                action_ref,
                new_status,
                transition_count
            FROM execution_status_hourly
            WHERE bucket >= $1 AND bucket <= $2 AND action_ref = $3
            ORDER BY bucket ASC, new_status
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .bind(action_ref)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    // =======================================================================
    // Execution throughput
    // =======================================================================
    /// Get execution creation throughput per hour, aggregated across all actions.
    pub async fn execution_throughput_hourly<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
    ) -> Result<Vec<ExecutionThroughputBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        // NULL::text action_ref + SUM keep the row shape uniform with the
        // per-action variant below.
        let rows = sqlx::query_as::<_, ExecutionThroughputBucket>(
            r#"
            SELECT
                bucket,
                NULL::text AS action_ref,
                SUM(execution_count)::bigint AS execution_count
            FROM execution_throughput_hourly
            WHERE bucket >= $1 AND bucket <= $2
            GROUP BY bucket
            ORDER BY bucket ASC
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    /// Get execution creation throughput per hour for a specific action.
    pub async fn execution_throughput_hourly_by_action<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
        action_ref: &str,
    ) -> Result<Vec<ExecutionThroughputBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let rows = sqlx::query_as::<_, ExecutionThroughputBucket>(
            r#"
            SELECT
                bucket,
                action_ref,
                execution_count
            FROM execution_throughput_hourly
            WHERE bucket >= $1 AND bucket <= $2 AND action_ref = $3
            ORDER BY bucket ASC
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .bind(action_ref)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    // =======================================================================
    // Event volume
    // =======================================================================
    /// Get event creation volume per hour, aggregated across all triggers.
    pub async fn event_volume_hourly<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
    ) -> Result<Vec<EventVolumeBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let rows = sqlx::query_as::<_, EventVolumeBucket>(
            r#"
            SELECT
                bucket,
                NULL::text AS trigger_ref,
                SUM(event_count)::bigint AS event_count
            FROM event_volume_hourly
            WHERE bucket >= $1 AND bucket <= $2
            GROUP BY bucket
            ORDER BY bucket ASC
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    /// Get event creation volume per hour for a specific trigger.
    pub async fn event_volume_hourly_by_trigger<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
        trigger_ref: &str,
    ) -> Result<Vec<EventVolumeBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let rows = sqlx::query_as::<_, EventVolumeBucket>(
            r#"
            SELECT
                bucket,
                trigger_ref,
                event_count
            FROM event_volume_hourly
            WHERE bucket >= $1 AND bucket <= $2 AND trigger_ref = $3
            ORDER BY bucket ASC
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .bind(trigger_ref)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    // =======================================================================
    // Worker health
    // =======================================================================
    /// Get worker status transitions per hour, aggregated across all workers.
    pub async fn worker_status_hourly<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
    ) -> Result<Vec<WorkerStatusBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let rows = sqlx::query_as::<_, WorkerStatusBucket>(
            r#"
            SELECT
                bucket,
                NULL::text AS worker_name,
                new_status,
                SUM(transition_count)::bigint AS transition_count
            FROM worker_status_hourly
            WHERE bucket >= $1 AND bucket <= $2
            GROUP BY bucket, new_status
            ORDER BY bucket ASC, new_status
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    /// Get worker status transitions per hour for a specific worker.
    pub async fn worker_status_hourly_by_name<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
        worker_name: &str,
    ) -> Result<Vec<WorkerStatusBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let rows = sqlx::query_as::<_, WorkerStatusBucket>(
            r#"
            SELECT
                bucket,
                worker_name,
                new_status,
                transition_count
            FROM worker_status_hourly
            WHERE bucket >= $1 AND bucket <= $2 AND worker_name = $3
            ORDER BY bucket ASC, new_status
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .bind(worker_name)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    // =======================================================================
    // Enforcement volume
    // =======================================================================
    /// Get enforcement creation volume per hour, aggregated across all rules.
    pub async fn enforcement_volume_hourly<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
    ) -> Result<Vec<EnforcementVolumeBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let rows = sqlx::query_as::<_, EnforcementVolumeBucket>(
            r#"
            SELECT
                bucket,
                NULL::text AS rule_ref,
                SUM(enforcement_count)::bigint AS enforcement_count
            FROM enforcement_volume_hourly
            WHERE bucket >= $1 AND bucket <= $2
            GROUP BY bucket
            ORDER BY bucket ASC
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    /// Get enforcement creation volume per hour for a specific rule.
    pub async fn enforcement_volume_hourly_by_rule<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
        rule_ref: &str,
    ) -> Result<Vec<EnforcementVolumeBucket>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let rows = sqlx::query_as::<_, EnforcementVolumeBucket>(
            r#"
            SELECT
                bucket,
                rule_ref,
                enforcement_count
            FROM enforcement_volume_hourly
            WHERE bucket >= $1 AND bucket <= $2 AND rule_ref = $3
            ORDER BY bucket ASC
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .bind(rule_ref)
        .fetch_all(executor)
        .await?;
        Ok(rows)
    }
    // =======================================================================
    // Derived analytics
    // =======================================================================
    /// Compute the execution failure rate over a time range.
    ///
    /// Uses the `execution_status_hourly` aggregate to count terminal-state
    /// transitions (completed, failed, timeout) and derive the failure
    /// percentage.
    pub async fn execution_failure_rate<'e, E>(
        executor: E,
        range: &AnalyticsTimeRange,
    ) -> Result<FailureRateSummary>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        // Query terminal-state transitions from the aggregate
        let rows = sqlx::query_as::<_, (Option<String>, i64)>(
            r#"
            SELECT
                new_status,
                SUM(transition_count)::bigint AS cnt
            FROM execution_status_hourly
            WHERE bucket >= $1 AND bucket <= $2
              AND new_status IN ('completed', 'failed', 'timeout')
            GROUP BY new_status
            "#,
        )
        .bind(range.since)
        .bind(range.until)
        .fetch_all(executor)
        .await?;
        let mut completed: i64 = 0;
        let mut failed: i64 = 0;
        let mut timeout: i64 = 0;
        // GROUP BY new_status guarantees at most one row per status, so plain
        // assignment (rather than accumulation) is sufficient here.
        for (status, count) in &rows {
            match status.as_deref() {
                Some("completed") => completed = *count,
                Some("failed") => failed = *count,
                Some("timeout") => timeout = *count,
                _ => {}
            }
        }
        let total_terminal = completed + failed + timeout;
        // Guard against division by zero when the window has no terminal events.
        let failure_rate_pct = if total_terminal > 0 {
            ((failed + timeout) as f64 / total_terminal as f64) * 100.0
        } else {
            0.0
        };
        Ok(FailureRateSummary {
            total_terminal,
            failed_count: failed,
            timeout_count: timeout,
            completed_count: completed,
            failure_rate_pct,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Width of the range in whole hours.
    fn span_hours(range: &AnalyticsTimeRange) -> i64 {
        (range.until - range.since).num_hours()
    }

    #[test]
    fn default_range_is_roughly_a_day() {
        // Should be approximately 24 hours
        assert!((span_hours(&AnalyticsTimeRange::default()) - 24).abs() <= 1);
    }

    #[test]
    fn last_hours_covers_requested_window() {
        assert!((span_hours(&AnalyticsTimeRange::last_hours(6)) - 6).abs() <= 1);
    }

    #[test]
    fn last_days_covers_requested_window() {
        let range = AnalyticsTimeRange::last_days(7);
        assert!(((range.until - range.since).num_days() - 7).abs() <= 1);
    }

    #[test]
    fn empty_window_yields_zero_rate() {
        let summary = FailureRateSummary {
            total_terminal: 0,
            failed_count: 0,
            timeout_count: 0,
            completed_count: 0,
            failure_rate_pct: 0.0,
        };
        assert_eq!(summary.failure_rate_pct, 0.0);
    }

    #[test]
    fn failure_rate_formula_checks_out() {
        // 80 completed, 15 failed, 5 timeout → 20% failure rate
        let total = 80 + 15 + 5;
        let rate = ((15 + 5) as f64 / total as f64) * 100.0;
        assert!((rate - 20.0).abs() < 0.01);
    }
}

View File

@@ -0,0 +1,301 @@
//! Entity history repository for querying TimescaleDB history hypertables
//!
//! This module provides read-only query methods for the `<entity>_history` tables.
//! History records are written exclusively by PostgreSQL triggers — this repository
//! only reads them.
use chrono::{DateTime, Utc};
use sqlx::{Executor, Postgres, QueryBuilder};
use crate::models::entity_history::{EntityHistoryRecord, HistoryEntityType};
use crate::Result;
/// Repository for querying entity history hypertables.
///
/// All methods are read-only. History records are populated by PostgreSQL
/// `AFTER INSERT OR UPDATE OR DELETE` triggers on the operational tables.
///
/// Stateless unit struct: each method receives its database executor as an
/// argument rather than holding a connection.
pub struct EntityHistoryRepository;
/// Query parameters for filtering history records.
#[derive(Debug, Clone, Default)]
pub struct HistoryQueryParams {
    /// Filter by entity ID (e.g., execution.id)
    pub entity_id: Option<i64>,
    /// Filter by entity ref (e.g., action_ref, worker name)
    pub entity_ref: Option<String>,
    /// Filter by operation type: `INSERT`, `UPDATE`, or `DELETE`
    pub operation: Option<String>,
    /// Only include records where this field was changed
    pub changed_field: Option<String>,
    /// Only include records at or after this time
    pub since: Option<DateTime<Utc>>,
    /// Only include records at or before this time
    pub until: Option<DateTime<Utc>>,
    /// Maximum number of records to return (default: 100, max: 1000)
    pub limit: Option<i64>,
    /// Offset for pagination
    pub offset: Option<i64>,
}
impl HistoryQueryParams {
    /// Returns the effective limit: defaults to 100, clamped to `[1, 1000]`.
    pub fn effective_limit(&self) -> i64 {
        // `clamp(1, 1000)` replaces the former `.min(1000).max(1)` chain
        // (clippy::manual_clamp); the semantics are identical.
        self.limit.unwrap_or(100).clamp(1, 1000)
    }
    /// Returns the effective offset (never negative; defaults to 0).
    pub fn effective_offset(&self) -> i64 {
        self.offset.unwrap_or(0).max(0)
    }
}
impl EntityHistoryRepository {
    /// Append the shared WHERE-clause filters from `params` to `qb`.
    ///
    /// `query()` and `count()` must apply identical filters so pagination
    /// totals agree with the rows actually returned; centralizing the
    /// clause-building here keeps the two statements from drifting apart.
    fn push_filters(qb: &mut QueryBuilder<'_, Postgres>, params: &HistoryQueryParams) {
        if let Some(entity_id) = params.entity_id {
            qb.push(" AND entity_id = ").push_bind(entity_id);
        }
        if let Some(ref entity_ref) = params.entity_ref {
            qb.push(" AND entity_ref = ").push_bind(entity_ref.clone());
        }
        if let Some(ref operation) = params.operation {
            // Operations are stored uppercase (INSERT/UPDATE/DELETE).
            qb.push(" AND operation = ")
                .push_bind(operation.to_uppercase());
        }
        if let Some(ref changed_field) = params.changed_field {
            // `$n = ANY(changed_fields)` is Postgres array membership: the
            // bound field name must appear in the changed_fields array.
            qb.push(" AND ")
                .push_bind(changed_field.clone())
                .push(" = ANY(changed_fields)");
        }
        if let Some(since) = params.since {
            qb.push(" AND time >= ").push_bind(since);
        }
        if let Some(until) = params.until {
            qb.push(" AND time <= ").push_bind(until);
        }
    }
    /// Query history records for a given entity type with optional filters.
    ///
    /// Results are ordered by `time DESC` (most recent first).
    pub async fn query<'e, E>(
        executor: E,
        entity_type: HistoryEntityType,
        params: &HistoryQueryParams,
    ) -> Result<Vec<EntityHistoryRecord>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        // We must use format! for the table name since it can't be a bind parameter,
        // but HistoryEntityType::table_name() returns a known static str so this is safe.
        let table = entity_type.table_name();
        let mut qb: QueryBuilder<Postgres> =
            QueryBuilder::new(format!("SELECT time, operation, entity_id, entity_ref, changed_fields, old_values, new_values FROM {table} WHERE 1=1"));
        Self::push_filters(&mut qb, params);
        qb.push(" ORDER BY time DESC");
        qb.push(" LIMIT ").push_bind(params.effective_limit());
        qb.push(" OFFSET ").push_bind(params.effective_offset());
        let records = qb
            .build_query_as::<EntityHistoryRecord>()
            .fetch_all(executor)
            .await?;
        Ok(records)
    }
    /// Count history records for a given entity type with optional filters.
    ///
    /// Useful for pagination metadata; applies exactly the same filters as
    /// `query()` via the shared `push_filters` helper.
    pub async fn count<'e, E>(
        executor: E,
        entity_type: HistoryEntityType,
        params: &HistoryQueryParams,
    ) -> Result<i64>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let table = entity_type.table_name();
        let mut qb: QueryBuilder<Postgres> =
            QueryBuilder::new(format!("SELECT COUNT(*) FROM {table} WHERE 1=1"));
        Self::push_filters(&mut qb, params);
        let row: (i64,) = qb.build_query_as().fetch_one(executor).await?;
        Ok(row.0)
    }
    /// Get history records for a specific entity by ID.
    ///
    /// Convenience method equivalent to `query()` with `entity_id` set.
    pub async fn find_by_entity_id<'e, E>(
        executor: E,
        entity_type: HistoryEntityType,
        entity_id: i64,
        limit: Option<i64>,
    ) -> Result<Vec<EntityHistoryRecord>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let params = HistoryQueryParams {
            entity_id: Some(entity_id),
            limit,
            ..Default::default()
        };
        Self::query(executor, entity_type, &params).await
    }
    /// Get only status-change history records for a specific entity.
    ///
    /// Filters to UPDATE operations where `changed_fields` includes `"status"`.
    pub async fn find_status_changes<'e, E>(
        executor: E,
        entity_type: HistoryEntityType,
        entity_id: i64,
        limit: Option<i64>,
    ) -> Result<Vec<EntityHistoryRecord>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        let params = HistoryQueryParams {
            entity_id: Some(entity_id),
            operation: Some("UPDATE".to_string()),
            changed_field: Some("status".to_string()),
            limit,
            ..Default::default()
        };
        Self::query(executor, entity_type, &params).await
    }
    /// Get the most recent history record for a specific entity.
    pub async fn find_latest<'e, E>(
        executor: E,
        entity_type: HistoryEntityType,
        entity_id: i64,
    ) -> Result<Option<EntityHistoryRecord>>
    where
        E: Executor<'e, Database = Postgres> + 'e,
    {
        // Rows come back ordered time DESC, so the first record is the newest.
        let records = Self::find_by_entity_id(executor, entity_type, entity_id, Some(1)).await?;
        Ok(records.into_iter().next())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn defaults_are_limit_100_offset_0() {
        let params = HistoryQueryParams::default();
        assert_eq!(params.effective_limit(), 100);
        assert_eq!(params.effective_offset(), 0);
    }

    #[test]
    fn oversized_limit_is_capped_at_1000() {
        let params = HistoryQueryParams {
            limit: Some(5000),
            ..Default::default()
        };
        assert_eq!(params.effective_limit(), 1000);
    }

    #[test]
    fn negative_limit_is_raised_to_1() {
        let params = HistoryQueryParams {
            limit: Some(-10),
            ..Default::default()
        };
        assert_eq!(params.effective_limit(), 1);
    }

    #[test]
    fn negative_offset_is_floored_at_0() {
        let params = HistoryQueryParams {
            offset: Some(-5),
            ..Default::default()
        };
        assert_eq!(params.effective_offset(), 0);
    }

    #[test]
    fn table_names_match_entity_types() {
        for (entity_type, table) in [
            (HistoryEntityType::Execution, "execution_history"),
            (HistoryEntityType::Worker, "worker_history"),
            (HistoryEntityType::Enforcement, "enforcement_history"),
            (HistoryEntityType::Event, "event_history"),
        ] {
            assert_eq!(entity_type.table_name(), table);
        }
    }

    #[test]
    fn parsing_is_case_insensitive_and_rejects_unknowns() {
        assert_eq!(
            "execution".parse::<HistoryEntityType>().unwrap(),
            HistoryEntityType::Execution
        );
        assert_eq!(
            "Worker".parse::<HistoryEntityType>().unwrap(),
            HistoryEntityType::Worker
        );
        assert_eq!(
            "ENFORCEMENT".parse::<HistoryEntityType>().unwrap(),
            HistoryEntityType::Enforcement
        );
        assert_eq!(
            "event".parse::<HistoryEntityType>().unwrap(),
            HistoryEntityType::Event
        );
        assert!("unknown".parse::<HistoryEntityType>().is_err());
    }

    #[test]
    fn display_renders_lowercase_names() {
        for (entity_type, name) in [
            (HistoryEntityType::Execution, "execution"),
            (HistoryEntityType::Worker, "worker"),
            (HistoryEntityType::Enforcement, "enforcement"),
            (HistoryEntityType::Event, "event"),
        ] {
            assert_eq!(entity_type.to_string(), name);
        }
    }
}

View File

@@ -28,7 +28,9 @@
use sqlx::{Executor, Postgres, Transaction};
pub mod action;
pub mod analytics;
pub mod artifact;
pub mod entity_history;
pub mod event;
pub mod execution;
pub mod identity;
@@ -46,7 +48,9 @@ pub mod workflow;
// Re-export repository types
pub use action::{ActionRepository, PolicyRepository};
pub use analytics::AnalyticsRepository;
pub use artifact::ArtifactRepository;
pub use entity_history::EntityHistoryRepository;
pub use event::{EnforcementRepository, EventRepository};
pub use execution::ExecutionRepository;
pub use identity::{IdentityRepository, PermissionAssignmentRepository, PermissionSetRepository};

View File

@@ -183,7 +183,14 @@ impl ExecutorService {
// Start event processor with its own consumer
info!("Starting event processor...");
let events_queue = self.inner.mq_config.rabbitmq.queues.events.name.clone();
let events_queue = self
.inner
.mq_config
.rabbitmq
.queues
.executor_events
.name
.clone();
let event_consumer = Consumer::new(
&self.inner.mq_connection,
attune_common::mq::ConsumerConfig {

View File

@@ -541,6 +541,7 @@ impl SensorManager {
entrypoint,
runtime,
runtime_ref,
runtime_version_constraint,
trigger,
trigger_ref,
enabled,