HA executor
Some checks failed
CI / Rustfmt (pull_request) Successful in 19s
CI / Cargo Audit & Deny (pull_request) Successful in 33s
CI / Security Blocking Checks (pull_request) Successful in 5s
CI / Web Blocking Checks (pull_request) Successful in 49s
CI / Web Advisory Checks (pull_request) Successful in 33s
CI / Clippy (pull_request) Has been cancelled
CI / Security Advisory Checks (pull_request) Has been cancelled
CI / Tests (pull_request) Has been cancelled

This commit is contained in:
2026-04-02 17:15:59 -05:00
parent 8e91440f23
commit f93e9229d2
25 changed files with 2736 additions and 422 deletions

View File

@@ -1412,7 +1412,7 @@ pub mod artifact {
pub content_type: Option<String>,
/// Size of the latest version's content in bytes
pub size_bytes: Option<i64>,
/// Execution that produced this artifact (no FK — execution is a hypertable)
/// Execution that produced this artifact (no FK by design)
pub execution: Option<Id>,
/// Structured JSONB data for progress artifacts or metadata
pub data: Option<serde_json::Value>,

View File

@@ -80,7 +80,7 @@ pub struct EnforcementVolumeBucket {
pub enforcement_count: i64,
}
/// A single hourly bucket of execution volume (from execution hypertable directly).
/// A single hourly bucket of execution volume (from the execution table directly).
#[derive(Debug, Clone, Serialize, FromRow)]
pub struct ExecutionVolumeBucket {
/// Start of the 1-hour bucket
@@ -468,7 +468,7 @@ impl AnalyticsRepository {
}
// =======================================================================
// Execution volume (from execution hypertable directly)
// Execution volume (from the execution table directly)
// =======================================================================
/// Query the `execution_volume_hourly` continuous aggregate for execution

View File

@@ -65,6 +65,12 @@ pub struct EnforcementSearchResult {
pub total: u64,
}
/// Result of [`EnforcementRepository::create_or_get_by_rule_event`]: the row
/// plus whether this call inserted it.
#[derive(Debug, Clone)]
pub struct EnforcementCreateOrGetResult {
    /// The inserted or pre-existing enforcement row.
    pub enforcement: Enforcement,
    /// `true` when this call inserted the row; `false` when it already existed.
    pub created: bool,
}
/// Repository for Event operations
pub struct EventRepository;
@@ -493,11 +499,7 @@ impl EnforcementRepository {
Ok(enforcement)
}
/// Update an enforcement using the loaded row's hypertable keys.
///
/// This avoids wide scans across compressed chunks by including both the
/// partitioning column (`created`) and compression segment key (`rule_ref`)
/// in the locator.
/// Update an enforcement using the loaded row's primary key.
pub async fn update_loaded<'e, E>(
executor: E,
enforcement: &Enforcement,
@@ -510,19 +512,73 @@ impl EnforcementRepository {
return Ok(enforcement.clone());
}
let rule_ref = enforcement.rule_ref.clone();
Self::update_with_locator(executor, input, |query| {
query.push(" WHERE id = ");
query.push_bind(enforcement.id);
query.push(" AND created = ");
query.push_bind(enforcement.created);
query.push(" AND rule_ref = ");
query.push_bind(rule_ref);
})
.await
}
/// Conditionally update an enforcement: the UPDATE only matches while the
/// row's current `status` still equals `expected_status` (optimistic guard).
///
/// Returns `Ok(None)` when no row matched (the status changed concurrently)
/// and `Ok(Some(_))` with the freshly returned row otherwise. When `input`
/// carries no changes the already-loaded row is echoed back without any
/// database round trip.
pub async fn update_loaded_if_status<'e, E>(
    executor: E,
    enforcement: &Enforcement,
    expected_status: EnforcementStatus,
    input: UpdateEnforcementInput,
) -> Result<Option<Enforcement>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    // NOTE(review): this no-op path does not verify `expected_status`
    // against the loaded row — confirm callers are fine with that.
    let is_noop =
        input.status.is_none() && input.payload.is_none() && input.resolved_at.is_none();
    if is_noop {
        return Ok(Some(enforcement.clone()));
    }

    let mut query = QueryBuilder::new("UPDATE enforcement SET ");
    // Empty before the first SET clause, ", " afterwards.
    let mut separator = "";

    if let Some(status) = input.status {
        query.push(separator);
        query.push("status = ");
        query.push_bind(status);
        separator = ", ";
    }
    if let Some(payload) = &input.payload {
        query.push(separator);
        query.push("payload = ");
        query.push_bind(payload);
        separator = ", ";
    }
    if let Some(resolved_at) = input.resolved_at {
        query.push(separator);
        query.push("resolved_at = ");
        query.push_bind(resolved_at);
        separator = ", ";
    }
    // Defensive guard mirroring the no-op check above: bail out rather than
    // emit an UPDATE with an empty SET list.
    if separator.is_empty() {
        return Ok(Some(enforcement.clone()));
    }

    query.push(" WHERE id = ");
    query.push_bind(enforcement.id);
    query.push(" AND status = ");
    query.push_bind(expected_status);
    query.push(
        " RETURNING id, rule, rule_ref, trigger_ref, config, event, status, payload, \
         condition, conditions, created, resolved_at",
    );

    query
        .build_query_as::<Enforcement>()
        .fetch_optional(executor)
        .await
        .map_err(Into::into)
}
/// Find enforcements by rule ID
pub async fn find_by_rule<'e, E>(executor: E, rule_id: Id) -> Result<Vec<Enforcement>>
where
@@ -589,6 +645,90 @@ impl EnforcementRepository {
Ok(enforcements)
}
/// Look up the (at most one) enforcement keyed by `(rule, event)`.
pub async fn find_by_rule_and_event<'e, E>(
    executor: E,
    rule_id: Id,
    event_id: Id,
) -> Result<Option<Enforcement>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    const SQL: &str = r#"
        SELECT id, rule, rule_ref, trigger_ref, config, event, status, payload,
               condition, conditions, created, resolved_at
        FROM enforcement
        WHERE rule = $1 AND event = $2
        LIMIT 1
    "#;
    let row = sqlx::query_as::<_, Enforcement>(SQL)
        .bind(rule_id)
        .bind(event_id)
        .fetch_optional(executor)
        .await?;
    Ok(row)
}
/// Insert an enforcement for `(rule, event)`, or return the existing row.
///
/// Relies on the partial unique constraint targeted by
/// `ON CONFLICT (rule, event) WHERE rule IS NOT NULL AND event IS NOT NULL`
/// to deduplicate concurrent inserts. When either key is absent there is no
/// dedupe key, so the row is always created.
pub async fn create_or_get_by_rule_event<'e, E>(
    executor: E,
    input: CreateEnforcementInput,
) -> Result<EnforcementCreateOrGetResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // No (rule, event) pair → nothing to dedupe on; plain create.
    let (Some(rule_id), Some(event_id)) = (input.rule, input.event) else {
        let enforcement = Self::create(executor, input).await?;
        return Ok(EnforcementCreateOrGetResult {
            enforcement,
            created: true,
        });
    };
    // DO NOTHING + RETURNING yields a row only when *this* call inserted it;
    // on conflict the result set is empty.
    let inserted = sqlx::query_as::<_, Enforcement>(
        r#"
        INSERT INTO enforcement (rule, rule_ref, trigger_ref, config, event, status,
            payload, condition, conditions)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
        ON CONFLICT (rule, event) WHERE rule IS NOT NULL AND event IS NOT NULL DO NOTHING
        RETURNING id, rule, rule_ref, trigger_ref, config, event, status, payload,
            condition, conditions, created, resolved_at
        "#,
    )
    .bind(input.rule)
    .bind(&input.rule_ref)
    .bind(&input.trigger_ref)
    .bind(&input.config)
    .bind(input.event)
    .bind(input.status)
    .bind(&input.payload)
    .bind(input.condition)
    .bind(&input.conditions)
    .fetch_optional(executor)
    .await?;
    if let Some(enforcement) = inserted {
        return Ok(EnforcementCreateOrGetResult {
            enforcement,
            created: true,
        });
    }
    // Conflict path: a concurrent writer won the insert — fetch its row.
    // The error below can only fire if that row was deleted in between.
    let enforcement = Self::find_by_rule_and_event(executor, rule_id, event_id)
        .await?
        .ok_or_else(|| {
            anyhow::anyhow!(
                "enforcement for rule {} and event {} disappeared after dedupe conflict",
                rule_id,
                event_id
            )
        })?;
    Ok(EnforcementCreateOrGetResult {
        enforcement,
        created: false,
    })
}
/// Search enforcements with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.

View File

@@ -4,7 +4,8 @@ use chrono::{DateTime, Utc};
use crate::models::{enums::ExecutionStatus, execution::*, Id, JsonDict};
use crate::Result;
use sqlx::{Executor, Postgres, QueryBuilder};
use sqlx::{Executor, PgConnection, PgPool, Postgres, QueryBuilder};
use tokio::time::{sleep, Duration};
use super::{Create, Delete, FindById, List, Repository, Update};
@@ -47,6 +48,12 @@ pub struct WorkflowTaskExecutionCreateOrGetResult {
pub created: bool,
}
/// Result of the "create top-level execution for an enforcement if absent"
/// flow: the row plus whether this call inserted it.
#[derive(Debug, Clone)]
pub struct EnforcementExecutionCreateOrGetResult {
    /// The inserted or pre-existing execution row.
    pub execution: Execution,
    /// `true` when this call inserted the row; `false` when it already existed.
    pub created: bool,
}
/// An execution row with optional `rule_ref` / `trigger_ref` populated from
/// the joined `enforcement` table. This avoids a separate in-memory lookup.
#[derive(Debug, Clone, sqlx::FromRow)]
@@ -215,32 +222,392 @@ impl Update for ExecutionRepository {
}
impl ExecutionRepository {
pub async fn create_workflow_task_if_absent<'e, E>(
/// Fetch the oldest top-level execution attached to an enforcement:
/// no parent, and not marked as a retry via `config -> 'retry_of'`.
pub async fn find_top_level_by_enforcement<'e, E>(
    executor: E,
    enforcement_id: Id,
) -> Result<Option<Execution>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    sqlx::query_as::<_, Execution>(&format!(
        "SELECT {SELECT_COLUMNS} \
         FROM execution \
         WHERE enforcement = $1
           AND parent IS NULL
           AND (config IS NULL OR NOT (config ? 'retry_of')) \
         ORDER BY created ASC \
         LIMIT 1"
    ))
    .bind(enforcement_id)
    .fetch_optional(executor)
    .await
    .map_err(Into::into)
}
/// Create the single top-level execution for an enforcement, or return the
/// one that already exists.
///
/// Dedupe relies on the partial unique constraint targeted by
/// `ON CONFLICT (enforcement) WHERE enforcement IS NOT NULL AND parent IS NULL
/// AND (config IS NULL OR NOT (config ? 'retry_of'))` — i.e. at most one
/// non-retry, parent-less execution per enforcement.
///
/// NOTE(review): the INSERT binds `input.enforcement` while the fallback
/// lookup uses the separate `enforcement_id` parameter — confirm callers
/// always pass matching values, otherwise the conflict fallback can look up
/// the wrong enforcement.
pub async fn create_top_level_for_enforcement_if_absent<'e, E>(
    executor: E,
    input: CreateExecutionInput,
    enforcement_id: Id,
) -> Result<EnforcementExecutionCreateOrGetResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // DO NOTHING + RETURNING yields a row only when this call inserted it.
    let inserted = sqlx::query_as::<_, Execution>(&format!(
        "INSERT INTO execution \
         (action, action_ref, config, env_vars, parent, enforcement, executor, worker, status, result, workflow_task) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) \
         ON CONFLICT (enforcement)
         WHERE enforcement IS NOT NULL
           AND parent IS NULL
           AND (config IS NULL OR NOT (config ? 'retry_of'))
         DO NOTHING \
         RETURNING {SELECT_COLUMNS}"
    ))
    .bind(input.action)
    .bind(&input.action_ref)
    .bind(&input.config)
    .bind(&input.env_vars)
    .bind(input.parent)
    .bind(input.enforcement)
    .bind(input.executor)
    .bind(input.worker)
    .bind(input.status)
    .bind(&input.result)
    .bind(sqlx::types::Json(&input.workflow_task))
    .fetch_optional(executor)
    .await?;
    if let Some(execution) = inserted {
        return Ok(EnforcementExecutionCreateOrGetResult {
            execution,
            created: true,
        });
    }
    // Conflict path: another writer inserted first — return its row. The
    // error below can only fire if that row was deleted in between.
    let execution = Self::find_top_level_by_enforcement(executor, enforcement_id)
        .await?
        .ok_or_else(|| {
            anyhow::anyhow!(
                "top-level execution for enforcement {} disappeared after dedupe conflict",
                enforcement_id
            )
        })?;
    Ok(EnforcementExecutionCreateOrGetResult {
        execution,
        created: false,
    })
}
/// Attempt to claim the dispatch slot for one workflow task, using the
/// unique index over `(workflow_execution, task_name, COALESCE(task_index, -1))`.
///
/// Returns `true` when this caller inserted the claim row, `false` when a
/// competing claim already exists.
async fn claim_workflow_task_dispatch<'e, E>(
    executor: E,
    workflow_execution_id: Id,
    task_name: &str,
    task_index: Option<i32>,
) -> Result<bool>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let claimed_id = sqlx::query_as::<_, (i64,)>(
        "INSERT INTO workflow_task_dispatch (workflow_execution, task_name, task_index)
         VALUES ($1, $2, $3)
         ON CONFLICT (workflow_execution, task_name, COALESCE(task_index, -1)) DO NOTHING
         RETURNING id",
    )
    .bind(workflow_execution_id)
    .bind(task_name)
    .bind(task_index)
    .fetch_optional(executor)
    .await?;
    // RETURNING only produces a row when our INSERT actually happened.
    Ok(claimed_id.is_some())
}
/// Record which execution fulfilled a claimed dispatch row.
///
/// `COALESCE(execution_id, $4)` makes the write first-writer-wins and thus
/// idempotent: an already-assigned execution_id is never overwritten.
async fn assign_workflow_task_dispatch_execution<'e, E>(
    executor: E,
    workflow_execution_id: Id,
    task_name: &str,
    task_index: Option<i32>,
    execution_id: Id,
) -> Result<()>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    let update = sqlx::query(
        "UPDATE workflow_task_dispatch
         SET execution_id = COALESCE(execution_id, $4)
         WHERE workflow_execution = $1
           AND task_name = $2
           AND task_index IS NOT DISTINCT FROM $3",
    )
    .bind(workflow_execution_id)
    .bind(task_name)
    .bind(task_index)
    .bind(execution_id);
    update.execute(executor).await?;
    Ok(())
}
/// Row-lock (`FOR UPDATE`) the dispatch row for a workflow task and report
/// its state via a nested `Option` (three-way; see inline comment below).
async fn lock_workflow_task_dispatch<'e, E>(
    executor: E,
    workflow_execution_id: Id,
    task_name: &str,
    task_index: Option<i32>,
) -> Result<Option<Option<Id>>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    // `IS NOT DISTINCT FROM` treats NULL task_index as equal to NULL.
    let row: Option<(Option<i64>,)> = sqlx::query_as(
        "SELECT execution_id
         FROM workflow_task_dispatch
         WHERE workflow_execution = $1
           AND task_name = $2
           AND task_index IS NOT DISTINCT FROM $3
         FOR UPDATE",
    )
    .bind(workflow_execution_id)
    .bind(task_name)
    .bind(task_index)
    .fetch_optional(executor)
    .await?;
    // Map the outer Option to distinguish three cases:
    // - None → no row exists
    // - Some(None) → row exists but execution_id is still NULL (mid-creation)
    // - Some(Some(id)) → row exists with a completed execution_id
    Ok(row.map(|(execution_id,)| execution_id))
}
async fn create_workflow_task_if_absent_in_conn(
conn: &mut PgConnection,
input: CreateExecutionInput,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
) -> Result<WorkflowTaskExecutionCreateOrGetResult>
where
E: Executor<'e, Database = Postgres> + Copy + 'e,
{
if let Some(execution) =
Self::find_by_workflow_task(executor, workflow_execution_id, task_name, task_index)
.await?
{
) -> Result<WorkflowTaskExecutionCreateOrGetResult> {
let claimed = Self::claim_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
if claimed {
let execution = Self::create(&mut *conn, input).await?;
Self::assign_workflow_task_dispatch_execution(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
execution.id,
)
.await?;
return Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: false,
created: true,
});
}
let execution = Self::create(executor, input).await?;
let dispatch_state = Self::lock_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: true,
})
match dispatch_state {
Some(Some(existing_execution_id)) => {
// Row exists with execution_id — return the existing execution.
let execution = Self::find_by_id(&mut *conn, existing_execution_id)
.await?
.ok_or_else(|| {
anyhow::anyhow!(
"workflow child execution {} missing for workflow_execution {} task '{}' index {:?}",
existing_execution_id,
workflow_execution_id,
task_name,
task_index
)
})?;
Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: false,
})
}
Some(None) => {
// Row exists but execution_id is still NULL: another transaction is
// mid-creation (between claim and assign). Retry until it's filled in.
// If the original creator's transaction rolled back, the row also
// disappears — handled by the `None` branch inside the loop.
'wait: {
for _ in 0..20_u32 {
sleep(Duration::from_millis(50)).await;
match Self::lock_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?
{
Some(Some(execution_id)) => {
let execution =
Self::find_by_id(&mut *conn, execution_id).await?.ok_or_else(
|| {
anyhow::anyhow!(
"workflow child execution {} missing for workflow_execution {} task '{}' index {:?}",
execution_id,
workflow_execution_id,
task_name,
task_index
)
},
)?;
return Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: false,
});
}
Some(None) => {} // still NULL, keep waiting
None => break 'wait, // row rolled back; fall through to re-claim
}
}
// Exhausted all retries without the execution_id being set.
return Err(anyhow::anyhow!(
"Timed out waiting for workflow task dispatch execution_id to be set \
for workflow_execution {} task '{}' index {:?}",
workflow_execution_id,
task_name,
task_index
)
.into());
}
// Row disappeared (original creator rolled back) — re-claim and create.
let re_claimed = Self::claim_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
if !re_claimed {
return Err(anyhow::anyhow!(
"Workflow task dispatch for workflow_execution {} task '{}' index {:?} \
was reclaimed by another executor after rollback",
workflow_execution_id,
task_name,
task_index
)
.into());
}
let execution = Self::create(&mut *conn, input).await?;
Self::assign_workflow_task_dispatch_execution(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
execution.id,
)
.await?;
Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: true,
})
}
None => {
// No row at all — the original INSERT was rolled back before we arrived.
// Attempt to re-claim and create as if this were a fresh dispatch.
let re_claimed = Self::claim_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
if !re_claimed {
return Err(anyhow::anyhow!(
"Workflow task dispatch for workflow_execution {} task '{}' index {:?} \
was claimed by another executor",
workflow_execution_id,
task_name,
task_index
)
.into());
}
let execution = Self::create(&mut *conn, input).await?;
Self::assign_workflow_task_dispatch_execution(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
execution.id,
)
.await?;
Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: true,
})
}
}
}
/// Create a workflow child-task execution exactly once per
/// `(workflow_execution, task_name, task_index)` key, opening its own
/// transaction on a pooled connection.
///
/// The claim/create/assign sequence runs atomically: commit on success,
/// rollback on failure with the original error surfaced.
pub async fn create_workflow_task_if_absent(
    pool: &PgPool,
    input: CreateExecutionInput,
    workflow_execution_id: Id,
    task_name: &str,
    task_index: Option<i32>,
) -> Result<WorkflowTaskExecutionCreateOrGetResult> {
    // Use sqlx's RAII transaction instead of raw `BEGIN`/`COMMIT`/`ROLLBACK`
    // statements: if this future is dropped between begin and commit, the
    // Transaction's Drop rolls the connection back before it returns to the
    // pool, and a failed rollback can no longer mask the original error
    // (the old `sqlx::query("ROLLBACK")...await?` replaced it).
    let mut tx = pool.begin().await?;
    let result = Self::create_workflow_task_if_absent_in_conn(
        &mut *tx,
        input,
        workflow_execution_id,
        task_name,
        task_index,
    )
    .await;
    match result {
        Ok(result) => {
            tx.commit().await?;
            Ok(result)
        }
        Err(err) => {
            // Best-effort rollback; always surface the original error.
            let _ = tx.rollback().await;
            Err(err)
        }
    }
}
/// Same claim/create flow as `create_workflow_task_if_absent`, but running
/// on a caller-supplied connection (typically inside an existing
/// transaction) instead of acquiring one from the pool.
pub async fn create_workflow_task_if_absent_with_conn(
    conn: &mut PgConnection,
    input: CreateExecutionInput,
    workflow_execution_id: Id,
    task_name: &str,
    task_index: Option<i32>,
) -> Result<WorkflowTaskExecutionCreateOrGetResult> {
    let outcome = Self::create_workflow_task_if_absent_in_conn(
        conn,
        input,
        workflow_execution_id,
        task_name,
        task_index,
    )
    .await?;
    Ok(outcome)
}
pub async fn claim_for_scheduling<'e, E>(
@@ -320,6 +687,62 @@ impl ExecutionRepository {
.await
}
/// Guarded update: apply `input` only while the row still has
/// `expected_status` and was last updated strictly before `stale_before`.
///
/// `Ok(None)` means the guard did not match (row changed or never existed).
pub async fn update_if_status_and_updated_before<'e, E>(
    executor: E,
    id: Id,
    expected_status: ExecutionStatus,
    stale_before: DateTime<Utc>,
    input: UpdateExecutionInput,
) -> Result<Option<Execution>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    // NOTE(review): an empty `input` bypasses the status/staleness guard and
    // simply fetches the row — confirm callers expect that.
    let has_changes = input.status.is_some()
        || input.result.is_some()
        || input.executor.is_some()
        || input.worker.is_some()
        || input.started_at.is_some()
        || input.workflow_task.is_some();
    if !has_changes {
        return Self::find_by_id(executor, id).await;
    }
    Self::update_with_locator_optional(executor, input, |query| {
        query.push(" WHERE id = ").push_bind(id);
        query.push(" AND status = ").push_bind(expected_status);
        query.push(" AND updated < ").push_bind(stale_before);
    })
    .await
}
/// Optimistic-concurrency update: apply `input` only while the row still has
/// `expected_status` and its `updated` timestamp equals `expected_updated`.
///
/// `Ok(None)` means another writer touched the row first.
pub async fn update_if_status_and_updated_at<'e, E>(
    executor: E,
    id: Id,
    expected_status: ExecutionStatus,
    expected_updated: DateTime<Utc>,
    input: UpdateExecutionInput,
) -> Result<Option<Execution>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    // NOTE(review): an empty `input` bypasses the optimistic guard and just
    // fetches the row — confirm callers expect that.
    let has_changes = input.status.is_some()
        || input.result.is_some()
        || input.executor.is_some()
        || input.worker.is_some()
        || input.started_at.is_some()
        || input.workflow_task.is_some();
    if !has_changes {
        return Self::find_by_id(executor, id).await;
    }
    Self::update_with_locator_optional(executor, input, |query| {
        query.push(" WHERE id = ").push_bind(id);
        query.push(" AND status = ").push_bind(expected_status);
        query.push(" AND updated = ").push_bind(expected_updated);
    })
    .await
}
pub async fn revert_scheduled_to_requested<'e, E>(
executor: E,
id: Id,
@@ -473,10 +896,7 @@ impl ExecutionRepository {
.map_err(Into::into)
}
/// Update an execution using the loaded row's hypertable keys.
///
/// Including both the partition key (`created`) and compression segment key
/// (`action_ref`) avoids broad scans across compressed chunks.
/// Update an execution using the loaded row's primary key.
pub async fn update_loaded<'e, E>(
executor: E,
execution: &Execution,
@@ -495,12 +915,8 @@ impl ExecutionRepository {
return Ok(execution.clone());
}
let action_ref = execution.action_ref.clone();
Self::update_with_locator(executor, input, |query| {
query.push(" WHERE id = ").push_bind(execution.id);
query.push(" AND created = ").push_bind(execution.created);
query.push(" AND action_ref = ").push_bind(action_ref);
})
.await
}

View File

@@ -0,0 +1,909 @@
use chrono::{DateTime, Utc};
use sqlx::{PgPool, Postgres, Row, Transaction};
use crate::error::Result;
use crate::models::Id;
use crate::repositories::queue_stats::{QueueStatsRepository, UpsertQueueStatsInput};
/// Outcome of a slot-acquisition attempt for one execution.
#[derive(Debug, Clone)]
pub struct AdmissionSlotAcquireOutcome {
    /// Whether the execution now holds an active slot.
    pub acquired: bool,
    /// Count of active entries observed during the attempt.
    pub current_count: u32,
}
/// Result of an enqueue request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AdmissionEnqueueOutcome {
    /// An active slot was granted immediately.
    Acquired,
    /// The execution was (or remains) queued.
    Enqueued,
}
/// Outcome of releasing an active slot, including which queued execution
/// (if any) was promoted into the freed slot.
#[derive(Debug, Clone)]
pub struct AdmissionSlotReleaseOutcome {
    /// Action whose admission state was affected.
    pub action_id: Id,
    /// Concurrency group the released slot belonged to, if grouped.
    pub group_key: Option<String>,
    /// Execution promoted from the queue into the freed slot, if any.
    pub next_execution_id: Option<Id>,
}
/// Outcome of removing a queued (not yet active) execution, carrying enough
/// detail to restore the entry later if the removal must be undone.
#[derive(Debug, Clone)]
pub struct AdmissionQueuedRemovalOutcome {
    /// Action whose admission state was affected.
    pub action_id: Id,
    /// Concurrency group of the removed entry, if grouped.
    pub group_key: Option<String>,
    /// Execution promoted after the removal, if any.
    pub next_execution_id: Option<Id>,
    /// The execution that was removed from the queue.
    pub execution_id: Id,
    /// Original ordering key of the removed entry (kept for restore).
    pub queue_order: i64,
    /// Original enqueue timestamp of the removed entry (kept for restore).
    pub enqueued_at: DateTime<Utc>,
    /// Zero-based position the entry occupied among queued entries.
    pub removed_index: usize,
}
/// Aggregated queue statistics for one action, summed across all of its
/// concurrency-group states.
#[derive(Debug, Clone)]
pub struct AdmissionQueueStats {
    /// Action these statistics describe.
    pub action_id: Id,
    /// Number of executions currently waiting in the queue.
    pub queue_length: usize,
    /// Number of executions currently holding active slots.
    pub active_count: u32,
    /// Concurrency limit (summed over the action's states).
    pub max_concurrent: u32,
    /// Enqueue time of the oldest still-queued execution, if any.
    pub oldest_enqueued_at: Option<DateTime<Utc>>,
    /// Lifetime count of enqueue operations.
    pub total_enqueued: u64,
    /// Lifetime count of completed (released) executions.
    pub total_completed: u64,
}
/// One row of `execution_admission_state`: the concurrency limit for an
/// action, optionally scoped to a group key.
#[derive(Debug, Clone)]
struct AdmissionState {
    /// Primary key of the state row.
    id: Id,
    /// Owning action.
    action_id: Id,
    /// Optional concurrency-group key (NULL means the ungrouped state).
    group_key: Option<String>,
    /// Maximum number of simultaneously active executions for this state.
    max_concurrent: i32,
}
/// One row of `execution_admission_entry`, joined with its owning state.
#[derive(Debug, Clone)]
struct ExecutionEntry {
    /// Owning `execution_admission_state` row.
    state_id: Id,
    /// Action the state belongs to (from the joined state row).
    action_id: Id,
    /// Group key of the state (from the joined state row).
    group_key: Option<String>,
    /// Entry status; the code distinguishes "active" and "queued".
    status: String,
    /// Monotonic ordering key within the state's queue.
    queue_order: i64,
    /// When the entry was enqueued.
    enqueued_at: DateTime<Utc>,
}
/// Stateless repository managing database-backed execution admission via the
/// `execution_admission_state` / `execution_admission_entry` tables.
pub struct ExecutionAdmissionRepository;
impl ExecutionAdmissionRepository {
/// Enqueue `execution_id` for admission under `action_id`, acquiring an
/// active slot immediately when capacity allows.
///
/// Runs in one transaction: the state row is upserted and row-locked, the
/// entry is inserted or promoted, and queue stats are refreshed before
/// commit.
pub async fn enqueue(
    pool: &PgPool,
    max_queue_length: usize,
    action_id: Id,
    execution_id: Id,
    max_concurrent: u32,
    group_key: Option<String>,
) -> Result<AdmissionEnqueueOutcome> {
    let mut txn = pool.begin().await?;
    let admission_state =
        Self::lock_state(&mut txn, action_id, group_key, max_concurrent).await?;
    let result = Self::enqueue_in_state(
        &mut txn,
        &admission_state,
        max_queue_length,
        execution_id,
        true,
    )
    .await?;
    Self::refresh_queue_stats(&mut txn, action_id).await?;
    txn.commit().await?;
    Ok(result)
}
/// Report whether an admission entry for `execution_id` is active.
///
/// `None` → no entry exists; `Some(true)` → active; `Some(false)` → not yet
/// active (e.g. still queued).
pub async fn wait_status(pool: &PgPool, execution_id: Id) -> Result<Option<bool>> {
    sqlx::query_scalar::<Postgres, bool>(
        r#"
        SELECT status = 'active'
        FROM execution_admission_entry
        WHERE execution_id = $1
        "#,
    )
    .bind(execution_id)
    .fetch_optional(pool)
    .await
    .map_err(Into::into)
}
/// Try to acquire an active slot for `execution_id` without queueing.
///
/// `current_count` in the result is the active count observed *before* any
/// insert or promotion performed by this call. A brand-new execution only
/// acquires when there is spare capacity AND nobody is already queued, so
/// late arrivals cannot jump ahead of queued entries.
pub async fn try_acquire(
    pool: &PgPool,
    action_id: Id,
    execution_id: Id,
    max_concurrent: u32,
    group_key: Option<String>,
) -> Result<AdmissionSlotAcquireOutcome> {
    let mut tx = pool.begin().await?;
    // Upsert + row-lock the state so concurrent acquires serialize.
    let state = Self::lock_state(&mut tx, action_id, group_key, max_concurrent).await?;
    let active_count = Self::active_count(&mut tx, state.id).await? as u32;
    let outcome = match Self::find_execution_entry(&mut tx, execution_id).await? {
        // Already active — idempotent success.
        Some(entry) if entry.status == "active" => AdmissionSlotAcquireOutcome {
            acquired: true,
            current_count: active_count,
        },
        // Queued under this same state: acquire only if it can be promoted
        // (capacity free and it is at the head of the queue).
        Some(entry) if entry.status == "queued" && entry.state_id == state.id => {
            let promoted =
                Self::maybe_promote_existing_queued(&mut tx, &state, execution_id).await?;
            AdmissionSlotAcquireOutcome {
                acquired: promoted,
                current_count: active_count,
            }
        }
        // Entry exists but belongs to a different state — cannot acquire here.
        Some(_) => AdmissionSlotAcquireOutcome {
            acquired: false,
            current_count: active_count,
        },
        // No entry yet: admit directly only when there is capacity and the
        // queue is empty.
        None => {
            if active_count < max_concurrent
                && Self::queued_count(&mut tx, state.id).await? == 0
            {
                let queue_order = Self::allocate_queue_order(&mut tx, state.id).await?;
                Self::insert_entry(
                    &mut tx,
                    state.id,
                    execution_id,
                    "active",
                    queue_order,
                    Utc::now(),
                )
                .await?;
                Self::increment_total_enqueued(&mut tx, state.id).await?;
                Self::refresh_queue_stats(&mut tx, action_id).await?;
                AdmissionSlotAcquireOutcome {
                    acquired: true,
                    current_count: active_count,
                }
            } else {
                AdmissionSlotAcquireOutcome {
                    acquired: false,
                    current_count: active_count,
                }
            }
        }
    };
    tx.commit().await?;
    Ok(outcome)
}
/// Release the active slot held by `execution_id`, promoting the next queued
/// execution (if any) into the freed slot.
///
/// Returns `Ok(None)` when the execution has no entry or its entry is not
/// active; those paths still commit the (read-only) transaction.
pub async fn release_active_slot(
    pool: &PgPool,
    execution_id: Id,
) -> Result<Option<AdmissionSlotReleaseOutcome>> {
    let mut tx = pool.begin().await?;
    let Some(entry) = Self::find_execution_entry_for_update(&mut tx, execution_id).await?
    else {
        tx.commit().await?;
        return Ok(None);
    };
    if entry.status != "active" {
        tx.commit().await?;
        return Ok(None);
    }
    // The state must exist while one of its entries is active.
    let state = Self::lock_existing_state(&mut tx, entry.action_id, entry.group_key.clone())
        .await?
        .ok_or_else(|| {
            crate::Error::internal("missing execution_admission_state for active execution")
        })?;
    sqlx::query("DELETE FROM execution_admission_entry WHERE execution_id = $1")
        .bind(execution_id)
        .execute(&mut *tx)
        .await?;
    Self::increment_total_completed(&mut tx, state.id).await?;
    // Hand the freed slot to the head of the queue, if present.
    let next_execution_id = Self::promote_next_queued(&mut tx, &state).await?;
    Self::refresh_queue_stats(&mut tx, state.action_id).await?;
    tx.commit().await?;
    Ok(Some(AdmissionSlotReleaseOutcome {
        action_id: state.action_id,
        group_key: state.group_key,
        next_execution_id,
    }))
}
/// Undo a prior [`Self::release_active_slot`]: demote the execution that the
/// release promoted (if any) back to queued, re-insert `execution_id` as
/// active, and roll back the `total_completed` increment (floored at 0).
pub async fn restore_active_slot(
    pool: &PgPool,
    execution_id: Id,
    outcome: &AdmissionSlotReleaseOutcome,
) -> Result<()> {
    let mut tx = pool.begin().await?;
    let state =
        Self::lock_existing_state(&mut tx, outcome.action_id, outcome.group_key.clone())
            .await?
            .ok_or_else(|| {
                crate::Error::internal("missing execution_admission_state on restore")
            })?;
    // If the release promoted someone, push them back into the queue; the
    // status = 'active' guard makes this a no-op if they moved on already.
    if let Some(next_execution_id) = outcome.next_execution_id {
        sqlx::query(
            r#"
            UPDATE execution_admission_entry
            SET status = 'queued', activated_at = NULL
            WHERE execution_id = $1
              AND state_id = $2
              AND status = 'active'
            "#,
        )
        .bind(next_execution_id)
        .bind(state.id)
        .execute(&mut *tx)
        .await?;
    }
    // Re-insert (or re-activate via upsert) this execution as active.
    sqlx::query(
        r#"
        INSERT INTO execution_admission_entry (
            state_id, execution_id, status, queue_order, enqueued_at, activated_at
        ) VALUES ($1, $2, 'active', $3, NOW(), NOW())
        ON CONFLICT (execution_id) DO UPDATE
        SET state_id = EXCLUDED.state_id,
            status = 'active',
            activated_at = EXCLUDED.activated_at
        "#,
    )
    .bind(state.id)
    .bind(execution_id)
    .bind(Self::allocate_queue_order(&mut tx, state.id).await?)
    .execute(&mut *tx)
    .await?;
    // Reverse the completion counter bump from the release.
    sqlx::query(
        r#"
        UPDATE execution_admission_state
        SET total_completed = GREATEST(total_completed - 1, 0)
        WHERE id = $1
        "#,
    )
    .bind(state.id)
    .execute(&mut *tx)
    .await?;
    Self::refresh_queue_stats(&mut tx, state.action_id).await?;
    tx.commit().await?;
    Ok(())
}
/// Remove a queued (not active) execution from its admission queue,
/// returning enough detail for [`Self::restore_queued_execution`] to undo it.
///
/// Returns `Ok(None)` when the execution has no entry or its entry is not
/// queued.
pub async fn remove_queued_execution(
    pool: &PgPool,
    execution_id: Id,
) -> Result<Option<AdmissionQueuedRemovalOutcome>> {
    let mut tx = pool.begin().await?;
    let Some(entry) = Self::find_execution_entry_for_update(&mut tx, execution_id).await?
    else {
        tx.commit().await?;
        return Ok(None);
    };
    if entry.status != "queued" {
        tx.commit().await?;
        return Ok(None);
    }
    let state = Self::lock_existing_state(&mut tx, entry.action_id, entry.group_key.clone())
        .await?
        .ok_or_else(|| {
            crate::Error::internal("missing execution_admission_state for queued execution")
        })?;
    // Position of the entry among queued rows = number of queued rows that
    // sort before it.
    // NOTE(review): position is computed by (enqueued_at, id) while
    // promotion elsewhere orders by queue_order — confirm these orderings
    // always agree.
    let removed_index = sqlx::query_scalar::<Postgres, i64>(
        r#"
        SELECT COUNT(*)
        FROM execution_admission_entry
        WHERE state_id = $1
          AND status = 'queued'
          AND (enqueued_at, id) < (
              SELECT enqueued_at, id
              FROM execution_admission_entry
              WHERE execution_id = $2
          )
        "#,
    )
    .bind(state.id)
    .bind(execution_id)
    .fetch_one(&mut *tx)
    .await? as usize;
    sqlx::query("DELETE FROM execution_admission_entry WHERE execution_id = $1")
        .bind(execution_id)
        .execute(&mut *tx)
        .await?;
    // Removing a queued row does not free a slot, but promote anyway when
    // spare capacity exists.
    let next_execution_id =
        if Self::active_count(&mut tx, state.id).await? < state.max_concurrent as i64 {
            Self::promote_next_queued(&mut tx, &state).await?
        } else {
            None
        };
    Self::refresh_queue_stats(&mut tx, state.action_id).await?;
    tx.commit().await?;
    Ok(Some(AdmissionQueuedRemovalOutcome {
        action_id: state.action_id,
        group_key: state.group_key,
        next_execution_id,
        execution_id,
        queue_order: entry.queue_order,
        enqueued_at: entry.enqueued_at,
        removed_index,
    }))
}
/// Undo a prior [`Self::remove_queued_execution`]: demote any execution the
/// removal promoted, then re-insert the removed entry with its original
/// `queue_order` and `enqueued_at` so it regains its place in the queue.
pub async fn restore_queued_execution(
    pool: &PgPool,
    outcome: &AdmissionQueuedRemovalOutcome,
) -> Result<()> {
    let mut tx = pool.begin().await?;
    let state =
        Self::lock_existing_state(&mut tx, outcome.action_id, outcome.group_key.clone())
            .await?
            .ok_or_else(|| {
                crate::Error::internal("missing execution_admission_state on queued restore")
            })?;
    // If the removal promoted someone, push them back to queued; the
    // status = 'active' guard makes this a no-op if they already moved on.
    if let Some(next_execution_id) = outcome.next_execution_id {
        sqlx::query(
            r#"
            UPDATE execution_admission_entry
            SET status = 'queued', activated_at = NULL
            WHERE execution_id = $1
              AND state_id = $2
              AND status = 'active'
            "#,
        )
        .bind(next_execution_id)
        .bind(state.id)
        .execute(&mut *tx)
        .await?;
    }
    // Re-insert with the saved ordering key; DO NOTHING keeps any entry the
    // execution may have re-acquired in the meantime.
    sqlx::query(
        r#"
        INSERT INTO execution_admission_entry (
            state_id, execution_id, status, queue_order, enqueued_at, activated_at
        ) VALUES ($1, $2, 'queued', $3, $4, NULL)
        ON CONFLICT (execution_id) DO NOTHING
        "#,
    )
    .bind(state.id)
    .bind(outcome.execution_id)
    .bind(outcome.queue_order)
    .bind(outcome.enqueued_at)
    .execute(&mut *tx)
    .await?;
    Self::refresh_queue_stats(&mut tx, state.action_id).await?;
    tx.commit().await?;
    Ok(())
}
/// Read aggregated admission statistics for an action, summed across all of
/// its concurrency-group states.
///
/// Returns `Ok(None)` when the action has no admission state rows at all.
pub async fn get_queue_stats(
    pool: &PgPool,
    action_id: Id,
) -> Result<Option<AdmissionQueueStats>> {
    let row = sqlx::query(
        r#"
        WITH state_rows AS (
            SELECT
                COUNT(*) AS state_count,
                COALESCE(SUM(max_concurrent), 0) AS max_concurrent,
                COALESCE(SUM(total_enqueued), 0) AS total_enqueued,
                COALESCE(SUM(total_completed), 0) AS total_completed
            FROM execution_admission_state
            WHERE action_id = $1
        ),
        entry_rows AS (
            SELECT
                COUNT(*) FILTER (WHERE e.status = 'queued') AS queue_length,
                COUNT(*) FILTER (WHERE e.status = 'active') AS active_count,
                MIN(e.enqueued_at) FILTER (WHERE e.status = 'queued') AS oldest_enqueued_at
            FROM execution_admission_state s
            LEFT JOIN execution_admission_entry e ON e.state_id = s.id
            WHERE s.action_id = $1
        )
        SELECT
            sr.state_count,
            er.queue_length,
            er.active_count,
            sr.max_concurrent,
            er.oldest_enqueued_at,
            sr.total_enqueued,
            sr.total_completed
        FROM state_rows sr
        CROSS JOIN entry_rows er
        "#,
    )
    .bind(action_id)
    .fetch_one(pool)
    .await?;
    // Both CTEs aggregate, so exactly one row always comes back; state_count
    // distinguishes "no state" from "state with empty queue".
    let state_count: i64 = row.try_get("state_count")?;
    if state_count == 0 {
        return Ok(None);
    }
    Ok(Some(AdmissionQueueStats {
        action_id,
        queue_length: row.try_get::<i64, _>("queue_length")? as usize,
        active_count: row.try_get::<i64, _>("active_count")? as u32,
        max_concurrent: row.try_get::<i64, _>("max_concurrent")? as u32,
        oldest_enqueued_at: row.try_get("oldest_enqueued_at")?,
        total_enqueued: row.try_get::<i64, _>("total_enqueued")? as u64,
        total_completed: row.try_get::<i64, _>("total_completed")? as u64,
    }))
}
/// Core enqueue logic against an already-locked state.
///
/// Idempotent for executions that already have an entry. A new execution is
/// admitted directly only when there is spare capacity AND the queue is
/// empty; otherwise it is appended to the queue (subject to
/// `max_queue_length`) when `allow_queue` is set.
async fn enqueue_in_state(
    tx: &mut Transaction<'_, Postgres>,
    state: &AdmissionState,
    max_queue_length: usize,
    execution_id: Id,
    allow_queue: bool,
) -> Result<AdmissionEnqueueOutcome> {
    if let Some(entry) = Self::find_execution_entry(tx, execution_id).await? {
        // Already active — nothing to do.
        if entry.status == "active" {
            return Ok(AdmissionEnqueueOutcome::Acquired);
        }
        // Already queued under this state — opportunistically promote if it
        // is at the head and capacity is free.
        if entry.status == "queued" && entry.state_id == state.id {
            if Self::maybe_promote_existing_queued(tx, state, execution_id).await? {
                return Ok(AdmissionEnqueueOutcome::Acquired);
            }
            return Ok(AdmissionEnqueueOutcome::Enqueued);
        }
        // Entry exists under a different state — report it as queued there.
        return Ok(AdmissionEnqueueOutcome::Enqueued);
    }
    let active_count = Self::active_count(tx, state.id).await?;
    let queued_count = Self::queued_count(tx, state.id).await?;
    // Direct admission: spare capacity and nobody waiting ahead.
    if active_count < state.max_concurrent as i64 && queued_count == 0 {
        let queue_order = Self::allocate_queue_order(tx, state.id).await?;
        Self::insert_entry(
            tx,
            state.id,
            execution_id,
            "active",
            queue_order,
            Utc::now(),
        )
        .await?;
        Self::increment_total_enqueued(tx, state.id).await?;
        return Ok(AdmissionEnqueueOutcome::Acquired);
    }
    // NOTE(review): with allow_queue = false this reports Enqueued without
    // inserting anything — confirm callers treat that as "not admitted".
    if !allow_queue {
        return Ok(AdmissionEnqueueOutcome::Enqueued);
    }
    if queued_count >= max_queue_length as i64 {
        return Err(anyhow::anyhow!(
            "Queue full for action {}: maximum {} entries",
            state.action_id,
            max_queue_length
        )
        .into());
    }
    let queue_order = Self::allocate_queue_order(tx, state.id).await?;
    Self::insert_entry(
        tx,
        state.id,
        execution_id,
        "queued",
        queue_order,
        Utc::now(),
    )
    .await?;
    Self::increment_total_enqueued(tx, state.id).await?;
    Ok(AdmissionEnqueueOutcome::Enqueued)
}
/// Promote `execution_id` from queued to active, but only when capacity is
/// available AND it sits at the front of its state's queue. Returns whether
/// the promotion happened.
async fn maybe_promote_existing_queued(
    tx: &mut Transaction<'_, Postgres>,
    state: &AdmissionState,
    execution_id: Id,
) -> Result<bool> {
    // Capacity gate: a full state never promotes.
    if Self::active_count(tx, state.id).await? >= state.max_concurrent as i64 {
        return Ok(false);
    }
    // Only the head of the queue (lowest queue_order) may be promoted.
    let head = sqlx::query_scalar::<Postgres, Id>(
        r#"
        SELECT execution_id
        FROM execution_admission_entry
        WHERE state_id = $1
          AND status = 'queued'
        ORDER BY queue_order ASC
        LIMIT 1
        "#,
    )
    .bind(state.id)
    .fetch_optional(&mut **tx)
    .await?;
    if head != Some(execution_id) {
        return Ok(false);
    }
    sqlx::query(
        r#"
        UPDATE execution_admission_entry
        SET status = 'active',
            activated_at = NOW()
        WHERE execution_id = $1
          AND state_id = $2
          AND status = 'queued'
        "#,
    )
    .bind(execution_id)
    .bind(state.id)
    .execute(&mut **tx)
    .await?;
    Ok(true)
}
/// Pop the front of the queue (lowest queue_order) into the active set.
/// Returns the promoted execution id, or `None` when the queue is empty.
async fn promote_next_queued(
    tx: &mut Transaction<'_, Postgres>,
    state: &AdmissionState,
) -> Result<Option<Id>> {
    let candidate = sqlx::query_scalar::<Postgres, Id>(
        r#"
        SELECT execution_id
        FROM execution_admission_entry
        WHERE state_id = $1
          AND status = 'queued'
        ORDER BY queue_order ASC
        LIMIT 1
        "#,
    )
    .bind(state.id)
    .fetch_optional(&mut **tx)
    .await?;
    let Some(execution_id) = candidate else {
        return Ok(None);
    };
    sqlx::query(
        r#"
        UPDATE execution_admission_entry
        SET status = 'active',
            activated_at = NOW()
        WHERE execution_id = $1
          AND state_id = $2
          AND status = 'queued'
        "#,
    )
    .bind(execution_id)
    .bind(state.id)
    .execute(&mut **tx)
    .await?;
    Ok(Some(execution_id))
}
/// Upsert the admission state row for (`action_id`, `group_key`) and lock it
/// with `FOR UPDATE` for the remainder of the transaction.
///
/// The upsert keeps `max_concurrent` in sync with the caller's current value
/// (`DO UPDATE SET max_concurrent = EXCLUDED.max_concurrent`). Conflict
/// detection uses `group_key_normalized`, where a NULL group key normalizes
/// to the empty string — hence the `COALESCE($2, '')` in the follow-up SELECT.
async fn lock_state(
    tx: &mut Transaction<'_, Postgres>,
    action_id: Id,
    group_key: Option<String>,
    max_concurrent: u32,
) -> Result<AdmissionState> {
    sqlx::query(
        r#"
        INSERT INTO execution_admission_state (action_id, group_key, max_concurrent)
        VALUES ($1, $2, $3)
        ON CONFLICT (action_id, group_key_normalized)
        DO UPDATE SET max_concurrent = EXCLUDED.max_concurrent
        "#,
    )
    .bind(action_id)
    .bind(group_key.clone())
    .bind(max_concurrent as i32)
    .execute(&mut **tx)
    .await?;
    // Re-read the (now guaranteed to exist) row under a row-level lock.
    let row = sqlx::query(
        r#"
        SELECT id, action_id, group_key, max_concurrent
        FROM execution_admission_state
        WHERE action_id = $1
          AND group_key_normalized = COALESCE($2, '')
        FOR UPDATE
        "#,
    )
    .bind(action_id)
    .bind(group_key)
    .fetch_one(&mut **tx)
    .await?;
    Ok(AdmissionState {
        id: row.try_get("id")?,
        action_id: row.try_get("action_id")?,
        group_key: row.try_get("group_key")?,
        max_concurrent: row.try_get("max_concurrent")?,
    })
}
/// Lock the admission state row for (`action_id`, `group_key`) with
/// `FOR UPDATE`, without creating it.
///
/// Returns `Ok(None)` when no state row exists. A NULL group key matches the
/// empty-string normalization (`COALESCE($2, '')`), mirroring `lock_state`.
///
/// Fix: column decoding previously used `.expect()` inside `Option::map`,
/// panicking on any decode failure; decode errors now propagate via `?`,
/// consistent with `lock_state`.
async fn lock_existing_state(
    tx: &mut Transaction<'_, Postgres>,
    action_id: Id,
    group_key: Option<String>,
) -> Result<Option<AdmissionState>> {
    let row = sqlx::query(
        r#"
        SELECT id, action_id, group_key, max_concurrent
        FROM execution_admission_state
        WHERE action_id = $1
          AND group_key_normalized = COALESCE($2, '')
        FOR UPDATE
        "#,
    )
    .bind(action_id)
    .bind(group_key)
    .fetch_optional(&mut **tx)
    .await?;
    // Decode outside `Option::map` so `try_get` failures become `Err`,
    // not panics.
    match row {
        Some(state) => Ok(Some(AdmissionState {
            id: state.try_get("id")?,
            action_id: state.try_get("action_id")?,
            group_key: state.try_get("group_key")?,
            max_concurrent: state.try_get("max_concurrent")?,
        })),
        None => Ok(None),
    }
}
/// Look up the admission entry (joined with its owning state) for a single
/// execution, without locking.
///
/// Returns `Ok(None)` when the execution has no admission entry.
///
/// Fix: column decoding previously used `.expect()` inside `Option::map`,
/// turning any decode failure into a panic; decode errors now propagate
/// via `?` like the rest of this repository.
async fn find_execution_entry(
    tx: &mut Transaction<'_, Postgres>,
    execution_id: Id,
) -> Result<Option<ExecutionEntry>> {
    let row = sqlx::query(
        r#"
        SELECT
            e.state_id,
            s.action_id,
            s.group_key,
            e.execution_id,
            e.status,
            e.queue_order,
            e.enqueued_at
        FROM execution_admission_entry e
        JOIN execution_admission_state s ON s.id = e.state_id
        WHERE e.execution_id = $1
        "#,
    )
    .bind(execution_id)
    .fetch_optional(&mut **tx)
    .await?;
    // Decode with `?` so failures surface as errors instead of panics.
    match row {
        Some(entry) => Ok(Some(ExecutionEntry {
            state_id: entry.try_get("state_id")?,
            action_id: entry.try_get("action_id")?,
            group_key: entry.try_get("group_key")?,
            status: entry.try_get("status")?,
            queue_order: entry.try_get("queue_order")?,
            enqueued_at: entry.try_get("enqueued_at")?,
        })),
        None => Ok(None),
    }
}
/// Like `find_execution_entry`, but locks both the entry row and its owning
/// state row (`FOR UPDATE OF e, s`) for the remainder of the transaction.
///
/// Returns `Ok(None)` when the execution has no admission entry.
///
/// Fix: column decoding previously used `.expect()` inside `Option::map`,
/// panicking on any decode failure; decode errors now propagate via `?`.
async fn find_execution_entry_for_update(
    tx: &mut Transaction<'_, Postgres>,
    execution_id: Id,
) -> Result<Option<ExecutionEntry>> {
    let row = sqlx::query(
        r#"
        SELECT
            e.state_id,
            s.action_id,
            s.group_key,
            e.execution_id,
            e.status,
            e.queue_order,
            e.enqueued_at
        FROM execution_admission_entry e
        JOIN execution_admission_state s ON s.id = e.state_id
        WHERE e.execution_id = $1
        FOR UPDATE OF e, s
        "#,
    )
    .bind(execution_id)
    .fetch_optional(&mut **tx)
    .await?;
    // Decode with `?` so failures surface as errors instead of panics.
    match row {
        Some(entry) => Ok(Some(ExecutionEntry {
            state_id: entry.try_get("state_id")?,
            action_id: entry.try_get("action_id")?,
            group_key: entry.try_get("group_key")?,
            status: entry.try_get("status")?,
            queue_order: entry.try_get("queue_order")?,
            enqueued_at: entry.try_get("enqueued_at")?,
        })),
        None => Ok(None),
    }
}
/// Number of entries currently holding a slot (`status = 'active'`) under
/// the given admission state.
async fn active_count(tx: &mut Transaction<'_, Postgres>, state_id: Id) -> Result<i64> {
    let count = sqlx::query_scalar::<Postgres, i64>(
        r#"
        SELECT COUNT(*)
        FROM execution_admission_entry
        WHERE state_id = $1
          AND status = 'active'
        "#,
    )
    .bind(state_id)
    .fetch_one(&mut **tx)
    .await?;
    Ok(count)
}
/// Number of entries still waiting (`status = 'queued'`) under the given
/// admission state.
async fn queued_count(tx: &mut Transaction<'_, Postgres>, state_id: Id) -> Result<i64> {
    let count = sqlx::query_scalar::<Postgres, i64>(
        r#"
        SELECT COUNT(*)
        FROM execution_admission_entry
        WHERE state_id = $1
          AND status = 'queued'
        "#,
    )
    .bind(state_id)
    .fetch_one(&mut **tx)
    .await?;
    Ok(count)
}
/// Insert a new admission entry row.
///
/// `activated_at` is stamped by the database only when the row is created
/// directly in the `active` status (the `CASE WHEN $3 = 'active'` clause);
/// queued rows get it later, at promotion time.
async fn insert_entry(
    tx: &mut Transaction<'_, Postgres>,
    state_id: Id,
    execution_id: Id,
    status: &str,
    queue_order: i64,
    enqueued_at: DateTime<Utc>,
) -> Result<()> {
    const SQL: &str = r#"
        INSERT INTO execution_admission_entry (
            state_id, execution_id, status, queue_order, enqueued_at, activated_at
        ) VALUES (
            $1, $2, $3, $4, $5,
            CASE WHEN $3 = 'active' THEN NOW() ELSE NULL END
        )
        "#;
    sqlx::query(SQL)
        .bind(state_id)
        .bind(execution_id)
        .bind(status)
        .bind(queue_order)
        .bind(enqueued_at)
        .execute(&mut **tx)
        .await?;
    Ok(())
}
/// Atomically claim the next FIFO position for `state_id`.
///
/// Bumps `next_queue_order` and returns its pre-increment value
/// (`RETURNING next_queue_order - 1`), so concurrent enqueuers always get
/// distinct, monotonically increasing positions.
async fn allocate_queue_order(tx: &mut Transaction<'_, Postgres>, state_id: Id) -> Result<i64> {
    Ok(sqlx::query_scalar::<Postgres, i64>(
        r#"
        UPDATE execution_admission_state
        SET next_queue_order = next_queue_order + 1
        WHERE id = $1
        RETURNING next_queue_order - 1
        "#,
    )
    .bind(state_id)
    .fetch_one(&mut **tx)
    .await?)
}
/// Bump the lifetime `total_enqueued` counter on the admission state row.
async fn increment_total_enqueued(
    tx: &mut Transaction<'_, Postgres>,
    state_id: Id,
) -> Result<()> {
    const SQL: &str = r#"
        UPDATE execution_admission_state
        SET total_enqueued = total_enqueued + 1
        WHERE id = $1
        "#;
    sqlx::query(SQL).bind(state_id).execute(&mut **tx).await?;
    Ok(())
}
/// Bump the lifetime `total_completed` counter on the admission state row.
async fn increment_total_completed(
    tx: &mut Transaction<'_, Postgres>,
    state_id: Id,
) -> Result<()> {
    const SQL: &str = r#"
        UPDATE execution_admission_state
        SET total_completed = total_completed + 1
        WHERE id = $1
        "#;
    sqlx::query(SQL).bind(state_id).execute(&mut **tx).await?;
    Ok(())
}
/// Recompute and persist the denormalized `queue_stats` row for an action.
///
/// When no admission state rows remain for the action, the stats row is
/// deleted instead of being upserted with zeros.
async fn refresh_queue_stats(tx: &mut Transaction<'_, Postgres>, action_id: Id) -> Result<()> {
    match Self::get_queue_stats_from_tx(tx, action_id).await? {
        // No state rows left: drop any stale stats record.
        None => {
            QueueStatsRepository::delete(&mut **tx, action_id).await?;
        }
        Some(stats) => {
            // Narrowing casts mirror the queue_stats column types.
            let input = UpsertQueueStatsInput {
                action_id,
                queue_length: stats.queue_length as i32,
                active_count: stats.active_count as i32,
                max_concurrent: stats.max_concurrent as i32,
                oldest_enqueued_at: stats.oldest_enqueued_at,
                total_enqueued: stats.total_enqueued as i64,
                total_completed: stats.total_completed as i64,
            };
            QueueStatsRepository::upsert(&mut **tx, input).await?;
        }
    }
    Ok(())
}
/// Aggregate live admission statistics for an action across all of its
/// state rows (one per group key), inside the caller's transaction.
///
/// Returns `Ok(None)` when the action has no admission state rows at all;
/// callers use that to delete the denormalized stats row instead.
async fn get_queue_stats_from_tx(
    tx: &mut Transaction<'_, Postgres>,
    action_id: Id,
) -> Result<Option<AdmissionQueueStats>> {
    // Two independent aggregations combined with CROSS JOIN (each CTE yields
    // exactly one row):
    //  - state_rows: sums the per-state counters and counts state rows so we
    //    can distinguish "no state" from "state with empty queues".
    //  - entry_rows: counts queued/active entries via FILTER, and finds the
    //    oldest still-queued enqueue time. LEFT JOIN keeps states with no
    //    entries contributing zeros/NULL.
    let row = sqlx::query(
        r#"
        WITH state_rows AS (
            SELECT
                COUNT(*) AS state_count,
                COALESCE(SUM(max_concurrent), 0) AS max_concurrent,
                COALESCE(SUM(total_enqueued), 0) AS total_enqueued,
                COALESCE(SUM(total_completed), 0) AS total_completed
            FROM execution_admission_state
            WHERE action_id = $1
        ),
        entry_rows AS (
            SELECT
                COUNT(*) FILTER (WHERE e.status = 'queued') AS queue_length,
                COUNT(*) FILTER (WHERE e.status = 'active') AS active_count,
                MIN(e.enqueued_at) FILTER (WHERE e.status = 'queued') AS oldest_enqueued_at
            FROM execution_admission_state s
            LEFT JOIN execution_admission_entry e ON e.state_id = s.id
            WHERE s.action_id = $1
        )
        SELECT
            sr.state_count,
            er.queue_length,
            er.active_count,
            sr.max_concurrent,
            er.oldest_enqueued_at,
            sr.total_enqueued,
            sr.total_completed
        FROM state_rows sr
        CROSS JOIN entry_rows er
        "#,
    )
    .bind(action_id)
    .fetch_one(&mut **tx)
    .await?;
    // No state rows at all: report "no stats" rather than all-zero stats.
    let state_count: i64 = row.try_get("state_count")?;
    if state_count == 0 {
        return Ok(None);
    }
    // Postgres aggregates come back as i64; narrow to the model's field
    // types. Values are non-negative counts, so the casts are lossless in
    // practice.
    Ok(Some(AdmissionQueueStats {
        action_id,
        queue_length: row.try_get::<i64, _>("queue_length")? as usize,
        active_count: row.try_get::<i64, _>("active_count")? as u32,
        max_concurrent: row.try_get::<i64, _>("max_concurrent")? as u32,
        oldest_enqueued_at: row.try_get("oldest_enqueued_at")?,
        total_enqueued: row.try_get::<i64, _>("total_enqueued")? as u64,
        total_completed: row.try_get::<i64, _>("total_completed")? as u64,
    }))
}
}

View File

@@ -33,6 +33,7 @@ pub mod artifact;
pub mod entity_history;
pub mod event;
pub mod execution;
pub mod execution_admission;
pub mod identity;
pub mod inquiry;
pub mod key;
@@ -53,6 +54,7 @@ pub use artifact::{ArtifactRepository, ArtifactVersionRepository};
pub use entity_history::EntityHistoryRepository;
pub use event::{EnforcementRepository, EventRepository};
pub use execution::ExecutionRepository;
pub use execution_admission::ExecutionAdmissionRepository;
pub use identity::{IdentityRepository, PermissionAssignmentRepository, PermissionSetRepository};
pub use inquiry::InquiryRepository;
pub use key::KeyRepository;

View File

@@ -3,7 +3,7 @@
//! Provides database operations for queue statistics persistence.
use chrono::{DateTime, Utc};
use sqlx::{PgPool, Postgres, QueryBuilder};
use sqlx::{Executor, PgPool, Postgres, QueryBuilder};
use crate::error::Result;
use crate::models::Id;
@@ -38,7 +38,10 @@ pub struct QueueStatsRepository;
impl QueueStatsRepository {
/// Upsert queue statistics (insert or update)
pub async fn upsert(pool: &PgPool, input: UpsertQueueStatsInput) -> Result<QueueStats> {
pub async fn upsert<'e, E>(executor: E, input: UpsertQueueStatsInput) -> Result<QueueStats>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
INSERT INTO queue_stats (
@@ -69,14 +72,17 @@ impl QueueStatsRepository {
.bind(input.oldest_enqueued_at)
.bind(input.total_enqueued)
.bind(input.total_completed)
.fetch_one(pool)
.fetch_one(executor)
.await?;
Ok(stats)
}
/// Get queue statistics for a specific action
pub async fn find_by_action(pool: &PgPool, action_id: Id) -> Result<Option<QueueStats>> {
pub async fn find_by_action<'e, E>(executor: E, action_id: Id) -> Result<Option<QueueStats>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
SELECT
@@ -93,14 +99,17 @@ impl QueueStatsRepository {
"#,
)
.bind(action_id)
.fetch_optional(pool)
.fetch_optional(executor)
.await?;
Ok(stats)
}
/// List all queue statistics with active queues (queue_length > 0 or active_count > 0)
pub async fn list_active(pool: &PgPool) -> Result<Vec<QueueStats>> {
pub async fn list_active<'e, E>(executor: E) -> Result<Vec<QueueStats>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
SELECT
@@ -117,14 +126,17 @@ impl QueueStatsRepository {
ORDER BY last_updated DESC
"#,
)
.fetch_all(pool)
.fetch_all(executor)
.await?;
Ok(stats)
}
/// List all queue statistics
pub async fn list_all(pool: &PgPool) -> Result<Vec<QueueStats>> {
pub async fn list_all<'e, E>(executor: E) -> Result<Vec<QueueStats>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
SELECT
@@ -140,14 +152,17 @@ impl QueueStatsRepository {
ORDER BY last_updated DESC
"#,
)
.fetch_all(pool)
.fetch_all(executor)
.await?;
Ok(stats)
}
/// Delete queue statistics for a specific action
pub async fn delete(pool: &PgPool, action_id: Id) -> Result<bool> {
pub async fn delete<'e, E>(executor: E, action_id: Id) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query(
r#"
DELETE FROM queue_stats
@@ -155,7 +170,7 @@ impl QueueStatsRepository {
"#,
)
.bind(action_id)
.execute(pool)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
@@ -163,7 +178,7 @@ impl QueueStatsRepository {
/// Batch upsert multiple queue statistics
pub async fn batch_upsert(
pool: &PgPool,
executor: &PgPool,
inputs: Vec<UpsertQueueStatsInput>,
) -> Result<Vec<QueueStats>> {
if inputs.is_empty() {
@@ -213,14 +228,17 @@ impl QueueStatsRepository {
let stats = query_builder
.build_query_as::<QueueStats>()
.fetch_all(pool)
.fetch_all(executor)
.await?;
Ok(stats)
}
/// Clear stale statistics (older than specified duration)
pub async fn clear_stale(pool: &PgPool, older_than_seconds: i64) -> Result<u64> {
pub async fn clear_stale<'e, E>(executor: E, older_than_seconds: i64) -> Result<u64>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query(
r#"
DELETE FROM queue_stats
@@ -230,7 +248,7 @@ impl QueueStatsRepository {
"#,
)
.bind(older_than_seconds)
.execute(pool)
.execute(executor)
.await?;
Ok(result.rows_affected())

View File

@@ -612,6 +612,26 @@ impl Delete for WorkflowExecutionRepository {
}
impl WorkflowExecutionRepository {
/// Fetch a workflow execution by primary key and lock the row with
/// `FOR UPDATE` for the remainder of the executor's transaction.
///
/// Returns `Ok(None)` when no row matches `id`.
///
/// NOTE(review): only meaningful when `executor` is a transaction — a plain
/// pool connection releases the row lock immediately; confirm callers pass
/// a transaction.
pub async fn find_by_id_for_update<'e, E>(
    executor: E,
    id: Id,
) -> Result<Option<WorkflowExecution>>
where
    E: Executor<'e, Database = Postgres> + 'e,
{
    sqlx::query_as::<_, WorkflowExecution>(
        "SELECT id, execution, workflow_def, current_tasks, completed_tasks, failed_tasks, skipped_tasks,
                variables, task_graph, status, error_message, paused, pause_reason, created, updated
         FROM workflow_execution
         WHERE id = $1
         FOR UPDATE"
    )
    .bind(id)
    .fetch_optional(executor)
    .await
    .map_err(Into::into)
}
pub async fn create_or_get_by_execution<'e, E>(
executor: E,
input: CreateWorkflowExecutionInput,