Proper SQL filtering

This commit is contained in:
2026-03-01 20:43:48 -06:00
parent 6b9d7d6cf2
commit bbe94d75f8
54 changed files with 6692 additions and 928 deletions

View File

@@ -8,6 +8,26 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
/// Filters for [`ActionRepository::list_search`].
///
/// All fields are optional and combinable (AND). Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct ActionSearchFilters {
    /// Filter by pack ID
    pub pack: Option<Id>,
    /// Text search across ref, label, description (case-insensitive)
    pub query: Option<String>,
    /// Maximum number of rows to return (page size; 0 yields an empty page)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`ActionRepository::list_search`].
#[derive(Debug)]
pub struct ActionSearchResult {
    /// The requested page of matching actions (bounded by limit/offset)
    pub rows: Vec<Action>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
/// Repository for Action operations
pub struct ActionRepository;
@@ -287,6 +307,92 @@ impl Delete for ActionRepository {
}
impl ActionRepository {
/// Search actions with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
///
/// `query` is a case-insensitive substring match across `ref`, `label`,
/// and `description`; LIKE metacharacters (`%`, `_`, `\`) in the user's
/// input are escaped so they match literally rather than as wildcards.
pub async fn list_search<'e, E>(
    db: E,
    filters: &ActionSearchFilters,
) -> Result<ActionSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Escape LIKE metacharacters so user input matches literally.
    // Postgres' default LIKE escape character is backslash.
    fn escape_like(s: &str) -> String {
        s.replace('\\', "\\\\").replace('%', "\\%").replace('_', "\\_")
    }

    let select_cols = "id, ref, pack, pack_ref, label, description, entrypoint, runtime, runtime_version_constraint, param_schema, out_schema, workflow_def, is_adhoc, created, updated";
    let mut qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {select_cols} FROM action"));
    let mut count_qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM action");
    let mut has_where = false;

    // Append " WHERE "/" AND " to both builders, then the bound condition.
    macro_rules! push_condition {
        ($cond_prefix:expr, $value:expr) => {{
            if !has_where {
                qb.push(" WHERE ");
                count_qb.push(" WHERE ");
                has_where = true;
            } else {
                qb.push(" AND ");
                count_qb.push(" AND ");
            }
            qb.push($cond_prefix);
            qb.push_bind($value.clone());
            count_qb.push($cond_prefix);
            count_qb.push_bind($value);
        }};
    }

    if let Some(pack_id) = filters.pack {
        push_condition!("pack = ", pack_id);
    }
    if let Some(ref query) = filters.query {
        let pattern = format!("%{}%", escape_like(&query.to_lowercase()));
        // Text search needs an OR across multiple columns, wrapped in
        // parens so it ANDs cleanly with the other filters.
        if !has_where {
            qb.push(" WHERE ");
            count_qb.push(" WHERE ");
            has_where = true;
        } else {
            qb.push(" AND ");
            count_qb.push(" AND ");
        }
        // Same OR-group on both the data and count queries.
        for builder in [&mut qb, &mut count_qb] {
            builder.push("(LOWER(ref) LIKE ");
            builder.push_bind(pattern.clone());
            builder.push(" OR LOWER(label) LIKE ");
            builder.push_bind(pattern.clone());
            builder.push(" OR LOWER(description) LIKE ");
            builder.push_bind(pattern.clone());
            builder.push(")");
        }
    }
    // Suppress unused-assignment warning from the macro's last expansion.
    let _ = has_where;

    // Total count (before LIMIT/OFFSET) for pagination metadata.
    let total: i64 = count_qb.build_query_scalar().fetch_one(db).await?;
    let total = total.max(0) as u64;

    // Data query with deterministic ordering and server-side pagination.
    qb.push(" ORDER BY ref ASC");
    qb.push(" LIMIT ");
    qb.push_bind(filters.limit as i64);
    qb.push(" OFFSET ");
    qb.push_bind(filters.offset as i64);
    let rows: Vec<Action> = qb.build_query_as().fetch_all(db).await?;

    Ok(ActionSearchResult { rows, total })
}
/// Find actions by pack ID
pub async fn find_by_pack<'e, E>(executor: E, pack_id: Id) -> Result<Vec<Action>>
where

View File

@@ -15,6 +15,56 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, List, Repository, Update};
// ============================================================================
// Event Search
// ============================================================================
/// Filters for [`EventRepository::search`].
///
/// All fields are optional. When set, the corresponding WHERE clause is added.
/// Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct EventSearchFilters {
    /// Filter by trigger ID (exact match)
    pub trigger: Option<Id>,
    /// Filter by trigger reference (exact match)
    pub trigger_ref: Option<String>,
    /// Filter by source ID (exact match)
    pub source: Option<Id>,
    /// Case-insensitive substring match on rule_ref
    pub rule_ref: Option<String>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`EventRepository::search`].
#[derive(Debug)]
pub struct EventSearchResult {
    /// The requested page of matching events (bounded by limit/offset)
    pub rows: Vec<Event>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
// ============================================================================
// Enforcement Search
// ============================================================================
/// Filters for [`EnforcementRepository::search`].
///
/// All fields are optional and combinable. Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct EnforcementSearchFilters {
    /// Filter by rule ID (exact match)
    pub rule: Option<Id>,
    /// Filter by event ID (exact match)
    pub event: Option<Id>,
    /// Filter by enforcement status (exact match)
    pub status: Option<EnforcementStatus>,
    /// Filter by trigger reference (exact match)
    pub trigger_ref: Option<String>,
    /// Filter by rule reference (exact match)
    pub rule_ref: Option<String>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`EnforcementRepository::search`].
#[derive(Debug)]
pub struct EnforcementSearchResult {
    /// The requested page of matching enforcements (bounded by limit/offset)
    pub rows: Vec<Enforcement>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
/// Repository for Event operations
pub struct EventRepository;
@@ -173,6 +223,75 @@ impl EventRepository {
Ok(events)
}
/// Search events with all filters pushed into SQL.
///
/// Builds a dynamic query so that every filter, pagination, and the total
/// count are handled in the database — no in-memory filtering or slicing.
///
/// `rule_ref` is matched as a case-insensitive substring; LIKE
/// metacharacters (`%`, `_`, `\`) in the input are escaped so they match
/// literally rather than as wildcards.
pub async fn search<'e, E>(db: E, filters: &EventSearchFilters) -> Result<EventSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Escape LIKE metacharacters so user input matches literally.
    // Postgres' default LIKE escape character is backslash.
    fn escape_like(s: &str) -> String {
        s.replace('\\', "\\\\").replace('%', "\\%").replace('_', "\\_")
    }

    let select_cols = "id, trigger, trigger_ref, config, payload, source, source_ref, rule, rule_ref, created";
    let mut qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {select_cols} FROM event"));
    let mut count_qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM event");
    let mut has_where = false;

    // Append " WHERE "/" AND " to both builders, then the bound condition.
    macro_rules! push_condition {
        ($cond_prefix:expr, $value:expr) => {{
            if !has_where {
                qb.push(" WHERE ");
                count_qb.push(" WHERE ");
                has_where = true;
            } else {
                qb.push(" AND ");
                count_qb.push(" AND ");
            }
            qb.push($cond_prefix);
            qb.push_bind($value.clone());
            count_qb.push($cond_prefix);
            count_qb.push_bind($value);
        }};
    }

    if let Some(trigger_id) = filters.trigger {
        push_condition!("trigger = ", trigger_id);
    }
    if let Some(ref trigger_ref) = filters.trigger_ref {
        push_condition!("trigger_ref = ", trigger_ref.clone());
    }
    if let Some(source_id) = filters.source {
        push_condition!("source = ", source_id);
    }
    if let Some(ref rule_ref) = filters.rule_ref {
        push_condition!(
            "LOWER(rule_ref) LIKE ",
            format!("%{}%", escape_like(&rule_ref.to_lowercase()))
        );
    }
    // Suppress unused-assignment warning from the macro's last expansion.
    let _ = has_where;

    // Total count (before LIMIT/OFFSET) for pagination metadata.
    let total: i64 = count_qb.build_query_scalar().fetch_one(db).await?;
    let total = total.max(0) as u64;

    // Data query, newest first, with server-side pagination.
    qb.push(" ORDER BY created DESC");
    qb.push(" LIMIT ");
    qb.push_bind(filters.limit as i64);
    qb.push(" OFFSET ");
    qb.push_bind(filters.offset as i64);
    let rows: Vec<Event> = qb.build_query_as().fetch_all(db).await?;

    Ok(EventSearchResult { rows, total })
}
}
// ============================================================================
@@ -425,4 +544,75 @@ impl EnforcementRepository {
Ok(enforcements)
}
/// Search enforcements with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
pub async fn search<'e, E>(
    db: E,
    filters: &EnforcementSearchFilters,
) -> Result<EnforcementSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Writes " WHERE " before the first condition and " AND " before every
    // later one, keeping the data and count queries in lockstep.
    fn separator<'a>(
        started: &mut bool,
        data: &mut QueryBuilder<'a, Postgres>,
        count: &mut QueryBuilder<'a, Postgres>,
    ) {
        let keyword = if *started { " AND " } else { " WHERE " };
        *started = true;
        data.push(keyword);
        count.push(keyword);
    }

    let columns = "id, rule, rule_ref, trigger_ref, config, event, status, payload, condition, conditions, created, resolved_at";
    let mut data_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {columns} FROM enforcement"));
    let mut count_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM enforcement");
    let mut started = false;

    if let Some(status) = &filters.status {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("status = ").push_bind(status.clone());
        count_q.push("status = ").push_bind(status.clone());
    }
    if let Some(rule_id) = filters.rule {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("rule = ").push_bind(rule_id);
        count_q.push("rule = ").push_bind(rule_id);
    }
    if let Some(event_id) = filters.event {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("event = ").push_bind(event_id);
        count_q.push("event = ").push_bind(event_id);
    }
    if let Some(ref trigger_ref) = filters.trigger_ref {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("trigger_ref = ").push_bind(trigger_ref.clone());
        count_q.push("trigger_ref = ").push_bind(trigger_ref.clone());
    }
    if let Some(ref rule_ref) = filters.rule_ref {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("rule_ref = ").push_bind(rule_ref.clone());
        count_q.push("rule_ref = ").push_bind(rule_ref.clone());
    }

    // Total number of matches before pagination.
    let matched: i64 = count_q.build_query_scalar().fetch_one(db).await?;

    // Fetch the requested page, newest first.
    data_q.push(" ORDER BY created DESC");
    data_q.push(" LIMIT ");
    data_q.push_bind(filters.limit as i64);
    data_q.push(" OFFSET ");
    data_q.push_bind(filters.offset as i64);
    let rows: Vec<Enforcement> = data_q.build_query_as().fetch_all(db).await?;

    Ok(EnforcementSearchResult {
        rows,
        total: matched.max(0) as u64,
    })
}
}

View File

@@ -1,11 +1,71 @@
//! Execution repository for database operations
use chrono::{DateTime, Utc};
use crate::models::{enums::ExecutionStatus, execution::*, Id, JsonDict};
use crate::Result;
use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, List, Repository, Update};
/// Filters for the [`ExecutionRepository::search`] query-builder method.
///
/// Every field is optional. When set, the corresponding `WHERE` clause is
/// appended to the query. Pagination (`limit`/`offset`) is always applied.
///
/// Filters that involve the `enforcement` table (`rule_ref`, `trigger_ref`)
/// cause a `LEFT JOIN enforcement` to be added automatically.
#[derive(Debug, Clone, Default)]
pub struct ExecutionSearchFilters {
    /// Filter by execution status (exact match)
    pub status: Option<ExecutionStatus>,
    /// Filter by exact action reference
    pub action_ref: Option<String>,
    /// Filter by pack: matches action refs with the `<pack_name>.` prefix
    pub pack_name: Option<String>,
    /// Filter by the joined enforcement's rule_ref (exact match)
    pub rule_ref: Option<String>,
    /// Filter by the joined enforcement's trigger_ref (exact match)
    pub trigger_ref: Option<String>,
    /// Filter by executor ID (exact match)
    pub executor: Option<Id>,
    /// Case-insensitive substring match against the serialized `result`
    pub result_contains: Option<String>,
    /// Filter by enforcement ID (exact match)
    pub enforcement: Option<Id>,
    /// Filter by parent execution ID (exact match)
    pub parent: Option<Id>,
    /// When true, only executions with no parent (parent IS NULL) match
    pub top_level_only: bool,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`ExecutionRepository::search`].
///
/// Includes the matching rows *and* the total count (before LIMIT/OFFSET)
/// so the caller can build pagination metadata without a second round-trip.
#[derive(Debug)]
pub struct ExecutionSearchResult {
    /// The requested page of matching executions (bounded by limit/offset)
    pub rows: Vec<ExecutionWithRefs>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
/// An execution row with optional `rule_ref` / `trigger_ref` populated from
/// the joined `enforcement` table. This avoids a separate in-memory lookup.
#[derive(Debug, Clone, sqlx::FromRow)]
pub struct ExecutionWithRefs {
    // — execution columns (same order as SELECT_COLUMNS) —
    pub id: Id,
    pub action: Option<Id>,
    pub action_ref: String,
    pub config: Option<JsonDict>,
    pub env_vars: Option<JsonDict>,
    pub parent: Option<Id>,
    pub enforcement: Option<Id>,
    pub executor: Option<Id>,
    pub status: ExecutionStatus,
    pub result: Option<JsonDict>,
    pub started_at: Option<DateTime<Utc>>,
    // Decoded from a JSON column; `default` lets decoding succeed when the
    // column is NULL or absent.
    #[sqlx(json, default)]
    pub workflow_task: Option<WorkflowTaskMetadata>,
    pub created: DateTime<Utc>,
    pub updated: DateTime<Utc>,
    // — joined from enforcement; NULL when the execution has no enforcement
    //   (the join is a LEFT JOIN) —
    pub rule_ref: Option<String>,
    pub trigger_ref: Option<String>,
}
/// Column list for SELECT queries on the execution table.
///
/// Defined once to avoid drift between queries and the `Execution` model.
@@ -13,7 +73,7 @@ use super::{Create, Delete, FindById, List, Repository, Update};
/// are NOT in the Rust struct, so `SELECT *` must never be used.
pub const SELECT_COLUMNS: &str = "\
id, action, action_ref, config, env_vars, parent, enforcement, \
executor, status, result, workflow_task, created, updated";
executor, status, result, started_at, workflow_task, created, updated";
pub struct ExecutionRepository;
@@ -43,6 +103,7 @@ pub struct UpdateExecutionInput {
pub status: Option<ExecutionStatus>,
pub result: Option<JsonDict>,
pub executor: Option<Id>,
pub started_at: Option<DateTime<Utc>>,
pub workflow_task: Option<WorkflowTaskMetadata>,
}
@@ -52,6 +113,7 @@ impl From<Execution> for UpdateExecutionInput {
status: Some(execution.status),
result: execution.result,
executor: execution.executor,
started_at: execution.started_at,
workflow_task: execution.workflow_task,
}
}
@@ -146,6 +208,13 @@ impl Update for ExecutionRepository {
query.push("executor = ").push_bind(executor_id);
has_updates = true;
}
if let Some(started_at) = input.started_at {
if has_updates {
query.push(", ");
}
query.push("started_at = ").push_bind(started_at);
has_updates = true;
}
if let Some(workflow_task) = &input.workflow_task {
if has_updates {
query.push(", ");
@@ -239,4 +308,141 @@ impl ExecutionRepository {
.await
.map_err(Into::into)
}
/// Search executions with all filters pushed into SQL.
///
/// Builds a dynamic query with only the WHERE clauses that apply, a
/// LEFT JOIN on `enforcement` (always present, so `rule_ref`/`trigger_ref`
/// can be returned on every row without a second round-trip), and
/// LIMIT/OFFSET so pagination is server-side.
///
/// LIKE metacharacters in `pack_name` and `result_contains` are escaped so
/// user input matches literally rather than as wildcards.
///
/// Returns both the matching page of rows and the total count.
pub async fn search<'e, E>(
    db: E,
    filters: &ExecutionSearchFilters,
) -> Result<ExecutionSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Escape LIKE metacharacters so user input matches literally.
    // Postgres' default LIKE escape character is backslash.
    fn escape_like(s: &str) -> String {
        s.replace('\\', "\\\\").replace('%', "\\%").replace('_', "\\_")
    }

    // Prefix every execution column with the table alias so nothing
    // collides with the joined enforcement columns.
    let prefixed_select = SELECT_COLUMNS
        .split(", ")
        .map(|col| format!("e.{col}"))
        .collect::<Vec<_>>()
        .join(", ");
    let select_clause = format!(
        "{prefixed_select}, enf.rule_ref AS rule_ref, enf.trigger_ref AS trigger_ref"
    );
    let from_clause = "FROM execution e LEFT JOIN enforcement enf ON e.enforcement = enf.id";

    let mut qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {select_clause} {from_clause}"));
    let mut count_qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT COUNT(*) AS total {from_clause}"));
    let mut has_where = false;

    // Append " WHERE "/" AND " to both builders, then the bound condition.
    macro_rules! push_condition {
        ($cond_prefix:expr, $value:expr) => {{
            if !has_where {
                qb.push(" WHERE ");
                count_qb.push(" WHERE ");
                has_where = true;
            } else {
                qb.push(" AND ");
                count_qb.push(" AND ");
            }
            qb.push($cond_prefix);
            qb.push_bind($value.clone());
            count_qb.push($cond_prefix);
            count_qb.push_bind($value);
        }};
    }
    // Same, but for a condition with no bind value.
    macro_rules! push_raw_condition {
        ($cond:expr) => {{
            if !has_where {
                qb.push(concat!(" WHERE ", $cond));
                count_qb.push(concat!(" WHERE ", $cond));
                has_where = true;
            } else {
                qb.push(concat!(" AND ", $cond));
                count_qb.push(concat!(" AND ", $cond));
            }
        }};
    }

    if let Some(status) = &filters.status {
        push_condition!("e.status = ", status.clone());
    }
    if let Some(action_ref) = &filters.action_ref {
        push_condition!("e.action_ref = ", action_ref.clone());
    }
    if let Some(pack_name) = &filters.pack_name {
        // Prefix match "<pack>.<action>"; escape so `_` in a pack name
        // cannot match arbitrary characters.
        let pattern = format!("{}.%", escape_like(pack_name));
        push_condition!("e.action_ref LIKE ", pattern);
    }
    if let Some(enforcement_id) = filters.enforcement {
        push_condition!("e.enforcement = ", enforcement_id);
    }
    if let Some(parent_id) = filters.parent {
        push_condition!("e.parent = ", parent_id);
    }
    if filters.top_level_only {
        push_raw_condition!("e.parent IS NULL");
    }
    if let Some(executor_id) = filters.executor {
        push_condition!("e.executor = ", executor_id);
    }
    if let Some(rule_ref) = &filters.rule_ref {
        push_condition!("enf.rule_ref = ", rule_ref.clone());
    }
    if let Some(trigger_ref) = &filters.trigger_ref {
        push_condition!("enf.trigger_ref = ", trigger_ref.clone());
    }
    if let Some(search) = &filters.result_contains {
        let pattern = format!("%{}%", escape_like(&search.to_lowercase()));
        push_condition!("LOWER(e.result::text) LIKE ", pattern);
    }
    // Suppress unused-assignment warning from the macros' last expansion.
    let _ = has_where;

    // ── COUNT query: total matches before pagination ─────────────────
    let total: i64 = count_qb.build_query_scalar().fetch_one(db).await?;
    let total = total.max(0) as u64;

    // ── Data query with ORDER BY + pagination ────────────────────────
    qb.push(" ORDER BY e.created DESC");
    qb.push(" LIMIT ");
    qb.push_bind(filters.limit as i64);
    qb.push(" OFFSET ");
    qb.push_bind(filters.offset as i64);
    let rows: Vec<ExecutionWithRefs> = qb.build_query_as().fetch_all(db).await?;

    Ok(ExecutionSearchResult { rows, total })
}
}

View File

@@ -7,6 +7,25 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, List, Repository, Update};
/// Filters for [`InquiryRepository::search`].
///
/// All fields are optional and combinable (AND). Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct InquirySearchFilters {
    /// Filter by inquiry status (exact match)
    pub status: Option<InquiryStatus>,
    /// Filter by execution ID (exact match)
    pub execution: Option<Id>,
    /// Filter by assignee ID (exact match)
    pub assigned_to: Option<Id>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`InquiryRepository::search`].
#[derive(Debug)]
pub struct InquirySearchResult {
    /// The requested page of matching inquiries (bounded by limit/offset)
    pub rows: Vec<Inquiry>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
pub struct InquiryRepository;
impl Repository for InquiryRepository {
@@ -157,4 +176,66 @@ impl InquiryRepository {
"SELECT id, execution, prompt, response_schema, assigned_to, status, response, timeout_at, responded_at, created, updated FROM inquiry WHERE execution = $1 ORDER BY created DESC"
).bind(execution_id).fetch_all(executor).await.map_err(Into::into)
}
/// Search inquiries with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
pub async fn search<'e, E>(db: E, filters: &InquirySearchFilters) -> Result<InquirySearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Writes " WHERE " before the first condition and " AND " before every
    // later one, keeping the data and count queries in lockstep.
    fn separator<'a>(
        started: &mut bool,
        data: &mut QueryBuilder<'a, Postgres>,
        count: &mut QueryBuilder<'a, Postgres>,
    ) {
        let keyword = if *started { " AND " } else { " WHERE " };
        *started = true;
        data.push(keyword);
        count.push(keyword);
    }

    let columns = "id, execution, prompt, response_schema, assigned_to, status, response, timeout_at, responded_at, created, updated";
    let mut data_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {columns} FROM inquiry"));
    let mut count_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM inquiry");
    let mut started = false;

    if let Some(status) = &filters.status {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("status = ").push_bind(status.clone());
        count_q.push("status = ").push_bind(status.clone());
    }
    if let Some(execution_id) = filters.execution {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("execution = ").push_bind(execution_id);
        count_q.push("execution = ").push_bind(execution_id);
    }
    if let Some(assignee_id) = filters.assigned_to {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("assigned_to = ").push_bind(assignee_id);
        count_q.push("assigned_to = ").push_bind(assignee_id);
    }

    // Total number of matches before pagination.
    let matched: i64 = count_q.build_query_scalar().fetch_one(db).await?;

    // Fetch the requested page, newest first.
    data_q.push(" ORDER BY created DESC");
    data_q.push(" LIMIT ");
    data_q.push_bind(filters.limit as i64);
    data_q.push(" OFFSET ");
    data_q.push_bind(filters.offset as i64);
    let rows: Vec<Inquiry> = data_q.build_query_as().fetch_all(db).await?;

    Ok(InquirySearchResult {
        rows,
        total: matched.max(0) as u64,
    })
}
}

View File

@@ -6,6 +6,24 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, List, Repository, Update};
/// Filters for [`KeyRepository::search`].
///
/// All fields are optional and combinable (AND). Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct KeySearchFilters {
    /// Filter by owner type (exact match)
    pub owner_type: Option<OwnerType>,
    /// Filter by owner (exact match)
    pub owner: Option<String>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`KeyRepository::search`].
#[derive(Debug)]
pub struct KeySearchResult {
    /// The requested page of matching keys (bounded by limit/offset)
    pub rows: Vec<Key>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
pub struct KeyRepository;
impl Repository for KeyRepository {
@@ -165,4 +183,63 @@ impl KeyRepository {
"SELECT id, ref, owner_type, owner, owner_identity, owner_pack, owner_pack_ref, owner_action, owner_action_ref, owner_sensor, owner_sensor_ref, name, encrypted, encryption_key_hash, value, created, updated FROM key WHERE owner_type = $1 ORDER BY ref ASC"
).bind(owner_type).fetch_all(executor).await.map_err(Into::into)
}
/// Search keys with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
pub async fn search<'e, E>(db: E, filters: &KeySearchFilters) -> Result<KeySearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Writes " WHERE " before the first condition and " AND " before every
    // later one, keeping the data and count queries in lockstep.
    fn separator<'a>(
        started: &mut bool,
        data: &mut QueryBuilder<'a, Postgres>,
        count: &mut QueryBuilder<'a, Postgres>,
    ) {
        let keyword = if *started { " AND " } else { " WHERE " };
        *started = true;
        data.push(keyword);
        count.push(keyword);
    }

    let columns = "id, ref, owner_type, owner, owner_identity, owner_pack, owner_pack_ref, owner_action, owner_action_ref, owner_sensor, owner_sensor_ref, name, encrypted, encryption_key_hash, value, created, updated";
    let mut data_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {columns} FROM key"));
    let mut count_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM key");
    let mut started = false;

    if let Some(ref owner_type) = filters.owner_type {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("owner_type = ").push_bind(owner_type.clone());
        count_q.push("owner_type = ").push_bind(owner_type.clone());
    }
    if let Some(ref owner) = filters.owner {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("owner = ").push_bind(owner.clone());
        count_q.push("owner = ").push_bind(owner.clone());
    }

    // Total number of matches before pagination.
    let matched: i64 = count_q.build_query_scalar().fetch_one(db).await?;

    // Fetch the requested page in stable ref order.
    data_q.push(" ORDER BY ref ASC");
    data_q.push(" LIMIT ");
    data_q.push_bind(filters.limit as i64);
    data_q.push(" OFFSET ");
    data_q.push_bind(filters.offset as i64);
    let rows: Vec<Key> = data_q.build_query_as().fetch_all(db).await?;

    Ok(KeySearchResult {
        rows,
        total: matched.max(0) as u64,
    })
}
}

View File

@@ -8,6 +8,30 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
/// Filters for [`RuleRepository::list_search`].
///
/// All fields are optional and combinable (AND). Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct RuleSearchFilters {
    /// Filter by pack ID
    pub pack: Option<Id>,
    /// Filter by action ID
    pub action: Option<Id>,
    /// Filter by trigger ID
    pub trigger: Option<Id>,
    /// Filter by enabled status
    pub enabled: Option<bool>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`RuleRepository::list_search`].
#[derive(Debug)]
pub struct RuleSearchResult {
    /// The requested page of matching rules (bounded by limit/offset)
    pub rows: Vec<Rule>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
/// Input for restoring an ad-hoc rule during pack reinstallation.
/// Unlike `CreateRuleInput`, action and trigger IDs are optional because
/// the referenced entities may not exist yet or may have been removed.
@@ -275,6 +299,71 @@ impl Delete for RuleRepository {
}
impl RuleRepository {
/// Search rules with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
pub async fn list_search<'e, E>(db: E, filters: &RuleSearchFilters) -> Result<RuleSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Writes " WHERE " before the first condition and " AND " before every
    // later one, keeping the data and count queries in lockstep.
    fn separator<'a>(
        started: &mut bool,
        data: &mut QueryBuilder<'a, Postgres>,
        count: &mut QueryBuilder<'a, Postgres>,
    ) {
        let keyword = if *started { " AND " } else { " WHERE " };
        *started = true;
        data.push(keyword);
        count.push(keyword);
    }

    let columns = "id, ref, pack, pack_ref, label, description, action, action_ref, trigger, trigger_ref, conditions, action_params, trigger_params, enabled, is_adhoc, created, updated";
    let mut data_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {columns} FROM rule"));
    let mut count_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM rule");
    let mut started = false;

    if let Some(pack_id) = filters.pack {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("pack = ").push_bind(pack_id);
        count_q.push("pack = ").push_bind(pack_id);
    }
    if let Some(action_id) = filters.action {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("action = ").push_bind(action_id);
        count_q.push("action = ").push_bind(action_id);
    }
    if let Some(trigger_id) = filters.trigger {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("trigger = ").push_bind(trigger_id);
        count_q.push("trigger = ").push_bind(trigger_id);
    }
    if let Some(enabled) = filters.enabled {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("enabled = ").push_bind(enabled);
        count_q.push("enabled = ").push_bind(enabled);
    }

    // Total number of matches before pagination.
    let matched: i64 = count_q.build_query_scalar().fetch_one(db).await?;

    // Fetch the requested page in stable ref order.
    data_q.push(" ORDER BY ref ASC");
    data_q.push(" LIMIT ");
    data_q.push_bind(filters.limit as i64);
    data_q.push(" OFFSET ");
    data_q.push_bind(filters.offset as i64);
    let rows: Vec<Rule> = data_q.build_query_as().fetch_all(db).await?;

    Ok(RuleSearchResult {
        rows,
        total: matched.max(0) as u64,
    })
}
/// Find rules by pack ID
pub async fn find_by_pack<'e, E>(executor: E, pack_id: Id) -> Result<Vec<Rule>>
where

View File

@@ -9,6 +9,56 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
// ============================================================================
// Trigger Search
// ============================================================================
/// Filters for [`TriggerRepository::list_search`].
///
/// All fields are optional and combinable (AND). Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct TriggerSearchFilters {
    /// Filter by pack ID
    pub pack: Option<Id>,
    /// Filter by enabled status
    pub enabled: Option<bool>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`TriggerRepository::list_search`].
#[derive(Debug)]
pub struct TriggerSearchResult {
    /// The requested page of matching triggers (bounded by limit/offset)
    pub rows: Vec<Trigger>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
// ============================================================================
// Sensor Search
// ============================================================================
/// Filters for [`SensorRepository::list_search`].
///
/// All fields are optional and combinable (AND). Pagination is always applied.
#[derive(Debug, Clone, Default)]
pub struct SensorSearchFilters {
    /// Filter by pack ID
    pub pack: Option<Id>,
    /// Filter by trigger ID
    pub trigger: Option<Id>,
    /// Filter by enabled status
    pub enabled: Option<bool>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`SensorRepository::list_search`].
#[derive(Debug)]
pub struct SensorSearchResult {
    /// The requested page of matching sensors (bounded by limit/offset)
    pub rows: Vec<Sensor>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
/// Repository for Trigger operations
pub struct TriggerRepository;
@@ -251,6 +301,68 @@ impl Delete for TriggerRepository {
}
impl TriggerRepository {
/// Search triggers with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
pub async fn list_search<'e, E>(
    db: E,
    filters: &TriggerSearchFilters,
) -> Result<TriggerSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Writes " WHERE " before the first condition and " AND " before every
    // later one, keeping the data and count queries in lockstep.
    fn separator<'a>(
        started: &mut bool,
        data: &mut QueryBuilder<'a, Postgres>,
        count: &mut QueryBuilder<'a, Postgres>,
    ) {
        let keyword = if *started { " AND " } else { " WHERE " };
        *started = true;
        data.push(keyword);
        count.push(keyword);
    }

    let columns = "id, ref, pack, pack_ref, label, description, enabled, param_schema, out_schema, webhook_enabled, webhook_key, webhook_config, is_adhoc, created, updated";
    let mut data_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {columns} FROM trigger"));
    let mut count_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM trigger");
    let mut started = false;

    if let Some(pack_id) = filters.pack {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("pack = ").push_bind(pack_id);
        count_q.push("pack = ").push_bind(pack_id);
    }
    if let Some(enabled) = filters.enabled {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("enabled = ").push_bind(enabled);
        count_q.push("enabled = ").push_bind(enabled);
    }

    // Total number of matches before pagination.
    let matched: i64 = count_q.build_query_scalar().fetch_one(db).await?;

    // Fetch the requested page in stable ref order.
    data_q.push(" ORDER BY ref ASC");
    data_q.push(" LIMIT ");
    data_q.push_bind(filters.limit as i64);
    data_q.push(" OFFSET ");
    data_q.push_bind(filters.offset as i64);
    let rows: Vec<Trigger> = data_q.build_query_as().fetch_all(db).await?;

    Ok(TriggerSearchResult {
        rows,
        total: matched.max(0) as u64,
    })
}
/// Find triggers by pack ID
pub async fn find_by_pack<'e, E>(executor: E, pack_id: Id) -> Result<Vec<Trigger>>
where
@@ -795,6 +907,71 @@ impl Delete for SensorRepository {
}
impl SensorRepository {
/// Search sensors with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
pub async fn list_search<'e, E>(
    db: E,
    filters: &SensorSearchFilters,
) -> Result<SensorSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Writes " WHERE " before the first condition and " AND " before every
    // later one, keeping the data and count queries in lockstep.
    fn separator<'a>(
        started: &mut bool,
        data: &mut QueryBuilder<'a, Postgres>,
        count: &mut QueryBuilder<'a, Postgres>,
    ) {
        let keyword = if *started { " AND " } else { " WHERE " };
        *started = true;
        data.push(keyword);
        count.push(keyword);
    }

    let columns = "id, ref, pack, pack_ref, label, description, entrypoint, runtime, runtime_ref, runtime_version_constraint, trigger, trigger_ref, enabled, param_schema, config, created, updated";
    let mut data_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {columns} FROM sensor"));
    let mut count_q: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM sensor");
    let mut started = false;

    if let Some(pack_id) = filters.pack {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("pack = ").push_bind(pack_id);
        count_q.push("pack = ").push_bind(pack_id);
    }
    if let Some(trigger_id) = filters.trigger {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("trigger = ").push_bind(trigger_id);
        count_q.push("trigger = ").push_bind(trigger_id);
    }
    if let Some(enabled) = filters.enabled {
        separator(&mut started, &mut data_q, &mut count_q);
        data_q.push("enabled = ").push_bind(enabled);
        count_q.push("enabled = ").push_bind(enabled);
    }

    // Total number of matches before pagination.
    let matched: i64 = count_q.build_query_scalar().fetch_one(db).await?;

    // Fetch the requested page in stable ref order.
    data_q.push(" ORDER BY ref ASC");
    data_q.push(" LIMIT ");
    data_q.push_bind(filters.limit as i64);
    data_q.push(" OFFSET ");
    data_q.push_bind(filters.offset as i64);
    let rows: Vec<Sensor> = data_q.build_query_as().fetch_all(db).await?;

    Ok(SensorSearchResult {
        rows,
        total: matched.max(0) as u64,
    })
}
/// Find sensors by trigger ID
pub async fn find_by_trigger<'e, E>(executor: E, trigger_id: Id) -> Result<Vec<Sensor>>
where

View File

@@ -6,6 +6,37 @@ use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
// ============================================================================
// Workflow Definition Search
// ============================================================================
/// Filters for [`WorkflowDefinitionRepository::list_search`].
///
/// All fields are optional and combinable (AND). Pagination is always applied.
/// Tag filtering uses `ANY(tags)` for each tag (OR across tags, AND with other filters).
#[derive(Debug, Clone, Default)]
pub struct WorkflowSearchFilters {
    /// Filter by pack ID
    pub pack: Option<Id>,
    /// Filter by pack reference
    pub pack_ref: Option<String>,
    /// Filter by enabled status
    pub enabled: Option<bool>,
    /// Filter by tags (OR across tags — matches if any tag is present)
    pub tags: Option<Vec<String>>,
    /// Text search across label and description (case-insensitive substring)
    pub search: Option<String>,
    /// Maximum number of rows to return (page size)
    pub limit: u32,
    /// Number of matching rows to skip before the first returned row
    pub offset: u32,
}
/// Result of [`WorkflowDefinitionRepository::list_search`].
#[derive(Debug)]
pub struct WorkflowSearchResult {
    /// The requested page of matching workflow definitions (bounded by limit/offset)
    pub rows: Vec<WorkflowDefinition>,
    /// Total number of matches before pagination was applied
    pub total: u64,
}
// ============================================================================
// WORKFLOW DEFINITION REPOSITORY
// ============================================================================
@@ -226,6 +257,102 @@ impl Delete for WorkflowDefinitionRepository {
}
impl WorkflowDefinitionRepository {
/// Search workflow definitions with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.
/// Tags use an OR match — a workflow matches if it contains ANY of the
/// requested tags (via `tags && ARRAY[...]`).
///
/// `search` is a case-insensitive substring match on label/description;
/// LIKE metacharacters (`%`, `_`, `\`) in the input are escaped so they
/// match literally rather than as wildcards.
pub async fn list_search<'e, E>(
    db: E,
    filters: &WorkflowSearchFilters,
) -> Result<WorkflowSearchResult>
where
    E: Executor<'e, Database = Postgres> + Copy + 'e,
{
    // Escape LIKE metacharacters so user input matches literally.
    // Postgres' default LIKE escape character is backslash.
    fn escape_like(s: &str) -> String {
        s.replace('\\', "\\\\").replace('%', "\\%").replace('_', "\\_")
    }

    let select_cols = "id, ref, pack, pack_ref, label, description, version, param_schema, out_schema, definition, tags, enabled, created, updated";
    let mut qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new(format!("SELECT {select_cols} FROM workflow_definition"));
    let mut count_qb: QueryBuilder<'_, Postgres> =
        QueryBuilder::new("SELECT COUNT(*) FROM workflow_definition");
    let mut has_where = false;

    // Append " WHERE "/" AND " to both builders, then the bound condition.
    macro_rules! push_condition {
        ($cond_prefix:expr, $value:expr) => {{
            if !has_where {
                qb.push(" WHERE ");
                count_qb.push(" WHERE ");
                has_where = true;
            } else {
                qb.push(" AND ");
                count_qb.push(" AND ");
            }
            qb.push($cond_prefix);
            qb.push_bind($value.clone());
            count_qb.push($cond_prefix);
            count_qb.push_bind($value);
        }};
    }

    if let Some(pack_id) = filters.pack {
        push_condition!("pack = ", pack_id);
    }
    if let Some(ref pack_ref) = filters.pack_ref {
        push_condition!("pack_ref = ", pack_ref.clone());
    }
    if let Some(enabled) = filters.enabled {
        push_condition!("enabled = ", enabled);
    }
    if let Some(ref tags) = filters.tags {
        if !tags.is_empty() {
            // PostgreSQL array-overlap operator: matches if ANY tag overlaps.
            push_condition!("tags && ", tags.clone());
        }
    }
    if let Some(ref search) = filters.search {
        let pattern = format!("%{}%", escape_like(&search.to_lowercase()));
        // Text search needs an OR across multiple columns, wrapped in
        // parens so it ANDs cleanly with the other filters.
        if !has_where {
            qb.push(" WHERE ");
            count_qb.push(" WHERE ");
            has_where = true;
        } else {
            qb.push(" AND ");
            count_qb.push(" AND ");
        }
        // Same OR-group on both the data and count queries.
        for builder in [&mut qb, &mut count_qb] {
            builder.push("(LOWER(label) LIKE ");
            builder.push_bind(pattern.clone());
            builder.push(" OR LOWER(COALESCE(description, '')) LIKE ");
            builder.push_bind(pattern.clone());
            builder.push(")");
        }
    }
    // Suppress unused-assignment warning from the macro's last expansion.
    let _ = has_where;

    // Total count (before LIMIT/OFFSET) for pagination metadata.
    let total: i64 = count_qb.build_query_scalar().fetch_one(db).await?;
    let total = total.max(0) as u64;

    // Data query with deterministic ordering and server-side pagination.
    qb.push(" ORDER BY label ASC");
    qb.push(" LIMIT ");
    qb.push_bind(filters.limit as i64);
    qb.push(" OFFSET ");
    qb.push_bind(filters.offset as i64);
    let rows: Vec<WorkflowDefinition> = qb.build_query_as().fetch_all(db).await?;

    Ok(WorkflowSearchResult { rows, total })
}
/// Find all workflows for a specific pack by pack ID
pub async fn find_by_pack<'e, E>(executor: E, pack_id: Id) -> Result<Vec<WorkflowDefinition>>
where