trying to rework database migrations
This commit is contained in:
223
migrations.old/20250101000001_initial_setup.sql
Normal file
223
migrations.old/20250101000001_initial_setup.sql
Normal file
@@ -0,0 +1,223 @@
|
||||
-- Migration: Initial Setup
-- Description: Creates the attune schema, enums, and shared database functions
-- Version: 20250101000001

-- ============================================================================
-- SCHEMA AND ROLE SETUP
-- ============================================================================

-- Create the attune schema.
-- NOTE: For tests, the test schema is created separately. For production, uncomment below:
-- CREATE SCHEMA IF NOT EXISTS attune;

-- Set search path (now set via connection pool configuration)

-- Create service role for the application.
-- NOTE: Commented out for tests, uncomment for production:
-- NOTE(review): the role password is committed in plaintext here -- presumably a
-- placeholder; confirm production provisioning injects a real secret instead.
-- DO $$
-- BEGIN
--     IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'svc_attune') THEN
--         CREATE ROLE svc_attune WITH LOGIN PASSWORD 'attune_service_password';
--     END IF;
-- END
-- $$;

-- Grant usage on schema.
-- NOTE: Commented out for tests, uncomment for production:
-- GRANT USAGE ON SCHEMA attune TO svc_attune;
-- GRANT CREATE ON SCHEMA attune TO svc_attune;

-- Enable required extensions:
--   uuid-ossp : UUID generation functions
--   pgcrypto  : hashing / encryption helpers
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";

-- COMMENT ON SCHEMA attune IS 'Attune automation platform schema';
|
||||
|
||||
-- ============================================================================
-- ENUM TYPES
-- ============================================================================
-- Postgres has no CREATE TYPE IF NOT EXISTS, so each enum is wrapped in a DO
-- block that swallows duplicate_object, keeping this migration re-runnable.

-- RuntimeType enum
DO $$ BEGIN
    CREATE TYPE runtime_type_enum AS ENUM (
        'action',
        'sensor'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE runtime_type_enum IS 'Type of runtime environment';

-- WorkerType enum
DO $$ BEGIN
    CREATE TYPE worker_type_enum AS ENUM (
        'local',
        'remote',
        'container'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE worker_type_enum IS 'Type of worker deployment';

-- WorkerStatus enum
DO $$ BEGIN
    CREATE TYPE worker_status_enum AS ENUM (
        'active',
        'inactive',
        'busy',
        'error'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE worker_status_enum IS 'Worker operational status';

-- EnforcementStatus enum
DO $$ BEGIN
    CREATE TYPE enforcement_status_enum AS ENUM (
        'created',
        'processed',
        'disabled'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE enforcement_status_enum IS 'Enforcement processing status';

-- EnforcementCondition enum
DO $$ BEGIN
    CREATE TYPE enforcement_condition_enum AS ENUM (
        'any',
        'all'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE enforcement_condition_enum IS 'Logical operator for conditions (OR/AND)';

-- ExecutionStatus enum
DO $$ BEGIN
    CREATE TYPE execution_status_enum AS ENUM (
        'requested',
        'scheduling',
        'scheduled',
        'running',
        'completed',
        'failed',
        'canceling',
        'cancelled',
        'timeout',
        'abandoned'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE execution_status_enum IS 'Execution lifecycle status';

-- InquiryStatus enum
DO $$ BEGIN
    CREATE TYPE inquiry_status_enum AS ENUM (
        'pending',
        'responded',
        'timeout',
        'cancelled'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE inquiry_status_enum IS 'Inquiry lifecycle status';

-- PolicyMethod enum
DO $$ BEGIN
    CREATE TYPE policy_method_enum AS ENUM (
        'cancel',
        'enqueue'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE policy_method_enum IS 'Policy enforcement method';

-- OwnerType enum
DO $$ BEGIN
    CREATE TYPE owner_type_enum AS ENUM (
        'system',
        'identity',
        'pack',
        'action',
        'sensor'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE owner_type_enum IS 'Type of resource owner';

-- NotificationStatus enum
-- (header previously said "NotificationState", which did not match the type name)
DO $$ BEGIN
    CREATE TYPE notification_status_enum AS ENUM (
        'created',
        'queued',
        'processing',
        'error'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE notification_status_enum IS 'Notification processing state';

-- ArtifactType enum
DO $$ BEGIN
    CREATE TYPE artifact_type_enum AS ENUM (
        'file_binary',
        'file_datatable',
        'file_image',
        'file_text',
        'other',
        'progress',
        'url'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE artifact_type_enum IS 'Type of artifact';

-- ArtifactRetention enum (retention policy type)
-- (header previously said "RetentionPolicyType", which did not match the type name)
DO $$ BEGIN
    CREATE TYPE artifact_retention_enum AS ENUM (
        'versions',
        'days',
        'hours',
        'minutes'
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE artifact_retention_enum IS 'Type of retention policy';
|
||||
|
||||
-- ============================================================================
-- SHARED FUNCTIONS
-- ============================================================================

-- Trigger function: stamps the row's 'updated' column with the current time.
-- Attach via BEFORE UPDATE ... FOR EACH ROW on any table with an 'updated' column.
CREATE OR REPLACE FUNCTION update_updated_column()
RETURNS TRIGGER AS $$
BEGIN
    -- ':=' is the canonical plpgsql assignment operator
    NEW.updated := NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION update_updated_column() IS 'Automatically updates the updated timestamp on row modification';
|
||||
445
migrations.old/20250101000002_core_tables.sql
Normal file
445
migrations.old/20250101000002_core_tables.sql
Normal file
@@ -0,0 +1,445 @@
|
||||
-- Migration: Core Tables
-- Description: Creates core tables for packs, runtimes, workers, identity, permissions, policies, and keys
-- Version: 20250101000002

-- ============================================================================
-- PACK TABLE
-- ============================================================================

CREATE TABLE pack (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    label TEXT NOT NULL,
    description TEXT,
    version TEXT NOT NULL,
    conf_schema JSONB NOT NULL DEFAULT '{}'::jsonb,
    config JSONB NOT NULL DEFAULT '{}'::jsonb,
    meta JSONB NOT NULL DEFAULT '{}'::jsonb,
    tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    runtime_deps TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    is_standard BOOLEAN NOT NULL DEFAULT FALSE,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT pack_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT pack_ref_format CHECK (ref ~ '^[a-z][a-z0-9_-]+$'),
    -- Full SemVer 2.0.0 shape, including optional pre-release and build metadata
    CONSTRAINT pack_version_semver CHECK (
        version ~ '^\d+\.\d+\.\d+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$'
    )
);

-- Indexes
-- NOTE: no separate index on pack(ref): the UNIQUE constraint above already
-- creates a unique btree index on ref, so a second one would only add write cost.
CREATE INDEX idx_pack_created ON pack(created DESC);
CREATE INDEX idx_pack_is_standard ON pack(is_standard) WHERE is_standard = TRUE;
CREATE INDEX idx_pack_is_standard_created ON pack(is_standard, created DESC);
CREATE INDEX idx_pack_version_created ON pack(version, created DESC);
CREATE INDEX idx_pack_config_gin ON pack USING GIN (config);
CREATE INDEX idx_pack_meta_gin ON pack USING GIN (meta);
CREATE INDEX idx_pack_tags_gin ON pack USING GIN (tags);
CREATE INDEX idx_pack_runtime_deps_gin ON pack USING GIN (runtime_deps);

-- Trigger: keep 'updated' current on row modification
CREATE TRIGGER update_pack_updated
    BEFORE UPDATE ON pack
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON pack TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE pack_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE pack IS 'Packs bundle related automation components';
COMMENT ON COLUMN pack.ref IS 'Unique pack reference identifier (e.g., "slack", "github")';
COMMENT ON COLUMN pack.label IS 'Human-readable pack name';
COMMENT ON COLUMN pack.version IS 'Semantic version of the pack';
COMMENT ON COLUMN pack.conf_schema IS 'JSON schema for pack configuration';
COMMENT ON COLUMN pack.config IS 'Pack configuration values';
COMMENT ON COLUMN pack.meta IS 'Pack metadata';
COMMENT ON COLUMN pack.runtime_deps IS 'Array of required runtime references';
COMMENT ON COLUMN pack.is_standard IS 'Whether this is a core/built-in pack';
|
||||
|
||||
-- ============================================================================
-- RUNTIME TABLE
-- ============================================================================

CREATE TABLE runtime (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    -- pack_ref denormalizes pack.ref for convenience; presumably kept in sync
    -- by the application -- TODO confirm (no FK/trigger enforces it here).
    pack_ref TEXT,
    description TEXT,
    runtime_type runtime_type_enum NOT NULL,
    name TEXT NOT NULL,
    distributions JSONB NOT NULL,
    installation JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT runtime_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT runtime_ref_format CHECK (ref ~ '^[^.]+\.(action|sensor)\.[^.]+$')
);

-- Indexes
-- NOTE: no separate index on runtime(ref): the UNIQUE constraint already
-- provides a unique btree index on ref.
CREATE INDEX idx_runtime_pack ON runtime(pack);
CREATE INDEX idx_runtime_type ON runtime(runtime_type);
CREATE INDEX idx_runtime_created ON runtime(created DESC);
CREATE INDEX idx_runtime_pack_type ON runtime(pack, runtime_type);
CREATE INDEX idx_runtime_type_created ON runtime(runtime_type, created DESC);

-- Trigger: keep 'updated' current on row modification
CREATE TRIGGER update_runtime_updated
    BEFORE UPDATE ON runtime
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON runtime TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE runtime_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE runtime IS 'Runtime environments for executing actions and sensors';
COMMENT ON COLUMN runtime.ref IS 'Unique runtime reference (format: pack.type.name)';
COMMENT ON COLUMN runtime.runtime_type IS 'Type of runtime (action or sensor)';
COMMENT ON COLUMN runtime.name IS 'Runtime name (e.g., "python3.11", "nodejs20")';
COMMENT ON COLUMN runtime.distributions IS 'Available distributions for this runtime';
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions';
|
||||
|
||||
-- ============================================================================
-- WORKER TABLE
-- ============================================================================

CREATE TABLE worker (
    id BIGSERIAL PRIMARY KEY,
    name TEXT NOT NULL,
    worker_type worker_type_enum NOT NULL,
    runtime BIGINT REFERENCES runtime(id),
    host TEXT,
    port INTEGER,
    -- NOT NULL added: a status column with a default should not admit NULL via
    -- an explicit INSERT; every worker has an operational status.
    status worker_status_enum NOT NULL DEFAULT 'inactive',
    capabilities JSONB,
    meta JSONB,
    last_heartbeat TIMESTAMPTZ,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT worker_port_range CHECK (port IS NULL OR (port > 0 AND port <= 65535))
);

-- Indexes
CREATE INDEX idx_worker_name ON worker(name);
CREATE INDEX idx_worker_type ON worker(worker_type);
CREATE INDEX idx_worker_runtime ON worker(runtime);
CREATE INDEX idx_worker_status ON worker(status);
CREATE INDEX idx_worker_last_heartbeat ON worker(last_heartbeat DESC);
CREATE INDEX idx_worker_status_runtime ON worker(status, runtime);
CREATE INDEX idx_worker_type_status ON worker(worker_type, status);

-- Trigger: keep 'updated' current on row modification
CREATE TRIGGER update_worker_updated
    BEFORE UPDATE ON worker
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON worker TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE worker_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE worker IS 'Worker processes that execute actions';
COMMENT ON COLUMN worker.name IS 'Worker identifier';
COMMENT ON COLUMN worker.worker_type IS 'Deployment type (local, remote, container)';
COMMENT ON COLUMN worker.runtime IS 'Associated runtime environment';
COMMENT ON COLUMN worker.status IS 'Current operational status';
COMMENT ON COLUMN worker.capabilities IS 'Worker capabilities and features';
COMMENT ON COLUMN worker.last_heartbeat IS 'Last health check timestamp';
|
||||
|
||||
-- ============================================================================
-- IDENTITY TABLE
-- ============================================================================

CREATE TABLE identity (
    id BIGSERIAL PRIMARY KEY,
    login TEXT NOT NULL UNIQUE,
    display_name TEXT,
    password_hash TEXT,
    attributes JSONB NOT NULL DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
-- NOTE: no separate index on identity(login): the UNIQUE constraint already
-- provides a unique btree index on login.
CREATE INDEX idx_identity_created ON identity(created DESC);
-- NOTE(review): equality lookups on a password hash are unusual; this partial
-- index presumably serves "has a local password" scans -- confirm the query
-- pattern before keeping it.
CREATE INDEX idx_identity_password_hash ON identity(password_hash) WHERE password_hash IS NOT NULL;
CREATE INDEX idx_identity_attributes_gin ON identity USING GIN (attributes);

-- Trigger: keep 'updated' current on row modification
CREATE TRIGGER update_identity_updated
    BEFORE UPDATE ON identity
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON identity TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE identity_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE identity IS 'Identities represent users or service accounts';
COMMENT ON COLUMN identity.login IS 'Unique login identifier';
COMMENT ON COLUMN identity.display_name IS 'Human-readable name';
COMMENT ON COLUMN identity.password_hash IS 'Argon2 hashed password for authentication (NULL for service accounts or external auth)';
COMMENT ON COLUMN identity.attributes IS 'Custom attributes (email, groups, etc.)';
|
||||
|
||||
-- ============================================================================
-- PERMISSION_SET TABLE
-- ============================================================================

CREATE TABLE permission_set (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    label TEXT,
    description TEXT,
    grants JSONB NOT NULL DEFAULT '[]'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT permission_set_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT permission_set_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);

-- Indexes
-- NOTE: no separate index on permission_set(ref): the UNIQUE constraint
-- already provides a unique btree index on ref.
CREATE INDEX idx_permission_set_pack ON permission_set(pack);
CREATE INDEX idx_permission_set_created ON permission_set(created DESC);

-- Trigger: keep 'updated' current on row modification
CREATE TRIGGER update_permission_set_updated
    BEFORE UPDATE ON permission_set
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON permission_set TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE permission_set_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE permission_set IS 'Permission sets group permissions together (like roles)';
COMMENT ON COLUMN permission_set.ref IS 'Unique permission set reference (format: pack.name)';
COMMENT ON COLUMN permission_set.label IS 'Human-readable name';
COMMENT ON COLUMN permission_set.grants IS 'Array of permission grants';
|
||||
|
||||
-- ============================================================================
-- PERMISSION_ASSIGNMENT TABLE
-- ============================================================================

CREATE TABLE permission_assignment (
    id BIGSERIAL PRIMARY KEY,
    identity BIGINT NOT NULL REFERENCES identity(id) ON DELETE CASCADE,
    permset BIGINT NOT NULL REFERENCES permission_set(id) ON DELETE CASCADE,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Unique constraint to prevent duplicate assignments
    CONSTRAINT unique_identity_permset UNIQUE (identity, permset)
);

-- Indexes
-- NOTE: no single-column index on identity: the UNIQUE (identity, permset)
-- constraint's index has identity as its leading column, which covers
-- identity-only lookups.
CREATE INDEX idx_permission_assignment_permset ON permission_assignment(permset);
CREATE INDEX idx_permission_assignment_created ON permission_assignment(created DESC);
CREATE INDEX idx_permission_assignment_identity_created ON permission_assignment(identity, created DESC);
CREATE INDEX idx_permission_assignment_permset_created ON permission_assignment(permset, created DESC);

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON permission_assignment TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE permission_assignment_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE permission_assignment IS 'Links identities to permission sets (many-to-many)';
COMMENT ON COLUMN permission_assignment.identity IS 'Identity being granted permissions';
COMMENT ON COLUMN permission_assignment.permset IS 'Permission set being assigned';
|
||||
|
||||
-- ============================================================================
-- POLICY TABLE
-- ============================================================================

CREATE TABLE policy (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    action BIGINT, -- Forward reference to action table, will add constraint in next migration
    action_ref TEXT,
    parameters TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    method policy_method_enum NOT NULL,
    threshold INTEGER NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT policy_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT policy_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$'),
    CONSTRAINT policy_threshold_positive CHECK (threshold > 0)
);

-- Indexes
-- NOTE: no separate index on policy(ref): the UNIQUE constraint already
-- provides a unique btree index on ref.
CREATE INDEX idx_policy_pack ON policy(pack);
CREATE INDEX idx_policy_action ON policy(action);
CREATE INDEX idx_policy_created ON policy(created DESC);
CREATE INDEX idx_policy_action_created ON policy(action, created DESC);
CREATE INDEX idx_policy_pack_created ON policy(pack, created DESC);
CREATE INDEX idx_policy_parameters_gin ON policy USING GIN (parameters);
CREATE INDEX idx_policy_tags_gin ON policy USING GIN (tags);

-- Trigger: keep 'updated' current on row modification
CREATE TRIGGER update_policy_updated
    BEFORE UPDATE ON policy
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON policy TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE policy_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE policy IS 'Policies define execution controls (rate limiting, concurrency)';
COMMENT ON COLUMN policy.ref IS 'Unique policy reference (format: pack.name)';
COMMENT ON COLUMN policy.action IS 'Action this policy applies to';
COMMENT ON COLUMN policy.parameters IS 'Parameter names used for policy grouping';
COMMENT ON COLUMN policy.method IS 'How to handle policy violations (cancel/enqueue)';
COMMENT ON COLUMN policy.threshold IS 'Numeric limit (e.g., max concurrent executions)';
|
||||
|
||||
-- ============================================================================
-- KEY TABLE
-- ============================================================================

CREATE TABLE key (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    owner_type owner_type_enum NOT NULL,
    -- owner is auto-populated by the validate_key_owner trigger below
    owner TEXT,
    owner_identity BIGINT REFERENCES identity(id),
    owner_pack BIGINT REFERENCES pack(id),
    owner_pack_ref TEXT,
    owner_action BIGINT, -- Forward reference to action table
    owner_action_ref TEXT,
    owner_sensor BIGINT, -- Forward reference to sensor table
    owner_sensor_ref TEXT,
    name TEXT NOT NULL,
    encrypted BOOLEAN NOT NULL,
    encryption_key_hash TEXT,
    value TEXT NOT NULL,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT key_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT key_ref_format CHECK (ref ~ '^([^.]+\.)?[^.]+$')
);

-- Unique index on owner_type, owner, name: one key name per owner scope
CREATE UNIQUE INDEX idx_key_unique ON key(owner_type, owner, name);

-- Indexes
-- NOTE: no separate index on key(ref): the UNIQUE constraint already provides
-- a unique btree index on ref. Likewise no single-column owner_type index:
-- idx_key_unique and idx_key_owner_type_owner both lead with owner_type.
CREATE INDEX idx_key_owner_identity ON key(owner_identity);
CREATE INDEX idx_key_owner_pack ON key(owner_pack);
CREATE INDEX idx_key_owner_action ON key(owner_action);
CREATE INDEX idx_key_owner_sensor ON key(owner_sensor);
CREATE INDEX idx_key_created ON key(created DESC);
CREATE INDEX idx_key_owner_type_owner ON key(owner_type, owner);
CREATE INDEX idx_key_owner_identity_name ON key(owner_identity, name);
CREATE INDEX idx_key_owner_pack_name ON key(owner_pack, name);
|
||||
|
||||
-- Trigger function: validates the owner_* columns against owner_type and
-- derives the denormalized 'owner' text column from whichever field is set.
--   * owner_type = 'system': no owner_* field may be set; owner becomes 'system'.
--   * any other owner_type: exactly one owner_* field must be set, and it must
--     be the one matching owner_type; owner becomes that id rendered as text.
CREATE OR REPLACE FUNCTION validate_key_owner()
RETURNS TRIGGER AS $$
DECLARE
    -- Tally of populated owner_* fields (boolean -> int cast yields 0/1)
    populated INTEGER;
BEGIN
    populated := (NEW.owner_identity IS NOT NULL)::int
               + (NEW.owner_pack     IS NOT NULL)::int
               + (NEW.owner_action   IS NOT NULL)::int
               + (NEW.owner_sensor   IS NOT NULL)::int;

    IF NEW.owner_type = 'system' THEN
        -- System-owned keys carry no specific owner reference
        IF populated > 0 THEN
            RAISE EXCEPTION 'System owner cannot have specific owner fields set';
        END IF;
        NEW.owner := 'system';
    ELSIF populated != 1 THEN
        -- Non-system keys must reference exactly one owner
        RAISE EXCEPTION 'Exactly one owner field must be set for owner_type %', NEW.owner_type;
    ELSIF NEW.owner_type = 'identity' THEN
        -- The single populated field must match the declared owner_type
        IF NEW.owner_identity IS NULL THEN
            RAISE EXCEPTION 'owner_identity must be set for owner_type identity';
        END IF;
        NEW.owner := NEW.owner_identity::TEXT;
    ELSIF NEW.owner_type = 'pack' THEN
        IF NEW.owner_pack IS NULL THEN
            RAISE EXCEPTION 'owner_pack must be set for owner_type pack';
        END IF;
        NEW.owner := NEW.owner_pack::TEXT;
    ELSIF NEW.owner_type = 'action' THEN
        IF NEW.owner_action IS NULL THEN
            RAISE EXCEPTION 'owner_action must be set for owner_type action';
        END IF;
        NEW.owner := NEW.owner_action::TEXT;
    ELSIF NEW.owner_type = 'sensor' THEN
        IF NEW.owner_sensor IS NULL THEN
            RAISE EXCEPTION 'owner_sensor must be set for owner_type sensor';
        END IF;
        NEW.owner := NEW.owner_sensor::TEXT;
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger to validate owner fields
CREATE TRIGGER validate_key_owner_trigger
    BEFORE INSERT OR UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION validate_key_owner();

-- Trigger for updated timestamp
CREATE TRIGGER update_key_updated
    BEFORE UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON key TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE key_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE key IS 'Keys store configuration values and secrets with ownership scoping';
COMMENT ON COLUMN key.ref IS 'Unique key reference (format: [owner.]name)';
COMMENT ON COLUMN key.owner_type IS 'Type of owner (system, identity, pack, action, sensor)';
COMMENT ON COLUMN key.owner IS 'Owner identifier (auto-populated by trigger)';
COMMENT ON COLUMN key.owner_identity IS 'Identity owner (if owner_type=identity)';
COMMENT ON COLUMN key.owner_pack IS 'Pack owner (if owner_type=pack)';
COMMENT ON COLUMN key.owner_pack_ref IS 'Pack reference for owner_pack';
COMMENT ON COLUMN key.owner_action IS 'Action owner (if owner_type=action)';
COMMENT ON COLUMN key.owner_sensor IS 'Sensor owner (if owner_type=sensor)';
COMMENT ON COLUMN key.name IS 'Key name within owner scope';
COMMENT ON COLUMN key.encrypted IS 'Whether the value is encrypted';
COMMENT ON COLUMN key.encryption_key_hash IS 'Hash of encryption key used';
COMMENT ON COLUMN key.value IS 'The actual value (encrypted if encrypted=true)';
|
||||
-- ===== file: migrations.old/20250101000003_event_system.sql (new file, 215 lines) =====
|
||||
-- Migration: Event System
-- Description: Creates tables for triggers, sensors, events, and enforcement
-- Version: 20250101000003
|
||||
|
||||
|
||||
-- ============================================================================
-- TRIGGER TABLE
-- ============================================================================

-- NOTE(review): "trigger" is a SQL keyword (non-reserved in PostgreSQL);
-- legal as a table name, but forces quoting in some tools — confirm intent.
CREATE TABLE trigger (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    label TEXT NOT NULL,
    description TEXT,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    param_schema JSONB,
    out_schema JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT trigger_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT trigger_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);

-- Indexes
-- No separate index on ref: the UNIQUE constraint above already creates a
-- unique btree index on ref in PostgreSQL, so idx_trigger_ref was redundant.
CREATE INDEX idx_trigger_pack ON trigger(pack);
CREATE INDEX idx_trigger_enabled ON trigger(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_trigger_created ON trigger(created DESC);
CREATE INDEX idx_trigger_pack_enabled ON trigger(pack, enabled);
CREATE INDEX idx_trigger_enabled_created ON trigger(enabled, created DESC) WHERE enabled = TRUE;

-- Keep trigger.updated current on every row update.
CREATE TRIGGER update_trigger_updated
    BEFORE UPDATE ON trigger
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON trigger TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE trigger_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE trigger IS 'Trigger definitions that can activate rules';
COMMENT ON COLUMN trigger.ref IS 'Unique trigger reference (format: pack.name)';
COMMENT ON COLUMN trigger.label IS 'Human-readable trigger name';
COMMENT ON COLUMN trigger.enabled IS 'Whether this trigger is active';
COMMENT ON COLUMN trigger.param_schema IS 'JSON schema defining the expected configuration parameters when this trigger is used';
COMMENT ON COLUMN trigger.out_schema IS 'JSON schema defining the structure of event payloads generated by this trigger';
|
||||
|
||||
-- ============================================================================
-- SENSOR TABLE
-- ============================================================================

CREATE TABLE sensor (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    label TEXT NOT NULL,
    description TEXT NOT NULL,
    entrypoint TEXT NOT NULL,
    runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
    runtime_ref TEXT NOT NULL,
    trigger BIGINT NOT NULL REFERENCES trigger(id) ON DELETE CASCADE,
    trigger_ref TEXT NOT NULL,
    -- NOTE(review): enabled has no DEFAULT (unlike trigger.enabled) — inserts
    -- must supply it explicitly; confirm this is intentional.
    enabled BOOLEAN NOT NULL,
    param_schema JSONB,
    config JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT sensor_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT sensor_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);

-- Indexes
-- No separate index on ref: the UNIQUE constraint already creates one
-- in PostgreSQL, so idx_sensor_ref was redundant.
CREATE INDEX idx_sensor_pack ON sensor(pack);
CREATE INDEX idx_sensor_runtime ON sensor(runtime);
CREATE INDEX idx_sensor_trigger ON sensor(trigger);
CREATE INDEX idx_sensor_enabled ON sensor(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_sensor_created ON sensor(created DESC);
CREATE INDEX idx_sensor_trigger_enabled ON sensor(trigger, enabled);
CREATE INDEX idx_sensor_pack_enabled ON sensor(pack, enabled);
CREATE INDEX idx_sensor_runtime_enabled ON sensor(runtime, enabled);
CREATE INDEX idx_sensor_config ON sensor USING GIN (config);

-- Keep sensor.updated current on every row update.
CREATE TRIGGER update_sensor_updated
    BEFORE UPDATE ON sensor
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON sensor TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE sensor_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE sensor IS 'Sensors monitor for trigger conditions and generate events';
COMMENT ON COLUMN sensor.ref IS 'Unique sensor reference (format: pack.name)';
COMMENT ON COLUMN sensor.entrypoint IS 'Code entry point for the sensor';
COMMENT ON COLUMN sensor.runtime IS 'Execution environment for the sensor';
COMMENT ON COLUMN sensor.trigger IS 'Trigger that this sensor monitors for';
COMMENT ON COLUMN sensor.enabled IS 'Whether this sensor is active';
COMMENT ON COLUMN sensor.param_schema IS 'JSON schema describing expected configuration (optional, usually inherited from trigger)';
COMMENT ON COLUMN sensor.config IS 'Actual configuration values for this sensor instance (conforms to trigger param_schema)';

-- Deferred FK: key is created in an earlier migration, before sensor exists.
ALTER TABLE key
    ADD CONSTRAINT key_owner_sensor_fkey
    FOREIGN KEY (owner_sensor) REFERENCES sensor(id) ON DELETE CASCADE;
|
||||
|
||||
-- ============================================================================
-- EVENT TABLE
-- ============================================================================

CREATE TABLE event (
    id BIGSERIAL PRIMARY KEY,
    trigger BIGINT REFERENCES trigger(id) ON DELETE SET NULL,
    trigger_ref TEXT NOT NULL,
    config JSONB,
    payload JSONB,
    -- ON DELETE SET NULL added for consistency with the trigger FK above:
    -- without it (default NO ACTION) deleting a sensor with historical
    -- events would fail, even though source_ref preserves the reference.
    source BIGINT REFERENCES sensor(id) ON DELETE SET NULL,
    source_ref TEXT,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
CREATE INDEX idx_event_trigger ON event(trigger);
CREATE INDEX idx_event_trigger_ref ON event(trigger_ref);
CREATE INDEX idx_event_source ON event(source);
CREATE INDEX idx_event_created ON event(created DESC);
CREATE INDEX idx_event_trigger_created ON event(trigger, created DESC);
CREATE INDEX idx_event_trigger_ref_created ON event(trigger_ref, created DESC);
CREATE INDEX idx_event_source_created ON event(source, created DESC);
CREATE INDEX idx_event_payload_gin ON event USING GIN (payload);

-- Keep event.updated current on every row update.
CREATE TRIGGER update_event_updated
    BEFORE UPDATE ON event
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON event TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE event_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE event IS 'Events are instances of triggers firing';
COMMENT ON COLUMN event.trigger IS 'Trigger that fired (may be null if trigger deleted)';
COMMENT ON COLUMN event.trigger_ref IS 'Trigger reference (preserved even if trigger deleted)';
COMMENT ON COLUMN event.config IS 'Snapshot of trigger/sensor configuration at event time';
COMMENT ON COLUMN event.payload IS 'Event data payload';
COMMENT ON COLUMN event.source IS 'Sensor that generated this event';
|
||||
|
||||
-- ============================================================================
-- ENFORCEMENT TABLE
-- ============================================================================

CREATE TABLE enforcement (
    id BIGSERIAL PRIMARY KEY,
    rule BIGINT, -- Forward reference to rule table, will add constraint in next migration
    rule_ref TEXT NOT NULL,
    trigger_ref TEXT NOT NULL,
    config JSONB,
    event BIGINT REFERENCES event(id) ON DELETE SET NULL,
    status enforcement_status_enum NOT NULL DEFAULT 'created',
    payload JSONB NOT NULL,
    -- The enum type already restricts values, so the former
    -- enforcement_condition_check CHECK (condition IN ('any','all'))
    -- was redundant and has been dropped.
    condition enforcement_condition_enum NOT NULL DEFAULT 'all',
    conditions JSONB NOT NULL DEFAULT '[]'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
CREATE INDEX idx_enforcement_rule ON enforcement(rule);
CREATE INDEX idx_enforcement_rule_ref ON enforcement(rule_ref);
CREATE INDEX idx_enforcement_trigger_ref ON enforcement(trigger_ref);
CREATE INDEX idx_enforcement_event ON enforcement(event);
CREATE INDEX idx_enforcement_status ON enforcement(status);
CREATE INDEX idx_enforcement_created ON enforcement(created DESC);
CREATE INDEX idx_enforcement_status_created ON enforcement(status, created DESC);
CREATE INDEX idx_enforcement_rule_status ON enforcement(rule, status);
CREATE INDEX idx_enforcement_event_status ON enforcement(event, status);
CREATE INDEX idx_enforcement_payload_gin ON enforcement USING GIN (payload);
CREATE INDEX idx_enforcement_conditions_gin ON enforcement USING GIN (conditions);

-- Keep enforcement.updated current on every row update.
CREATE TRIGGER update_enforcement_updated
    BEFORE UPDATE ON enforcement
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON enforcement TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE enforcement_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE enforcement IS 'Enforcements represent rule triggering by events';
COMMENT ON COLUMN enforcement.rule IS 'Rule being enforced (may be null if rule deleted)';
COMMENT ON COLUMN enforcement.rule_ref IS 'Rule reference (preserved even if rule deleted)';
COMMENT ON COLUMN enforcement.event IS 'Event that triggered this enforcement';
COMMENT ON COLUMN enforcement.status IS 'Processing status';
COMMENT ON COLUMN enforcement.payload IS 'Event payload for rule evaluation';
COMMENT ON COLUMN enforcement.condition IS 'Logical operator for conditions (any=OR, all=AND)';
COMMENT ON COLUMN enforcement.conditions IS 'Condition expressions to evaluate';
|
||||
-- ===== file: migrations.old/20250101000004_execution_system.sql (new file, 457 lines) =====
|
||||
-- Migration: Execution System
-- Description: Creates tables for actions, rules, executions, and inquiries
-- Version: 20250101000004
|
||||
|
||||
|
||||
-- ============================================================================
-- ACTION TABLE
-- ============================================================================

CREATE TABLE action (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT NOT NULL,
    label TEXT NOT NULL,
    description TEXT NOT NULL,
    entrypoint TEXT NOT NULL,
    -- NOTE(review): no ON DELETE action here, while sensor.runtime uses
    -- CASCADE — deleting a runtime with actions will fail; confirm intent.
    runtime BIGINT REFERENCES runtime(id),
    param_schema JSONB,
    out_schema JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT action_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT action_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);

-- Indexes
-- No separate index on ref: the UNIQUE constraint already creates one
-- in PostgreSQL, so idx_action_ref was redundant.
CREATE INDEX idx_action_pack ON action(pack);
CREATE INDEX idx_action_runtime ON action(runtime);
CREATE INDEX idx_action_created ON action(created DESC);
CREATE INDEX idx_action_pack_runtime ON action(pack, runtime);
CREATE INDEX idx_action_pack_created ON action(pack, created DESC);

-- Keep action.updated current on every row update.
CREATE TRIGGER update_action_updated
    BEFORE UPDATE ON action
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON action TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE action_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE action IS 'Actions are executable tasks/operations';
COMMENT ON COLUMN action.ref IS 'Unique action reference (format: pack.name)';
COMMENT ON COLUMN action.label IS 'Human-readable action name';
COMMENT ON COLUMN action.entrypoint IS 'Code entry point for the action';
COMMENT ON COLUMN action.runtime IS 'Execution environment for the action';
COMMENT ON COLUMN action.param_schema IS 'JSON schema for action input parameters';
COMMENT ON COLUMN action.out_schema IS 'JSON schema for action output/results';

-- Deferred FKs: policy and key are created in earlier migrations,
-- before action exists.
ALTER TABLE policy
    ADD CONSTRAINT policy_action_fkey
    FOREIGN KEY (action) REFERENCES action(id) ON DELETE CASCADE;

ALTER TABLE key
    ADD CONSTRAINT key_owner_action_fkey
    FOREIGN KEY (owner_action) REFERENCES action(id) ON DELETE CASCADE;
|
||||
|
||||
-- ============================================================================
-- RULE TABLE
-- ============================================================================

CREATE TABLE rule (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT NOT NULL,
    label TEXT NOT NULL,
    description TEXT NOT NULL,
    action BIGINT NOT NULL REFERENCES action(id),
    action_ref TEXT NOT NULL,
    trigger BIGINT NOT NULL REFERENCES trigger(id),
    trigger_ref TEXT NOT NULL,
    conditions JSONB NOT NULL DEFAULT '[]'::jsonb,
    action_params JSONB DEFAULT '{}'::jsonb,
    trigger_params JSONB DEFAULT '{}'::jsonb,
    -- NOTE(review): enabled has no DEFAULT (unlike trigger.enabled);
    -- inserts must supply it explicitly — confirm intent.
    enabled BOOLEAN NOT NULL,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT rule_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT rule_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);

-- Indexes
-- No separate index on ref: the UNIQUE constraint already creates one
-- in PostgreSQL, so idx_rule_ref was redundant.
CREATE INDEX idx_rule_pack ON rule(pack);
CREATE INDEX idx_rule_action ON rule(action);
CREATE INDEX idx_rule_trigger ON rule(trigger);
CREATE INDEX idx_rule_enabled ON rule(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_rule_created ON rule(created DESC);
CREATE INDEX idx_rule_trigger_enabled ON rule(trigger, enabled);
CREATE INDEX idx_rule_action_enabled ON rule(action, enabled);
CREATE INDEX idx_rule_pack_enabled ON rule(pack, enabled);
CREATE INDEX idx_rule_action_params_gin ON rule USING GIN (action_params);
CREATE INDEX idx_rule_trigger_params_gin ON rule USING GIN (trigger_params);

-- Keep rule.updated current on every row update.
CREATE TRIGGER update_rule_updated
    BEFORE UPDATE ON rule
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON rule TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE rule_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE rule IS 'Rules connect triggers to actions with conditional logic';
COMMENT ON COLUMN rule.ref IS 'Unique rule reference (format: pack.name)';
COMMENT ON COLUMN rule.label IS 'Human-readable rule name';
COMMENT ON COLUMN rule.action IS 'Action to execute when rule conditions are met';
COMMENT ON COLUMN rule.trigger IS 'Trigger that activates this rule';
COMMENT ON COLUMN rule.conditions IS 'JSON array of condition expressions';
COMMENT ON COLUMN rule.action_params IS 'JSON object of parameters to pass to the action when rule is triggered';
COMMENT ON COLUMN rule.trigger_params IS 'JSON object of parameters for trigger configuration and event filtering';
COMMENT ON COLUMN rule.enabled IS 'Whether this rule is active';

-- Deferred FK: enforcement was created before rule existed.
ALTER TABLE enforcement
    ADD CONSTRAINT enforcement_rule_fkey
    FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
|
||||
|
||||
-- ============================================================================
-- EXECUTION TABLE
-- ============================================================================

CREATE TABLE execution (
    id BIGSERIAL PRIMARY KEY,
    action BIGINT REFERENCES action(id),
    action_ref TEXT NOT NULL,
    config JSONB,
    -- Self-reference: enables nested workflow execution trees.
    parent BIGINT REFERENCES execution(id),
    enforcement BIGINT REFERENCES enforcement(id),
    executor BIGINT REFERENCES identity(id) ON DELETE SET NULL,
    status execution_status_enum NOT NULL DEFAULT 'requested',
    result JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
CREATE INDEX idx_execution_action ON execution(action);
CREATE INDEX idx_execution_action_ref ON execution(action_ref);
CREATE INDEX idx_execution_parent ON execution(parent);
CREATE INDEX idx_execution_enforcement ON execution(enforcement);
CREATE INDEX idx_execution_executor ON execution(executor);
CREATE INDEX idx_execution_status ON execution(status);
CREATE INDEX idx_execution_created ON execution(created DESC);
CREATE INDEX idx_execution_updated ON execution(updated DESC);
CREATE INDEX idx_execution_status_created ON execution(status, created DESC);
CREATE INDEX idx_execution_status_updated ON execution(status, updated DESC);
CREATE INDEX idx_execution_action_status ON execution(action, status);
CREATE INDEX idx_execution_executor_created ON execution(executor, created DESC);
CREATE INDEX idx_execution_parent_created ON execution(parent, created DESC);
CREATE INDEX idx_execution_result_gin ON execution USING GIN (result);

-- Keep execution.updated current on every row update.
CREATE TRIGGER update_execution_updated
    BEFORE UPDATE ON execution
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON execution TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE execution_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE execution IS 'Executions represent action runs, supports nested workflows';
COMMENT ON COLUMN execution.action IS 'Action being executed (may be null if action deleted)';
COMMENT ON COLUMN execution.action_ref IS 'Action reference (preserved even if action deleted)';
COMMENT ON COLUMN execution.config IS 'Snapshot of action configuration at execution time';
COMMENT ON COLUMN execution.parent IS 'Parent execution ID for workflow hierarchies';
COMMENT ON COLUMN execution.enforcement IS 'Enforcement that triggered this execution (if rule-driven)';
COMMENT ON COLUMN execution.executor IS 'Identity that initiated the execution';
COMMENT ON COLUMN execution.status IS 'Current execution lifecycle status';
COMMENT ON COLUMN execution.result IS 'Execution output/results';
|
||||
|
||||
-- ============================================================================
-- INQUIRY TABLE
-- ============================================================================

CREATE TABLE inquiry (
    id BIGSERIAL PRIMARY KEY,
    execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
    prompt TEXT NOT NULL,
    response_schema JSONB,
    assigned_to BIGINT REFERENCES identity(id) ON DELETE SET NULL,
    status inquiry_status_enum NOT NULL DEFAULT 'pending',
    response JSONB,
    timeout_at TIMESTAMPTZ,
    responded_at TIMESTAMPTZ,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
CREATE INDEX idx_inquiry_execution ON inquiry(execution);
CREATE INDEX idx_inquiry_assigned_to ON inquiry(assigned_to);
CREATE INDEX idx_inquiry_status ON inquiry(status);
-- Partial: only inquiries with a deadline are scanned by timeout sweeps.
CREATE INDEX idx_inquiry_timeout_at ON inquiry(timeout_at) WHERE timeout_at IS NOT NULL;
CREATE INDEX idx_inquiry_created ON inquiry(created DESC);
CREATE INDEX idx_inquiry_status_created ON inquiry(status, created DESC);
CREATE INDEX idx_inquiry_assigned_status ON inquiry(assigned_to, status);
CREATE INDEX idx_inquiry_execution_status ON inquiry(execution, status);
CREATE INDEX idx_inquiry_response_gin ON inquiry USING GIN (response);

-- Keep inquiry.updated current on every row update.
CREATE TRIGGER update_inquiry_updated
    BEFORE UPDATE ON inquiry
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON inquiry TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE inquiry_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE inquiry IS 'Inquiries enable human-in-the-loop workflows with async user interactions';
COMMENT ON COLUMN inquiry.execution IS 'Execution that is waiting on this inquiry';
COMMENT ON COLUMN inquiry.prompt IS 'Question or prompt text for the user';
COMMENT ON COLUMN inquiry.response_schema IS 'JSON schema defining expected response format';
COMMENT ON COLUMN inquiry.assigned_to IS 'Identity who should respond to this inquiry';
COMMENT ON COLUMN inquiry.status IS 'Current inquiry lifecycle status';
COMMENT ON COLUMN inquiry.response IS 'User response data';
COMMENT ON COLUMN inquiry.timeout_at IS 'When this inquiry expires';
COMMENT ON COLUMN inquiry.responded_at IS 'When the response was received';
|
||||
|
||||
-- ============================================================================
-- WORKFLOW DEFINITION TABLE
-- ============================================================================

-- NOTE(review): this table uses VARCHAR(255) where the rest of the schema
-- uses TEXT — confirm whether the length caps are real business rules.
CREATE TABLE workflow_definition (
    id BIGSERIAL PRIMARY KEY,
    ref VARCHAR(255) NOT NULL UNIQUE,
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref VARCHAR(255) NOT NULL,
    label VARCHAR(255) NOT NULL,
    description TEXT,
    version VARCHAR(50) NOT NULL,
    param_schema JSONB,
    out_schema JSONB,
    definition JSONB NOT NULL,
    tags TEXT[] DEFAULT '{}',
    enabled BOOLEAN DEFAULT true NOT NULL,
    created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
    updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);

-- Indexes
-- No separate index on ref: the UNIQUE constraint already creates one
-- in PostgreSQL, so idx_workflow_def_ref was redundant.
CREATE INDEX idx_workflow_def_pack ON workflow_definition(pack);
CREATE INDEX idx_workflow_def_enabled ON workflow_definition(enabled);
CREATE INDEX idx_workflow_def_tags ON workflow_definition USING gin(tags);

-- Keep workflow_definition.updated current on every row update.
CREATE TRIGGER update_workflow_definition_updated
    BEFORE UPDATE ON workflow_definition
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON workflow_definition TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE workflow_definition_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE workflow_definition IS 'Stores workflow definitions (YAML parsed to JSON)';
COMMENT ON COLUMN workflow_definition.ref IS 'Unique workflow reference (e.g., pack_name.workflow_name)';
COMMENT ON COLUMN workflow_definition.definition IS 'Complete workflow specification including tasks, variables, and transitions';
COMMENT ON COLUMN workflow_definition.param_schema IS 'JSON schema for workflow input parameters';
COMMENT ON COLUMN workflow_definition.out_schema IS 'JSON schema for workflow output';
|
||||
|
||||
-- ============================================================================
-- WORKFLOW EXECUTION TABLE
-- ============================================================================

CREATE TABLE workflow_execution (
    id BIGSERIAL PRIMARY KEY,
    execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
    workflow_def BIGINT NOT NULL REFERENCES workflow_definition(id),
    -- Task-name buckets tracking per-phase progress through the graph.
    current_tasks TEXT[] DEFAULT '{}',
    completed_tasks TEXT[] DEFAULT '{}',
    failed_tasks TEXT[] DEFAULT '{}',
    skipped_tasks TEXT[] DEFAULT '{}',
    variables JSONB DEFAULT '{}',
    task_graph JSONB NOT NULL,
    status execution_status_enum NOT NULL DEFAULT 'requested',
    error_message TEXT,
    paused BOOLEAN DEFAULT false NOT NULL,
    pause_reason TEXT,
    created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
    updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);

-- Indexes
CREATE INDEX idx_workflow_exec_execution ON workflow_execution(execution);
CREATE INDEX idx_workflow_exec_workflow_def ON workflow_execution(workflow_def);
CREATE INDEX idx_workflow_exec_status ON workflow_execution(status);
-- Partial: only paused executions are scanned by resume logic.
CREATE INDEX idx_workflow_exec_paused ON workflow_execution(paused) WHERE paused = true;

-- Keep workflow_execution.updated current on every row update.
CREATE TRIGGER update_workflow_execution_updated
    BEFORE UPDATE ON workflow_execution
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON workflow_execution TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE workflow_execution_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE workflow_execution IS 'Runtime state tracking for workflow executions';
COMMENT ON COLUMN workflow_execution.variables IS 'Workflow-scoped variables, updated via publish directives';
COMMENT ON COLUMN workflow_execution.task_graph IS 'Execution graph with dependencies and transitions';
COMMENT ON COLUMN workflow_execution.current_tasks IS 'Array of task names currently executing';
COMMENT ON COLUMN workflow_execution.paused IS 'True if workflow execution is paused (can be resumed)';
|
||||
|
||||
-- ============================================================================
-- WORKFLOW TASK EXECUTION TABLE
-- ============================================================================

CREATE TABLE workflow_task_execution (
    id BIGSERIAL PRIMARY KEY,
    workflow_execution BIGINT NOT NULL REFERENCES workflow_execution(id) ON DELETE CASCADE,
    execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
    task_name VARCHAR(255) NOT NULL,
    task_index INTEGER,
    task_batch INTEGER,
    status execution_status_enum NOT NULL DEFAULT 'requested',
    started_at TIMESTAMPTZ,
    completed_at TIMESTAMPTZ,
    duration_ms BIGINT,
    result JSONB,
    error JSONB,
    -- Retry bookkeeping.
    retry_count INTEGER DEFAULT 0 NOT NULL,
    max_retries INTEGER DEFAULT 0 NOT NULL,
    next_retry_at TIMESTAMPTZ,
    -- Timeout bookkeeping.
    timeout_seconds INTEGER,
    timed_out BOOLEAN DEFAULT false NOT NULL,
    created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
    updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);

-- Indexes
CREATE INDEX idx_wf_task_exec_workflow ON workflow_task_execution(workflow_execution);
CREATE INDEX idx_wf_task_exec_execution ON workflow_task_execution(execution);
CREATE INDEX idx_wf_task_exec_status ON workflow_task_execution(status);
CREATE INDEX idx_wf_task_exec_task_name ON workflow_task_execution(task_name);
-- Partials: only retried / timed-out tasks matter for these lookups.
CREATE INDEX idx_wf_task_exec_retry ON workflow_task_execution(retry_count) WHERE retry_count > 0;
CREATE INDEX idx_wf_task_exec_timeout ON workflow_task_execution(timed_out) WHERE timed_out = true;

-- Keep workflow_task_execution.updated current on every row update.
CREATE TRIGGER update_workflow_task_execution_updated
    BEFORE UPDATE ON workflow_task_execution
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON workflow_task_execution TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE workflow_task_execution_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE workflow_task_execution IS 'Individual task executions within workflows';
COMMENT ON COLUMN workflow_task_execution.task_index IS 'Index for with-items iteration tasks (0-based)';
COMMENT ON COLUMN workflow_task_execution.task_batch IS 'Batch number for batched with-items processing';
COMMENT ON COLUMN workflow_task_execution.duration_ms IS 'Task execution duration in milliseconds';
|
||||
|
||||
-- ============================================================================
-- MODIFY ACTION TABLE - Add Workflow Support
-- ============================================================================

-- Workflows are surfaced as actions; flag them and link to their definition.
ALTER TABLE action
    ADD COLUMN is_workflow BOOLEAN DEFAULT false NOT NULL,
    ADD COLUMN workflow_def BIGINT REFERENCES workflow_definition(id) ON DELETE CASCADE;

-- Partial: only workflow-actions are scanned by workflow lookups.
CREATE INDEX idx_action_is_workflow ON action(is_workflow) WHERE is_workflow = true;
CREATE INDEX idx_action_workflow_def ON action(workflow_def);

COMMENT ON COLUMN action.is_workflow IS 'True if this action is a workflow (composable action graph)';
COMMENT ON COLUMN action.workflow_def IS 'Reference to workflow definition if is_workflow=true';
|
||||
|
||||
-- ============================================================================
-- WORKFLOW VIEWS
-- ============================================================================

CREATE VIEW workflow_execution_summary AS
SELECT
    we.id,
    we.execution,
    wd.ref AS workflow_ref,
    wd.label AS workflow_label,
    wd.version AS workflow_version,
    we.status,
    we.paused,
    -- COALESCE fix: array_length(arr, 1) returns NULL (not 0) for empty
    -- arrays, so counts for fresh executions would otherwise read NULL.
    COALESCE(array_length(we.current_tasks, 1), 0) AS current_task_count,
    COALESCE(array_length(we.completed_tasks, 1), 0) AS completed_task_count,
    COALESCE(array_length(we.failed_tasks, 1), 0) AS failed_task_count,
    COALESCE(array_length(we.skipped_tasks, 1), 0) AS skipped_task_count,
    we.error_message,
    we.created,
    we.updated
FROM workflow_execution we
JOIN workflow_definition wd ON we.workflow_def = wd.id;

COMMENT ON VIEW workflow_execution_summary IS 'Summary view of workflow executions with task counts';
|
||||
|
||||
-- Per-task view joining each task execution back to its workflow context.
-- NOTE(review): the alias workflow_execution_id actually exposes we.execution
-- (the parent execution id) — confirm downstream readers expect that.
CREATE VIEW workflow_task_detail AS
SELECT
    wte.id,
    wte.workflow_execution,
    we.execution AS workflow_execution_id,
    wd.ref AS workflow_ref,
    wte.task_name,
    wte.task_index,
    wte.task_batch,
    wte.status,
    wte.retry_count,
    wte.max_retries,
    wte.timed_out,
    wte.duration_ms,
    wte.started_at,
    wte.completed_at,
    wte.created,
    wte.updated
FROM workflow_task_execution wte
JOIN workflow_execution we ON wte.workflow_execution = we.id
JOIN workflow_definition wd ON we.workflow_def = wd.id;

COMMENT ON VIEW workflow_task_detail IS 'Detailed view of task executions with workflow context';
|
||||
|
||||
-- Maps each workflow definition to the action that fronts it (LEFT JOIN:
-- definitions without a registered action still appear, with NULL action columns).
CREATE VIEW workflow_action_link AS
SELECT
    wd.id AS workflow_def_id,
    wd.ref AS workflow_ref,
    wd.label,
    wd.version,
    wd.enabled,
    a.id AS action_id,
    a.ref AS action_ref,
    a.pack AS pack_id,
    a.pack_ref
FROM workflow_definition wd
LEFT JOIN action a ON a.workflow_def = wd.id AND a.is_workflow = true;

COMMENT ON VIEW workflow_action_link IS 'Links workflow definitions to their corresponding action records';

-- Permissions for views
GRANT SELECT ON workflow_execution_summary TO svc_attune;
GRANT SELECT ON workflow_task_detail TO svc_attune;
GRANT SELECT ON workflow_action_link TO svc_attune;
|
||||
-- ===== file: migrations.old/20250101000005_supporting_tables.sql (new file, 153 lines) =====
|
||||
-- Migration: Supporting Tables and Indexes
-- Description: Creates notification and artifact tables plus performance optimization indexes
-- Version: 20250101000005

-- ============================================================================
-- NOTIFICATION TABLE
-- ============================================================================

-- System notifications about entity changes, consumed for real-time updates.
-- IF NOT EXISTS keeps the migration idempotent on re-runs, matching the
-- later webhook migrations in this series.
CREATE TABLE IF NOT EXISTS notification (
    id          BIGSERIAL PRIMARY KEY,
    channel     TEXT NOT NULL,     -- pg_notify channel (typically a table name)
    entity_type TEXT NOT NULL,     -- type of entity the notification is about
    entity      TEXT NOT NULL,     -- entity identifier (id or ref)
    activity    TEXT NOT NULL,     -- e.g. 'created', 'updated', 'completed'
    state       notification_status_enum NOT NULL DEFAULT 'created',
    content     JSONB,             -- optional payload data
    created     TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated     TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes (IF NOT EXISTS so re-running the migration is safe)
CREATE INDEX IF NOT EXISTS idx_notification_channel ON notification(channel);
CREATE INDEX IF NOT EXISTS idx_notification_entity_type ON notification(entity_type);
CREATE INDEX IF NOT EXISTS idx_notification_entity ON notification(entity);
CREATE INDEX IF NOT EXISTS idx_notification_state ON notification(state);
CREATE INDEX IF NOT EXISTS idx_notification_created ON notification(created DESC);
CREATE INDEX IF NOT EXISTS idx_notification_channel_state ON notification(channel, state);
CREATE INDEX IF NOT EXISTS idx_notification_entity_type_entity ON notification(entity_type, entity);
CREATE INDEX IF NOT EXISTS idx_notification_state_created ON notification(state, created DESC);
CREATE INDEX IF NOT EXISTS idx_notification_content_gin ON notification USING GIN (content);
|
||||
|
||||
-- Keep notification.updated current on every row update.
CREATE TRIGGER update_notification_updated
    BEFORE UPDATE ON notification
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Broadcast a compact JSON payload over pg_notify whenever a notification
-- row is inserted; listeners subscribe to the channel named in NEW.channel.
-- The payload carries only identifying fields; consumers fetch details by id.
CREATE OR REPLACE FUNCTION notify_on_insert()
RETURNS TRIGGER AS $$
BEGIN
    PERFORM pg_notify(
        NEW.channel,
        json_build_object(
            'id', NEW.id,
            'entity_type', NEW.entity_type,
            'entity', NEW.entity,
            'activity', NEW.activity
        )::text
    );
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Fire the broadcast after every notification insert.
CREATE TRIGGER notify_on_notification_insert
    AFTER INSERT ON notification
    FOR EACH ROW
    EXECUTE FUNCTION notify_on_insert();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON notification TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE notification_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE notification IS 'System notifications about entity changes for real-time updates';
COMMENT ON COLUMN notification.channel IS 'Notification channel (typically table name)';
COMMENT ON COLUMN notification.entity_type IS 'Type of entity (table name)';
COMMENT ON COLUMN notification.entity IS 'Entity identifier (typically ID or ref)';
COMMENT ON COLUMN notification.activity IS 'Activity type (e.g., "created", "updated", "completed")';
COMMENT ON COLUMN notification.state IS 'Processing state of notification';
COMMENT ON COLUMN notification.content IS 'Optional notification payload data';
|
||||
|
||||
-- ============================================================================
-- ARTIFACT TABLE
-- ============================================================================

-- Artifacts track files, logs, and outputs from executions.
-- IF NOT EXISTS keeps the migration idempotent on re-runs, matching the
-- later webhook migrations in this series.
CREATE TABLE IF NOT EXISTS artifact (
    id               BIGSERIAL PRIMARY KEY,
    ref              TEXT NOT NULL,                             -- artifact reference/path
    scope            owner_type_enum NOT NULL DEFAULT 'system', -- owner type
    owner            TEXT NOT NULL DEFAULT '',                  -- owner identifier
    type             artifact_type_enum NOT NULL,               -- file, url, progress, ...
    retention_policy artifact_retention_enum NOT NULL DEFAULT 'versions',
    retention_limit  INTEGER NOT NULL DEFAULT 1,                -- numeric limit for the policy
    created          TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated          TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes (IF NOT EXISTS so re-running the migration is safe)
CREATE INDEX IF NOT EXISTS idx_artifact_ref ON artifact(ref);
CREATE INDEX IF NOT EXISTS idx_artifact_scope ON artifact(scope);
CREATE INDEX IF NOT EXISTS idx_artifact_owner ON artifact(owner);
CREATE INDEX IF NOT EXISTS idx_artifact_type ON artifact(type);
CREATE INDEX IF NOT EXISTS idx_artifact_created ON artifact(created DESC);
CREATE INDEX IF NOT EXISTS idx_artifact_scope_owner ON artifact(scope, owner);
CREATE INDEX IF NOT EXISTS idx_artifact_type_created ON artifact(type, created DESC);

-- Keep artifact.updated current on every row update.
CREATE TRIGGER update_artifact_updated
    BEFORE UPDATE ON artifact
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON artifact TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE artifact_id_seq TO svc_attune;

-- Comments
COMMENT ON TABLE artifact IS 'Artifacts track files, logs, and outputs from executions';
COMMENT ON COLUMN artifact.ref IS 'Artifact reference/path';
COMMENT ON COLUMN artifact.scope IS 'Owner type (system, identity, pack, action, sensor)';
COMMENT ON COLUMN artifact.owner IS 'Owner identifier';
COMMENT ON COLUMN artifact.type IS 'Artifact type (file, url, progress, etc.)';
COMMENT ON COLUMN artifact.retention_policy IS 'How to retain artifacts (versions, days, hours, minutes)';
COMMENT ON COLUMN artifact.retention_limit IS 'Numeric limit for retention policy';
|
||||
|
||||
-- ============================================================================
-- QUEUE_STATS TABLE
-- ============================================================================

-- Real-time queue statistics per action; one row per action, removed with
-- its action via ON DELETE CASCADE.
-- IF NOT EXISTS keeps the migration idempotent on re-runs, matching the
-- later webhook migrations in this series.
CREATE TABLE IF NOT EXISTS queue_stats (
    action_id          BIGINT PRIMARY KEY REFERENCES action(id) ON DELETE CASCADE,
    queue_length       INTEGER NOT NULL DEFAULT 0,   -- executions waiting in queue
    active_count       INTEGER NOT NULL DEFAULT 0,   -- currently running executions
    max_concurrent     INTEGER NOT NULL DEFAULT 1,   -- concurrency cap
    oldest_enqueued_at TIMESTAMPTZ,                  -- NULL when queue is empty
    total_enqueued     BIGINT NOT NULL DEFAULT 0,    -- lifetime enqueue counter
    total_completed    BIGINT NOT NULL DEFAULT 0,    -- lifetime completion counter
    last_updated       TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes (IF NOT EXISTS so re-running the migration is safe)
CREATE INDEX IF NOT EXISTS idx_queue_stats_last_updated ON queue_stats(last_updated);

-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON queue_stats TO svc_attune;

-- Comments
COMMENT ON TABLE queue_stats IS 'Real-time queue statistics for action execution ordering';
COMMENT ON COLUMN queue_stats.action_id IS 'Foreign key to action table';
COMMENT ON COLUMN queue_stats.queue_length IS 'Number of executions waiting in queue';
COMMENT ON COLUMN queue_stats.active_count IS 'Number of currently running executions';
COMMENT ON COLUMN queue_stats.max_concurrent IS 'Maximum concurrent executions allowed';
COMMENT ON COLUMN queue_stats.oldest_enqueued_at IS 'Timestamp of oldest queued execution (NULL if queue empty)';
COMMENT ON COLUMN queue_stats.total_enqueued IS 'Total executions enqueued since queue creation';
COMMENT ON COLUMN queue_stats.total_completed IS 'Total executions completed since queue creation';
COMMENT ON COLUMN queue_stats.last_updated IS 'Timestamp of last statistics update';
|
||||
-- ============================================================================
-- File: next migration (43 lines) — execution NOTIFY trigger for SSE
-- ============================================================================
-- Migration: Add NOTIFY trigger for execution updates
-- This enables real-time SSE streaming of execution status changes

-- Broadcasts execution inserts/updates on the 'attune_notifications' channel.
--
-- FIX: pg_notify payloads are limited to roughly 8000 bytes. NEW.result is
-- arbitrary JSONB, so an oversized payload would raise an error and abort the
-- very INSERT/UPDATE being notified about. When the serialized payload would
-- exceed the limit we null out data.result and set 'result_truncated' so SSE
-- clients know to fetch the full result by id instead.
CREATE OR REPLACE FUNCTION notify_execution_change()
RETURNS TRIGGER AS $$
DECLARE
    payload JSONB;
BEGIN
    -- Build JSON payload with execution details.
    payload := jsonb_build_object(
        'entity_type', 'execution',
        'entity_id', NEW.id,
        'timestamp', NOW(),
        'data', jsonb_build_object(
            'id', NEW.id,
            'status', NEW.status,
            'action_id', NEW.action,
            'action_ref', NEW.action_ref,
            'result', NEW.result,
            'created', NEW.created,
            'updated', NEW.updated
        )
    );

    -- Stay safely under the pg_notify payload limit (~8000 bytes).
    IF octet_length(payload::text) > 7500 THEN
        payload := jsonb_set(payload, '{data,result}', 'null'::jsonb)
                   || jsonb_build_object('result_truncated', true);
    END IF;

    -- Send notification to the attune_notifications channel.
    PERFORM pg_notify('attune_notifications', payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Fire on every execution insert or update.
CREATE TRIGGER notify_execution_change
    AFTER INSERT OR UPDATE ON execution
    FOR EACH ROW
    EXECUTE FUNCTION notify_execution_change();

COMMENT ON FUNCTION notify_execution_change() IS
    'Sends PostgreSQL NOTIFY for execution changes to enable real-time SSE streaming';
COMMENT ON TRIGGER notify_execution_change ON execution IS
    'Broadcasts execution changes via pg_notify for SSE clients';
|
||||
-- ============================================================================
-- File: migrations.old/20260120000001_add_webhook_support.sql (245 lines)
-- ============================================================================
|
||||
-- Migration: Add Webhook Support to Triggers
-- Date: 2026-01-20
-- Description: Adds webhook capabilities to the trigger system, allowing any
--   trigger to be webhook-enabled with a unique webhook key for external
--   integrations.

-- Webhook configuration columns on the trigger table (idempotent).
ALTER TABLE trigger
    ADD COLUMN IF NOT EXISTS webhook_enabled BOOLEAN NOT NULL DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS webhook_key VARCHAR(64) UNIQUE,
    ADD COLUMN IF NOT EXISTS webhook_secret VARCHAR(128);

-- Column documentation
COMMENT ON COLUMN trigger.webhook_enabled IS 'Whether webhooks are enabled for this trigger. When enabled, external systems can POST to the webhook URL to create events.';
COMMENT ON COLUMN trigger.webhook_key IS 'Unique webhook key used in the webhook URL. Format: wh_[32 alphanumeric chars]. Acts as a bearer token for webhook authentication.';
COMMENT ON COLUMN trigger.webhook_secret IS 'Optional secret for HMAC signature verification. When set, webhook requests must include a valid X-Webhook-Signature header.';

-- Partial index: fast key lookup on inbound webhook requests.
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_key
    ON trigger(webhook_key)
    WHERE webhook_key IS NOT NULL;

-- Partial index: enumerate webhook-enabled triggers cheaply.
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_enabled
    ON trigger(webhook_enabled)
    WHERE webhook_enabled = TRUE;

-- Events carry webhook metadata in the 'config' JSONB column; expression
-- indexes make webhook-sourced events queryable without a schema change.
CREATE INDEX IF NOT EXISTS idx_event_webhook_source
    ON event((config->>'source'))
    WHERE (config->>'source') = 'webhook';

CREATE INDEX IF NOT EXISTS idx_event_webhook_key
    ON event((config->>'webhook_key'))
    WHERE config->>'webhook_key' IS NOT NULL;
|
||||
|
||||
-- Generates a unique webhook key of the form wh_<32 alphanumeric chars>.
--
-- FIX: the previous implementation base64-encoded 24 random bytes and then
-- stripped '/', '+' and '=' characters, which could leave FEWER than 32
-- characters in the suffix — violating the documented format. Hex-encoding
-- 16 random bytes always yields exactly 32 lowercase alphanumeric
-- characters, so the format now always holds.
CREATE OR REPLACE FUNCTION generate_webhook_key()
RETURNS VARCHAR(64) AS $$
DECLARE
    key_prefix    VARCHAR(3) := 'wh_';
    random_suffix VARCHAR(32);
    new_key       VARCHAR(64);
    max_attempts  INT := 10;
    attempt       INT := 0;
BEGIN
    LOOP
        -- 16 random bytes -> exactly 32 lowercase hex characters.
        random_suffix := encode(gen_random_bytes(16), 'hex');
        new_key := key_prefix || random_suffix;

        -- Retry on the (vanishingly unlikely) collision with an existing key.
        IF NOT EXISTS (SELECT 1 FROM trigger WHERE webhook_key = new_key) THEN
            RETURN new_key;
        END IF;

        attempt := attempt + 1;
        IF attempt >= max_attempts THEN
            RAISE EXCEPTION 'Failed to generate unique webhook key after % attempts', max_attempts;
        END IF;
    END LOOP;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION generate_webhook_key() IS
    'Generates a unique webhook key with format wh_[32 alphanumeric chars]. Ensures uniqueness by checking existing keys.';
|
||||
|
||||
-- Enables webhooks for a trigger, minting a key on first enable.
--
-- FIX: the previous version did a separate EXISTS check, then an unlocked
-- SELECT, then an UPDATE — concurrent calls could interleave between those
-- statements. The row is now read once with FOR UPDATE, which both proves
-- existence and holds the row lock through the UPDATE.
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(64),
    webhook_url TEXT
) AS $$
DECLARE
    v_key VARCHAR(64);
BEGIN
    -- Lock the row; also serves as the existence check.
    SELECT t.webhook_key INTO v_key
    FROM trigger t
    WHERE t.id = p_trigger_id
    FOR UPDATE;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Reuse an existing key; only mint a new one on first enable.
    IF v_key IS NULL THEN
        v_key := generate_webhook_key();
    END IF;

    UPDATE trigger
    SET
        webhook_enabled = TRUE,
        webhook_key = v_key,
        updated = NOW()
    WHERE id = p_trigger_id;

    -- Only the URL path is returned; callers prepend their configured base URL.
    RETURN QUERY
    SELECT
        TRUE::BOOLEAN AS webhook_enabled,
        v_key AS webhook_key,
        ('/api/v1/webhooks/' || v_key) AS webhook_url;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION enable_trigger_webhook(BIGINT) IS
    'Enables webhooks for a trigger. Generates a new webhook key if one does not exist. Returns webhook details.';
|
||||
|
||||
-- Disables webhooks for a trigger; the webhook_key is intentionally
-- retained for audit purposes.
--
-- FIX: replaces the EXISTS-then-UPDATE pair (two scans plus a race window)
-- with a single UPDATE, using FOUND for the existence check.
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    UPDATE trigger
    SET
        webhook_enabled = FALSE,
        updated = NOW()
    WHERE id = p_trigger_id;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
    'Disables webhooks for a trigger. Webhook key is retained for audit purposes.';
|
||||
|
||||
-- Regenerates the webhook key for a trigger; the old key is revoked
-- immediately by being overwritten.
--
-- FIX: the separate EXISTS check and unlocked SELECT left a race window in
-- which concurrent regenerations could interleave. The row is now read once
-- with FOR UPDATE, which proves existence and holds the lock through the
-- UPDATE.
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(64),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_old_key VARCHAR(64);
    v_new_key VARCHAR(64);
BEGIN
    -- Lock the row; doubles as the existence check.
    SELECT t.webhook_key INTO v_old_key
    FROM trigger t
    WHERE t.id = p_trigger_id
    FOR UPDATE;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    v_new_key := generate_webhook_key();

    UPDATE trigger
    SET
        webhook_key = v_new_key,
        updated = NOW()
    WHERE id = p_trigger_id;

    RETURN QUERY
    SELECT
        v_new_key AS webhook_key,
        (v_old_key IS NOT NULL)::BOOLEAN AS previous_key_revoked;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
    'Regenerates the webhook key for a trigger. The old key is immediately revoked.';
|
||||
|
||||
-- Per-trigger webhook usage statistics (webhook-enabled triggers only).
-- Events are attributed to a webhook when config->>'source' = 'webhook'.
CREATE OR REPLACE VIEW webhook_stats AS
SELECT
    t.id AS trigger_id,
    t.ref AS trigger_ref,
    t.webhook_enabled,
    t.webhook_key,
    t.created AS webhook_created_at,
    COUNT(e.id) AS total_events,
    MAX(e.created) AS last_event_at,
    MIN(e.created) AS first_event_at
FROM trigger AS t
LEFT JOIN event AS e
    ON e.trigger = t.id
    AND (e.config->>'source') = 'webhook'
WHERE t.webhook_enabled = TRUE
GROUP BY t.id, t.ref, t.webhook_enabled, t.webhook_key, t.created;

COMMENT ON VIEW webhook_stats IS
    'Statistics for webhook-enabled triggers including event counts and timestamps.';

-- Grant permissions (adjust as needed for your RBAC setup)
-- GRANT SELECT ON webhook_stats TO attune_api;
-- GRANT EXECUTE ON FUNCTION generate_webhook_key() TO attune_api;
-- GRANT EXECUTE ON FUNCTION enable_trigger_webhook(BIGINT) TO attune_api;
-- GRANT EXECUTE ON FUNCTION disable_trigger_webhook(BIGINT) TO attune_api;
-- GRANT EXECUTE ON FUNCTION regenerate_trigger_webhook_key(BIGINT) TO attune_api;

-- Trigger update timestamp is already handled by existing triggers

-- Announce completion when the migration is applied.
DO $$
BEGIN
    RAISE NOTICE 'Webhook support migration completed successfully';
    RAISE NOTICE 'Webhook-enabled triggers can now receive events via POST /api/v1/webhooks/:webhook_key';
END $$;
|
||||
-- ============================================================================
-- File: migrations.old/20260120000002_webhook_advanced_features.sql (362 lines)
-- ============================================================================
|
||||
-- Migration: Add advanced webhook features (HMAC, rate limiting, IP whitelist)
-- Created: 2026-01-20
-- Phase: 3 - Advanced Security Features

-- Advanced webhook configuration columns on the trigger table.
-- Consolidated into a single ALTER TABLE (one lock acquisition / catalog
-- update instead of nine), matching the style of the 20260120000001
-- migration; IF NOT EXISTS keeps it idempotent.
ALTER TABLE trigger
    ADD COLUMN IF NOT EXISTS webhook_hmac_enabled BOOLEAN NOT NULL DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS webhook_hmac_secret VARCHAR(128),
    ADD COLUMN IF NOT EXISTS webhook_hmac_algorithm VARCHAR(32) DEFAULT 'sha256',
    ADD COLUMN IF NOT EXISTS webhook_rate_limit_enabled BOOLEAN NOT NULL DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS webhook_rate_limit_requests INTEGER DEFAULT 100,
    ADD COLUMN IF NOT EXISTS webhook_rate_limit_window_seconds INTEGER DEFAULT 60,
    ADD COLUMN IF NOT EXISTS webhook_ip_whitelist_enabled BOOLEAN NOT NULL DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS webhook_ip_whitelist TEXT[],                     -- IP addresses / CIDR blocks
    ADD COLUMN IF NOT EXISTS webhook_payload_size_limit_kb INTEGER DEFAULT 1024;  -- default 1MB

COMMENT ON COLUMN trigger.webhook_hmac_enabled IS 'Whether HMAC signature verification is required';
COMMENT ON COLUMN trigger.webhook_hmac_secret IS 'Secret key for HMAC signature verification';
COMMENT ON COLUMN trigger.webhook_hmac_algorithm IS 'HMAC algorithm (sha256, sha512, etc.)';
COMMENT ON COLUMN trigger.webhook_rate_limit_enabled IS 'Whether rate limiting is enabled';
COMMENT ON COLUMN trigger.webhook_rate_limit_requests IS 'Max requests allowed per window';
COMMENT ON COLUMN trigger.webhook_rate_limit_window_seconds IS 'Rate limit time window in seconds';
COMMENT ON COLUMN trigger.webhook_ip_whitelist_enabled IS 'Whether IP whitelist is enabled';
COMMENT ON COLUMN trigger.webhook_ip_whitelist IS 'Array of allowed IP addresses/CIDR blocks';
COMMENT ON COLUMN trigger.webhook_payload_size_limit_kb IS 'Maximum webhook payload size in KB';
|
||||
|
||||
-- Audit log of every inbound webhook request, successful or not.
CREATE TABLE IF NOT EXISTS webhook_event_log (
    id BIGSERIAL PRIMARY KEY,
    trigger_id BIGINT NOT NULL REFERENCES trigger(id) ON DELETE CASCADE,
    trigger_ref VARCHAR(255) NOT NULL,
    webhook_key VARCHAR(64) NOT NULL,
    event_id BIGINT REFERENCES event(id) ON DELETE SET NULL,  -- NULL when no event was created
    source_ip INET,
    user_agent TEXT,
    payload_size_bytes INTEGER,
    headers JSONB,
    status_code INTEGER NOT NULL,
    error_message TEXT,
    processing_time_ms INTEGER,
    hmac_verified BOOLEAN,
    rate_limited BOOLEAN DEFAULT FALSE,
    ip_allowed BOOLEAN,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- FIX: indexes now use IF NOT EXISTS — the table above is created with
-- IF NOT EXISTS, so on a re-run the CREATE TABLE succeeded silently but
-- the first CREATE INDEX aborted the migration.
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_trigger_id ON webhook_event_log(trigger_id);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_webhook_key ON webhook_event_log(webhook_key);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_created ON webhook_event_log(created DESC);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_status ON webhook_event_log(status_code);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_source_ip ON webhook_event_log(source_ip);

COMMENT ON TABLE webhook_event_log IS 'Audit log of all webhook requests';
COMMENT ON COLUMN webhook_event_log.status_code IS 'HTTP status code returned (200, 400, 403, 429, etc.)';
COMMENT ON COLUMN webhook_event_log.error_message IS 'Error message if request failed';
COMMENT ON COLUMN webhook_event_log.processing_time_ms IS 'Time taken to process webhook in milliseconds';
COMMENT ON COLUMN webhook_event_log.hmac_verified IS 'Whether HMAC signature was verified successfully';
COMMENT ON COLUMN webhook_event_log.rate_limited IS 'Whether request was rate limited';
COMMENT ON COLUMN webhook_event_log.ip_allowed IS 'Whether source IP was in whitelist (if enabled)';
|
||||
|
||||
-- Per-window request counters backing check_webhook_rate_limit().
CREATE TABLE IF NOT EXISTS webhook_rate_limit (
    id BIGSERIAL PRIMARY KEY,
    webhook_key VARCHAR(64) NOT NULL,
    window_start TIMESTAMPTZ NOT NULL,
    request_count INTEGER NOT NULL DEFAULT 1,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- One counter row per key per window; the ON CONFLICT upsert target.
    UNIQUE(webhook_key, window_start)
);

-- FIX: IF NOT EXISTS added so a re-run does not fail after the idempotent
-- CREATE TABLE above succeeds silently.
CREATE INDEX IF NOT EXISTS idx_webhook_rate_limit_key ON webhook_rate_limit(webhook_key);
CREATE INDEX IF NOT EXISTS idx_webhook_rate_limit_window ON webhook_rate_limit(window_start DESC);

COMMENT ON TABLE webhook_rate_limit IS 'Tracks webhook request counts for rate limiting';
COMMENT ON COLUMN webhook_rate_limit.window_start IS 'Start of the rate limit time window';
COMMENT ON COLUMN webhook_rate_limit.request_count IS 'Number of requests in this window';
||||
|
||||
-- Produce a 64-byte random secret, hex-encoded to 128 characters.
CREATE OR REPLACE FUNCTION generate_webhook_hmac_secret()
RETURNS VARCHAR(128) AS $$
BEGIN
    RETURN encode(gen_random_bytes(64), 'hex');
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION generate_webhook_hmac_secret() IS 'Generate a secure random HMAC secret';
|
||||
|
||||
-- Enable HMAC signature verification for a webhook-enabled trigger.
-- A fresh secret is generated and stored on every call.
CREATE OR REPLACE FUNCTION enable_trigger_webhook_hmac(
    p_trigger_id BIGINT,
    p_algorithm VARCHAR(32) DEFAULT 'sha256'
)
RETURNS TABLE(
    webhook_hmac_enabled BOOLEAN,
    webhook_hmac_secret VARCHAR(128),
    webhook_hmac_algorithm VARCHAR(32)
) AS $$
DECLARE
    v_enabled BOOLEAN;
    v_new_secret VARCHAR(128);
BEGIN
    -- Read the webhook flag; also doubles as the existence probe.
    SELECT t.webhook_enabled INTO v_enabled
    FROM trigger t
    WHERE t.id = p_trigger_id;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % not found', p_trigger_id;
    END IF;

    -- HMAC only makes sense on an active webhook.
    IF NOT v_enabled THEN
        RAISE EXCEPTION 'Webhooks must be enabled before enabling HMAC verification';
    END IF;

    -- NOTE(review): sha1 is accepted for legacy integrations but is weak;
    -- prefer sha256/sha512.
    IF p_algorithm NOT IN ('sha256', 'sha512', 'sha1') THEN
        RAISE EXCEPTION 'Invalid HMAC algorithm. Supported: sha256, sha512, sha1';
    END IF;

    v_new_secret := generate_webhook_hmac_secret();

    UPDATE trigger
    SET
        webhook_hmac_enabled = TRUE,
        webhook_hmac_secret = v_new_secret,
        webhook_hmac_algorithm = p_algorithm,
        updated = NOW()
    WHERE id = p_trigger_id;

    RETURN QUERY
    SELECT
        TRUE AS webhook_hmac_enabled,
        v_new_secret AS webhook_hmac_secret,
        p_algorithm AS webhook_hmac_algorithm;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION enable_trigger_webhook_hmac(BIGINT, VARCHAR) IS 'Enable HMAC signature verification for a trigger';
|
||||
|
||||
-- Turn off HMAC verification and wipe the stored secret.
-- Returns TRUE when a row was updated, FALSE when the trigger id is unknown.
CREATE OR REPLACE FUNCTION disable_trigger_webhook_hmac(p_trigger_id BIGINT)
RETURNS BOOLEAN AS $$
BEGIN
    UPDATE trigger
    SET webhook_hmac_enabled = FALSE,
        webhook_hmac_secret = NULL,
        updated = NOW()
    WHERE id = p_trigger_id;
    RETURN FOUND;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION disable_trigger_webhook_hmac(BIGINT) IS 'Disable HMAC verification for a trigger';
|
||||
|
||||
-- Configure (enable/disable and tune) rate limiting for a trigger webhook.
-- Bounds keep configurations sane: 1-10000 requests per 1-3600 second window.
CREATE OR REPLACE FUNCTION configure_trigger_webhook_rate_limit(
    p_trigger_id BIGINT,
    p_enabled BOOLEAN,
    p_requests INTEGER DEFAULT 100,
    p_window_seconds INTEGER DEFAULT 60
)
RETURNS TABLE(
    rate_limit_enabled BOOLEAN,
    rate_limit_requests INTEGER,
    rate_limit_window_seconds INTEGER
) AS $$
BEGIN
    -- Validate bounds before touching the row.
    IF p_requests < 1 OR p_requests > 10000 THEN
        RAISE EXCEPTION 'Rate limit requests must be between 1 and 10000';
    END IF;

    IF p_window_seconds < 1 OR p_window_seconds > 3600 THEN
        RAISE EXCEPTION 'Rate limit window must be between 1 and 3600 seconds';
    END IF;

    UPDATE trigger
    SET webhook_rate_limit_enabled = p_enabled,
        webhook_rate_limit_requests = p_requests,
        webhook_rate_limit_window_seconds = p_window_seconds,
        updated = NOW()
    WHERE id = p_trigger_id;

    -- FOUND reflects the UPDATE; no separate existence query needed.
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % not found', p_trigger_id;
    END IF;

    -- Echo back the stored configuration.
    RETURN QUERY
    SELECT p_enabled, p_requests, p_window_seconds;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION configure_trigger_webhook_rate_limit(BIGINT, BOOLEAN, INTEGER, INTEGER) IS 'Configure rate limiting for a trigger webhook';
|
||||
|
||||
-- Configure the source-IP whitelist for a trigger webhook.
--
-- FIX: entries are now validated as INET/CIDR up front. Previously any text
-- was stored verbatim and only failed later — inside
-- check_webhook_ip_whitelist()'s cast — turning one typo into a runtime
-- error on every webhook request for that trigger.
CREATE OR REPLACE FUNCTION configure_trigger_webhook_ip_whitelist(
    p_trigger_id BIGINT,
    p_enabled BOOLEAN,
    p_ip_list TEXT[] DEFAULT ARRAY[]::TEXT[]
)
RETURNS TABLE(
    ip_whitelist_enabled BOOLEAN,
    ip_whitelist TEXT[]
) AS $$
DECLARE
    v_entry TEXT;
BEGIN
    -- Fail fast on malformed entries (single IPs or CIDR blocks accepted).
    IF p_ip_list IS NOT NULL THEN
        FOREACH v_entry IN ARRAY p_ip_list
        LOOP
            BEGIN
                PERFORM v_entry::INET;
            EXCEPTION WHEN invalid_text_representation THEN
                RAISE EXCEPTION 'Invalid IP/CIDR whitelist entry: %', v_entry;
            END;
        END LOOP;
    END IF;

    UPDATE trigger
    SET
        webhook_ip_whitelist_enabled = p_enabled,
        webhook_ip_whitelist = p_ip_list,
        updated = NOW()
    WHERE id = p_trigger_id;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % not found', p_trigger_id;
    END IF;

    RETURN QUERY
    SELECT
        p_enabled AS ip_whitelist_enabled,
        p_ip_list AS ip_whitelist;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION configure_trigger_webhook_ip_whitelist(BIGINT, BOOLEAN, TEXT[]) IS 'Configure IP whitelist for a trigger webhook';
|
||||
|
||||
-- Count a webhook request against its fixed rate-limit window and report
-- whether the request is still within the limit.
--
-- FIX: the window start was derived from date_trunc('minute', NOW()), which
-- collapses any sub-minute window to a full 60 s (the epoch of a whole
-- minute is always divisible by 30, 20, ...) and misaligns windows that are
-- not a divisor of 60. The window boundary is now computed directly from the
-- epoch, giving correct fixed windows for any p_window_seconds.
CREATE OR REPLACE FUNCTION check_webhook_rate_limit(
    p_webhook_key VARCHAR(64),
    p_max_requests INTEGER,
    p_window_seconds INTEGER
)
RETURNS BOOLEAN AS $$
DECLARE
    v_window_start TIMESTAMPTZ;
    v_request_count INTEGER;
BEGIN
    -- Fixed window boundary: floor(epoch / window) * window.
    v_window_start := to_timestamp(
        floor(EXTRACT(EPOCH FROM NOW()) / p_window_seconds) * p_window_seconds
    );

    -- Atomically create-or-increment the counter for this window; the
    -- upsert avoids a SELECT-then-INSERT race between concurrent requests.
    INSERT INTO webhook_rate_limit (webhook_key, window_start, request_count)
    VALUES (p_webhook_key, v_window_start, 1)
    ON CONFLICT (webhook_key, window_start)
    DO UPDATE SET
        request_count = webhook_rate_limit.request_count + 1,
        updated = NOW()
    RETURNING request_count INTO v_request_count;

    -- Opportunistic cleanup of stale windows (older than 1 hour; windows
    -- are capped at 3600 s by configure_trigger_webhook_rate_limit).
    DELETE FROM webhook_rate_limit
    WHERE window_start < NOW() - INTERVAL '1 hour';

    -- TRUE while within the limit, FALSE once exceeded.
    RETURN v_request_count <= p_max_requests;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION check_webhook_rate_limit(VARCHAR, INTEGER, INTEGER) IS 'Check if webhook request is within rate limit';
|
||||
|
||||
-- Check whether a source IP matches any whitelist entry (single IPs or
-- CIDR blocks). Empty or NULL whitelists deny everything (fail closed).
--
-- FIX: a malformed entry previously made the ::INET cast raise, which
-- aborted the whole webhook request. Malformed entries are now warned
-- about and skipped, so one bad entry cannot take the endpoint down.
CREATE OR REPLACE FUNCTION check_webhook_ip_whitelist(
    p_source_ip INET,
    p_whitelist TEXT[]
)
RETURNS BOOLEAN AS $$
DECLARE
    v_allowed_cidr TEXT;
BEGIN
    -- If whitelist is empty, deny access.
    IF p_whitelist IS NULL OR array_length(p_whitelist, 1) IS NULL THEN
        RETURN FALSE;
    END IF;

    FOREACH v_allowed_cidr IN ARRAY p_whitelist
    LOOP
        BEGIN
            -- <<= is "contained within or equals": handles both single
            -- IPs and CIDR notation.
            IF p_source_ip <<= v_allowed_cidr::INET THEN
                RETURN TRUE;
            END IF;
        EXCEPTION WHEN invalid_text_representation THEN
            RAISE WARNING 'Skipping malformed IP whitelist entry: %', v_allowed_cidr;
        END;
    END LOOP;

    RETURN FALSE;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION check_webhook_ip_whitelist(INET, TEXT[]) IS 'Check if source IP is in whitelist (supports CIDR notation)';
|
||||
|
||||
-- Detailed per-trigger webhook statistics: configuration flags combined
-- with request outcomes aggregated from webhook_event_log.
CREATE OR REPLACE VIEW webhook_stats_detailed AS
SELECT
    t.id AS trigger_id,
    t.ref AS trigger_ref,
    t.label AS trigger_label,
    t.webhook_enabled,
    t.webhook_key,
    t.webhook_hmac_enabled,
    t.webhook_rate_limit_enabled,
    t.webhook_rate_limit_requests,
    t.webhook_rate_limit_window_seconds,
    t.webhook_ip_whitelist_enabled,
    COUNT(DISTINCT wel.id) AS total_requests,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.status_code = 200) AS successful_requests,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.status_code >= 400) AS failed_requests,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.rate_limited = TRUE) AS rate_limited_requests,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.hmac_verified = FALSE AND t.webhook_hmac_enabled = TRUE) AS hmac_failures,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.ip_allowed = FALSE AND t.webhook_ip_whitelist_enabled = TRUE) AS ip_blocked_requests,
    COUNT(DISTINCT wel.event_id) AS events_created,
    AVG(wel.processing_time_ms) AS avg_processing_time_ms,
    MAX(wel.created) AS last_request_at,
    -- NOTE(review): t.created is the trigger row's creation time, not the
    -- moment webhooks were enabled — confirm the column name is intended.
    t.created AS webhook_enabled_at
FROM trigger AS t
LEFT JOIN webhook_event_log AS wel ON wel.trigger_id = t.id
WHERE t.webhook_enabled = TRUE
GROUP BY
    t.id, t.ref, t.label, t.webhook_enabled, t.webhook_key,
    t.webhook_hmac_enabled, t.webhook_rate_limit_enabled,
    t.webhook_rate_limit_requests, t.webhook_rate_limit_window_seconds,
    t.webhook_ip_whitelist_enabled, t.created;

COMMENT ON VIEW webhook_stats_detailed IS 'Detailed statistics for webhook-enabled triggers';

-- Grant permissions (adjust as needed for your security model)
-- NOTE(review): these grant to attune_api while earlier migrations in this
-- series grant to svc_attune — confirm which role the service actually uses.
GRANT SELECT, INSERT ON webhook_event_log TO attune_api;
GRANT SELECT, INSERT, UPDATE, DELETE ON webhook_rate_limit TO attune_api;
GRANT SELECT ON webhook_stats_detailed TO attune_api;
GRANT USAGE, SELECT ON SEQUENCE webhook_event_log_id_seq TO attune_api;
GRANT USAGE, SELECT ON SEQUENCE webhook_rate_limit_id_seq TO attune_api;
|
||||
-- ============================================================================
-- File: migrations.old/20260120200000_add_pack_test_results.sql (154 lines)
-- ============================================================================
|
||||
-- Migration: Add Pack Test Results Tracking
-- Created: 2026-01-20
-- Description: Add tables and views for tracking pack test execution results

-- Pack test execution tracking table.
-- One row per test run of a pack, recording aggregate counts plus the
-- full structured result payload for later inspection.
CREATE TABLE IF NOT EXISTS pack_test_execution (
    id BIGSERIAL PRIMARY KEY,
    pack_id BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_version VARCHAR(50) NOT NULL,
    execution_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    trigger_reason VARCHAR(50) NOT NULL, -- 'install', 'update', 'manual', 'validation'
    total_tests INT NOT NULL,
    passed INT NOT NULL,
    failed INT NOT NULL,
    skipped INT NOT NULL,
    pass_rate DECIMAL(5,4) NOT NULL, -- fraction of tests passed: 0.0000 to 1.0000
    duration_ms BIGINT NOT NULL,
    result JSONB NOT NULL, -- full test result structure
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    CONSTRAINT valid_test_counts CHECK (total_tests >= 0 AND passed >= 0 AND failed >= 0 AND skipped >= 0),
    CONSTRAINT valid_pass_rate CHECK (pass_rate >= 0.0 AND pass_rate <= 1.0),
    CONSTRAINT valid_trigger_reason CHECK (trigger_reason IN ('install', 'update', 'manual', 'validation'))
);

-- Indexes for efficient queries.
-- IF NOT EXISTS keeps the migration idempotent, matching the
-- CREATE TABLE IF NOT EXISTS above.
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_pack_id ON pack_test_execution(pack_id);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_time ON pack_test_execution(execution_time DESC);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_pass_rate ON pack_test_execution(pass_rate);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_trigger ON pack_test_execution(trigger_reason);

-- Comments for documentation
COMMENT ON TABLE pack_test_execution IS 'Tracks pack test execution results for validation and auditing';
COMMENT ON COLUMN pack_test_execution.pack_id IS 'Reference to the pack being tested';
COMMENT ON COLUMN pack_test_execution.pack_version IS 'Version of the pack at test time';
COMMENT ON COLUMN pack_test_execution.trigger_reason IS 'What triggered the test: install, update, manual, validation';
COMMENT ON COLUMN pack_test_execution.pass_rate IS 'Percentage of tests passed (0.0 to 1.0)';
COMMENT ON COLUMN pack_test_execution.result IS 'Full JSON structure with detailed test results';
|
||||
|
||||
-- Pack test result summary view (all test executions with pack info).
-- The original used LEFT JOIN plus "WHERE pte.id IS NOT NULL"; that filter
-- silently converts the outer join into an inner join, so the join is
-- written as INNER JOIN to make the intent explicit (identical result set,
-- since pte.id is a NOT NULL primary key).
CREATE OR REPLACE VIEW pack_test_summary AS
SELECT
    p.id AS pack_id,
    p.ref AS pack_ref,
    p.label AS pack_label,
    pte.id AS test_execution_id,
    pte.pack_version,
    pte.execution_time AS test_time,
    pte.trigger_reason,
    pte.total_tests,
    pte.passed,
    pte.failed,
    pte.skipped,
    pte.pass_rate,
    pte.duration_ms,
    -- rn = 1 marks the most recent execution per pack (used by pack_latest_test).
    ROW_NUMBER() OVER (PARTITION BY p.id ORDER BY pte.execution_time DESC) AS rn
FROM pack p
INNER JOIN pack_test_execution pte ON p.id = pte.pack_id;

COMMENT ON VIEW pack_test_summary IS 'Summary of all pack test executions with pack details';
|
||||
|
||||
-- Latest test results per pack view.
-- Keeps only the rn = 1 row (most recent execution) from pack_test_summary.
CREATE OR REPLACE VIEW pack_latest_test AS
SELECT
    pack_id,
    pack_ref,
    pack_label,
    test_execution_id,
    pack_version,
    test_time,
    trigger_reason,
    total_tests,
    passed,
    failed,
    skipped,
    pass_rate,
    duration_ms
FROM pack_test_summary
WHERE rn = 1;

COMMENT ON VIEW pack_latest_test IS 'Latest test results for each pack';
|
||||
|
||||
-- Function to get pack test statistics.
-- Returns a single row of aggregate statistics over every recorded test
-- execution for the given pack.
CREATE OR REPLACE FUNCTION get_pack_test_stats(p_pack_id BIGINT)
RETURNS TABLE (
    total_executions BIGINT,
    successful_executions BIGINT,
    failed_executions BIGINT,
    avg_pass_rate DECIMAL,
    avg_duration_ms BIGINT,
    last_test_time TIMESTAMPTZ,
    last_test_passed BOOLEAN
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        COUNT(*)::BIGINT,
        -- "Successful" means every test in the run passed.
        COUNT(*) FILTER (WHERE pte.passed = pte.total_tests)::BIGINT,
        COUNT(*) FILTER (WHERE pte.failed > 0)::BIGINT,
        AVG(pte.pass_rate),
        AVG(pte.duration_ms)::BIGINT,
        MAX(pte.execution_time),
        -- Outcome of the most recent execution (NULL when no rows exist).
        (SELECT latest.failed = 0
         FROM pack_test_execution latest
         WHERE latest.pack_id = p_pack_id
         ORDER BY latest.execution_time DESC
         LIMIT 1)
    FROM pack_test_execution pte
    WHERE pte.pack_id = p_pack_id;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION get_pack_test_stats IS 'Get statistical summary of test executions for a pack';
|
||||
|
||||
-- Function to check if pack has recent passing tests.
-- A "passing" execution ran at least one test, had zero failures, and
-- happened within the last p_hours_ago hours.
CREATE OR REPLACE FUNCTION pack_has_passing_tests(
    p_pack_id BIGINT,
    p_hours_ago INT DEFAULT 24
)
RETURNS BOOLEAN AS $$
BEGIN
    -- Return the EXISTS result directly instead of staging it in a local.
    RETURN EXISTS (
        SELECT 1
        FROM pack_test_execution
        WHERE pack_id = p_pack_id
          AND execution_time > NOW() - make_interval(hours => p_hours_ago)
          AND failed = 0
          AND total_tests > 0
    );
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION pack_has_passing_tests IS 'Check if pack has recent passing test executions';
|
||||
|
||||
-- Add trigger to update pack metadata on test execution.
-- The function body is intentionally a no-op placeholder: if a
-- last_tested column is ever added to pack, update it here.
CREATE OR REPLACE FUNCTION update_pack_test_metadata()
RETURNS TRIGGER AS $$
BEGIN
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER trigger_update_pack_test_metadata
    AFTER INSERT ON pack_test_execution
    FOR EACH ROW
    EXECUTE FUNCTION update_pack_test_metadata();

COMMENT ON TRIGGER trigger_update_pack_test_metadata ON pack_test_execution IS 'Updates pack metadata when tests are executed';
|
||||
59
migrations.old/20260122000001_pack_installation_metadata.sql
Normal file
59
migrations.old/20260122000001_pack_installation_metadata.sql
Normal file
@@ -0,0 +1,59 @@
|
||||
-- Migration: Pack Installation Metadata
-- Description: Tracks pack installation sources, checksums, and metadata
-- Created: 2026-01-22

-- Pack installation metadata table.
-- At most one installation record per pack (enforced by the UNIQUE
-- constraint below).
CREATE TABLE IF NOT EXISTS pack_installation (
    id BIGSERIAL PRIMARY KEY,
    pack_id BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,

    -- Installation source information
    source_type VARCHAR(50) NOT NULL CHECK (source_type IN ('git', 'archive', 'local_directory', 'local_archive', 'registry')),
    source_url TEXT,
    source_ref TEXT, -- git ref (branch/tag/commit) or registry version

    -- Verification
    checksum VARCHAR(64), -- SHA256 checksum of installed pack
    checksum_verified BOOLEAN DEFAULT FALSE,

    -- Installation metadata
    installed_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    installed_by BIGINT REFERENCES identity(id) ON DELETE SET NULL,
    installation_method VARCHAR(50) DEFAULT 'manual' CHECK (installation_method IN ('manual', 'api', 'cli', 'auto')),

    -- Storage information
    storage_path TEXT NOT NULL,

    -- Additional metadata
    meta JSONB DEFAULT '{}'::jsonb,

    created TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,

    -- Constraints
    CONSTRAINT pack_installation_unique_pack UNIQUE (pack_id)
);

-- Indexes. IF NOT EXISTS keeps the migration idempotent, matching the
-- CREATE TABLE IF NOT EXISTS above.
-- NOTE(review): idx_pack_installation_pack_id duplicates the index already
-- created by the pack_installation_unique_pack UNIQUE constraint; kept for
-- compatibility with anything referencing the index by name, but it could
-- be dropped.
CREATE INDEX IF NOT EXISTS idx_pack_installation_pack_id ON pack_installation(pack_id);
CREATE INDEX IF NOT EXISTS idx_pack_installation_source_type ON pack_installation(source_type);
CREATE INDEX IF NOT EXISTS idx_pack_installation_installed_at ON pack_installation(installed_at);
CREATE INDEX IF NOT EXISTS idx_pack_installation_installed_by ON pack_installation(installed_by);

-- Trigger for updated timestamp
CREATE TRIGGER pack_installation_updated_trigger
    BEFORE UPDATE ON pack_installation
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE pack_installation IS 'Tracks pack installation metadata including source, checksum, and storage location';
COMMENT ON COLUMN pack_installation.source_type IS 'Type of installation source (git, archive, local_directory, local_archive, registry)';
COMMENT ON COLUMN pack_installation.source_url IS 'URL or path of the installation source';
COMMENT ON COLUMN pack_installation.source_ref IS 'Git reference (branch/tag/commit) or registry version';
COMMENT ON COLUMN pack_installation.checksum IS 'SHA256 checksum of the installed pack contents';
COMMENT ON COLUMN pack_installation.checksum_verified IS 'Whether the checksum was verified during installation';
COMMENT ON COLUMN pack_installation.installed_by IS 'Identity that installed the pack';
COMMENT ON COLUMN pack_installation.installation_method IS 'Method used to install (manual, api, cli, auto)';
COMMENT ON COLUMN pack_installation.storage_path IS 'File system path where pack is stored';
COMMENT ON COLUMN pack_installation.meta IS 'Additional installation metadata (dependencies resolved, warnings, etc.)';
|
||||
249
migrations.old/20260127000001_consolidate_webhook_config.sql
Normal file
249
migrations.old/20260127000001_consolidate_webhook_config.sql
Normal file
@@ -0,0 +1,249 @@
|
||||
-- Migration: Consolidate Webhook Configuration
-- Date: 2026-01-27
-- Description: Consolidates multiple webhook_* columns into a single webhook_config JSONB column
--              for cleaner schema and better flexibility. Keeps webhook_enabled and webhook_key
--              as separate columns for indexing and quick filtering.

-- Step 1: Add the consolidated configuration column.
ALTER TABLE trigger
    ADD COLUMN IF NOT EXISTS webhook_config JSONB DEFAULT '{}'::jsonb;

COMMENT ON COLUMN trigger.webhook_config IS
'Webhook configuration as JSON. Contains settings like secret, HMAC config, rate limits, IP whitelist, etc.';
|
||||
|
||||
-- Step 2: Migrate existing data to webhook_config.
-- Build a JSON object from the legacy webhook_* columns. Cleanups vs. the
-- original: COALESCE(x, NULL) is a no-op and has been removed (NULL values
-- become JSON null either way), and the IP whitelist array is converted
-- directly with to_jsonb() instead of aggregating over unnest() -- both
-- yield '[]' for an empty array and NULL (coalesced to '[]') for NULL.
UPDATE trigger
SET webhook_config = jsonb_build_object(
    'secret', webhook_secret,
    'hmac', jsonb_build_object(
        'enabled', COALESCE(webhook_hmac_enabled, false),
        'secret', webhook_hmac_secret,
        'algorithm', COALESCE(webhook_hmac_algorithm, 'sha256')
    ),
    'rate_limit', jsonb_build_object(
        'enabled', COALESCE(webhook_rate_limit_enabled, false),
        'requests', webhook_rate_limit_requests,
        'window_seconds', webhook_rate_limit_window_seconds
    ),
    'ip_whitelist', jsonb_build_object(
        'enabled', COALESCE(webhook_ip_whitelist_enabled, false),
        'ips', COALESCE(to_jsonb(webhook_ip_whitelist), '[]'::jsonb)
    ),
    'payload_size_limit_kb', webhook_payload_size_limit_kb
)
WHERE webhook_enabled = true OR webhook_key IS NOT NULL;
|
||||
|
||||
-- Step 3: Drop dependent views that reference the columns we're about to drop.
DROP VIEW IF EXISTS webhook_stats;
DROP VIEW IF EXISTS webhook_stats_detailed;

-- Step 4: Drop NOT NULL constraints on columns we're about to drop.
ALTER TABLE trigger
    DROP CONSTRAINT IF EXISTS trigger_webhook_hmac_enabled_not_null,
    DROP CONSTRAINT IF EXISTS trigger_webhook_rate_limit_enabled_not_null,
    DROP CONSTRAINT IF EXISTS trigger_webhook_ip_whitelist_enabled_not_null;

-- Step 5: Drop old webhook columns (keeping webhook_enabled and webhook_key).
ALTER TABLE trigger
    DROP COLUMN IF EXISTS webhook_secret,
    DROP COLUMN IF EXISTS webhook_hmac_enabled,
    DROP COLUMN IF EXISTS webhook_hmac_secret,
    DROP COLUMN IF EXISTS webhook_hmac_algorithm,
    DROP COLUMN IF EXISTS webhook_rate_limit_enabled,
    DROP COLUMN IF EXISTS webhook_rate_limit_requests,
    DROP COLUMN IF EXISTS webhook_rate_limit_window_seconds,
    DROP COLUMN IF EXISTS webhook_ip_whitelist_enabled,
    DROP COLUMN IF EXISTS webhook_ip_whitelist,
    DROP COLUMN IF EXISTS webhook_payload_size_limit_kb;

-- Step 6: Drop the old index so it can be rebuilt below.
DROP INDEX IF EXISTS idx_trigger_webhook_enabled;

-- Step 7: Recreate the partial index on webhook_enabled.
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_enabled
    ON trigger(webhook_enabled)
    WHERE webhook_enabled = TRUE;

-- The index on webhook_key already exists from a previous migration.

-- Step 8: GIN index for containment/path queries against webhook_config;
-- partial so triggers without any webhook configuration stay out of it.
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_config
    ON trigger USING gin(webhook_config)
    WHERE webhook_config IS NOT NULL AND webhook_config != '{}'::jsonb;
|
||||
|
||||
-- Step 9: Recreate webhook stats view with the new schema.
-- One row per webhook-enabled trigger, aggregating webhook-sourced events.
CREATE OR REPLACE VIEW webhook_stats AS
SELECT
    t.id AS trigger_id,
    t.ref AS trigger_ref,
    t.webhook_enabled,
    t.webhook_key,
    t.webhook_config,
    t.created AS webhook_created_at,
    COUNT(e.id) AS total_events,
    MAX(e.created) AS last_event_at,
    MIN(e.created) AS first_event_at
FROM trigger t
-- Source filter lives in the ON clause so triggers with no webhook events
-- still appear (with zero counts).
LEFT JOIN event e
    ON e.trigger = t.id
    AND (e.config->>'source') = 'webhook'
WHERE t.webhook_enabled = TRUE
GROUP BY t.id, t.ref, t.webhook_enabled, t.webhook_key, t.webhook_config, t.created;

COMMENT ON VIEW webhook_stats IS
'Statistics for webhook-enabled triggers including event counts and timestamps.';
|
||||
|
||||
-- Step 10: Update helper functions to work with webhook_config

-- Update enable_trigger_webhook to work with the new schema.
-- Enables webhooks on a trigger: reuses any existing key (otherwise
-- generates one), merges the caller-supplied config with defaults, and
-- returns the resulting webhook details.
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT,
    p_config JSONB DEFAULT '{}'::jsonb
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(64),
    webhook_url TEXT,
    webhook_config JSONB
) AS $$
DECLARE
    v_key VARCHAR(64);
    v_merged JSONB;
BEGIN
    -- Fail fast when the trigger does not exist.
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Reuse the trigger's existing webhook key; generate one otherwise.
    SELECT t.webhook_key INTO v_key
    FROM trigger t
    WHERE t.id = p_trigger_id;

    IF v_key IS NULL THEN
        v_key := generate_webhook_key();
    END IF;

    -- Merge the supplied config with defaults for the well-known sections;
    -- caller-provided sections win via the COALESCE on each key.
    v_merged := p_config || jsonb_build_object(
        'hmac', COALESCE(p_config->'hmac', jsonb_build_object('enabled', false, 'algorithm', 'sha256')),
        'rate_limit', COALESCE(p_config->'rate_limit', jsonb_build_object('enabled', false)),
        'ip_whitelist', COALESCE(p_config->'ip_whitelist', jsonb_build_object('enabled', false, 'ips', '[]'::jsonb))
    );

    -- Persist the enabled state, key, and merged configuration.
    UPDATE trigger
    SET
        webhook_enabled = TRUE,
        webhook_key = v_key,
        webhook_config = v_merged,
        updated = NOW()
    WHERE id = p_trigger_id;

    -- Hand back the details, including the relative webhook URL.
    RETURN QUERY
    SELECT
        TRUE::BOOLEAN AS webhook_enabled,
        v_key AS webhook_key,
        ('/api/v1/webhooks/' || v_key) AS webhook_url,
        v_merged AS webhook_config;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION enable_trigger_webhook(BIGINT, JSONB) IS
'Enables webhooks for a trigger with optional configuration. Generates a new webhook key if one does not exist. Returns webhook details.';
|
||||
|
||||
-- Update disable_trigger_webhook (recreated for consistency with the new schema).
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    -- Fail fast when the trigger does not exist.
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Flip the enabled flag only; webhook_key and webhook_config are
    -- deliberately retained for audit purposes.
    UPDATE trigger
    SET webhook_enabled = FALSE,
        updated = NOW()
    WHERE id = p_trigger_id;

    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
'Disables webhooks for a trigger. Webhook key and config are retained for audit purposes.';
|
||||
|
||||
-- Update regenerate_trigger_webhook_key (logic unchanged).
-- Issues a fresh webhook key; the previous key stops working immediately.
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(64),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_previous VARCHAR(64);
    v_replacement VARCHAR(64);
BEGIN
    -- Fail fast when the trigger does not exist.
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Remember the old key so we can report whether one was revoked.
    SELECT t.webhook_key INTO v_previous
    FROM trigger t
    WHERE t.id = p_trigger_id;

    v_replacement := generate_webhook_key();

    UPDATE trigger
    SET webhook_key = v_replacement,
        updated = NOW()
    WHERE id = p_trigger_id;

    RETURN QUERY
    SELECT
        v_replacement AS webhook_key,
        (v_previous IS NOT NULL)::BOOLEAN AS previous_key_revoked;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
'Regenerates the webhook key for a trigger. The old key is immediately revoked.';
|
||||
|
||||
-- Drop old webhook-specific functions that are superseded by the
-- consolidated webhook_config column.
DROP FUNCTION IF EXISTS enable_trigger_webhook_hmac(BIGINT, VARCHAR);
DROP FUNCTION IF EXISTS disable_trigger_webhook_hmac(BIGINT);

-- Migration completion notices.
DO $$
BEGIN
    RAISE NOTICE 'Webhook configuration consolidation completed successfully';
    RAISE NOTICE 'Webhook settings now stored in webhook_config JSONB column';
    RAISE NOTICE 'Kept separate columns: webhook_enabled (indexed), webhook_key (indexed)';
END $$;
|
||||
@@ -0,0 +1,97 @@
|
||||
-- Migration: Consolidate workflow_task_execution into execution table
-- Description: Adds workflow_task JSONB column to execution table and migrates data from workflow_task_execution
-- Version: 20260127212500

-- ============================================================================
-- STEP 1: Add workflow_task column to execution table
-- ============================================================================

-- IF NOT EXISTS keeps the migration re-runnable, consistent with the
-- ALTER TABLE ... ADD COLUMN IF NOT EXISTS convention used elsewhere.
ALTER TABLE execution ADD COLUMN IF NOT EXISTS workflow_task JSONB;

COMMENT ON COLUMN execution.workflow_task IS 'Workflow task metadata (only populated for workflow task executions)';
|
||||
|
||||
-- ============================================================================
-- STEP 2: Migrate existing workflow_task_execution data to execution.workflow_task
-- ============================================================================

-- Copy each workflow task row into the matching execution record as JSONB;
-- timestamps are serialized as ISO-8601-style strings.
-- NOTE(review): to_char() renders timestamps in the session time zone while
-- the format string appends a literal "Z" (UTC) suffix -- confirm the
-- migration session runs in UTC, or convert with AT TIME ZONE 'UTC' first.
UPDATE execution e
SET workflow_task = jsonb_build_object(
    'workflow_execution', wte.workflow_execution,
    'task_name', wte.task_name,
    'task_index', wte.task_index,
    'task_batch', wte.task_batch,
    'retry_count', wte.retry_count,
    'max_retries', wte.max_retries,
    'next_retry_at', to_char(wte.next_retry_at, 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"'),
    'timeout_seconds', wte.timeout_seconds,
    'timed_out', wte.timed_out,
    'duration_ms', wte.duration_ms,
    'started_at', to_char(wte.started_at, 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"'),
    'completed_at', to_char(wte.completed_at, 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"')
)
FROM workflow_task_execution wte
WHERE e.id = wte.execution;
|
||||
|
||||
-- ============================================================================
-- STEP 3: Create indexes for efficient JSONB queries
-- ============================================================================
-- IF NOT EXISTS added throughout so the migration can be re-run safely,
-- matching the convention used by other migrations in this series.

-- General GIN index for JSONB operations
CREATE INDEX IF NOT EXISTS idx_execution_workflow_task_gin ON execution USING GIN (workflow_task)
WHERE workflow_task IS NOT NULL;

-- Specific index for workflow_execution lookups (most common query)
CREATE INDEX IF NOT EXISTS idx_execution_workflow_execution ON execution ((workflow_task->>'workflow_execution'))
WHERE workflow_task IS NOT NULL;

-- Index for task name lookups
CREATE INDEX IF NOT EXISTS idx_execution_task_name ON execution ((workflow_task->>'task_name'))
WHERE workflow_task IS NOT NULL;

-- Index for retry queries (text comparison avoids the non-IMMUTABLE
-- timestamp cast in the index predicate)
CREATE INDEX IF NOT EXISTS idx_execution_pending_retries ON execution ((workflow_task->>'next_retry_at'))
WHERE workflow_task IS NOT NULL
    AND workflow_task->>'next_retry_at' IS NOT NULL;

-- Index for timeout queries
CREATE INDEX IF NOT EXISTS idx_execution_timed_out ON execution ((workflow_task->>'timed_out'))
WHERE workflow_task IS NOT NULL;

-- Index for workflow task status queries (combined with execution status)
CREATE INDEX IF NOT EXISTS idx_execution_workflow_status ON execution (status, (workflow_task->>'workflow_execution'))
WHERE workflow_task IS NOT NULL;
|
||||
|
||||
-- ============================================================================
-- STEP 4: Drop the workflow_task_execution table
-- ============================================================================

-- All rows were copied into execution.workflow_task in STEP 2; CASCADE
-- also removes any dependent objects (views, foreign keys, triggers).
DROP TABLE IF EXISTS workflow_task_execution CASCADE;

-- ============================================================================
-- STEP 5: Update comments and documentation
-- ============================================================================

COMMENT ON INDEX idx_execution_workflow_task_gin IS 'GIN index for general JSONB queries on workflow_task';
COMMENT ON INDEX idx_execution_workflow_execution IS 'Index for finding tasks by workflow execution ID';
COMMENT ON INDEX idx_execution_task_name IS 'Index for finding tasks by name';
COMMENT ON INDEX idx_execution_pending_retries IS 'Index for finding tasks pending retry';
COMMENT ON INDEX idx_execution_timed_out IS 'Index for finding timed out tasks';
COMMENT ON INDEX idx_execution_workflow_status IS 'Index for workflow task status queries';
|
||||
|
||||
-- ============================================================================
|
||||
-- VERIFICATION QUERIES (for manual testing)
|
||||
-- ============================================================================
|
||||
|
||||
-- Verify migration: Count workflow task executions
|
||||
-- SELECT COUNT(*) FROM execution WHERE workflow_task IS NOT NULL;
|
||||
|
||||
-- Verify indexes exist
|
||||
-- SELECT indexname, indexdef FROM pg_indexes WHERE tablename = 'execution' AND indexname LIKE '%workflow%';
|
||||
|
||||
-- Test workflow task queries
|
||||
-- SELECT * FROM execution WHERE workflow_task->>'workflow_execution' = '1';
|
||||
-- SELECT * FROM execution WHERE workflow_task->>'task_name' = 'example_task';
|
||||
-- SELECT * FROM execution WHERE (workflow_task->>'timed_out')::boolean = true;
|
||||
@@ -0,0 +1,42 @@
|
||||
-- Migration: Fix webhook function overload issue
-- Description: Drop the old enable_trigger_webhook(bigint) signature to resolve
--              "function is not unique" error when the newer version with config
--              parameter is present.
-- Date: 2026-01-29

-- Drop the old function signature from 20260120000001_add_webhook_support.sql.
-- The newer version with the JSONB config parameter should be the only one:
--   enable_trigger_webhook(p_trigger_id BIGINT, p_config JSONB DEFAULT '{}'::jsonb)
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT);

-- BUG FIX: the original migration also executed
--   DROP FUNCTION IF EXISTS disable_trigger_webhook(BIGINT);
--   DROP FUNCTION IF EXISTS regenerate_trigger_webhook_key(BIGINT);
-- Those functions have exactly ONE signature -- the current one recreated in
-- 20260127000001_consolidate_webhook_config.sql -- so dropping them deleted
-- the live functions outright instead of resolving an overload. The drops
-- are intentionally omitted here, and their presence is verified below.

-- Note: The current versions of these functions should be:
-- - attune.enable_trigger_webhook(BIGINT, JSONB DEFAULT '{}'::jsonb)
-- - attune.disable_trigger_webhook(BIGINT)
-- - attune.regenerate_trigger_webhook_key(BIGINT)

-- Verify functions exist after cleanup
DO $$
BEGIN
    -- Check that enable_trigger_webhook exists with the JSONB-config signature.
    -- current_schema() works for both production (attune) and test schemas.
    IF NOT EXISTS (
        SELECT 1 FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = current_schema()
            AND p.proname = 'enable_trigger_webhook'
            AND pg_get_function_arguments(p.oid) LIKE '%jsonb%'
    ) THEN
        RAISE EXCEPTION 'enable_trigger_webhook function with JSONB config not found after migration';
    END IF;

    -- The companion functions must still exist as well.
    IF NOT EXISTS (
        SELECT 1 FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = current_schema()
            AND p.proname = 'disable_trigger_webhook'
    ) THEN
        RAISE EXCEPTION 'disable_trigger_webhook function not found after migration';
    END IF;

    IF NOT EXISTS (
        SELECT 1 FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = current_schema()
            AND p.proname = 'regenerate_trigger_webhook_key'
    ) THEN
        RAISE EXCEPTION 'regenerate_trigger_webhook_key function not found after migration';
    END IF;
END $$;
|
||||
43
migrations.old/20260129140130_add_is_adhoc_flag.sql
Normal file
43
migrations.old/20260129140130_add_is_adhoc_flag.sql
Normal file
@@ -0,0 +1,43 @@
|
||||
-- Migration: Add is_adhoc flag to action, rule, and trigger tables
-- Description: Distinguishes between pack-installed components (is_adhoc=false) and manually created ad-hoc components (is_adhoc=true)
-- Version: 20260129140130
--
-- IF NOT EXISTS is used throughout so the migration is idempotent,
-- matching the convention in the other migrations in this series.

-- ============================================================================
-- Add is_adhoc column to action table
-- ============================================================================

ALTER TABLE action ADD COLUMN IF NOT EXISTS is_adhoc BOOLEAN DEFAULT false NOT NULL;

-- Partial index: only ad-hoc rows are indexed, since those are the rows
-- being filtered for.
CREATE INDEX IF NOT EXISTS idx_action_is_adhoc ON action(is_adhoc) WHERE is_adhoc = true;

COMMENT ON COLUMN action.is_adhoc IS 'True if action was manually created (ad-hoc), false if installed from pack';

-- ============================================================================
-- Add is_adhoc column to rule table
-- ============================================================================

ALTER TABLE rule ADD COLUMN IF NOT EXISTS is_adhoc BOOLEAN DEFAULT false NOT NULL;

CREATE INDEX IF NOT EXISTS idx_rule_is_adhoc ON rule(is_adhoc) WHERE is_adhoc = true;

COMMENT ON COLUMN rule.is_adhoc IS 'True if rule was manually created (ad-hoc), false if installed from pack';

-- ============================================================================
-- Add is_adhoc column to trigger table
-- ============================================================================

ALTER TABLE trigger ADD COLUMN IF NOT EXISTS is_adhoc BOOLEAN DEFAULT false NOT NULL;

CREATE INDEX IF NOT EXISTS idx_trigger_is_adhoc ON trigger(is_adhoc) WHERE is_adhoc = true;

COMMENT ON COLUMN trigger.is_adhoc IS 'True if trigger was manually created (ad-hoc), false if installed from pack';

-- ============================================================================
-- Notes
-- ============================================================================
-- - Default is false (not ad-hoc) for backward compatibility with existing pack-installed components
-- - Ad-hoc components are eligible for deletion by users with appropriate permissions
-- - Pack-installed components (is_adhoc=false) should not be deletable directly, only via pack uninstallation
|
||||
43
migrations.old/20260129150000_add_event_notify_trigger.sql
Normal file
43
migrations.old/20260129150000_add_event_notify_trigger.sql
Normal file
@@ -0,0 +1,43 @@
|
||||
-- Migration: Add NOTIFY trigger for event creation
-- This enables real-time notifications when events are created.

-- Publishes a JSON payload describing each newly created event on the
-- 'event_created' channel via pg_notify.
CREATE OR REPLACE FUNCTION notify_event_created()
RETURNS TRIGGER AS $$
BEGIN
    -- Build the payload inline and publish; listeners receive it as text.
    PERFORM pg_notify(
        'event_created',
        jsonb_build_object(
            'entity_type', 'event',
            'entity_id', NEW.id,
            'timestamp', NOW(),
            'data', jsonb_build_object(
                'id', NEW.id,
                'trigger', NEW.trigger,
                'trigger_ref', NEW.trigger_ref,
                'source', NEW.source,
                'source_ref', NEW.source_ref,
                'payload', NEW.payload,
                'created', NEW.created
            )
        )::text
    );

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger to send pg_notify on event insert.
-- NOTE(review): the trigger shares its name with the function; legal in
-- PostgreSQL (separate namespaces) but consider a distinct name for clarity.
CREATE TRIGGER notify_event_created
    AFTER INSERT ON event
    FOR EACH ROW
    EXECUTE FUNCTION notify_event_created();

-- Add comments
COMMENT ON FUNCTION notify_event_created() IS
'Sends PostgreSQL NOTIFY for event creation to enable real-time notifications';
COMMENT ON TRIGGER notify_event_created ON event IS
'Broadcasts event creation via pg_notify for real-time updates';
-- ============================================================================
-- File: migrations.old/20260130000001_add_rule_to_event.sql (61 lines)
-- ============================================================================
-- Migration: Add rule association to event table
-- This enables events to be directly associated with specific rules,
-- improving query performance and enabling rule-specific event filtering.

-- Add rule and rule_ref columns to event table.
-- IF NOT EXISTS keeps the migration re-runnable, consistent with the
-- ADD COLUMN / CREATE INDEX style used by later migrations in this series.
ALTER TABLE event
    ADD COLUMN IF NOT EXISTS rule BIGINT,
    ADD COLUMN IF NOT EXISTS rule_ref TEXT;

-- Add foreign key constraint. ADD CONSTRAINT has no IF NOT EXISTS form, so
-- guard with the same duplicate_object pattern this series uses for enums.
DO $$ BEGIN
    ALTER TABLE event
        ADD CONSTRAINT event_rule_fkey
        FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

-- Add indexes for efficient querying
CREATE INDEX IF NOT EXISTS idx_event_rule ON event(rule);
CREATE INDEX IF NOT EXISTS idx_event_rule_ref ON event(rule_ref);
CREATE INDEX IF NOT EXISTS idx_event_rule_created ON event(rule, created DESC);
CREATE INDEX IF NOT EXISTS idx_event_trigger_rule ON event(trigger, rule);

-- Add comments
COMMENT ON COLUMN event.rule IS
    'Optional reference to the specific rule that generated this event. Used by sensors that emit events for specific rule instances (e.g., timer sensors with multiple interval rules).';

COMMENT ON COLUMN event.rule_ref IS
    'Human-readable reference to the rule (e.g., "core.echo_every_second"). Denormalized for query convenience.';
-- Update the notify trigger to include rule information if present
CREATE OR REPLACE FUNCTION notify_event_created()
RETURNS TRIGGER AS $$
DECLARE
    v_payload JSONB;
BEGIN
    -- Assemble the notification envelope; the 'data' object mirrors the
    -- inserted event row, now including the rule / rule_ref columns added
    -- by this migration.
    v_payload := jsonb_build_object(
        'entity_type', 'event',
        'entity_id', NEW.id,
        'timestamp', NOW(),
        'data', jsonb_build_object(
            'id', NEW.id,
            'trigger', NEW.trigger,
            'trigger_ref', NEW.trigger_ref,
            'rule', NEW.rule,
            'rule_ref', NEW.rule_ref,
            'source', NEW.source,
            'source_ref', NEW.source_ref,
            'payload', NEW.payload,
            'created', NEW.created
        )
    );

    -- Broadcast on the same channel the original trigger function used.
    PERFORM pg_notify('event_created', v_payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Add comment on updated function
COMMENT ON FUNCTION notify_event_created() IS
    'Sends PostgreSQL NOTIFY for event creation with optional rule association';
-- ============================================================================
-- File: migrations.old/20260131000001_add_worker_role.sql (32 lines)
-- ============================================================================
-- Migration: Add Worker Role
-- Description: Adds worker_role field to distinguish between action workers and sensor workers
-- Version: 20260131000001

-- ============================================================================
-- WORKER ROLE ENUM
-- ============================================================================

-- Guarded creation so the migration can be re-applied safely.
DO $$ BEGIN
    CREATE TYPE worker_role_enum AS ENUM ('action', 'sensor', 'hybrid');
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE worker_role_enum IS 'Worker role type: action (executes actions), sensor (monitors triggers), or hybrid (both)';

-- ============================================================================
-- ADD WORKER ROLE COLUMN
-- ============================================================================

ALTER TABLE worker
    ADD COLUMN IF NOT EXISTS worker_role worker_role_enum NOT NULL DEFAULT 'action';

-- Create index for efficient role-based queries
CREATE INDEX IF NOT EXISTS idx_worker_role ON worker(worker_role);
CREATE INDEX IF NOT EXISTS idx_worker_role_status ON worker(worker_role, status);

-- Comments
COMMENT ON COLUMN worker.worker_role IS 'Worker role: action (executes actions), sensor (monitors for triggers), or hybrid (both capabilities)';

-- Update existing workers to be action workers (backward compatibility)
-- NOTE(review): given the NOT NULL DEFAULT above, this only has an effect if
-- worker_role already existed as a nullable column (the ADD COLUMN would have
-- been skipped); otherwise it is a no-op safeguard.
UPDATE worker SET worker_role = 'action' WHERE worker_role IS NULL;
-- ============================================================================
-- File: migrations.old/20260202000001_add_sensor_runtimes.sql (204 lines)
-- ============================================================================
-- Migration: Add Sensor Runtimes
-- Description: Adds common sensor runtimes (Python, Node.js, Shell, Native) with verification metadata
-- Version: 20260202000001

-- ============================================================================
-- SENSOR RUNTIMES
-- ============================================================================

-- Insert Python sensor runtime.
-- Verification tries 'python3' first (priority 1), then 'python' (priority 2);
-- either succeeds when the version output matches 'Python 3.'.
-- ON CONFLICT keeps the row up to date if the migration is re-applied.
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
    'core.sensor.python',
    (SELECT id FROM pack WHERE ref = 'core'),
    'core',
    'Python 3 sensor runtime with automatic environment management',
    'sensor',
    'Python',
    jsonb_build_object(
        'verification', jsonb_build_object(
            'commands', jsonb_build_array(
                jsonb_build_object(
                    'binary', 'python3',
                    'args', jsonb_build_array('--version'),
                    'exit_code', 0,
                    'pattern', 'Python 3\.',
                    'priority', 1
                ),
                jsonb_build_object(
                    'binary', 'python',
                    'args', jsonb_build_array('--version'),
                    'exit_code', 0,
                    'pattern', 'Python 3\.',
                    'priority', 2
                )
            )
        ),
        'min_version', '3.8',
        'recommended_version', '3.11'
    ),
    jsonb_build_object(
        'package_managers', jsonb_build_array('pip', 'pipenv', 'poetry'),
        'virtual_env_support', true
    )
)
ON CONFLICT (ref) DO UPDATE SET
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Insert Node.js sensor runtime.
-- Single verification command: 'node --version' must exit 0 and print a
-- semver-like 'vX.Y.Z' string.
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
    'core.sensor.nodejs',
    (SELECT id FROM pack WHERE ref = 'core'),
    'core',
    'Node.js sensor runtime for JavaScript-based sensors',
    'sensor',
    'Node.js',
    jsonb_build_object(
        'verification', jsonb_build_object(
            'commands', jsonb_build_array(
                jsonb_build_object(
                    'binary', 'node',
                    'args', jsonb_build_array('--version'),
                    'exit_code', 0,
                    'pattern', 'v\d+\.\d+\.\d+',
                    'priority', 1
                )
            )
        ),
        'min_version', '16.0.0',
        'recommended_version', '20.0.0'
    ),
    jsonb_build_object(
        'package_managers', jsonb_build_array('npm', 'yarn', 'pnpm'),
        'module_support', true
    )
)
ON CONFLICT (ref) DO UPDATE SET
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Insert Shell sensor runtime.
-- Marked always_available, so the verification commands below are optional
-- probes ('sh' then 'bash'); their failure does not make the runtime
-- unavailable.
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
    'core.sensor.shell',
    (SELECT id FROM pack WHERE ref = 'core'),
    'core',
    'Shell (bash/sh) sensor runtime - always available',
    'sensor',
    'Shell',
    jsonb_build_object(
        'verification', jsonb_build_object(
            'commands', jsonb_build_array(
                jsonb_build_object(
                    'binary', 'sh',
                    'args', jsonb_build_array('--version'),
                    'exit_code', 0,
                    'optional', true,
                    'priority', 1
                ),
                jsonb_build_object(
                    'binary', 'bash',
                    'args', jsonb_build_array('--version'),
                    'exit_code', 0,
                    'optional', true,
                    'priority', 2
                )
            ),
            'always_available', true
        )
    ),
    jsonb_build_object(
        'interpreters', jsonb_build_array('sh', 'bash', 'dash'),
        'portable', true
    )
)
ON CONFLICT (ref) DO UPDATE SET
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Insert Native sensor runtime.
-- Compiled binaries need no interpreter, so verification is skipped entirely
-- (always_available=true, check_required=false) and there are no commands.
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
    'core.sensor.native',
    (SELECT id FROM pack WHERE ref = 'core'),
    'core',
    'Native compiled sensor runtime (Rust, Go, C, etc.) - always available',
    'sensor',
    'Native',
    jsonb_build_object(
        'verification', jsonb_build_object(
            'always_available', true,
            'check_required', false
        ),
        'languages', jsonb_build_array('rust', 'go', 'c', 'c++')
    ),
    jsonb_build_object(
        'build_required', false,
        'system_native', true
    )
)
ON CONFLICT (ref) DO UPDATE SET
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Update existing builtin sensor runtime with verification metadata.
-- The builtin runtime ships with the service, so it is always available and
-- never needs a verification check.
UPDATE runtime
SET distributions = jsonb_build_object(
        'verification', jsonb_build_object(
            'always_available', true,
            'check_required', false
        ),
        'type', 'builtin'
    ),
    installation = jsonb_build_object(
        'method', 'builtin',
        'included_with_service', true
    ),
    updated = NOW()
WHERE ref = 'core.sensor.builtin';

-- Add comments
COMMENT ON COLUMN runtime.distributions IS 'Runtime distribution metadata including verification commands, version requirements, and capabilities';
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions including package managers and setup steps';

-- Create partial index for efficient sensor-runtime verification queries
CREATE INDEX IF NOT EXISTS idx_runtime_type_sensor ON runtime(runtime_type) WHERE runtime_type = 'sensor';

-- Verification metadata structure documentation
/*
VERIFICATION METADATA STRUCTURE:

distributions->verification = {
    "commands": [                 // Array of verification commands to try (in priority order)
        {
            "binary": "python3",      // Binary name to execute
            "args": ["--version"],    // Arguments to pass
            "exit_code": 0,           // Expected exit code (0 = success)
            "pattern": "Python 3\.",  // Optional regex pattern to match in output
            "priority": 1,            // Lower = higher priority (try first)
            "optional": false         // If true, failure doesn't mean runtime unavailable
        }
    ],
    "always_available": false,    // If true, skip verification (shell, native)
    "check_required": true        // If false, assume available without checking
}

USAGE EXAMPLE:

To verify Python runtime availability:
1. Query: SELECT distributions->'verification'->'commands' FROM runtime WHERE ref = 'core.sensor.python'
2. Parse commands array
3. Try each command in priority order
4. If any command succeeds with expected exit_code and matches pattern (if provided), runtime is available
5. If all commands fail, runtime is not available

For always_available runtimes (shell, native):
1. Check distributions->'verification'->'always_available'
2. If true, skip verification and report as available
*/
-- ============================================================================
-- File: migrations.old/20260203000001_unify_runtimes.sql (96 lines)
-- ============================================================================
-- Migration: Unify Runtimes (Remove runtime_type distinction)
-- Description: Removes the runtime_type field and consolidates sensor/action runtimes
--              into a single unified runtime system. Both sensors and actions use the
--              same binaries and verification logic, so the distinction is redundant.
--              Runtime metadata is now loaded from YAML files in packs/core/runtimes/
-- Version: 20260203000001

-- ============================================================================
-- STEP 1: Drop constraints that prevent unified runtime format
-- ============================================================================

-- Drop NOT NULL constraint from runtime_type to allow migration
ALTER TABLE runtime ALTER COLUMN runtime_type DROP NOT NULL;

-- Drop the runtime_ref_format constraint (expects pack.type.name, we want pack.name)
ALTER TABLE runtime DROP CONSTRAINT IF EXISTS runtime_ref_format;

-- Drop the runtime_ref_lowercase constraint (will recreate after migration)
ALTER TABLE runtime DROP CONSTRAINT IF EXISTS runtime_ref_lowercase;

-- ============================================================================
-- STEP 2: Drop runtime_type column and related objects
-- ============================================================================

-- Drop indexes that reference runtime_type
DROP INDEX IF EXISTS idx_runtime_type;
DROP INDEX IF EXISTS idx_runtime_pack_type;
DROP INDEX IF EXISTS idx_runtime_type_created;
DROP INDEX IF EXISTS idx_runtime_type_sensor;

-- Drop the runtime_type column
ALTER TABLE runtime DROP COLUMN IF EXISTS runtime_type;

-- Drop the enum type
DROP TYPE IF EXISTS runtime_type_enum;

-- ============================================================================
-- STEP 3: Clean up old runtime records (data will be reloaded from YAML)
-- ============================================================================

-- Remove all existing runtime records - they will be reloaded from YAML files
TRUNCATE TABLE runtime CASCADE;

-- ============================================================================
-- STEP 4: Update comments and create new indexes
-- ============================================================================

COMMENT ON TABLE runtime IS 'Runtime environments for executing actions and sensors (unified)';
COMMENT ON COLUMN runtime.ref IS 'Unique runtime reference (format: pack.name, e.g., core.python)';
COMMENT ON COLUMN runtime.name IS 'Runtime name (e.g., "Python", "Node.js", "Shell")';
-- FIX: the short COMMENT ON COLUMN runtime.distributions that used to sit here
-- was immediately overwritten by the detailed structure comment below in the
-- same migration; the redundant first comment has been removed.
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions including package managers and setup steps';

-- Create new indexes for efficient queries
CREATE INDEX IF NOT EXISTS idx_runtime_name ON runtime(name);
CREATE INDEX IF NOT EXISTS idx_runtime_verification ON runtime USING gin ((distributions->'verification'));

-- ============================================================================
-- VERIFICATION METADATA STRUCTURE DOCUMENTATION
-- ============================================================================

COMMENT ON COLUMN runtime.distributions IS 'Runtime verification and capability metadata. Structure:
{
    "verification": {
        "commands": [                 // Array of verification commands (in priority order)
            {
                "binary": "python3",      // Binary name to execute
                "args": ["--version"],    // Arguments to pass
                "exit_code": 0,           // Expected exit code
                "pattern": "Python 3\\.", // Optional regex pattern to match in output
                "priority": 1,            // Lower = higher priority
                "optional": false         // If true, failure is non-fatal
            }
        ],
        "always_available": false,    // If true, skip verification (shell, native)
        "check_required": true        // If false, assume available without checking
    },
    "min_version": "3.8",             // Minimum supported version
    "recommended_version": "3.11"     // Recommended version
}';

-- ============================================================================
-- SUMMARY
-- ============================================================================

-- Runtime records are now loaded from YAML files in packs/core/runtimes/:
-- 1. python.yaml - Python 3 runtime (unified)
-- 2. nodejs.yaml - Node.js runtime (unified)
-- 3. shell.yaml - Shell runtime (unified)
-- 4. native.yaml - Native runtime (unified)
-- 5. sensor_builtin.yaml - Built-in sensor runtime (sensor-specific timers, etc.)

DO $$
BEGIN
    RAISE NOTICE 'Runtime unification complete. Runtime records will be loaded from YAML files.';
END $$;
-- ============================================================================
-- File: migrations.old/20260203000002_add_pack_environments.sql (330 lines)
-- ============================================================================
-- Migration: Add Pack Runtime Environments
-- Description: Adds support for per-pack isolated runtime environments with installer metadata
-- Version: 20260203000002

-- ============================================================================
-- PART 1: Add installer metadata to runtime table
-- ============================================================================

-- Add installers field to runtime table for environment setup instructions.
-- Defaults to an empty JSON array so existing rows remain valid.
ALTER TABLE runtime ADD COLUMN IF NOT EXISTS installers JSONB DEFAULT '[]'::jsonb;

COMMENT ON COLUMN runtime.installers IS 'Array of installer actions to create pack-specific runtime environments. Each installer defines commands to set up isolated environments (e.g., Python venv, npm install).

Structure:
{
    "installers": [
        {
            "name": "create_environment",
            "description": "Create isolated runtime environment",
            "command": "python3",
            "args": ["-m", "venv", "{env_path}"],
            "cwd": "{pack_path}",
            "env": {},
            "order": 1
        },
        {
            "name": "install_dependencies",
            "description": "Install pack dependencies",
            "command": "{env_path}/bin/pip",
            "args": ["install", "-r", "{pack_path}/requirements.txt"],
            "cwd": "{pack_path}",
            "env": {},
            "order": 2,
            "optional": false
        }
    ]
}

Template variables:
{env_path} - Full path to environment directory (e.g., /opt/attune/packenvs/mypack/python)
{pack_path} - Full path to pack directory (e.g., /opt/attune/packs/mypack)
{pack_ref} - Pack reference (e.g., mycompany.monitoring)
{runtime_ref} - Runtime reference (e.g., core.python)
{runtime_name} - Runtime name (e.g., Python)
';
-- ============================================================================
-- PART 2: Create pack_environment table
-- ============================================================================

-- PackEnvironmentStatus enum (guarded so the migration is re-runnable)
DO $$ BEGIN
    CREATE TYPE pack_environment_status_enum AS ENUM (
        'pending',     -- Environment creation scheduled
        'installing',  -- Currently installing
        'ready',       -- Environment ready for use
        'failed',      -- Installation failed
        'outdated'     -- Pack updated, environment needs rebuild
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

COMMENT ON TYPE pack_environment_status_enum IS 'Status of pack runtime environment installation';

-- Pack environment table: one row per (pack, runtime) pair.
CREATE TABLE IF NOT EXISTS pack_environment (
    id BIGSERIAL PRIMARY KEY,
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT NOT NULL,
    runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
    runtime_ref TEXT NOT NULL,
    env_path TEXT NOT NULL,
    status pack_environment_status_enum NOT NULL DEFAULT 'pending',
    installed_at TIMESTAMPTZ,
    last_verified TIMESTAMPTZ,
    install_log TEXT,
    install_error TEXT,
    metadata JSONB DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(pack, runtime)
);

-- Indexes
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack ON pack_environment(pack);
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime ON pack_environment(runtime);
CREATE INDEX IF NOT EXISTS idx_pack_environment_status ON pack_environment(status);
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack_ref ON pack_environment(pack_ref);
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime_ref ON pack_environment(runtime_ref);
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack_runtime ON pack_environment(pack, runtime);

-- Trigger for updated timestamp.
-- FIX: everything else in this migration uses IF NOT EXISTS guards, but
-- CREATE TRIGGER has no such form and would fail on re-apply; drop first.
DROP TRIGGER IF EXISTS update_pack_environment_updated ON pack_environment;
CREATE TRIGGER update_pack_environment_updated
    BEFORE UPDATE ON pack_environment
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE pack_environment IS 'Tracks pack-specific runtime environments for dependency isolation';
COMMENT ON COLUMN pack_environment.pack IS 'Pack that owns this environment';
COMMENT ON COLUMN pack_environment.pack_ref IS 'Pack reference for quick lookup';
COMMENT ON COLUMN pack_environment.runtime IS 'Runtime used for this environment';
COMMENT ON COLUMN pack_environment.runtime_ref IS 'Runtime reference for quick lookup';
COMMENT ON COLUMN pack_environment.env_path IS 'Filesystem path to the environment directory (e.g., /opt/attune/packenvs/mypack/python)';
COMMENT ON COLUMN pack_environment.status IS 'Current installation status';
COMMENT ON COLUMN pack_environment.installed_at IS 'When the environment was successfully installed';
COMMENT ON COLUMN pack_environment.last_verified IS 'Last time the environment was verified as working';
COMMENT ON COLUMN pack_environment.install_log IS 'Installation output logs';
COMMENT ON COLUMN pack_environment.install_error IS 'Error message if installation failed';
COMMENT ON COLUMN pack_environment.metadata IS 'Additional metadata (installed packages, versions, etc.)';
-- ============================================================================
-- PART 3: Update existing runtimes with installer metadata
-- ============================================================================

-- Python runtime installers: create a venv, (optionally) upgrade pip, then
-- install requirements.txt if the pack has one.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(
        jsonb_build_object(
            'name', 'create_venv',
            'description', 'Create Python virtual environment',
            'command', 'python3',
            'args', jsonb_build_array('-m', 'venv', '{env_path}'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(),
            'order', 1,
            'optional', false
        ),
        jsonb_build_object(
            'name', 'upgrade_pip',
            'description', 'Upgrade pip to latest version',
            'command', '{env_path}/bin/pip',
            'args', jsonb_build_array('install', '--upgrade', 'pip'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(),
            'order', 2,
            'optional', true
        ),
        jsonb_build_object(
            'name', 'install_requirements',
            'description', 'Install pack Python dependencies',
            'command', '{env_path}/bin/pip',
            'args', jsonb_build_array('install', '-r', '{pack_path}/requirements.txt'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(),
            'order', 3,
            'optional', false,
            'condition', jsonb_build_object(
                'file_exists', '{pack_path}/requirements.txt'
            )
        )
    ),
    'executable_templates', jsonb_build_object(
        'python', '{env_path}/bin/python',
        'pip', '{env_path}/bin/pip'
    )
)
WHERE ref = 'core.python';

-- Node.js runtime installers: npm install into the env prefix when the pack
-- ships a package.json.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(
        jsonb_build_object(
            'name', 'npm_install',
            'description', 'Install Node.js dependencies',
            'command', 'npm',
            'args', jsonb_build_array('install', '--prefix', '{env_path}'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(
                'NODE_PATH', '{env_path}/node_modules'
            ),
            'order', 1,
            'optional', false,
            'condition', jsonb_build_object(
                'file_exists', '{pack_path}/package.json'
            )
        )
    ),
    'executable_templates', jsonb_build_object(
        'node', 'node',
        'npm', 'npm'
    ),
    'env_vars', jsonb_build_object(
        'NODE_PATH', '{env_path}/node_modules'
    )
)
WHERE ref = 'core.nodejs';

-- Shell runtime (no environment needed, uses system shell)
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    'executable_templates', jsonb_build_object(
        'sh', 'sh',
        'bash', 'bash'
    ),
    'requires_environment', false
)
WHERE ref = 'core.shell';

-- Native runtime (no environment needed, binaries are standalone)
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    'executable_templates', jsonb_build_object(),
    'requires_environment', false
)
WHERE ref = 'core.native';

-- Built-in sensor runtime (internal, no environment)
UPDATE runtime
SET installers = jsonb_build_object(
    'installers', jsonb_build_array(),
    'requires_environment', false
)
WHERE ref = 'core.sensor.builtin';
-- ============================================================================
-- PART 4: Add helper functions
-- ============================================================================

-- Function to get environment path for a pack/runtime combination.
-- Looks up the runtime's base_path_template (falling back to the default
-- layout) and substitutes the {pack_ref}/{runtime_ref}/{runtime_name_lower}
-- template variables.
-- NOTE: returns NULL when p_runtime_ref matches no runtime row (the REPLACE
-- with a NULL runtime name propagates NULL).
CREATE OR REPLACE FUNCTION get_pack_environment_path(p_pack_ref TEXT, p_runtime_ref TEXT)
RETURNS TEXT AS $$
DECLARE
    v_runtime_name TEXT;
    v_base_template TEXT;
    v_result TEXT;
BEGIN
    -- Get runtime name and base path template
    SELECT
        LOWER(name),
        installers->>'base_path_template'
    INTO v_runtime_name, v_base_template
    FROM runtime
    WHERE ref = p_runtime_ref;

    IF v_base_template IS NULL THEN
        v_base_template := '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}';
    END IF;

    -- Replace template variables
    v_result := v_base_template;
    v_result := REPLACE(v_result, '{pack_ref}', p_pack_ref);
    v_result := REPLACE(v_result, '{runtime_ref}', p_runtime_ref);
    v_result := REPLACE(v_result, '{runtime_name_lower}', v_runtime_name);

    RETURN v_result;
END;
-- FIX: was declared IMMUTABLE, but the function reads the runtime table, so
-- its result can change between statements. IMMUTABLE lets the planner
-- pre-evaluate and cache results; STABLE is the correct category for a
-- function that performs database lookups.
$$ LANGUAGE plpgsql STABLE;

COMMENT ON FUNCTION get_pack_environment_path IS 'Calculate the filesystem path for a pack runtime environment';
-- Function to check if a runtime requires an environment.
-- Reads installers->>'requires_environment'; defaults to true for a known
-- runtime with no flag, and false when the runtime ref does not exist.
CREATE OR REPLACE FUNCTION runtime_requires_environment(p_runtime_ref TEXT)
RETURNS BOOLEAN AS $$
DECLARE
    v_needs_env BOOLEAN;
BEGIN
    SELECT COALESCE((installers->>'requires_environment')::boolean, true)
    INTO v_needs_env
    FROM runtime
    WHERE ref = p_runtime_ref;

    -- No matching runtime row leaves v_needs_env NULL -> report false.
    RETURN COALESCE(v_needs_env, false);
END;
$$ LANGUAGE plpgsql STABLE;

COMMENT ON FUNCTION runtime_requires_environment IS 'Check if a runtime needs a pack-specific environment';
-- ============================================================================
-- PART 5: Create view for environment status
-- ============================================================================

-- Joins pack_environment with pack and runtime and derives two computed
-- columns: needs_verification (ready but unverified for >7 days) and
-- health_status (status mapped onto healthy/unhealthy/provisioning/...).
CREATE OR REPLACE VIEW v_pack_environment_status AS
SELECT
    pe.id,
    pe.pack,
    p.ref AS pack_ref,
    p.label AS pack_name,
    pe.runtime,
    r.ref AS runtime_ref,
    r.name AS runtime_name,
    pe.env_path,
    pe.status,
    pe.installed_at,
    pe.last_verified,
    CASE
        WHEN pe.status = 'ready' AND pe.last_verified < NOW() - INTERVAL '7 days' THEN true
        ELSE false
    END AS needs_verification,
    CASE
        WHEN pe.status = 'ready' THEN 'healthy'
        WHEN pe.status = 'failed' THEN 'unhealthy'
        WHEN pe.status IN ('pending', 'installing') THEN 'provisioning'
        WHEN pe.status = 'outdated' THEN 'needs_update'
        ELSE 'unknown'
    END AS health_status,
    pe.install_error,
    pe.created,
    pe.updated
FROM pack_environment pe
JOIN pack p ON pe.pack = p.id
JOIN runtime r ON pe.runtime = r.id;

COMMENT ON VIEW v_pack_environment_status IS 'Consolidated view of pack environment status with health indicators';

-- ============================================================================
-- SUMMARY
-- ============================================================================

-- Display summary of changes
DO $$
BEGIN
    RAISE NOTICE 'Pack environment system migration complete.';
    RAISE NOTICE '';
    RAISE NOTICE 'New table: pack_environment (tracks installed environments)';
    RAISE NOTICE 'New column: runtime.installers (environment setup instructions)';
    RAISE NOTICE 'New functions: get_pack_environment_path, runtime_requires_environment';
    RAISE NOTICE 'New view: v_pack_environment_status';
    RAISE NOTICE '';
    RAISE NOTICE 'Environment paths will be: /opt/attune/packenvs/{pack_ref}/{runtime}';
END $$;
-- ============================================================================
-- File: migrations.old/<execution notify payload migration> (58 lines;
-- filename lost in the diff extract — adds rule_ref/trigger_ref to the
-- execution notification payload)
-- ============================================================================
-- Migration: Add rule_ref and trigger_ref to execution notification payload
-- This includes enforcement information in real-time notifications to avoid additional API calls

-- Drop the existing trigger first
DROP TRIGGER IF EXISTS notify_execution_change ON execution;

-- Replace the notification function to include enforcement details
CREATE OR REPLACE FUNCTION notify_execution_change()
RETURNS TRIGGER AS $$
DECLARE
    v_payload JSONB;
    v_rule_ref TEXT;
    v_trigger_ref TEXT;
BEGIN
    -- When the execution is linked to an enforcement, resolve the rule and
    -- trigger references so subscribers do not need a follow-up API call.
    IF NEW.enforcement IS NOT NULL THEN
        SELECT rule_ref, trigger_ref
        INTO v_rule_ref, v_trigger_ref
        FROM enforcement
        WHERE id = NEW.enforcement;
    END IF;

    -- Assemble the notification envelope with the execution row plus the
    -- resolved enforcement references (NULL when there is no enforcement).
    v_payload := jsonb_build_object(
        'entity_type', 'execution',
        'entity_id', NEW.id,
        'timestamp', NOW(),
        'data', jsonb_build_object(
            'id', NEW.id,
            'status', NEW.status,
            'action_id', NEW.action,
            'action_ref', NEW.action_ref,
            'enforcement', NEW.enforcement,
            'rule_ref', v_rule_ref,
            'trigger_ref', v_trigger_ref,
            'parent', NEW.parent,
            'result', NEW.result,
            'created', NEW.created,
            'updated', NEW.updated
        )
    );

    -- Broadcast on the shared notifications channel.
    PERFORM pg_notify('attune_notifications', v_payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Recreate the trigger
CREATE TRIGGER notify_execution_change
    AFTER INSERT OR UPDATE ON execution
    FOR EACH ROW
    EXECUTE FUNCTION notify_execution_change();

-- Update comment
COMMENT ON FUNCTION notify_execution_change() IS
    'Sends PostgreSQL NOTIFY for execution changes with enforcement details (rule_ref, trigger_ref) to enable real-time SSE streaming without additional API calls';
-- ============================================================================
-- File: migrations.old/<enforcement notify migration> (59 lines; filename
-- lost in the diff extract — adds NOTIFY trigger for enforcement changes)
-- ============================================================================
-- Migration: Add NOTIFY trigger for enforcement creation
-- This enables real-time notifications when enforcements are created or updated

-- Function to send notifications on enforcement changes.
-- Builds a JSONB payload for the affected enforcement row and broadcasts it
-- on the 'attune_notifications' channel. Key names are part of the
-- notification contract consumed by listeners -- do not rename.
CREATE OR REPLACE FUNCTION notify_enforcement_change()
RETURNS TRIGGER AS $$
DECLARE
    payload   JSONB;
    operation TEXT;
    rec       RECORD;
BEGIN
    -- Map TG_OP to an operation label and pick the row image that actually
    -- exists. NEW is NULL on DELETE, so the previous version (which read
    -- NEW.* unconditionally) would have raised if the trigger were ever
    -- extended to fire on DELETE; OLD is the valid image in that case.
    IF TG_OP = 'INSERT' THEN
        operation := 'created';
        rec := NEW;
    ELSIF TG_OP = 'UPDATE' THEN
        operation := 'updated';
        rec := NEW;
    ELSE
        operation := 'deleted';
        rec := OLD;
    END IF;

    -- Build JSON payload with enforcement details
    payload := jsonb_build_object(
        'entity_type', 'enforcement',
        'entity_id', rec.id,
        'operation', operation,
        'timestamp', NOW(),
        'data', jsonb_build_object(
            'id', rec.id,
            'rule', rec.rule,
            'rule_ref', rec.rule_ref,
            'trigger_ref', rec.trigger_ref,
            'event', rec.event,
            'status', rec.status,
            'condition', rec.condition,
            'conditions', rec.conditions,
            'config', rec.config,
            'payload', rec.payload,
            'created', rec.created,
            'updated', rec.updated
        )
    );

    -- Send notification to the attune_notifications channel
    PERFORM pg_notify('attune_notifications', payload::text);

    -- Return value is ignored for AFTER row triggers, but return the live
    -- image so the function is also valid in a BEFORE context.
    RETURN rec;
END;
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Trigger to send pg_notify on enforcement insert
-- NOTE(review): fires on INSERT and UPDATE only -- the function's 'deleted'
-- branch is currently unreachable because DELETE is not listed here.
-- Add OR DELETE if delete notifications are ever required.
CREATE TRIGGER notify_enforcement_change
AFTER INSERT OR UPDATE ON enforcement
FOR EACH ROW
EXECUTE FUNCTION notify_enforcement_change();

-- Add comments
COMMENT ON FUNCTION notify_enforcement_change() IS
'Sends PostgreSQL NOTIFY for enforcement changes to enable real-time notifications';
COMMENT ON TRIGGER notify_enforcement_change ON enforcement IS
'Broadcasts enforcement changes via pg_notify for real-time updates';
|
||||
168
migrations.old/20260204000001_restore_webhook_functions.sql
Normal file
168
migrations.old/20260204000001_restore_webhook_functions.sql
Normal file
@@ -0,0 +1,168 @@
|
||||
-- Migration: Restore webhook functions
-- Description: Recreate webhook functions that were accidentally dropped in 20260129000001
-- Date: 2026-02-04

-- Drop existing functions to avoid signature conflicts.
-- CREATE OR REPLACE cannot change a function's argument list, so any old
-- signatures must be dropped explicitly first; IF EXISTS keeps this
-- migration idempotent when the functions are already gone.
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT, JSONB);
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS disable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS regenerate_trigger_webhook_key(BIGINT);
|
||||
|
||||
-- Function to enable webhooks for a trigger.
-- Enables webhook delivery for p_trigger_id, storing p_config and reusing
-- the trigger's existing webhook key (or generating one if none exists).
-- Returns (webhook_enabled, webhook_key, webhook_url).
-- Raises an exception if the trigger does not exist.
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT,
    p_config JSONB DEFAULT '{}'::jsonb
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(255),
    webhook_url TEXT
) AS $$
DECLARE
    v_webhook_key VARCHAR(255);
    v_api_base_url TEXT := 'http://localhost:8080'; -- Default, should be configured
BEGIN
    -- Enable webhooks in a single atomic statement. COALESCE keeps an
    -- existing key or generates one inline, eliminating the previous
    -- SELECT-then-UPDATE window where two concurrent callers could each
    -- generate a key and return one that was immediately overwritten.
    UPDATE trigger t
    SET
        webhook_enabled = TRUE,
        webhook_key = COALESCE(t.webhook_key, generate_webhook_key()),
        webhook_config = p_config,
        updated = NOW()
    WHERE t.id = p_trigger_id
    RETURNING t.webhook_key INTO v_webhook_key;

    -- FOUND reflects the UPDATE's row count, replacing the separate
    -- EXISTS pre-check while preserving the same error.
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Return webhook details
    RETURN QUERY SELECT
        TRUE,
        v_webhook_key,
        v_api_base_url || '/api/v1/webhooks/' || v_webhook_key;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION enable_trigger_webhook(BIGINT, JSONB) IS
'Enables webhooks for a trigger with optional configuration. Generates a new webhook key if one does not exist. Returns webhook details.';
|
||||
|
||||
-- Function to disable webhooks for a trigger.
-- Clears webhook_enabled and removes the webhook key so it no longer
-- appears in API responses. Returns TRUE on success; raises an exception
-- if the trigger does not exist.
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    -- Update trigger to disable webhooks.
    -- Set webhook_key to NULL when disabling to remove it from API responses.
    UPDATE trigger
    SET
        webhook_enabled = FALSE,
        webhook_key = NULL,
        updated = NOW()
    WHERE id = p_trigger_id;

    -- FOUND reflects the UPDATE's row count, replacing the previous
    -- EXISTS pre-check (one statement instead of two, no check-then-act
    -- window, same error on a missing trigger).
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
'Disables webhooks for a trigger. Webhook key is removed when disabled.';
|
||||
|
||||
-- Function to regenerate webhook key for a trigger.
-- Rotates the webhook key for p_trigger_id and returns the new key plus
-- whether a previous key existed (i.e. was revoked). Raises if the trigger
-- does not exist or if webhooks are not enabled for it.
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(255),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_new_key VARCHAR(255);
    v_old_key VARCHAR(255);
    v_webhook_enabled BOOLEAN;
BEGIN
    -- Lock the row while reading so the enabled-check and the key rotation
    -- below are atomic with respect to concurrent enable/disable calls
    -- (previously the state could change between SELECT and UPDATE).
    SELECT t.webhook_key, t.webhook_enabled
    INTO v_old_key, v_webhook_enabled
    FROM trigger t
    WHERE t.id = p_trigger_id
    FOR UPDATE;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- IS NOT TRUE also rejects NULL; the original "NOT v_webhook_enabled"
    -- evaluated to NULL (treated as false) when the flag was NULL and would
    -- silently allow regeneration on a trigger never enabled.
    IF v_webhook_enabled IS NOT TRUE THEN
        RAISE EXCEPTION 'Webhooks are not enabled for trigger %', p_trigger_id;
    END IF;

    -- Generate new key
    v_new_key := generate_webhook_key();

    -- Update trigger with new key
    UPDATE trigger
    SET
        webhook_key = v_new_key,
        updated = NOW()
    WHERE id = p_trigger_id;

    -- Return new key and whether old key was present
    RETURN QUERY SELECT
        v_new_key,
        (v_old_key IS NOT NULL);
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
'Regenerates webhook key for a trigger. Returns new key and whether a previous key was revoked.';
|
||||
|
||||
-- Verify all functions exist.
-- Post-migration sanity check: fails the migration loudly if any of the
-- restored functions is missing from the current schema. Looping over the
-- names replaces three copy-pasted identical checks; the rendered error
-- messages are unchanged ('<name> function not found after migration').
DO $$
DECLARE
    v_fn TEXT;
BEGIN
    FOREACH v_fn IN ARRAY ARRAY[
        'enable_trigger_webhook',
        'disable_trigger_webhook',
        'regenerate_trigger_webhook_key'
    ] LOOP
        IF NOT EXISTS (
            SELECT 1 FROM pg_proc p
            JOIN pg_namespace n ON p.pronamespace = n.oid
            WHERE n.nspname = current_schema()
              AND p.proname = v_fn
        ) THEN
            RAISE EXCEPTION '% function not found after migration', v_fn;
        END IF;
    END LOOP;

    RAISE NOTICE 'All webhook functions successfully restored';
END $$;
|
||||
348
migrations.old/README.md
Normal file
348
migrations.old/README.md
Normal file
@@ -0,0 +1,348 @@
|
||||
# Attune Database Migrations
|
||||
|
||||
This directory contains SQL migrations for the Attune automation platform database schema.
|
||||
|
||||
## Overview
|
||||
|
||||
Migrations are numbered and executed in order. Each migration file is named with a timestamp prefix to ensure proper ordering:
|
||||
|
||||
```
|
||||
YYYYMMDDHHMMSS_description.sql
|
||||
```
|
||||
|
||||
## Migration Files
|
||||
|
||||
The schema is organized into 5 logical migration files:
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `20250101000001_initial_setup.sql` | Creates schema, service role, all enum types, and shared functions |
|
||||
| `20250101000002_core_tables.sql` | Creates pack, runtime, worker, identity, permission_set, permission_assignment, policy, and key tables |
|
||||
| `20250101000003_event_system.sql` | Creates trigger, sensor, event, and enforcement tables |
|
||||
| `20250101000004_execution_system.sql` | Creates action, rule, execution, inquiry, workflow orchestration tables (workflow_definition, workflow_execution, workflow_task_execution), and workflow views |
|
||||
| `20250101000005_supporting_tables.sql` | Creates notification, artifact, and queue_stats tables with performance indexes |
|
||||
|
||||
### Migration Dependencies
|
||||
|
||||
The migrations must be run in order due to foreign key dependencies:
|
||||
|
||||
1. **Initial Setup** - Foundation (schema, enums, functions)
|
||||
2. **Core Tables** - Base entities (pack, runtime, worker, identity, permissions, policy, key)
|
||||
3. **Event System** - Event monitoring (trigger, sensor, event, enforcement)
|
||||
4. **Execution System** - Action execution (action, rule, execution, inquiry)
|
||||
5. **Supporting Tables** - Auxiliary features (notification, artifact)
|
||||
|
||||
## Running Migrations
|
||||
|
||||
### Using SQLx CLI
|
||||
|
||||
```bash
|
||||
# Install sqlx-cli if not already installed
|
||||
cargo install sqlx-cli --no-default-features --features postgres
|
||||
|
||||
# Run all pending migrations
|
||||
sqlx migrate run
|
||||
|
||||
# Check migration status
|
||||
sqlx migrate info
|
||||
|
||||
# Revert last migration (if needed)
|
||||
sqlx migrate revert
|
||||
```
|
||||
|
||||
### Manual Execution
|
||||
|
||||
You can also run migrations manually using `psql`:
|
||||
|
||||
```bash
|
||||
# Run all migrations in order
|
||||
for file in migrations/202501*.sql; do
|
||||
psql -U postgres -d attune -f "$file"
|
||||
done
|
||||
```
|
||||
|
||||
Or individually:
|
||||
|
||||
```bash
|
||||
psql -U postgres -d attune -f migrations/20250101000001_initial_setup.sql
|
||||
psql -U postgres -d attune -f migrations/20250101000002_core_tables.sql
|
||||
# ... etc
|
||||
```
|
||||
|
||||
## Database Setup
|
||||
|
||||
### Prerequisites
|
||||
|
||||
1. PostgreSQL 14 or later installed
|
||||
2. Create the database:
|
||||
|
||||
```bash
|
||||
createdb attune
|
||||
```
|
||||
|
||||
3. Set environment variable:
|
||||
|
||||
```bash
|
||||
export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/attune"
|
||||
```
|
||||
|
||||
### Initial Setup
|
||||
|
||||
```bash
|
||||
# Navigate to workspace root
|
||||
cd /path/to/attune
|
||||
|
||||
# Run migrations
|
||||
sqlx migrate run
|
||||
|
||||
# Verify tables were created
|
||||
psql -U postgres -d attune -c "\dt attune.*"
|
||||
```
|
||||
|
||||
## Schema Overview
|
||||
|
||||
The Attune schema includes 22 tables organized into logical groups:
|
||||
|
||||
### Core Tables (Migration 2)
|
||||
- **pack**: Automation component bundles
|
||||
- **runtime**: Execution environments (Python, Node.js, containers)
|
||||
- **worker**: Execution workers
|
||||
- **identity**: Users and service accounts
|
||||
- **permission_set**: Permission groups (like roles)
|
||||
- **permission_assignment**: Identity-permission links (many-to-many)
|
||||
- **policy**: Execution policies (rate limiting, concurrency)
|
||||
- **key**: Secure configuration and secrets storage
|
||||
|
||||
### Event System (Migration 3)
|
||||
- **trigger**: Event type definitions
|
||||
- **sensor**: Event monitors that watch for triggers
|
||||
- **event**: Event instances (trigger firings)
|
||||
- **enforcement**: Rule activation instances
|
||||
|
||||
### Execution System (Migration 4)
|
||||
- **action**: Executable operations (can be workflows)
|
||||
- **rule**: Trigger-to-action automation logic
|
||||
- **execution**: Action execution instances (supports workflows)
|
||||
- **inquiry**: Human-in-the-loop interactions (approvals, inputs)
|
||||
- **workflow_definition**: YAML-based workflow definitions (composable action graphs)
|
||||
- **workflow_execution**: Runtime state tracking for workflow executions
|
||||
- **workflow_task_execution**: Individual task executions within workflows
|
||||
|
||||
### Supporting Tables (Migration 5)
|
||||
- **notification**: Real-time system notifications (uses PostgreSQL LISTEN/NOTIFY)
|
||||
- **artifact**: Execution outputs (files, logs, progress data)
|
||||
- **queue_stats**: Real-time execution queue statistics for FIFO ordering
|
||||
|
||||
## Key Features
|
||||
|
||||
### Automatic Timestamps
|
||||
All tables include `created` and `updated` timestamps that are automatically managed by the `update_updated_column()` trigger function.
|
||||
|
||||
### Reference Preservation
|
||||
Tables use both ID foreign keys and `*_ref` text columns. The ref columns preserve string references even when the referenced entity is deleted, maintaining complete audit trails.
|
||||
|
||||
### Soft Deletes
|
||||
Foreign keys strategically use:
|
||||
- `ON DELETE CASCADE` - For dependent data that should be removed
|
||||
- `ON DELETE SET NULL` - To preserve historical records while breaking the link
|
||||
|
||||
### Validation Constraints
|
||||
- **Reference format validation** - Lowercase, specific patterns (e.g., `pack.name`)
|
||||
- **Semantic version validation** - For pack versions
|
||||
- **Ownership validation** - Custom trigger for key table ownership rules
|
||||
- **Range checks** - Port numbers, positive thresholds, etc.
|
||||
|
||||
### Performance Optimization
|
||||
- **B-tree indexes** - On frequently queried columns (IDs, refs, status, timestamps)
|
||||
- **Partial indexes** - For filtered queries (e.g., `enabled = TRUE`)
|
||||
- **GIN indexes** - On JSONB and array columns for fast containment queries
|
||||
- **Composite indexes** - For common multi-column query patterns
|
||||
|
||||
### PostgreSQL Features
|
||||
- **JSONB** - Flexible schema storage for configurations, payloads, results
|
||||
- **Array types** - Multi-value fields (tags, parameters, dependencies)
|
||||
- **Custom enum types** - Constrained string values with type safety
|
||||
- **Triggers** - Data validation, timestamp management, notifications
|
||||
- **pg_notify** - Real-time notifications via PostgreSQL's LISTEN/NOTIFY
|
||||
|
||||
## Service Role
|
||||
|
||||
The migrations define a `svc_attune` role (commented out by default in `20250101000001_initial_setup.sql`; uncomment it for production deployments). **Change the password in production:**
|
||||
|
||||
```sql
|
||||
ALTER ROLE svc_attune WITH PASSWORD 'secure_password_here';
|
||||
```
|
||||
|
||||
The default password is `attune_service_password` (only for development).
|
||||
|
||||
## Rollback Strategy
|
||||
|
||||
### Complete Reset
|
||||
|
||||
To completely reset the database:
|
||||
|
||||
```bash
|
||||
# Drop and recreate
|
||||
dropdb attune
|
||||
createdb attune
|
||||
sqlx migrate run
|
||||
```
|
||||
|
||||
Or drop just the schema:
|
||||
|
||||
```sql
|
||||
psql -U postgres -d attune -c "DROP SCHEMA attune CASCADE;"
|
||||
```
|
||||
|
||||
Then re-run migrations.
|
||||
|
||||
### Individual Migration Revert
|
||||
|
||||
With SQLx CLI:
|
||||
|
||||
```bash
|
||||
sqlx migrate revert
|
||||
```
|
||||
|
||||
Or manually remove from tracking:
|
||||
|
||||
```sql
|
||||
DELETE FROM _sqlx_migrations WHERE version = 20250101000001;
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Never edit existing migrations** - Create new migrations to modify schema
|
||||
2. **Test migrations** - Always test on a copy of production data first
|
||||
3. **Backup before migrating** - Backup production database before applying migrations
|
||||
4. **Review changes** - Review all migrations before applying to production
|
||||
5. **Version control** - Keep migrations in version control (they are!)
|
||||
6. **Document changes** - Add comments to complex migrations
|
||||
|
||||
## Development Workflow
|
||||
|
||||
1. Create new migration file with timestamp:
|
||||
```bash
|
||||
touch migrations/$(date +%Y%m%d%H%M%S)_description.sql
|
||||
```
|
||||
|
||||
2. Write migration SQL (follow existing patterns)
|
||||
|
||||
3. Test migration:
|
||||
```bash
|
||||
sqlx migrate run
|
||||
```
|
||||
|
||||
4. Verify changes:
|
||||
```bash
|
||||
psql -U postgres -d attune
|
||||
\d+ attune.table_name
|
||||
```
|
||||
|
||||
5. Commit to version control
|
||||
|
||||
## Production Deployment
|
||||
|
||||
1. **Backup** production database
|
||||
2. **Review** all pending migrations
|
||||
3. **Test** migrations on staging environment with production data copy
|
||||
4. **Schedule** maintenance window if needed
|
||||
5. **Apply** migrations:
|
||||
```bash
|
||||
sqlx migrate run
|
||||
```
|
||||
6. **Verify** application functionality
|
||||
7. **Monitor** for errors in logs
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Migration already applied
|
||||
|
||||
If you need to re-run a migration:
|
||||
|
||||
```bash
|
||||
# Remove from migration tracking (SQLx)
|
||||
psql -U postgres -d attune -c "DELETE FROM _sqlx_migrations WHERE version = 20250101000001;"
|
||||
|
||||
# Then re-run
|
||||
sqlx migrate run
|
||||
```
|
||||
|
||||
### Permission denied
|
||||
|
||||
Ensure the PostgreSQL user has sufficient permissions:
|
||||
|
||||
```sql
|
||||
GRANT ALL PRIVILEGES ON DATABASE attune TO postgres;
|
||||
GRANT ALL PRIVILEGES ON SCHEMA attune TO postgres;
|
||||
```
|
||||
|
||||
### Connection refused
|
||||
|
||||
Check PostgreSQL is running:
|
||||
|
||||
```bash
|
||||
# Linux/macOS
|
||||
pg_ctl status
|
||||
sudo systemctl status postgresql
|
||||
|
||||
# Check if listening
|
||||
psql -U postgres -c "SELECT version();"
|
||||
```
|
||||
|
||||
### Foreign key constraint violations
|
||||
|
||||
Ensure migrations run in correct order. The consolidated migrations handle forward references correctly:
|
||||
- Migration 2 creates tables with forward references (commented as such)
|
||||
- Migration 3 and 4 add the foreign key constraints back
|
||||
|
||||
## Schema Diagram
|
||||
|
||||
```
|
||||
┌─────────────┐
|
||||
│ pack │◄──┐
|
||||
└─────────────┘ │
|
||||
▲ │
|
||||
│ │
|
||||
┌──────┴──────────┴──────┐
|
||||
│ runtime │ trigger │ ... │ (Core entities reference pack)
|
||||
└─────────┴─────────┴─────┘
|
||||
▲ ▲
|
||||
│ │
|
||||
┌──────┴──────┐ │
|
||||
│ sensor │──┘ (Sensors reference both runtime and trigger)
|
||||
└─────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────┐ ┌──────────────┐
|
||||
│ event │────►│ enforcement │ (Events trigger enforcements)
|
||||
└─────────────┘ └──────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ execution │ (Enforcements create executions)
|
||||
└──────────────┘
|
||||
```
|
||||
|
||||
## Workflow Orchestration
|
||||
|
||||
Migration 4 includes comprehensive workflow orchestration support:
|
||||
- **workflow_definition**: Stores parsed YAML workflow definitions with tasks, variables, and transitions
|
||||
- **workflow_execution**: Tracks runtime state including current/completed/failed tasks and variables
|
||||
- **workflow_task_execution**: Individual task execution tracking with retry and timeout support
|
||||
- **Action table extensions**: `is_workflow` and `workflow_def` columns link actions to workflows
|
||||
- **Helper views**: Three views for querying workflow state (summary, task detail, action links)
|
||||
|
||||
## Queue Statistics
|
||||
|
||||
Migration 5 includes the queue_stats table for execution ordering:
|
||||
- Tracks per-action queue length, active executions, and concurrency limits
|
||||
- Enables FIFO queue management with database persistence
|
||||
- Supports monitoring and API visibility of execution queues
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [SQLx Documentation](https://github.com/launchbadge/sqlx)
|
||||
- [PostgreSQL Documentation](https://www.postgresql.org/docs/)
|
||||
- [Attune Architecture Documentation](../docs/architecture.md)
|
||||
- [Attune Data Model Documentation](../docs/data-model.md)
|
||||
Reference in New Issue
Block a user