trying to rework database migrations

This commit is contained in:
2026-02-05 11:42:04 -06:00
parent 3b14c65998
commit 343488b3eb
83 changed files with 5793 additions and 876 deletions

View File

@@ -3,52 +3,17 @@
-- Version: 20250101000001
-- ============================================================================
-- SCHEMA AND ROLE SETUP
-- EXTENSIONS
-- ============================================================================
-- Create the attune schema
-- NOTE: For tests, the test schema is created separately. For production, uncomment below:
-- CREATE SCHEMA IF NOT EXISTS attune;
-- Set search path (now set via connection pool configuration)
-- Create service role for the application
-- NOTE: Commented out for tests, uncomment for production:
-- DO $$
-- BEGIN
-- IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'svc_attune') THEN
-- CREATE ROLE svc_attune WITH LOGIN PASSWORD 'attune_service_password';
-- END IF;
-- END
-- $$;
-- Grant usage on schema
-- NOTE: Commented out for tests, uncomment for production:
-- GRANT USAGE ON SCHEMA attune TO svc_attune;
-- GRANT CREATE ON SCHEMA attune TO svc_attune;
-- Enable required extensions
-- "uuid-ossp": provides uuid_generate_v4() and related UUID helpers.
-- "pgcrypto": crypto helpers (gen_random_uuid, digest, crypt) — presumably
-- used for hashing/encryption elsewhere in the schema; confirm actual usage.
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
-- COMMENT ON SCHEMA attune IS 'Attune automation platform schema';
-- ============================================================================
-- ENUM TYPES
-- ============================================================================
-- RuntimeType enum
-- Idempotent type creation: if runtime_type_enum already exists, swallow the
-- duplicate_object error so re-running this migration is a no-op.
DO $$
BEGIN
    CREATE TYPE runtime_type_enum AS ENUM ('action', 'sensor');
EXCEPTION WHEN duplicate_object THEN
    NULL;
END $$;
COMMENT ON TYPE runtime_type_enum IS 'Type of runtime environment';
-- WorkerType enum
DO $$ BEGIN
CREATE TYPE worker_type_enum AS ENUM (
@@ -62,6 +27,20 @@ END $$;
COMMENT ON TYPE worker_type_enum IS 'Type of worker deployment';
-- WorkerRole enum
-- Idempotent type creation: tolerate re-runs by ignoring duplicate_object.
DO $$
BEGIN
    CREATE TYPE worker_role_enum AS ENUM ('action', 'sensor', 'hybrid');
EXCEPTION WHEN duplicate_object THEN
    NULL;
END $$;
COMMENT ON TYPE worker_role_enum IS 'Role of worker (action executor, sensor, or both)';
-- WorkerStatus enum
DO $$ BEGIN
CREATE TYPE worker_status_enum AS ENUM (
@@ -207,6 +186,22 @@ END $$;
COMMENT ON TYPE artifact_retention_enum IS 'Type of retention policy';
-- PackEnvironmentStatus enum
-- Idempotent type creation: tolerate re-runs by ignoring duplicate_object.
DO $$
BEGIN
    CREATE TYPE pack_environment_status_enum AS ENUM (
        'pending', 'installing', 'ready', 'failed', 'outdated'
    );
EXCEPTION WHEN duplicate_object THEN
    NULL;
END $$;
COMMENT ON TYPE pack_environment_status_enum IS 'Status of pack runtime environment installation';
-- ============================================================================
-- SHARED FUNCTIONS
-- ============================================================================

View File

@@ -1,445 +0,0 @@
-- Migration: Core Tables
-- Description: Creates core tables for packs, runtimes, workers, identity, permissions, policies, and keys
-- Version: 20250101000002
-- ============================================================================
-- PACK TABLE
-- ============================================================================
-- Packs bundle automation components; ref is the stable external identifier.
CREATE TABLE pack (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    label TEXT NOT NULL,
    description TEXT,
    version TEXT NOT NULL,
    conf_schema JSONB NOT NULL DEFAULT '{}'::jsonb,
    config JSONB NOT NULL DEFAULT '{}'::jsonb,
    meta JSONB NOT NULL DEFAULT '{}'::jsonb,
    tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    runtime_deps TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    is_standard BOOLEAN NOT NULL DEFAULT FALSE,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT pack_ref_lowercase CHECK (ref = LOWER(ref)),
    -- NOTE(review): pattern requires at least two characters; a single-letter
    -- ref such as "x" is rejected — confirm that is intended.
    CONSTRAINT pack_ref_format CHECK (ref ~ '^[a-z][a-z0-9_-]+$'),
    CONSTRAINT pack_version_semver CHECK (
        version ~ '^\d+\.\d+\.\d+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$'
    )
);
-- Indexes
-- idx_pack_ref removed: the UNIQUE constraint on ref already creates a btree
-- index on ref, so a second single-column index on ref is pure write overhead.
CREATE INDEX idx_pack_created ON pack(created DESC);
CREATE INDEX idx_pack_is_standard ON pack(is_standard) WHERE is_standard = TRUE;
CREATE INDEX idx_pack_is_standard_created ON pack(is_standard, created DESC);
CREATE INDEX idx_pack_version_created ON pack(version, created DESC);
CREATE INDEX idx_pack_config_gin ON pack USING GIN (config);
CREATE INDEX idx_pack_meta_gin ON pack USING GIN (meta);
CREATE INDEX idx_pack_tags_gin ON pack USING GIN (tags);
CREATE INDEX idx_pack_runtime_deps_gin ON pack USING GIN (runtime_deps);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_pack_updated
    BEFORE UPDATE ON pack
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON pack TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE pack_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE pack IS 'Packs bundle related automation components';
COMMENT ON COLUMN pack.ref IS 'Unique pack reference identifier (e.g., "slack", "github")';
COMMENT ON COLUMN pack.label IS 'Human-readable pack name';
COMMENT ON COLUMN pack.version IS 'Semantic version of the pack';
COMMENT ON COLUMN pack.conf_schema IS 'JSON schema for pack configuration';
COMMENT ON COLUMN pack.config IS 'Pack configuration values';
COMMENT ON COLUMN pack.meta IS 'Pack metadata';
COMMENT ON COLUMN pack.runtime_deps IS 'Array of required runtime references';
COMMENT ON COLUMN pack.is_standard IS 'Whether this is a core/built-in pack';
-- ============================================================================
-- RUNTIME TABLE
-- ============================================================================
-- Runtime environments (action or sensor) provided by packs.
CREATE TABLE runtime (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    -- pack_ref denormalizes the pack's ref; presumably kept in sync by the
    -- application — confirm there is no drift risk.
    pack_ref TEXT,
    description TEXT,
    runtime_type runtime_type_enum NOT NULL,
    name TEXT NOT NULL,
    distributions JSONB NOT NULL,
    installation JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT runtime_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT runtime_ref_format CHECK (ref ~ '^[^.]+\.(action|sensor)\.[^.]+$')
);
-- Indexes
-- idx_runtime_ref removed: UNIQUE on ref already creates an index on ref.
CREATE INDEX idx_runtime_pack ON runtime(pack);
CREATE INDEX idx_runtime_type ON runtime(runtime_type);
CREATE INDEX idx_runtime_created ON runtime(created DESC);
CREATE INDEX idx_runtime_pack_type ON runtime(pack, runtime_type);
CREATE INDEX idx_runtime_type_created ON runtime(runtime_type, created DESC);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_runtime_updated
    BEFORE UPDATE ON runtime
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON runtime TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE runtime_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE runtime IS 'Runtime environments for executing actions and sensors';
COMMENT ON COLUMN runtime.ref IS 'Unique runtime reference (format: pack.type.name)';
COMMENT ON COLUMN runtime.runtime_type IS 'Type of runtime (action or sensor)';
COMMENT ON COLUMN runtime.name IS 'Runtime name (e.g., "python3.11", "nodejs20")';
COMMENT ON COLUMN runtime.distributions IS 'Available distributions for this runtime';
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions';
-- ============================================================================
-- WORKER TABLE
-- ============================================================================
-- Worker processes that execute actions; heartbeat-tracked.
CREATE TABLE worker (
id BIGSERIAL PRIMARY KEY,
-- NOTE(review): name has no UNIQUE constraint, so duplicate worker names are
-- allowed — confirm that is intended.
name TEXT NOT NULL,
worker_type worker_type_enum NOT NULL,
-- No ON DELETE action: deleting a runtime still referenced by a worker fails
-- (default RESTRICT-like behavior).
runtime BIGINT REFERENCES runtime(id),
host TEXT,
port INTEGER,
-- NOTE(review): status is nullable despite having a default; most enum columns
-- in this schema are NOT NULL — confirm whether NULL status is meaningful.
status worker_status_enum DEFAULT 'inactive',
capabilities JSONB,
meta JSONB,
last_heartbeat TIMESTAMPTZ,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints
-- Valid TCP/UDP port range; NULL allowed for workers without a network port.
CONSTRAINT worker_port_range CHECK (port IS NULL OR (port > 0 AND port <= 65535))
);
-- Indexes
CREATE INDEX idx_worker_name ON worker(name);
CREATE INDEX idx_worker_type ON worker(worker_type);
CREATE INDEX idx_worker_runtime ON worker(runtime);
CREATE INDEX idx_worker_status ON worker(status);
CREATE INDEX idx_worker_last_heartbeat ON worker(last_heartbeat DESC);
CREATE INDEX idx_worker_status_runtime ON worker(status, runtime);
CREATE INDEX idx_worker_type_status ON worker(worker_type, status);
-- Trigger
CREATE TRIGGER update_worker_updated
BEFORE UPDATE ON worker
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON worker TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE worker_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE worker IS 'Worker processes that execute actions';
COMMENT ON COLUMN worker.name IS 'Worker identifier';
COMMENT ON COLUMN worker.worker_type IS 'Deployment type (local, remote, container)';
COMMENT ON COLUMN worker.runtime IS 'Associated runtime environment';
COMMENT ON COLUMN worker.status IS 'Current operational status';
COMMENT ON COLUMN worker.capabilities IS 'Worker capabilities and features';
COMMENT ON COLUMN worker.last_heartbeat IS 'Last health check timestamp';
-- ============================================================================
-- IDENTITY TABLE
-- ============================================================================
-- Identities: users and service accounts.
CREATE TABLE identity (
    id BIGSERIAL PRIMARY KEY,
    login TEXT NOT NULL UNIQUE,
    display_name TEXT,
    -- NULL for service accounts / externally-authenticated identities.
    password_hash TEXT,
    attributes JSONB NOT NULL DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
-- idx_identity_login removed: UNIQUE on login already creates that index.
CREATE INDEX idx_identity_created ON identity(created DESC);
-- NOTE(review): an index keyed on password_hash is never useful for lookups by
-- hash value; its only plausible use is the partial predicate ("has a local
-- password") — confirm a query actually needs it, otherwise drop.
CREATE INDEX idx_identity_password_hash ON identity(password_hash) WHERE password_hash IS NOT NULL;
CREATE INDEX idx_identity_attributes_gin ON identity USING GIN (attributes);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_identity_updated
    BEFORE UPDATE ON identity
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON identity TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE identity_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE identity IS 'Identities represent users or service accounts';
COMMENT ON COLUMN identity.login IS 'Unique login identifier';
COMMENT ON COLUMN identity.display_name IS 'Human-readable name';
COMMENT ON COLUMN identity.password_hash IS 'Argon2 hashed password for authentication (NULL for service accounts or external auth)';
COMMENT ON COLUMN identity.attributes IS 'Custom attributes (email, groups, etc.)';
-- ============================================================================
-- PERMISSION_SET TABLE
-- ============================================================================
-- Permission sets group grants together (role-like).
CREATE TABLE permission_set (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    label TEXT,
    description TEXT,
    grants JSONB NOT NULL DEFAULT '[]'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT permission_set_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT permission_set_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
-- idx_permission_set_ref removed: UNIQUE on ref already creates that index.
CREATE INDEX idx_permission_set_pack ON permission_set(pack);
CREATE INDEX idx_permission_set_created ON permission_set(created DESC);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_permission_set_updated
    BEFORE UPDATE ON permission_set
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON permission_set TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE permission_set_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE permission_set IS 'Permission sets group permissions together (like roles)';
COMMENT ON COLUMN permission_set.ref IS 'Unique permission set reference (format: pack.name)';
COMMENT ON COLUMN permission_set.label IS 'Human-readable name';
COMMENT ON COLUMN permission_set.grants IS 'Array of permission grants';
-- ============================================================================
-- PERMISSION_ASSIGNMENT TABLE
-- ============================================================================
-- Many-to-many link between identities and permission sets.
CREATE TABLE permission_assignment (
    id BIGSERIAL PRIMARY KEY,
    identity BIGINT NOT NULL REFERENCES identity(id) ON DELETE CASCADE,
    permset BIGINT NOT NULL REFERENCES permission_set(id) ON DELETE CASCADE,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Unique constraint to prevent duplicate assignments
    CONSTRAINT unique_identity_permset UNIQUE (identity, permset)
);
-- Indexes
-- Single-column indexes on identity and permset removed: the unique index on
-- (identity, permset) and the (col, created DESC) composites below already
-- serve leading-column lookups (PostgreSQL uses multicolumn index prefixes).
CREATE INDEX idx_permission_assignment_created ON permission_assignment(created DESC);
CREATE INDEX idx_permission_assignment_identity_created ON permission_assignment(identity, created DESC);
CREATE INDEX idx_permission_assignment_permset_created ON permission_assignment(permset, created DESC);
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON permission_assignment TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE permission_assignment_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE permission_assignment IS 'Links identities to permission sets (many-to-many)';
COMMENT ON COLUMN permission_assignment.identity IS 'Identity being granted permissions';
COMMENT ON COLUMN permission_assignment.permset IS 'Permission set being assigned';
-- ============================================================================
-- POLICY TABLE
-- ============================================================================
-- Execution-control policies (rate limiting, concurrency caps).
CREATE TABLE policy (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    action BIGINT, -- Forward reference to action table, will add constraint in next migration
    action_ref TEXT,
    parameters TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    method policy_method_enum NOT NULL,
    threshold INTEGER NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT policy_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT policy_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$'),
    CONSTRAINT policy_threshold_positive CHECK (threshold > 0)
);
-- Indexes
-- idx_policy_ref removed: UNIQUE on ref already creates that index.
-- NOTE(review): idx_policy_action and idx_policy_pack are leading prefixes of
-- the (col, created DESC) composites below and are likely droppable too.
CREATE INDEX idx_policy_action ON policy(action);
CREATE INDEX idx_policy_pack ON policy(pack);
CREATE INDEX idx_policy_created ON policy(created DESC);
CREATE INDEX idx_policy_action_created ON policy(action, created DESC);
CREATE INDEX idx_policy_pack_created ON policy(pack, created DESC);
CREATE INDEX idx_policy_parameters_gin ON policy USING GIN (parameters);
CREATE INDEX idx_policy_tags_gin ON policy USING GIN (tags);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_policy_updated
    BEFORE UPDATE ON policy
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON policy TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE policy_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE policy IS 'Policies define execution controls (rate limiting, concurrency)';
COMMENT ON COLUMN policy.ref IS 'Unique policy reference (format: pack.name)';
COMMENT ON COLUMN policy.action IS 'Action this policy applies to';
COMMENT ON COLUMN policy.parameters IS 'Parameter names used for policy grouping';
COMMENT ON COLUMN policy.method IS 'How to handle policy violations (cancel/enqueue)';
COMMENT ON COLUMN policy.threshold IS 'Numeric limit (e.g., max concurrent executions)';
-- ============================================================================
-- KEY TABLE
-- ============================================================================
-- Keys: configuration values and secrets scoped to an owner.
CREATE TABLE key (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    owner_type owner_type_enum NOT NULL,
    -- Denormalized owner id as text; populated by validate_key_owner trigger.
    owner TEXT,
    owner_identity BIGINT REFERENCES identity(id),
    owner_pack BIGINT REFERENCES pack(id),
    owner_pack_ref TEXT,
    owner_action BIGINT, -- Forward reference to action table
    owner_action_ref TEXT,
    owner_sensor BIGINT, -- Forward reference to sensor table
    owner_sensor_ref TEXT,
    name TEXT NOT NULL,
    encrypted BOOLEAN NOT NULL,
    encryption_key_hash TEXT,
    value TEXT NOT NULL,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT key_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT key_ref_format CHECK (ref ~ '^([^.]+\.)?[^.]+$')
);
-- One key name per (owner_type, owner) scope.
CREATE UNIQUE INDEX idx_key_unique ON key(owner_type, owner, name);
-- Indexes
-- Removed as redundant:
--   idx_key_ref                  (UNIQUE on ref already creates an index)
--   idx_key_owner_type           (prefix of idx_key_unique)
--   idx_key_owner_type_owner     (prefix of idx_key_unique)
--   idx_key_owner_identity       (prefix of idx_key_owner_identity_name)
--   idx_key_owner_pack           (prefix of idx_key_owner_pack_name)
CREATE INDEX idx_key_owner_action ON key(owner_action);
CREATE INDEX idx_key_owner_sensor ON key(owner_sensor);
CREATE INDEX idx_key_created ON key(created DESC);
CREATE INDEX idx_key_owner_identity_name ON key(owner_identity, name);
CREATE INDEX idx_key_owner_pack_name ON key(owner_pack, name);
-- Trigger function: validates that exactly the owner FK matching NEW.owner_type
-- is populated, and derives the denormalized NEW.owner text column from it.
-- Any mismatch raises, aborting the INSERT/UPDATE.
CREATE OR REPLACE FUNCTION validate_key_owner()
RETURNS TRIGGER AS $$
DECLARE
owner_count INTEGER := 0;
BEGIN
-- Count how many owner fields are set
IF NEW.owner_identity IS NOT NULL THEN owner_count := owner_count + 1; END IF;
IF NEW.owner_pack IS NOT NULL THEN owner_count := owner_count + 1; END IF;
IF NEW.owner_action IS NOT NULL THEN owner_count := owner_count + 1; END IF;
IF NEW.owner_sensor IS NOT NULL THEN owner_count := owner_count + 1; END IF;
-- System owner should have no owner fields set
IF NEW.owner_type = 'system' THEN
IF owner_count > 0 THEN
RAISE EXCEPTION 'System owner cannot have specific owner fields set';
END IF;
NEW.owner := 'system';
-- All other types must have exactly one owner field set
ELSIF owner_count != 1 THEN
RAISE EXCEPTION 'Exactly one owner field must be set for owner_type %', NEW.owner_type;
-- Validate owner_type matches the populated field and set owner.
-- The inner NULL checks are still needed here: owner_count = 1 only guarantees
-- that SOME owner field is set, not the one matching owner_type.
ELSIF NEW.owner_type = 'identity' THEN
IF NEW.owner_identity IS NULL THEN
RAISE EXCEPTION 'owner_identity must be set for owner_type identity';
END IF;
NEW.owner := NEW.owner_identity::TEXT;
ELSIF NEW.owner_type = 'pack' THEN
IF NEW.owner_pack IS NULL THEN
RAISE EXCEPTION 'owner_pack must be set for owner_type pack';
END IF;
NEW.owner := NEW.owner_pack::TEXT;
ELSIF NEW.owner_type = 'action' THEN
IF NEW.owner_action IS NULL THEN
RAISE EXCEPTION 'owner_action must be set for owner_type action';
END IF;
NEW.owner := NEW.owner_action::TEXT;
ELSIF NEW.owner_type = 'sensor' THEN
IF NEW.owner_sensor IS NULL THEN
RAISE EXCEPTION 'owner_sensor must be set for owner_type sensor';
END IF;
NEW.owner := NEW.owner_sensor::TEXT;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger: validate/derive owner fields on every INSERT or UPDATE of key
CREATE TRIGGER validate_key_owner_trigger
BEFORE INSERT OR UPDATE ON key
FOR EACH ROW
EXECUTE FUNCTION validate_key_owner();
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_key_updated
BEFORE UPDATE ON key
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON key TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE key_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE key IS 'Keys store configuration values and secrets with ownership scoping';
COMMENT ON COLUMN key.ref IS 'Unique key reference (format: [owner.]name)';
COMMENT ON COLUMN key.owner_type IS 'Type of owner (system, identity, pack, action, sensor)';
COMMENT ON COLUMN key.owner IS 'Owner identifier (auto-populated by trigger)';
COMMENT ON COLUMN key.owner_identity IS 'Identity owner (if owner_type=identity)';
COMMENT ON COLUMN key.owner_pack IS 'Pack owner (if owner_type=pack)';
COMMENT ON COLUMN key.owner_pack_ref IS 'Pack reference for owner_pack';
COMMENT ON COLUMN key.owner_action IS 'Action owner (if owner_type=action)';
COMMENT ON COLUMN key.owner_sensor IS 'Sensor owner (if owner_type=sensor)';
COMMENT ON COLUMN key.name IS 'Key name within owner scope';
COMMENT ON COLUMN key.encrypted IS 'Whether the value is encrypted';
COMMENT ON COLUMN key.encryption_key_hash IS 'Hash of encryption key used';
COMMENT ON COLUMN key.value IS 'The actual value (encrypted if encrypted=true)';

View File

@@ -0,0 +1,123 @@
-- Migration: Pack System
-- Description: Creates pack and runtime tables (runtime without runtime_type)
-- Version: 20250101000002
-- ============================================================================
-- PACK TABLE
-- ============================================================================
-- Packs bundle automation components; this version adds installation metadata.
CREATE TABLE pack (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    label TEXT NOT NULL,
    description TEXT,
    version TEXT NOT NULL,
    conf_schema JSONB NOT NULL DEFAULT '{}'::jsonb,
    config JSONB NOT NULL DEFAULT '{}'::jsonb,
    meta JSONB NOT NULL DEFAULT '{}'::jsonb,
    tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    runtime_deps TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    is_standard BOOLEAN NOT NULL DEFAULT FALSE,
    installers JSONB DEFAULT '[]'::jsonb,
    -- Installation metadata (nullable for non-installed packs)
    source_type TEXT,
    source_url TEXT,
    source_ref TEXT,
    checksum TEXT,
    checksum_verified BOOLEAN DEFAULT FALSE,
    installed_at TIMESTAMPTZ,
    -- FK to identity(id) is added by a later migration, after identity exists.
    installed_by BIGINT,
    installation_method TEXT,
    storage_path TEXT,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT pack_ref_lowercase CHECK (ref = LOWER(ref)),
    -- NOTE(review): pattern requires at least two characters — confirm intended.
    CONSTRAINT pack_ref_format CHECK (ref ~ '^[a-z][a-z0-9_-]+$'),
    CONSTRAINT pack_version_semver CHECK (
        version ~ '^\d+\.\d+\.\d+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$'
    )
);
-- Indexes
-- idx_pack_ref removed: the UNIQUE constraint on ref already creates a btree
-- index on ref, so a second single-column index on ref is pure write overhead.
CREATE INDEX idx_pack_created ON pack(created DESC);
CREATE INDEX idx_pack_is_standard ON pack(is_standard) WHERE is_standard = TRUE;
CREATE INDEX idx_pack_is_standard_created ON pack(is_standard, created DESC);
CREATE INDEX idx_pack_version_created ON pack(version, created DESC);
CREATE INDEX idx_pack_config_gin ON pack USING GIN (config);
CREATE INDEX idx_pack_meta_gin ON pack USING GIN (meta);
CREATE INDEX idx_pack_tags_gin ON pack USING GIN (tags);
CREATE INDEX idx_pack_runtime_deps_gin ON pack USING GIN (runtime_deps);
CREATE INDEX idx_pack_installed_at ON pack(installed_at DESC) WHERE installed_at IS NOT NULL;
CREATE INDEX idx_pack_installed_by ON pack(installed_by) WHERE installed_by IS NOT NULL;
CREATE INDEX idx_pack_source_type ON pack(source_type) WHERE source_type IS NOT NULL;
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_pack_updated
    BEFORE UPDATE ON pack
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE pack IS 'Packs bundle related automation components';
COMMENT ON COLUMN pack.ref IS 'Unique pack reference identifier (e.g., "slack", "github")';
COMMENT ON COLUMN pack.label IS 'Human-readable pack name';
COMMENT ON COLUMN pack.version IS 'Semantic version of the pack';
COMMENT ON COLUMN pack.conf_schema IS 'JSON schema for pack configuration';
COMMENT ON COLUMN pack.config IS 'Pack configuration values';
COMMENT ON COLUMN pack.meta IS 'Pack metadata';
COMMENT ON COLUMN pack.runtime_deps IS 'Array of required runtime references';
COMMENT ON COLUMN pack.is_standard IS 'Whether this is a core/built-in pack';
COMMENT ON COLUMN pack.source_type IS 'Installation source type (e.g., "git", "local", "registry")';
COMMENT ON COLUMN pack.source_url IS 'URL or path where pack was installed from';
COMMENT ON COLUMN pack.source_ref IS 'Git ref, version tag, or other source reference';
COMMENT ON COLUMN pack.checksum IS 'Content checksum for verification';
COMMENT ON COLUMN pack.checksum_verified IS 'Whether checksum has been verified';
COMMENT ON COLUMN pack.installed_at IS 'Timestamp when pack was installed';
COMMENT ON COLUMN pack.installed_by IS 'Identity ID of user who installed the pack';
COMMENT ON COLUMN pack.installation_method IS 'Method used for installation (e.g., "cli", "api", "auto")';
COMMENT ON COLUMN pack.storage_path IS 'Filesystem path where pack files are stored';
-- ============================================================================
-- RUNTIME TABLE
-- ============================================================================
-- Unified runtime table (no runtime_type; action/sensor split removed).
CREATE TABLE runtime (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    description TEXT,
    name TEXT NOT NULL,
    distributions JSONB NOT NULL,
    installation JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    -- NOTE(review): unlike the old migration there is no format check on ref
    -- beyond lowercasing — confirm the "pack.name" format is enforced elsewhere.
    CONSTRAINT runtime_ref_lowercase CHECK (ref = LOWER(ref))
);
-- Indexes
-- idx_runtime_ref removed: UNIQUE on ref already creates an index on ref.
CREATE INDEX idx_runtime_pack ON runtime(pack);
CREATE INDEX idx_runtime_created ON runtime(created DESC);
CREATE INDEX idx_runtime_name ON runtime(name);
-- GIN over the embedded verification object for containment queries.
CREATE INDEX idx_runtime_verification ON runtime USING GIN ((distributions->'verification'));
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_runtime_updated
    BEFORE UPDATE ON runtime
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE runtime IS 'Runtime environments for executing actions and sensors (unified)';
COMMENT ON COLUMN runtime.ref IS 'Unique runtime reference (format: pack.name, e.g., core.python)';
COMMENT ON COLUMN runtime.name IS 'Runtime name (e.g., "Python", "Node.js", "Shell")';
COMMENT ON COLUMN runtime.distributions IS 'Runtime distribution metadata including verification commands, version requirements, and capabilities';
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions including package managers and setup steps';

View File

@@ -0,0 +1,168 @@
-- Migration: Identity and Authentication
-- Description: Creates identity, permission, and policy tables
-- Version: 20250101000002
-- ============================================================================
-- IDENTITY TABLE
-- ============================================================================
-- Identities: users and service accounts.
CREATE TABLE identity (
    id BIGSERIAL PRIMARY KEY,
    login TEXT NOT NULL UNIQUE,
    display_name TEXT,
    -- NULL for service accounts / externally-authenticated identities.
    password_hash TEXT,
    attributes JSONB NOT NULL DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
-- idx_identity_login removed: UNIQUE on login already creates that index.
CREATE INDEX idx_identity_created ON identity(created DESC);
-- NOTE(review): lookups by hash value never happen; only the partial predicate
-- ("has a local password") is plausibly useful — confirm a query needs it.
CREATE INDEX idx_identity_password_hash ON identity(password_hash) WHERE password_hash IS NOT NULL;
CREATE INDEX idx_identity_attributes_gin ON identity USING GIN (attributes);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_identity_updated
    BEFORE UPDATE ON identity
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE identity IS 'Identities represent users or service accounts';
COMMENT ON COLUMN identity.login IS 'Unique login identifier';
COMMENT ON COLUMN identity.display_name IS 'Human-readable name';
COMMENT ON COLUMN identity.password_hash IS 'Argon2 hashed password for authentication (NULL for service accounts or external auth)';
COMMENT ON COLUMN identity.attributes IS 'Custom attributes (email, groups, etc.)';
-- ============================================================================
-- ADD FOREIGN KEY CONSTRAINTS TO EXISTING TABLES
-- ============================================================================
-- pack.installed_by could not reference identity at pack-creation time; wire
-- up the FK now that identity exists. Deleting an identity nulls the column.
ALTER TABLE pack
    ADD CONSTRAINT fk_pack_installed_by
        FOREIGN KEY (installed_by) REFERENCES identity(id) ON DELETE SET NULL;
-- ============================================================================
-- ============================================================================
-- PERMISSION_SET TABLE
-- ============================================================================
-- Permission sets group grants together (role-like).
CREATE TABLE permission_set (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    label TEXT,
    description TEXT,
    grants JSONB NOT NULL DEFAULT '[]'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT permission_set_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT permission_set_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
-- idx_permission_set_ref removed: UNIQUE on ref already creates that index.
CREATE INDEX idx_permission_set_pack ON permission_set(pack);
CREATE INDEX idx_permission_set_created ON permission_set(created DESC);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_permission_set_updated
    BEFORE UPDATE ON permission_set
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE permission_set IS 'Permission sets group permissions together (like roles)';
COMMENT ON COLUMN permission_set.ref IS 'Unique permission set reference (format: pack.name)';
COMMENT ON COLUMN permission_set.label IS 'Human-readable name';
COMMENT ON COLUMN permission_set.grants IS 'Array of permission grants';
-- ============================================================================
-- ============================================================================
-- PERMISSION_ASSIGNMENT TABLE
-- ============================================================================
-- Many-to-many link between identities and permission sets.
CREATE TABLE permission_assignment (
    id BIGSERIAL PRIMARY KEY,
    identity BIGINT NOT NULL REFERENCES identity(id) ON DELETE CASCADE,
    permset BIGINT NOT NULL REFERENCES permission_set(id) ON DELETE CASCADE,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Unique constraint to prevent duplicate assignments
    CONSTRAINT unique_identity_permset UNIQUE (identity, permset)
);
-- Indexes
-- Single-column indexes on identity and permset removed: the unique index on
-- (identity, permset) and the (col, created DESC) composites below already
-- serve leading-column lookups (PostgreSQL uses multicolumn index prefixes).
CREATE INDEX idx_permission_assignment_created ON permission_assignment(created DESC);
CREATE INDEX idx_permission_assignment_identity_created ON permission_assignment(identity, created DESC);
CREATE INDEX idx_permission_assignment_permset_created ON permission_assignment(permset, created DESC);
-- Comments
COMMENT ON TABLE permission_assignment IS 'Links identities to permission sets (many-to-many)';
COMMENT ON COLUMN permission_assignment.identity IS 'Identity being granted permissions';
COMMENT ON COLUMN permission_assignment.permset IS 'Permission set being assigned';
-- ============================================================================
-- ============================================================================
-- POLICY TABLE
-- ============================================================================
-- Execution-control policies (rate limiting, concurrency caps).
CREATE TABLE policy (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT,
    action BIGINT, -- Forward reference to action table, will add constraint in next migration
    action_ref TEXT,
    parameters TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    method policy_method_enum NOT NULL,
    threshold INTEGER NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT policy_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT policy_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$'),
    CONSTRAINT policy_threshold_positive CHECK (threshold > 0)
);
-- Indexes
-- idx_policy_ref removed: UNIQUE on ref already creates that index.
-- NOTE(review): idx_policy_action and idx_policy_pack are leading prefixes of
-- the (col, created DESC) composites below and are likely droppable too.
CREATE INDEX idx_policy_action ON policy(action);
CREATE INDEX idx_policy_pack ON policy(pack);
CREATE INDEX idx_policy_created ON policy(created DESC);
CREATE INDEX idx_policy_action_created ON policy(action, created DESC);
CREATE INDEX idx_policy_pack_created ON policy(pack, created DESC);
CREATE INDEX idx_policy_parameters_gin ON policy USING GIN (parameters);
CREATE INDEX idx_policy_tags_gin ON policy USING GIN (tags);
-- Trigger: keep "updated" current on every row update
CREATE TRIGGER update_policy_updated
    BEFORE UPDATE ON policy
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE policy IS 'Policies define execution controls (rate limiting, concurrency)';
COMMENT ON COLUMN policy.ref IS 'Unique policy reference (format: pack.name)';
COMMENT ON COLUMN policy.action IS 'Action this policy applies to';
COMMENT ON COLUMN policy.parameters IS 'Parameter names used for policy grouping';
COMMENT ON COLUMN policy.method IS 'How to handle policy violations (cancel/enqueue)';
COMMENT ON COLUMN policy.threshold IS 'Numeric limit (e.g., max concurrent executions)';
-- ============================================================================

View File

@@ -1,457 +0,0 @@
-- Migration: Execution System
-- Description: Creates tables for actions, rules, executions, and inquiries
-- Version: 20250101000004
-- ============================================================================
-- ACTION TABLE
-- ============================================================================
-- Actions: executable tasks owned by a pack, run inside an optional runtime.
CREATE TABLE action (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref TEXT NOT NULL,
label TEXT NOT NULL,
description TEXT NOT NULL,
entrypoint TEXT NOT NULL,
runtime BIGINT REFERENCES runtime(id),
param_schema JSONB,
out_schema JSONB,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints: refs are lowercase "pack.name"
CONSTRAINT action_ref_lowercase CHECK (ref = LOWER(ref)),
CONSTRAINT action_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
-- NOTE(review): idx_action_ref duplicates the unique index implied by the
-- UNIQUE constraint on ref — likely redundant; confirm before keeping.
CREATE INDEX idx_action_ref ON action(ref);
CREATE INDEX idx_action_pack ON action(pack);
CREATE INDEX idx_action_runtime ON action(runtime);
CREATE INDEX idx_action_created ON action(created DESC);
CREATE INDEX idx_action_pack_runtime ON action(pack, runtime);
CREATE INDEX idx_action_pack_created ON action(pack, created DESC);
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_action_updated
BEFORE UPDATE ON action
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON action TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE action_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE action IS 'Actions are executable tasks/operations';
COMMENT ON COLUMN action.ref IS 'Unique action reference (format: pack.name)';
COMMENT ON COLUMN action.label IS 'Human-readable action name';
COMMENT ON COLUMN action.entrypoint IS 'Code entry point for the action';
COMMENT ON COLUMN action.runtime IS 'Execution environment for the action';
COMMENT ON COLUMN action.param_schema IS 'JSON schema for action input parameters';
COMMENT ON COLUMN action.out_schema IS 'JSON schema for action output/results';
-- Add foreign key constraints that reference action table
-- (resolves the forward references left open by earlier migrations)
ALTER TABLE policy
ADD CONSTRAINT policy_action_fkey
FOREIGN KEY (action) REFERENCES action(id) ON DELETE CASCADE;
ALTER TABLE key
ADD CONSTRAINT key_owner_action_fkey
FOREIGN KEY (owner_action) REFERENCES action(id) ON DELETE CASCADE;
-- ============================================================================
-- RULE TABLE
-- ============================================================================
-- Rules bind a trigger to an action with optional conditions and parameters.
-- NOTE(review): the action and trigger FKs specify no ON DELETE action, so
-- they default to NO ACTION — deleting an action or trigger that is still
-- referenced by a rule will fail. Confirm this restriction is intended.
CREATE TABLE rule (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref TEXT NOT NULL,
label TEXT NOT NULL,
description TEXT NOT NULL,
action BIGINT NOT NULL REFERENCES action(id),
action_ref TEXT NOT NULL,
trigger BIGINT NOT NULL REFERENCES trigger(id),
trigger_ref TEXT NOT NULL,
conditions JSONB NOT NULL DEFAULT '[]'::jsonb,
action_params JSONB DEFAULT '{}'::jsonb,
trigger_params JSONB DEFAULT '{}'::jsonb,
enabled BOOLEAN NOT NULL, -- no default: every INSERT must state enabled explicitly
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints: refs are lowercase "pack.name"
CONSTRAINT rule_ref_lowercase CHECK (ref = LOWER(ref)),
CONSTRAINT rule_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
-- NOTE(review): idx_rule_ref duplicates the unique index implied by the
-- UNIQUE constraint on ref — likely redundant; confirm before keeping.
CREATE INDEX idx_rule_ref ON rule(ref);
CREATE INDEX idx_rule_pack ON rule(pack);
CREATE INDEX idx_rule_action ON rule(action);
CREATE INDEX idx_rule_trigger ON rule(trigger);
CREATE INDEX idx_rule_enabled ON rule(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_rule_created ON rule(created DESC);
CREATE INDEX idx_rule_trigger_enabled ON rule(trigger, enabled);
CREATE INDEX idx_rule_action_enabled ON rule(action, enabled);
CREATE INDEX idx_rule_pack_enabled ON rule(pack, enabled);
CREATE INDEX idx_rule_action_params_gin ON rule USING GIN (action_params);
CREATE INDEX idx_rule_trigger_params_gin ON rule USING GIN (trigger_params);
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_rule_updated
BEFORE UPDATE ON rule
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON rule TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE rule_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE rule IS 'Rules connect triggers to actions with conditional logic';
COMMENT ON COLUMN rule.ref IS 'Unique rule reference (format: pack.name)';
COMMENT ON COLUMN rule.label IS 'Human-readable rule name';
COMMENT ON COLUMN rule.action IS 'Action to execute when rule conditions are met';
COMMENT ON COLUMN rule.trigger IS 'Trigger that activates this rule';
COMMENT ON COLUMN rule.conditions IS 'JSON array of condition expressions';
COMMENT ON COLUMN rule.action_params IS 'JSON object of parameters to pass to the action when rule is triggered';
COMMENT ON COLUMN rule.trigger_params IS 'JSON object of parameters for trigger configuration and event filtering';
COMMENT ON COLUMN rule.enabled IS 'Whether this rule is active';
-- Add foreign key constraint to enforcement table
-- (resolves a forward reference; enforcement keeps its row if the rule goes)
ALTER TABLE enforcement
ADD CONSTRAINT enforcement_rule_fkey
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
-- ============================================================================
-- EXECUTION TABLE
-- ============================================================================
-- Executions: one row per action run; parent supports nested workflows.
-- NOTE(review): the parent self-FK has no ON DELETE action (defaults to
-- NO ACTION), so deleting a parent execution that still has children will
-- fail. Confirm this restriction is intended.
CREATE TABLE execution (
id BIGSERIAL PRIMARY KEY,
action BIGINT REFERENCES action(id), -- nullable: row survives action deletion
action_ref TEXT NOT NULL,
config JSONB,
parent BIGINT REFERENCES execution(id),
enforcement BIGINT REFERENCES enforcement(id),
executor BIGINT REFERENCES identity(id) ON DELETE SET NULL,
status execution_status_enum NOT NULL DEFAULT 'requested',
result JSONB,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_execution_action ON execution(action);
CREATE INDEX idx_execution_action_ref ON execution(action_ref);
CREATE INDEX idx_execution_parent ON execution(parent);
CREATE INDEX idx_execution_enforcement ON execution(enforcement);
CREATE INDEX idx_execution_executor ON execution(executor);
CREATE INDEX idx_execution_status ON execution(status);
CREATE INDEX idx_execution_created ON execution(created DESC);
CREATE INDEX idx_execution_updated ON execution(updated DESC);
CREATE INDEX idx_execution_status_created ON execution(status, created DESC);
CREATE INDEX idx_execution_status_updated ON execution(status, updated DESC);
CREATE INDEX idx_execution_action_status ON execution(action, status);
CREATE INDEX idx_execution_executor_created ON execution(executor, created DESC);
CREATE INDEX idx_execution_parent_created ON execution(parent, created DESC);
CREATE INDEX idx_execution_result_gin ON execution USING GIN (result);
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_execution_updated
BEFORE UPDATE ON execution
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON execution TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE execution_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE execution IS 'Executions represent action runs, supports nested workflows';
COMMENT ON COLUMN execution.action IS 'Action being executed (may be null if action deleted)';
COMMENT ON COLUMN execution.action_ref IS 'Action reference (preserved even if action deleted)';
COMMENT ON COLUMN execution.config IS 'Snapshot of action configuration at execution time';
COMMENT ON COLUMN execution.parent IS 'Parent execution ID for workflow hierarchies';
COMMENT ON COLUMN execution.enforcement IS 'Enforcement that triggered this execution (if rule-driven)';
COMMENT ON COLUMN execution.executor IS 'Identity that initiated the execution';
COMMENT ON COLUMN execution.status IS 'Current execution lifecycle status';
COMMENT ON COLUMN execution.result IS 'Execution output/results';
-- ============================================================================
-- INQUIRY TABLE
-- ============================================================================
-- Inquiries: human-in-the-loop prompts attached to a (waiting) execution.
CREATE TABLE inquiry (
id BIGSERIAL PRIMARY KEY,
execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
prompt TEXT NOT NULL,
response_schema JSONB,
assigned_to BIGINT REFERENCES identity(id) ON DELETE SET NULL,
status inquiry_status_enum NOT NULL DEFAULT 'pending',
response JSONB,
timeout_at TIMESTAMPTZ, -- NULL means the inquiry never expires
responded_at TIMESTAMPTZ,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes (timeout index is partial: only rows with a deadline are scanned)
CREATE INDEX idx_inquiry_execution ON inquiry(execution);
CREATE INDEX idx_inquiry_assigned_to ON inquiry(assigned_to);
CREATE INDEX idx_inquiry_status ON inquiry(status);
CREATE INDEX idx_inquiry_timeout_at ON inquiry(timeout_at) WHERE timeout_at IS NOT NULL;
CREATE INDEX idx_inquiry_created ON inquiry(created DESC);
CREATE INDEX idx_inquiry_status_created ON inquiry(status, created DESC);
CREATE INDEX idx_inquiry_assigned_status ON inquiry(assigned_to, status);
CREATE INDEX idx_inquiry_execution_status ON inquiry(execution, status);
CREATE INDEX idx_inquiry_response_gin ON inquiry USING GIN (response);
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_inquiry_updated
BEFORE UPDATE ON inquiry
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON inquiry TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE inquiry_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE inquiry IS 'Inquiries enable human-in-the-loop workflows with async user interactions';
COMMENT ON COLUMN inquiry.execution IS 'Execution that is waiting on this inquiry';
COMMENT ON COLUMN inquiry.prompt IS 'Question or prompt text for the user';
COMMENT ON COLUMN inquiry.response_schema IS 'JSON schema defining expected response format';
COMMENT ON COLUMN inquiry.assigned_to IS 'Identity who should respond to this inquiry';
COMMENT ON COLUMN inquiry.status IS 'Current inquiry lifecycle status';
COMMENT ON COLUMN inquiry.response IS 'User response data';
COMMENT ON COLUMN inquiry.timeout_at IS 'When this inquiry expires';
COMMENT ON COLUMN inquiry.responded_at IS 'When the response was received';
-- ============================================================================
-- WORKFLOW DEFINITION TABLE
-- ============================================================================
-- Workflow definitions: parsed workflow specs (YAML -> JSONB) owned by a pack.
-- NOTE(review): this table uses VARCHAR(255) where sibling tables use TEXT —
-- left as-is here since widening is a separate schema decision; confirm.
CREATE TABLE workflow_definition (
id BIGSERIAL PRIMARY KEY,
ref VARCHAR(255) NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref VARCHAR(255) NOT NULL,
label VARCHAR(255) NOT NULL,
description TEXT,
version VARCHAR(50) NOT NULL,
param_schema JSONB,
out_schema JSONB,
definition JSONB NOT NULL,
tags TEXT[] DEFAULT '{}',
enabled BOOLEAN DEFAULT true NOT NULL,
created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);
-- Indexes
CREATE INDEX idx_workflow_def_pack ON workflow_definition(pack);
-- Partial index on the enabled flag, matching the convention used elsewhere
-- in this schema (idx_rule_enabled, idx_workflow_exec_paused): a full index
-- on a two-valued boolean adds no selectivity for the enabled=TRUE lookups.
CREATE INDEX idx_workflow_def_enabled ON workflow_definition(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_workflow_def_ref ON workflow_definition(ref);
CREATE INDEX idx_workflow_def_tags ON workflow_definition USING GIN (tags);
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_workflow_definition_updated
BEFORE UPDATE ON workflow_definition
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON workflow_definition TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE workflow_definition_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE workflow_definition IS 'Stores workflow definitions (YAML parsed to JSON)';
COMMENT ON COLUMN workflow_definition.ref IS 'Unique workflow reference (e.g., pack_name.workflow_name)';
COMMENT ON COLUMN workflow_definition.definition IS 'Complete workflow specification including tasks, variables, and transitions';
COMMENT ON COLUMN workflow_definition.param_schema IS 'JSON schema for workflow input parameters';
COMMENT ON COLUMN workflow_definition.out_schema IS 'JSON schema for workflow output';
-- ============================================================================
-- WORKFLOW EXECUTION TABLE
-- ============================================================================
-- Workflow executions: runtime state for one workflow run, tied 1:1 to an
-- execution row. Task progress is tracked as arrays of task names.
-- NOTE(review): the workflow_def FK has no ON DELETE action (NO ACTION), so
-- deleting a definition with existing runs fails. Confirm intended.
CREATE TABLE workflow_execution (
id BIGSERIAL PRIMARY KEY,
execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
workflow_def BIGINT NOT NULL REFERENCES workflow_definition(id),
current_tasks TEXT[] DEFAULT '{}',
completed_tasks TEXT[] DEFAULT '{}',
failed_tasks TEXT[] DEFAULT '{}',
skipped_tasks TEXT[] DEFAULT '{}',
variables JSONB DEFAULT '{}',
task_graph JSONB NOT NULL,
status execution_status_enum NOT NULL DEFAULT 'requested',
error_message TEXT,
paused BOOLEAN DEFAULT false NOT NULL,
pause_reason TEXT,
created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);
-- Indexes (paused index is partial: only paused rows are indexed)
CREATE INDEX idx_workflow_exec_execution ON workflow_execution(execution);
CREATE INDEX idx_workflow_exec_workflow_def ON workflow_execution(workflow_def);
CREATE INDEX idx_workflow_exec_status ON workflow_execution(status);
CREATE INDEX idx_workflow_exec_paused ON workflow_execution(paused) WHERE paused = true;
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_workflow_execution_updated
BEFORE UPDATE ON workflow_execution
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON workflow_execution TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE workflow_execution_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE workflow_execution IS 'Runtime state tracking for workflow executions';
COMMENT ON COLUMN workflow_execution.variables IS 'Workflow-scoped variables, updated via publish directives';
COMMENT ON COLUMN workflow_execution.task_graph IS 'Execution graph with dependencies and transitions';
COMMENT ON COLUMN workflow_execution.current_tasks IS 'Array of task names currently executing';
COMMENT ON COLUMN workflow_execution.paused IS 'True if workflow execution is paused (can be resumed)';
-- ============================================================================
-- WORKFLOW TASK EXECUTION TABLE
-- ============================================================================
-- Task executions: one row per task attempt inside a workflow run, each also
-- linked to its own execution row. Carries retry/timeout bookkeeping.
CREATE TABLE workflow_task_execution (
id BIGSERIAL PRIMARY KEY,
workflow_execution BIGINT NOT NULL REFERENCES workflow_execution(id) ON DELETE CASCADE,
execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
task_name VARCHAR(255) NOT NULL,
task_index INTEGER, -- NULL unless this task is a with-items iteration
task_batch INTEGER, -- NULL unless with-items runs in batches
status execution_status_enum NOT NULL DEFAULT 'requested',
started_at TIMESTAMPTZ,
completed_at TIMESTAMPTZ,
duration_ms BIGINT,
result JSONB,
error JSONB,
retry_count INTEGER DEFAULT 0 NOT NULL,
max_retries INTEGER DEFAULT 0 NOT NULL,
next_retry_at TIMESTAMPTZ,
timeout_seconds INTEGER,
timed_out BOOLEAN DEFAULT false NOT NULL,
created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);
-- Indexes (retry/timeout indexes are partial: only exceptional rows indexed)
CREATE INDEX idx_wf_task_exec_workflow ON workflow_task_execution(workflow_execution);
CREATE INDEX idx_wf_task_exec_execution ON workflow_task_execution(execution);
CREATE INDEX idx_wf_task_exec_status ON workflow_task_execution(status);
CREATE INDEX idx_wf_task_exec_task_name ON workflow_task_execution(task_name);
CREATE INDEX idx_wf_task_exec_retry ON workflow_task_execution(retry_count) WHERE retry_count > 0;
CREATE INDEX idx_wf_task_exec_timeout ON workflow_task_execution(timed_out) WHERE timed_out = true;
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_workflow_task_execution_updated
BEFORE UPDATE ON workflow_task_execution
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON workflow_task_execution TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE workflow_task_execution_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE workflow_task_execution IS 'Individual task executions within workflows';
COMMENT ON COLUMN workflow_task_execution.task_index IS 'Index for with-items iteration tasks (0-based)';
COMMENT ON COLUMN workflow_task_execution.task_batch IS 'Batch number for batched with-items processing';
COMMENT ON COLUMN workflow_task_execution.duration_ms IS 'Task execution duration in milliseconds';
-- ============================================================================
-- MODIFY ACTION TABLE - Add Workflow Support
-- ============================================================================
-- Extend action so a workflow can be invoked like any other action:
-- is_workflow marks workflow-backed actions; workflow_def links to the spec
-- and cascades so the action row is removed with its definition.
ALTER TABLE action
ADD COLUMN is_workflow BOOLEAN DEFAULT false NOT NULL,
ADD COLUMN workflow_def BIGINT REFERENCES workflow_definition(id) ON DELETE CASCADE;
-- Partial index: only the (presumably rare) workflow-backed actions
CREATE INDEX idx_action_is_workflow ON action(is_workflow) WHERE is_workflow = true;
CREATE INDEX idx_action_workflow_def ON action(workflow_def);
COMMENT ON COLUMN action.is_workflow IS 'True if this action is a workflow (composable action graph)';
COMMENT ON COLUMN action.workflow_def IS 'Reference to workflow definition if is_workflow=true';
-- ============================================================================
-- WORKFLOW VIEWS
-- ============================================================================
-- Summary view: one row per workflow execution with task-state counts.
-- COALESCE(cardinality(...), 0) replaces array_length(..., 1): array_length
-- returns NULL for an empty array, which made the *_task_count columns read
-- NULL instead of 0 for executions with no tasks in a given state.
CREATE VIEW workflow_execution_summary AS
SELECT
we.id,
we.execution,
wd.ref as workflow_ref,
wd.label as workflow_label,
wd.version as workflow_version,
we.status,
we.paused,
COALESCE(cardinality(we.current_tasks), 0) as current_task_count,
COALESCE(cardinality(we.completed_tasks), 0) as completed_task_count,
COALESCE(cardinality(we.failed_tasks), 0) as failed_task_count,
COALESCE(cardinality(we.skipped_tasks), 0) as skipped_task_count,
we.error_message,
we.created,
we.updated
FROM workflow_execution we
JOIN workflow_definition wd ON we.workflow_def = wd.id;
COMMENT ON VIEW workflow_execution_summary IS 'Summary view of workflow executions with task counts';
-- Detail view: task executions joined with their workflow run and definition.
-- NOTE(review): the alias "workflow_execution_id" is misleading — it exposes
-- we.execution (the workflow's own execution row id), not the id of the
-- workflow_execution row (that is wte.workflow_execution). Renaming would
-- break consumers, so it is only flagged here; confirm before changing.
CREATE VIEW workflow_task_detail AS
SELECT
wte.id,
wte.workflow_execution,
we.execution as workflow_execution_id,
wd.ref as workflow_ref,
wte.task_name,
wte.task_index,
wte.task_batch,
wte.status,
wte.retry_count,
wte.max_retries,
wte.timed_out,
wte.duration_ms,
wte.started_at,
wte.completed_at,
wte.created,
wte.updated
FROM workflow_task_execution wte
JOIN workflow_execution we ON wte.workflow_execution = we.id
JOIN workflow_definition wd ON we.workflow_def = wd.id;
COMMENT ON VIEW workflow_task_detail IS 'Detailed view of task executions with workflow context';
-- Link view: each workflow definition with its action record, if one exists.
-- LEFT JOIN keeps definitions that have no backing action yet (action columns
-- come back NULL in that case).
CREATE VIEW workflow_action_link AS
SELECT
wd.id as workflow_def_id,
wd.ref as workflow_ref,
wd.label,
wd.version,
wd.enabled,
a.id as action_id,
a.ref as action_ref,
a.pack as pack_id,
a.pack_ref
FROM workflow_definition wd
LEFT JOIN action a ON a.workflow_def = wd.id AND a.is_workflow = true;
COMMENT ON VIEW workflow_action_link IS 'Links workflow definitions to their corresponding action records';
-- Permissions for views (read-only: views are not updatable targets here)
GRANT SELECT ON workflow_execution_summary TO svc_attune;
GRANT SELECT ON workflow_task_detail TO svc_attune;
GRANT SELECT ON workflow_action_link TO svc_attune;

View File

@@ -1,8 +1,7 @@
-- Migration: Event System
-- Description: Creates tables for triggers, sensors, events, and enforcement
-- Description: Creates trigger, sensor, event, and rule tables (with webhook_config, is_adhoc from start)
-- Version: 20250101000003
-- ============================================================================
-- TRIGGER TABLE
-- ============================================================================
@@ -15,8 +14,12 @@ CREATE TABLE trigger (
label TEXT NOT NULL,
description TEXT,
enabled BOOLEAN NOT NULL DEFAULT TRUE,
is_adhoc BOOLEAN DEFAULT false NOT NULL,
param_schema JSONB,
out_schema JSONB,
webhook_enabled BOOLEAN NOT NULL DEFAULT FALSE,
webhook_key VARCHAR(64) UNIQUE,
webhook_config JSONB DEFAULT '{}'::jsonb,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
@@ -31,6 +34,7 @@ CREATE INDEX idx_trigger_pack ON trigger(pack);
CREATE INDEX idx_trigger_enabled ON trigger(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_trigger_created ON trigger(created DESC);
CREATE INDEX idx_trigger_pack_enabled ON trigger(pack, enabled);
CREATE INDEX idx_trigger_webhook_key ON trigger(webhook_key) WHERE webhook_key IS NOT NULL;
CREATE INDEX idx_trigger_enabled_created ON trigger(enabled, created DESC) WHERE enabled = TRUE;
-- Trigger
@@ -39,10 +43,6 @@ CREATE TRIGGER update_trigger_updated
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON trigger TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE trigger_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE trigger IS 'Trigger definitions that can activate rules';
COMMENT ON COLUMN trigger.ref IS 'Unique trigger reference (format: pack.name)';
@@ -51,6 +51,9 @@ COMMENT ON COLUMN trigger.enabled IS 'Whether this trigger is active';
COMMENT ON COLUMN trigger.param_schema IS 'JSON schema defining the expected configuration parameters when this trigger is used';
COMMENT ON COLUMN trigger.out_schema IS 'JSON schema defining the structure of event payloads generated by this trigger';
-- ============================================================================
-- ============================================================================
-- SENSOR TABLE
-- ============================================================================
@@ -78,43 +81,6 @@ CREATE TABLE sensor (
CONSTRAINT sensor_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
CREATE INDEX idx_sensor_ref ON sensor(ref);
CREATE INDEX idx_sensor_pack ON sensor(pack);
CREATE INDEX idx_sensor_runtime ON sensor(runtime);
CREATE INDEX idx_sensor_trigger ON sensor(trigger);
CREATE INDEX idx_sensor_enabled ON sensor(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_sensor_created ON sensor(created DESC);
CREATE INDEX idx_sensor_trigger_enabled ON sensor(trigger, enabled);
CREATE INDEX idx_sensor_pack_enabled ON sensor(pack, enabled);
CREATE INDEX idx_sensor_runtime_enabled ON sensor(runtime, enabled);
CREATE INDEX idx_sensor_config ON sensor USING GIN (config);
-- Trigger
CREATE TRIGGER update_sensor_updated
BEFORE UPDATE ON sensor
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON sensor TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE sensor_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE sensor IS 'Sensors monitor for trigger conditions and generate events';
COMMENT ON COLUMN sensor.ref IS 'Unique sensor reference (format: pack.name)';
COMMENT ON COLUMN sensor.entrypoint IS 'Code entry point for the sensor';
COMMENT ON COLUMN sensor.runtime IS 'Execution environment for the sensor';
COMMENT ON COLUMN sensor.trigger IS 'Trigger that this sensor monitors for';
COMMENT ON COLUMN sensor.enabled IS 'Whether this sensor is active';
COMMENT ON COLUMN sensor.param_schema IS 'JSON schema describing expected configuration (optional, usually inherited from trigger)';
COMMENT ON COLUMN sensor.config IS 'Actual configuration values for this sensor instance (conforms to trigger param_schema)';
-- Add foreign key constraint to key table for sensor ownership
ALTER TABLE key
ADD CONSTRAINT key_owner_sensor_fkey
FOREIGN KEY (owner_sensor) REFERENCES sensor(id) ON DELETE CASCADE;
-- ============================================================================
-- EVENT TABLE
-- ============================================================================
@@ -128,6 +94,8 @@ CREATE TABLE event (
source BIGINT REFERENCES sensor(id),
source_ref TEXT,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
rule BIGINT,
rule_ref TEXT,
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
@@ -147,10 +115,6 @@ CREATE TRIGGER update_event_updated
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON event TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE event_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE event IS 'Events are instances of triggers firing';
COMMENT ON COLUMN event.trigger IS 'Trigger that fired (may be null if trigger deleted)';
@@ -200,10 +164,6 @@ CREATE TRIGGER update_enforcement_updated
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON enforcement TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE enforcement_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE enforcement IS 'Enforcements represent rule triggering by events';
COMMENT ON COLUMN enforcement.rule IS 'Rule being enforced (may be null if rule deleted)';

View File

@@ -0,0 +1,36 @@
-- Migration: Action
-- Description: Creates action table (with is_adhoc from start)
-- Version: 20250101000005
-- ============================================================================
-- ACTION TABLE
-- ============================================================================
-- Actions: executable tasks owned by a pack, run inside an optional runtime.
-- NOTE(review): the migration header above says "with is_adhoc from start",
-- but no is_adhoc column is defined here — confirm whether it was dropped
-- intentionally or lost in the rework.
-- NOTE(review): no indexes, updated-trigger, or COMMENTs are created in this
-- migration, unlike the sibling tables — presumably added elsewhere; verify.
CREATE TABLE action (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref TEXT NOT NULL,
label TEXT NOT NULL,
description TEXT NOT NULL,
entrypoint TEXT NOT NULL,
runtime BIGINT REFERENCES runtime(id),
param_schema JSONB,
out_schema JSONB,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints: refs are lowercase "pack.name"
CONSTRAINT action_ref_lowercase CHECK (ref = LOWER(ref)),
CONSTRAINT action_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- ============================================================================
-- Add foreign key constraint for policy table
-- (resolves the forward reference left open when policy was created)
ALTER TABLE policy
ADD CONSTRAINT policy_action_fkey
FOREIGN KEY (action) REFERENCES action(id) ON DELETE CASCADE;
-- Note: Foreign key constraints for key table (key_owner_action_fkey, key_owner_sensor_fkey)
-- will be added in migration 20250101000009_keys_artifacts.sql after the key table is created

View File

@@ -1,153 +0,0 @@
-- Migration: Supporting Tables and Indexes
-- Description: Creates notification and artifact tables plus performance optimization indexes
-- Version: 20250101000005
-- ============================================================================
-- NOTIFICATION TABLE
-- ============================================================================
-- Notifications: outbox-style rows describing entity changes; an AFTER INSERT
-- trigger (defined below) relays each row via pg_notify for live listeners.
CREATE TABLE notification (
id BIGSERIAL PRIMARY KEY,
channel TEXT NOT NULL,
entity_type TEXT NOT NULL,
entity TEXT NOT NULL,
activity TEXT NOT NULL,
state notification_status_enum NOT NULL DEFAULT 'created',
content JSONB,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_notification_channel ON notification(channel);
CREATE INDEX idx_notification_entity_type ON notification(entity_type);
CREATE INDEX idx_notification_entity ON notification(entity);
CREATE INDEX idx_notification_state ON notification(state);
CREATE INDEX idx_notification_created ON notification(created DESC);
CREATE INDEX idx_notification_channel_state ON notification(channel, state);
CREATE INDEX idx_notification_entity_type_entity ON notification(entity_type, entity);
CREATE INDEX idx_notification_state_created ON notification(state, created DESC);
CREATE INDEX idx_notification_content_gin ON notification USING GIN (content);
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_notification_updated
BEFORE UPDATE ON notification
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Relays a freshly inserted notification row to LISTEN/NOTIFY subscribers.
-- The channel name comes from the row itself (NEW.channel); the payload is a
-- compact JSON object carrying the identifiers a listener needs to react
-- (id, entity_type, entity, activity). Runs AFTER INSERT, so the row is
-- already committed-visible to the transaction when the message is built.
CREATE OR REPLACE FUNCTION notify_on_insert()
RETURNS TRIGGER AS $$
BEGIN
PERFORM pg_notify(
NEW.channel,
json_build_object(
'id', NEW.id,
'entity_type', NEW.entity_type,
'entity', NEW.entity,
'activity', NEW.activity
)::text
);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Fire the relay for every inserted notification row
CREATE TRIGGER notify_on_notification_insert
AFTER INSERT ON notification
FOR EACH ROW
EXECUTE FUNCTION notify_on_insert();
-- Permissions: service role gets full DML plus sequence access for inserts
GRANT SELECT, INSERT, UPDATE, DELETE ON notification TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE notification_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE notification IS 'System notifications about entity changes for real-time updates';
COMMENT ON COLUMN notification.channel IS 'Notification channel (typically table name)';
COMMENT ON COLUMN notification.entity_type IS 'Type of entity (table name)';
COMMENT ON COLUMN notification.entity IS 'Entity identifier (typically ID or ref)';
COMMENT ON COLUMN notification.activity IS 'Activity type (e.g., "created", "updated", "completed")';
COMMENT ON COLUMN notification.state IS 'Processing state of notification';
COMMENT ON COLUMN notification.content IS 'Optional notification payload data';
-- ============================================================================
-- ARTIFACT TABLE
-- ============================================================================
-- Artifacts: files/logs/outputs tracked per owner, with retention settings.
-- NOTE(review): ref is indexed but not UNIQUE — multiple rows may share a ref
-- (presumably versions under one retention policy); confirm intended.
CREATE TABLE artifact (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL,
scope owner_type_enum NOT NULL DEFAULT 'system',
owner TEXT NOT NULL DEFAULT '', -- empty string (not NULL) means system-owned
type artifact_type_enum NOT NULL,
retention_policy artifact_retention_enum NOT NULL DEFAULT 'versions',
retention_limit INTEGER NOT NULL DEFAULT 1,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_artifact_ref ON artifact(ref);
CREATE INDEX idx_artifact_scope ON artifact(scope);
CREATE INDEX idx_artifact_owner ON artifact(owner);
CREATE INDEX idx_artifact_type ON artifact(type);
CREATE INDEX idx_artifact_created ON artifact(created DESC);
CREATE INDEX idx_artifact_scope_owner ON artifact(scope, owner);
CREATE INDEX idx_artifact_type_created ON artifact(type, created DESC);
-- Trigger: keeps the updated column current on every row update
CREATE TRIGGER update_artifact_updated
BEFORE UPDATE ON artifact
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON artifact TO svc_attune;
GRANT USAGE, SELECT ON SEQUENCE artifact_id_seq TO svc_attune;
-- Comments
COMMENT ON TABLE artifact IS 'Artifacts track files, logs, and outputs from executions';
COMMENT ON COLUMN artifact.ref IS 'Artifact reference/path';
COMMENT ON COLUMN artifact.scope IS 'Owner type (system, identity, pack, action, sensor)';
COMMENT ON COLUMN artifact.owner IS 'Owner identifier';
COMMENT ON COLUMN artifact.type IS 'Artifact type (file, url, progress, etc.)';
COMMENT ON COLUMN artifact.retention_policy IS 'How to retain artifacts (versions, days, hours, minutes)';
COMMENT ON COLUMN artifact.retention_limit IS 'Numeric limit for retention policy';
-- ============================================================================
-- QUEUE_STATS TABLE
-- ============================================================================
-- Queue statistics: one row per action (PK doubles as the FK), maintained by
-- the application; rows vanish with their action via CASCADE.
CREATE TABLE queue_stats (
action_id BIGINT PRIMARY KEY REFERENCES action(id) ON DELETE CASCADE,
queue_length INTEGER NOT NULL DEFAULT 0,
active_count INTEGER NOT NULL DEFAULT 0,
max_concurrent INTEGER NOT NULL DEFAULT 1,
oldest_enqueued_at TIMESTAMPTZ, -- NULL when the queue is empty
total_enqueued BIGINT NOT NULL DEFAULT 0,
total_completed BIGINT NOT NULL DEFAULT 0,
last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_queue_stats_last_updated ON queue_stats(last_updated);
-- Permissions (no sequence grant needed: PK is the action FK, not a serial)
GRANT SELECT, INSERT, UPDATE, DELETE ON queue_stats TO svc_attune;
-- Comments
COMMENT ON TABLE queue_stats IS 'Real-time queue statistics for action execution ordering';
COMMENT ON COLUMN queue_stats.action_id IS 'Foreign key to action table';
COMMENT ON COLUMN queue_stats.queue_length IS 'Number of executions waiting in queue';
COMMENT ON COLUMN queue_stats.active_count IS 'Number of currently running executions';
COMMENT ON COLUMN queue_stats.max_concurrent IS 'Maximum concurrent executions allowed';
COMMENT ON COLUMN queue_stats.oldest_enqueued_at IS 'Timestamp of oldest queued execution (NULL if queue empty)';
COMMENT ON COLUMN queue_stats.total_enqueued IS 'Total executions enqueued since queue creation';
COMMENT ON COLUMN queue_stats.total_completed IS 'Total executions completed since queue creation';
COMMENT ON COLUMN queue_stats.last_updated IS 'Timestamp of last statistics update';

View File

@@ -0,0 +1,107 @@
-- Migration: Execution System
-- Description: Creates execution (with workflow columns) and inquiry tables
-- Version: 20250101000006
-- ============================================================================
-- EXECUTION TABLE
-- ============================================================================
-- One row per action run; the self-referencing parent column models nested
-- workflow executions.
CREATE TABLE execution (
    id BIGSERIAL PRIMARY KEY,
    action BIGINT REFERENCES action(id),
    action_ref TEXT NOT NULL,
    config JSONB,
    parent BIGINT REFERENCES execution(id),
    enforcement BIGINT REFERENCES enforcement(id),
    executor BIGINT REFERENCES identity(id) ON DELETE SET NULL,
    status execution_status_enum NOT NULL DEFAULT 'requested',
    result JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    is_workflow BOOLEAN DEFAULT false NOT NULL,
    -- FK constraint for workflow_def is added in migration 20250101000007,
    -- once workflow_definition exists.
    workflow_def BIGINT,
    workflow_task JSONB,
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_execution_action ON execution(action);
CREATE INDEX idx_execution_action_ref ON execution(action_ref);
CREATE INDEX idx_execution_parent ON execution(parent);
CREATE INDEX idx_execution_enforcement ON execution(enforcement);
CREATE INDEX idx_execution_executor ON execution(executor);
CREATE INDEX idx_execution_status ON execution(status);
CREATE INDEX idx_execution_created ON execution(created DESC);
CREATE INDEX idx_execution_updated ON execution(updated DESC);
CREATE INDEX idx_execution_status_created ON execution(status, created DESC);
CREATE INDEX idx_execution_status_updated ON execution(status, updated DESC);
CREATE INDEX idx_execution_action_status ON execution(action, status);
CREATE INDEX idx_execution_executor_created ON execution(executor, created DESC);
CREATE INDEX idx_execution_parent_created ON execution(parent, created DESC);
-- Supports lookups by workflow definition (FK added in a later migration
-- would otherwise be unindexed).
CREATE INDEX idx_execution_workflow_def ON execution(workflow_def);
CREATE INDEX idx_execution_result_gin ON execution USING GIN (result);
-- Trigger
CREATE TRIGGER update_execution_updated
    BEFORE UPDATE ON execution
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE execution IS 'Executions represent action runs, supports nested workflows';
COMMENT ON COLUMN execution.action IS 'Action being executed (may be null if action deleted)';
COMMENT ON COLUMN execution.action_ref IS 'Action reference (preserved even if action deleted)';
COMMENT ON COLUMN execution.config IS 'Snapshot of action configuration at execution time';
COMMENT ON COLUMN execution.parent IS 'Parent execution ID for workflow hierarchies';
COMMENT ON COLUMN execution.enforcement IS 'Enforcement that triggered this execution (if rule-driven)';
COMMENT ON COLUMN execution.executor IS 'Identity that initiated the execution';
COMMENT ON COLUMN execution.status IS 'Current execution lifecycle status';
COMMENT ON COLUMN execution.result IS 'Execution output/results';
COMMENT ON COLUMN execution.is_workflow IS 'True if this execution runs a workflow rather than a single action';
COMMENT ON COLUMN execution.workflow_def IS 'Workflow definition being executed (FK added in workflow migration)';
COMMENT ON COLUMN execution.workflow_task IS 'Per-task workflow state as JSONB (consolidated from workflow_task_execution)';
-- ============================================================================
-- INQUIRY TABLE
-- ============================================================================
-- Human-in-the-loop prompts: an execution blocks on an inquiry until a user
-- responds or timeout_at passes. Rows die with their execution (CASCADE).
CREATE TABLE inquiry (
id BIGSERIAL PRIMARY KEY,
execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
prompt TEXT NOT NULL,
response_schema JSONB,
assigned_to BIGINT REFERENCES identity(id) ON DELETE SET NULL,
status inquiry_status_enum NOT NULL DEFAULT 'pending',
response JSONB,
timeout_at TIMESTAMPTZ,
responded_at TIMESTAMPTZ,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_inquiry_execution ON inquiry(execution);
CREATE INDEX idx_inquiry_assigned_to ON inquiry(assigned_to);
CREATE INDEX idx_inquiry_status ON inquiry(status);
-- Partial index: expiry sweeps only scan inquiries that actually have a deadline
CREATE INDEX idx_inquiry_timeout_at ON inquiry(timeout_at) WHERE timeout_at IS NOT NULL;
CREATE INDEX idx_inquiry_created ON inquiry(created DESC);
CREATE INDEX idx_inquiry_status_created ON inquiry(status, created DESC);
CREATE INDEX idx_inquiry_assigned_status ON inquiry(assigned_to, status);
CREATE INDEX idx_inquiry_execution_status ON inquiry(execution, status);
CREATE INDEX idx_inquiry_response_gin ON inquiry USING GIN (response);
-- Trigger
CREATE TRIGGER update_inquiry_updated
BEFORE UPDATE ON inquiry
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE inquiry IS 'Inquiries enable human-in-the-loop workflows with async user interactions';
COMMENT ON COLUMN inquiry.execution IS 'Execution that is waiting on this inquiry';
COMMENT ON COLUMN inquiry.prompt IS 'Question or prompt text for the user';
COMMENT ON COLUMN inquiry.response_schema IS 'JSON schema defining expected response format';
COMMENT ON COLUMN inquiry.assigned_to IS 'Identity who should respond to this inquiry';
COMMENT ON COLUMN inquiry.status IS 'Current inquiry lifecycle status';
COMMENT ON COLUMN inquiry.response IS 'User response data';
COMMENT ON COLUMN inquiry.timeout_at IS 'When this inquiry expires';
COMMENT ON COLUMN inquiry.responded_at IS 'When the response was received';
-- ============================================================================

View File

@@ -0,0 +1,147 @@
-- Migration: Workflow System
-- Description: Creates workflow_definition and workflow_execution tables (workflow_task_execution consolidated into execution.workflow_task JSONB)
-- Version: 20250101000007
-- ============================================================================
-- WORKFLOW DEFINITION TABLE
-- ============================================================================
-- Parsed workflow specs; one row per unique ref. The version column is
-- informational -- ref alone is UNIQUE, so re-registering a ref replaces the
-- prior version's row rather than versioning it.
CREATE TABLE workflow_definition (
id BIGSERIAL PRIMARY KEY,
ref VARCHAR(255) NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref VARCHAR(255) NOT NULL,
label VARCHAR(255) NOT NULL,
description TEXT,
version VARCHAR(50) NOT NULL,
param_schema JSONB,
out_schema JSONB,
definition JSONB NOT NULL,
tags TEXT[] DEFAULT '{}',
enabled BOOLEAN DEFAULT true NOT NULL,
created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);
-- Indexes
CREATE INDEX idx_workflow_def_pack ON workflow_definition(pack);
CREATE INDEX idx_workflow_def_enabled ON workflow_definition(enabled);
-- NOTE(review): redundant -- the UNIQUE constraint on ref already creates an index
CREATE INDEX idx_workflow_def_ref ON workflow_definition(ref);
CREATE INDEX idx_workflow_def_tags ON workflow_definition USING gin(tags);
-- Trigger
CREATE TRIGGER update_workflow_definition_updated
BEFORE UPDATE ON workflow_definition
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE workflow_definition IS 'Stores workflow definitions (YAML parsed to JSON)';
COMMENT ON COLUMN workflow_definition.ref IS 'Unique workflow reference (e.g., pack_name.workflow_name)';
COMMENT ON COLUMN workflow_definition.definition IS 'Complete workflow specification including tasks, variables, and transitions';
COMMENT ON COLUMN workflow_definition.param_schema IS 'JSON schema for workflow input parameters';
COMMENT ON COLUMN workflow_definition.out_schema IS 'JSON schema for workflow output';
-- WORKFLOW EXECUTION TABLE
-- ============================================================================
CREATE TABLE workflow_execution (
id BIGSERIAL PRIMARY KEY,
execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
workflow_def BIGINT NOT NULL REFERENCES workflow_definition(id),
current_tasks TEXT[] DEFAULT '{}',
completed_tasks TEXT[] DEFAULT '{}',
failed_tasks TEXT[] DEFAULT '{}',
skipped_tasks TEXT[] DEFAULT '{}',
variables JSONB DEFAULT '{}',
task_graph JSONB NOT NULL,
status execution_status_enum NOT NULL DEFAULT 'requested',
error_message TEXT,
paused BOOLEAN DEFAULT false NOT NULL,
pause_reason TEXT,
created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);
-- Indexes
CREATE INDEX idx_workflow_exec_execution ON workflow_execution(execution);
CREATE INDEX idx_workflow_exec_workflow_def ON workflow_execution(workflow_def);
CREATE INDEX idx_workflow_exec_status ON workflow_execution(status);
CREATE INDEX idx_workflow_exec_paused ON workflow_execution(paused) WHERE paused = true;
-- Trigger
CREATE TRIGGER update_workflow_execution_updated
BEFORE UPDATE ON workflow_execution
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE workflow_execution IS 'Runtime state tracking for workflow executions';
COMMENT ON COLUMN workflow_execution.variables IS 'Workflow-scoped variables, updated via publish directives';
COMMENT ON COLUMN workflow_execution.task_graph IS 'Execution graph with dependencies and transitions';
COMMENT ON COLUMN workflow_execution.current_tasks IS 'Array of task names currently executing';
COMMENT ON COLUMN workflow_execution.paused IS 'True if workflow execution is paused (can be resumed)';
-- ============================================================================
-- MODIFY ACTION TABLE - Add Workflow Support
-- ============================================================================
ALTER TABLE action
ADD COLUMN is_workflow BOOLEAN DEFAULT false NOT NULL,
ADD COLUMN workflow_def BIGINT REFERENCES workflow_definition(id) ON DELETE CASCADE;
CREATE INDEX idx_action_is_workflow ON action(is_workflow) WHERE is_workflow = true;
CREATE INDEX idx_action_workflow_def ON action(workflow_def);
COMMENT ON COLUMN action.is_workflow IS 'True if this action is a workflow (composable action graph)';
COMMENT ON COLUMN action.workflow_def IS 'Reference to workflow definition if is_workflow=true';
-- ============================================================================
-- ADD FOREIGN KEY CONSTRAINT FOR EXECUTION.WORKFLOW_DEF
-- ============================================================================
ALTER TABLE execution
ADD CONSTRAINT execution_workflow_def_fkey
FOREIGN KEY (workflow_def) REFERENCES workflow_definition(id) ON DELETE CASCADE;
-- ============================================================================
-- WORKFLOW VIEWS
-- ============================================================================
-- Summary of each workflow execution joined to its definition metadata.
-- FIX: array_length() returns NULL (not 0) for empty arrays in PostgreSQL,
-- so the task counts are wrapped in COALESCE to report 0 when a list is empty.
CREATE VIEW workflow_execution_summary AS
SELECT
    we.id,
    we.execution,
    wd.ref as workflow_ref,
    wd.label as workflow_label,
    wd.version as workflow_version,
    we.status,
    we.paused,
    COALESCE(array_length(we.current_tasks, 1), 0) as current_task_count,
    COALESCE(array_length(we.completed_tasks, 1), 0) as completed_task_count,
    COALESCE(array_length(we.failed_tasks, 1), 0) as failed_task_count,
    COALESCE(array_length(we.skipped_tasks, 1), 0) as skipped_task_count,
    we.error_message,
    we.created,
    we.updated
FROM workflow_execution we
JOIN workflow_definition wd ON we.workflow_def = wd.id;
COMMENT ON VIEW workflow_execution_summary IS 'Summary view of workflow executions with task counts';
-- One row per workflow definition with its linked action record (if any);
-- LEFT JOIN keeps definitions that have no corresponding action yet.
CREATE VIEW workflow_action_link AS
SELECT
    wd.id as workflow_def_id,
    wd.ref as workflow_ref,
    wd.label,
    wd.version,
    wd.enabled,
    a.id as action_id,
    a.ref as action_ref,
    a.pack as pack_id,
    a.pack_ref
FROM workflow_definition wd
LEFT JOIN action a ON a.workflow_def = wd.id AND a.is_workflow = true;
COMMENT ON VIEW workflow_action_link IS 'Links workflow definitions to their corresponding action records';

View File

@@ -0,0 +1,75 @@
-- Migration: Supporting Tables and Indexes
-- Description: Creates notification and artifact tables plus performance optimization indexes
-- Version: 20250101000005
-- ============================================================================
-- NOTIFICATION TABLE
-- ============================================================================
-- Outbox-style table for real-time change events; an AFTER INSERT trigger
-- (defined below) broadcasts each row over pg_notify on its channel.
CREATE TABLE notification (
id BIGSERIAL PRIMARY KEY,
channel TEXT NOT NULL,
entity_type TEXT NOT NULL,
entity TEXT NOT NULL,
activity TEXT NOT NULL,
state notification_status_enum NOT NULL DEFAULT 'created',
content JSONB,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_notification_channel ON notification(channel);
CREATE INDEX idx_notification_entity_type ON notification(entity_type);
CREATE INDEX idx_notification_entity ON notification(entity);
CREATE INDEX idx_notification_state ON notification(state);
CREATE INDEX idx_notification_created ON notification(created DESC);
CREATE INDEX idx_notification_channel_state ON notification(channel, state);
CREATE INDEX idx_notification_entity_type_entity ON notification(entity_type, entity);
CREATE INDEX idx_notification_state_created ON notification(state, created DESC);
CREATE INDEX idx_notification_content_gin ON notification USING GIN (content);
-- Trigger
CREATE TRIGGER update_notification_updated
BEFORE UPDATE ON notification
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Function for pg_notify on notification insert
-- Broadcasts every new notification row over LISTEN/NOTIFY on the row's own
-- channel; the payload carries id, entity_type, entity, and activity so
-- listeners can fetch details without polling.
CREATE OR REPLACE FUNCTION notify_on_insert()
RETURNS TRIGGER AS $$
BEGIN
    -- Build the JSON payload inline and publish on the dynamic channel
    PERFORM pg_notify(
        NEW.channel,
        json_build_object(
            'id', NEW.id,
            'entity_type', NEW.entity_type,
            'entity', NEW.entity,
            'activity', NEW.activity
        )::text
    );
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to send pg_notify on notification insert
CREATE TRIGGER notify_on_notification_insert
    AFTER INSERT ON notification
    FOR EACH ROW
    EXECUTE FUNCTION notify_on_insert();
-- Comments
COMMENT ON TABLE notification IS 'System notifications about entity changes for real-time updates';
COMMENT ON COLUMN notification.channel IS 'Notification channel (typically table name)';
COMMENT ON COLUMN notification.entity_type IS 'Type of entity (table name)';
COMMENT ON COLUMN notification.entity IS 'Entity identifier (typically ID or ref)';
COMMENT ON COLUMN notification.activity IS 'Activity type (e.g., "created", "updated", "completed")';
COMMENT ON COLUMN notification.state IS 'Processing state of notification';
COMMENT ON COLUMN notification.content IS 'Optional notification payload data';
-- ============================================================================

View File

@@ -0,0 +1,200 @@
-- Migration: Keys and Artifacts
-- Description: Creates key table for secrets management and artifact table for execution outputs
-- Version: 20250101000009
-- ============================================================================
-- KEY TABLE
-- ============================================================================
CREATE TABLE key (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL UNIQUE,
owner_type owner_type_enum NOT NULL,
owner TEXT,
owner_identity BIGINT REFERENCES identity(id),
owner_pack BIGINT REFERENCES pack(id),
owner_pack_ref TEXT,
owner_action BIGINT, -- Forward reference to action table
owner_action_ref TEXT,
owner_sensor BIGINT, -- Forward reference to sensor table
owner_sensor_ref TEXT,
name TEXT NOT NULL,
encrypted BOOLEAN NOT NULL,
encryption_key_hash TEXT,
value TEXT NOT NULL,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints
CONSTRAINT key_ref_lowercase CHECK (ref = LOWER(ref)),
CONSTRAINT key_ref_format CHECK (ref ~ '^([^.]+\.)?[^.]+$')
);
-- Unique index on owner_type, owner, name
CREATE UNIQUE INDEX idx_key_unique ON key(owner_type, owner, name);
-- Indexes
CREATE INDEX idx_key_ref ON key(ref);
CREATE INDEX idx_key_owner_type ON key(owner_type);
CREATE INDEX idx_key_owner_identity ON key(owner_identity);
CREATE INDEX idx_key_owner_pack ON key(owner_pack);
CREATE INDEX idx_key_owner_action ON key(owner_action);
CREATE INDEX idx_key_owner_sensor ON key(owner_sensor);
CREATE INDEX idx_key_created ON key(created DESC);
CREATE INDEX idx_key_owner_type_owner ON key(owner_type, owner);
CREATE INDEX idx_key_owner_identity_name ON key(owner_identity, name);
CREATE INDEX idx_key_owner_pack_name ON key(owner_pack, name);
-- Function to validate and set owner fields
-- Enforces the polymorphic-owner contract on key rows: 'system' keys carry no
-- owner FK at all, while every other owner_type must carry exactly one FK that
-- matches the declared type. Also derives the denormalized key.owner column.
CREATE OR REPLACE FUNCTION validate_key_owner()
RETURNS TRIGGER AS $$
DECLARE
    populated INTEGER;
BEGIN
    -- How many of the four owner FK columns are populated on this row
    populated := (NEW.owner_identity IS NOT NULL)::int
               + (NEW.owner_pack IS NOT NULL)::int
               + (NEW.owner_action IS NOT NULL)::int
               + (NEW.owner_sensor IS NOT NULL)::int;
    IF NEW.owner_type = 'system' THEN
        -- System-scoped keys must not point at any specific owner
        IF populated > 0 THEN
            RAISE EXCEPTION 'System owner cannot have specific owner fields set';
        END IF;
        NEW.owner := 'system';
    ELSIF populated != 1 THEN
        RAISE EXCEPTION 'Exactly one owner field must be set for owner_type %', NEW.owner_type;
    ELSE
        -- Exactly one FK is set; it must be the one matching owner_type
        CASE NEW.owner_type
            WHEN 'identity' THEN
                IF NEW.owner_identity IS NULL THEN
                    RAISE EXCEPTION 'owner_identity must be set for owner_type identity';
                END IF;
                NEW.owner := NEW.owner_identity::TEXT;
            WHEN 'pack' THEN
                IF NEW.owner_pack IS NULL THEN
                    RAISE EXCEPTION 'owner_pack must be set for owner_type pack';
                END IF;
                NEW.owner := NEW.owner_pack::TEXT;
            WHEN 'action' THEN
                IF NEW.owner_action IS NULL THEN
                    RAISE EXCEPTION 'owner_action must be set for owner_type action';
                END IF;
                NEW.owner := NEW.owner_action::TEXT;
            WHEN 'sensor' THEN
                IF NEW.owner_sensor IS NULL THEN
                    RAISE EXCEPTION 'owner_sensor must be set for owner_type sensor';
                END IF;
                NEW.owner := NEW.owner_sensor::TEXT;
            ELSE
                -- Preserve the original's silent fall-through for any other value
                NULL;
        END CASE;
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to validate owner fields
CREATE TRIGGER validate_key_owner_trigger
    BEFORE INSERT OR UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION validate_key_owner();
-- Trigger for updated timestamp
CREATE TRIGGER update_key_updated
    BEFORE UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE key IS 'Keys store configuration values and secrets with ownership scoping';
COMMENT ON COLUMN key.ref IS 'Unique key reference (format: [owner.]name)';
COMMENT ON COLUMN key.owner_type IS 'Type of owner (system, identity, pack, action, sensor)';
COMMENT ON COLUMN key.owner IS 'Owner identifier (auto-populated by trigger)';
COMMENT ON COLUMN key.owner_identity IS 'Identity owner (if owner_type=identity)';
COMMENT ON COLUMN key.owner_pack IS 'Pack owner (if owner_type=pack)';
COMMENT ON COLUMN key.owner_pack_ref IS 'Pack reference for owner_pack';
COMMENT ON COLUMN key.owner_action IS 'Action owner (if owner_type=action)';
COMMENT ON COLUMN key.owner_sensor IS 'Sensor owner (if owner_type=sensor)';
COMMENT ON COLUMN key.name IS 'Key name within owner scope';
COMMENT ON COLUMN key.encrypted IS 'Whether the value is encrypted';
COMMENT ON COLUMN key.encryption_key_hash IS 'Hash of encryption key used';
COMMENT ON COLUMN key.value IS 'The actual value (encrypted if encrypted=true)';
-- Add foreign key constraints for action and sensor references
-- (deferred: action and sensor tables are created after the key columns above)
ALTER TABLE key
ADD CONSTRAINT key_owner_action_fkey
FOREIGN KEY (owner_action) REFERENCES action(id) ON DELETE CASCADE;
ALTER TABLE key
ADD CONSTRAINT key_owner_sensor_fkey
FOREIGN KEY (owner_sensor) REFERENCES sensor(id) ON DELETE CASCADE;
-- ============================================================================
-- ARTIFACT TABLE
-- ============================================================================
-- Tracks files/outputs produced by executions.
-- NOTE(review): ref is intentionally NOT unique here (idx_artifact_ref below
-- is non-unique), which is consistent with the 'versions' retention policy
-- allowing multiple rows per ref -- confirm that is the intent.
CREATE TABLE artifact (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL,
scope owner_type_enum NOT NULL DEFAULT 'system',
owner TEXT NOT NULL DEFAULT '',
type artifact_type_enum NOT NULL,
retention_policy artifact_retention_enum NOT NULL DEFAULT 'versions',
retention_limit INTEGER NOT NULL DEFAULT 1,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_artifact_ref ON artifact(ref);
CREATE INDEX idx_artifact_scope ON artifact(scope);
CREATE INDEX idx_artifact_owner ON artifact(owner);
CREATE INDEX idx_artifact_type ON artifact(type);
CREATE INDEX idx_artifact_created ON artifact(created DESC);
CREATE INDEX idx_artifact_scope_owner ON artifact(scope, owner);
CREATE INDEX idx_artifact_type_created ON artifact(type, created DESC);
-- Trigger
CREATE TRIGGER update_artifact_updated
BEFORE UPDATE ON artifact
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE artifact IS 'Artifacts track files, logs, and outputs from executions';
COMMENT ON COLUMN artifact.ref IS 'Artifact reference/path';
COMMENT ON COLUMN artifact.scope IS 'Owner type (system, identity, pack, action, sensor)';
COMMENT ON COLUMN artifact.owner IS 'Owner identifier';
COMMENT ON COLUMN artifact.type IS 'Artifact type (file, url, progress, etc.)';
COMMENT ON COLUMN artifact.retention_policy IS 'How to retain artifacts (versions, days, hours, minutes)';
COMMENT ON COLUMN artifact.retention_limit IS 'Numeric limit for retention policy';
-- ============================================================================
-- QUEUE_STATS TABLE
-- ============================================================================
-- NOTE(review): a migration in this set (version 20250101000005 variant)
-- also creates queue_stats, and neither uses IF NOT EXISTS -- if both files
-- ship, the second CREATE will fail. Confirm only one copy survives the rework.
CREATE TABLE queue_stats (
action_id BIGINT PRIMARY KEY REFERENCES action(id) ON DELETE CASCADE,
queue_length INTEGER NOT NULL DEFAULT 0,
active_count INTEGER NOT NULL DEFAULT 0,
max_concurrent INTEGER NOT NULL DEFAULT 1,
oldest_enqueued_at TIMESTAMPTZ,
total_enqueued BIGINT NOT NULL DEFAULT 0,
total_completed BIGINT NOT NULL DEFAULT 0,
last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_queue_stats_last_updated ON queue_stats(last_updated);
-- Comments
COMMENT ON TABLE queue_stats IS 'Real-time queue statistics for action execution ordering';
COMMENT ON COLUMN queue_stats.action_id IS 'Foreign key to action table';
COMMENT ON COLUMN queue_stats.queue_length IS 'Number of executions waiting in queue';
COMMENT ON COLUMN queue_stats.active_count IS 'Number of currently running executions';
COMMENT ON COLUMN queue_stats.max_concurrent IS 'Maximum concurrent executions allowed';
COMMENT ON COLUMN queue_stats.oldest_enqueued_at IS 'Timestamp of oldest queued execution (NULL if queue empty)';
COMMENT ON COLUMN queue_stats.total_enqueued IS 'Total executions enqueued since queue creation';
COMMENT ON COLUMN queue_stats.total_completed IS 'Total executions completed since queue creation';
COMMENT ON COLUMN queue_stats.last_updated IS 'Timestamp of last statistics update';

View File

@@ -48,21 +48,6 @@ Template variables:
-- PART 2: Create pack_environment table
-- ============================================================================
-- PackEnvironmentStatus enum
DO $$ BEGIN
CREATE TYPE pack_environment_status_enum AS ENUM (
'pending', -- Environment creation scheduled
'installing', -- Currently installing
'ready', -- Environment ready for use
'failed', -- Installation failed
'outdated' -- Pack updated, environment needs rebuild
);
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
COMMENT ON TYPE pack_environment_status_enum IS 'Status of pack runtime environment installation';
-- Pack environment table
CREATE TABLE IF NOT EXISTS pack_environment (
id BIGSERIAL PRIMARY KEY,

View File

@@ -0,0 +1,104 @@
-- Migration: LISTEN/NOTIFY Triggers
-- Description: Consolidated PostgreSQL LISTEN/NOTIFY triggers for real-time event notifications
-- Version: 20250101000013
-- ============================================================================
-- EXECUTION CHANGE NOTIFICATION
-- ============================================================================
-- Function to notify on execution changes
-- FIX: the payload previously referenced NEW.ref, NEW.rule and NEW.rule_ref,
-- none of which exist on the execution table (migration 20250101000006), so
-- every INSERT/UPDATE on execution would abort with a "record new has no
-- field" error. The payload now uses only columns the table defines.
CREATE OR REPLACE FUNCTION notify_execution_change()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    payload := json_build_object(
        'id', NEW.id,
        'action', NEW.action,
        'action_ref', NEW.action_ref,
        'status', NEW.status,
        'parent', NEW.parent,
        'enforcement', NEW.enforcement,
        'created', NEW.created,
        'updated', NEW.updated
    );
    PERFORM pg_notify('execution_change', payload::text);
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger on execution table
CREATE TRIGGER execution_change_notify
    AFTER INSERT OR UPDATE ON execution
    FOR EACH ROW
    EXECUTE FUNCTION notify_execution_change();
COMMENT ON FUNCTION notify_execution_change() IS 'Sends execution change notifications via PostgreSQL LISTEN/NOTIFY';
-- ============================================================================
-- EVENT CREATION NOTIFICATION
-- ============================================================================
-- Function to notify on event creation
-- NOTE(review): the event table is not defined in this file -- verify it
-- actually has ref, trigger_ref, rule and rule_ref columns, otherwise this
-- trigger will abort every event INSERT with a missing-field error (the
-- sibling execution trigger had exactly that bug).
CREATE OR REPLACE FUNCTION notify_event_created()
RETURNS TRIGGER AS $$
DECLARE
payload JSON;
BEGIN
payload := json_build_object(
'id', NEW.id,
'ref', NEW.ref,
'trigger_ref', NEW.trigger_ref,
'rule', NEW.rule,
'rule_ref', NEW.rule_ref,
'created', NEW.created
);
PERFORM pg_notify('event_created', payload::text);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger on event table
CREATE TRIGGER event_created_notify
AFTER INSERT ON event
FOR EACH ROW
EXECUTE FUNCTION notify_event_created();
COMMENT ON FUNCTION notify_event_created() IS 'Sends event creation notifications via PostgreSQL LISTEN/NOTIFY';
-- ============================================================================
-- ENFORCEMENT CHANGE NOTIFICATION
-- ============================================================================
-- Function to notify on enforcement changes
-- NOTE(review): the enforcement table is not defined in this file -- verify
-- it has ref and rule_ref columns before shipping; a missing field here
-- aborts the triggering INSERT/UPDATE.
CREATE OR REPLACE FUNCTION notify_enforcement_change()
RETURNS TRIGGER AS $$
DECLARE
payload JSON;
BEGIN
payload := json_build_object(
'id', NEW.id,
'ref', NEW.ref,
'rule_ref', NEW.rule_ref,
'status', NEW.status,
'created', NEW.created,
'updated', NEW.updated
);
PERFORM pg_notify('enforcement_change', payload::text);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger on enforcement table
CREATE TRIGGER enforcement_change_notify
AFTER INSERT OR UPDATE ON enforcement
FOR EACH ROW
EXECUTE FUNCTION notify_enforcement_change();
COMMENT ON FUNCTION notify_enforcement_change() IS 'Sends enforcement change notifications via PostgreSQL LISTEN/NOTIFY';

View File

@@ -1,43 +0,0 @@
-- Migration: Add NOTIFY trigger for execution updates
-- This enables real-time SSE streaming of execution status changes
-- NOTE(review): superseded by the consolidated LISTEN/NOTIFY migration
-- (20250101000013), which redefines notify_execution_change() on a different
-- channel -- ensure only one of the two migrations remains in the set.
-- Function to send notifications on execution changes
CREATE OR REPLACE FUNCTION notify_execution_change()
RETURNS TRIGGER AS $$
DECLARE
payload JSONB;
BEGIN
-- Build JSON payload with execution details
payload := jsonb_build_object(
'entity_type', 'execution',
'entity_id', NEW.id,
'timestamp', NOW(),
'data', jsonb_build_object(
'id', NEW.id,
'status', NEW.status,
'action_id', NEW.action,
'action_ref', NEW.action_ref,
'result', NEW.result,
'created', NEW.created,
'updated', NEW.updated
)
);
-- Send notification to the attune_notifications channel
PERFORM pg_notify('attune_notifications', payload::text);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to send pg_notify on execution insert or update
-- (trigger intentionally shares the function's name; legal in PostgreSQL)
CREATE TRIGGER notify_execution_change
AFTER INSERT OR UPDATE ON execution
FOR EACH ROW
EXECUTE FUNCTION notify_execution_change();
-- Add comment
COMMENT ON FUNCTION notify_execution_change() IS
'Sends PostgreSQL NOTIFY for execution changes to enable real-time SSE streaming';
COMMENT ON TRIGGER notify_execution_change ON execution IS
'Broadcasts execution changes via pg_notify for SSE clients';

View File

@@ -1,245 +0,0 @@
-- Migration: Add Webhook Support to Triggers
-- Date: 2026-01-20
-- Description: Adds webhook capabilities to the trigger system, allowing any trigger
-- to be webhook-enabled with a unique webhook key for external integrations.
-- Add webhook columns to trigger table
ALTER TABLE trigger
ADD COLUMN IF NOT EXISTS webhook_enabled BOOLEAN NOT NULL DEFAULT FALSE,
ADD COLUMN IF NOT EXISTS webhook_key VARCHAR(64) UNIQUE,
ADD COLUMN IF NOT EXISTS webhook_secret VARCHAR(128);
-- Add comments for documentation
COMMENT ON COLUMN trigger.webhook_enabled IS
'Whether webhooks are enabled for this trigger. When enabled, external systems can POST to the webhook URL to create events.';
COMMENT ON COLUMN trigger.webhook_key IS
'Unique webhook key used in the webhook URL. Format: wh_[32 alphanumeric chars]. Acts as a bearer token for webhook authentication.';
COMMENT ON COLUMN trigger.webhook_secret IS
'Optional secret for HMAC signature verification. When set, webhook requests must include a valid X-Webhook-Signature header.';
-- Create index for fast webhook key lookup
-- (partial: most triggers never get a webhook key)
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_key
ON trigger(webhook_key)
WHERE webhook_key IS NOT NULL;
-- Create index for querying webhook-enabled triggers
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_enabled
ON trigger(webhook_enabled)
WHERE webhook_enabled = TRUE;
-- Add webhook-related metadata tracking to events
-- Events use the 'config' JSONB column for metadata
-- We'll add indexes to efficiently query webhook-sourced events
-- Create index for webhook-sourced events (using config column)
-- (expression indexes on config->>'source' / config->>'webhook_key')
CREATE INDEX IF NOT EXISTS idx_event_webhook_source
ON event((config->>'source'))
WHERE (config->>'source') = 'webhook';
-- Create index for webhook key lookup in event config
CREATE INDEX IF NOT EXISTS idx_event_webhook_key
ON event((config->>'webhook_key'))
WHERE config->>'webhook_key' IS NOT NULL;
-- Function to generate webhook key
-- FIX: the previous implementation base64-encoded 24 random bytes and then
-- stripped '/', '+' and '=' characters, which could leave fewer than 32
-- characters and so produce keys shorter than the documented wh_[32 chars]
-- format. Hex-encoding 16 random bytes (pgcrypto gen_random_bytes) yields
-- exactly 32 lowercase alphanumeric characters, every time.
CREATE OR REPLACE FUNCTION generate_webhook_key()
RETURNS VARCHAR(64) AS $$
DECLARE
    key_prefix VARCHAR(3) := 'wh_';
    random_suffix VARCHAR(32);
    new_key VARCHAR(64);
    max_attempts INT := 10;
    attempt INT := 0;
BEGIN
    LOOP
        -- Exactly 32 lowercase hex characters
        random_suffix := encode(gen_random_bytes(16), 'hex');
        -- Construct full key
        new_key := key_prefix || random_suffix;
        -- Return unless we collided with an existing key (128 random bits,
        -- so a collision is astronomically unlikely)
        IF NOT EXISTS (SELECT 1 FROM trigger WHERE webhook_key = new_key) THEN
            RETURN new_key;
        END IF;
        -- Increment attempt counter
        attempt := attempt + 1;
        IF attempt >= max_attempts THEN
            RAISE EXCEPTION 'Failed to generate unique webhook key after % attempts', max_attempts;
        END IF;
    END LOOP;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION generate_webhook_key() IS
'Generates a unique webhook key with format wh_[32 alphanumeric chars]. Ensures uniqueness by checking existing keys.';
-- Function to enable webhooks for a trigger
-- Turns webhook delivery on for the given trigger. A trigger that already has
-- a webhook key keeps it; otherwise a fresh one is minted. Raises if the
-- trigger id does not exist. Returns the enabled flag, key, and URL path.
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(64),
    webhook_url TEXT
) AS $$
DECLARE
    v_key VARCHAR(64);
BEGIN
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    -- Reuse the existing key when present, otherwise mint a new one
    SELECT COALESCE(t.webhook_key, generate_webhook_key())
      INTO v_key
      FROM trigger t
     WHERE t.id = p_trigger_id;
    UPDATE trigger
       SET webhook_enabled = TRUE,
           webhook_key = v_key,
           updated = NOW()
     WHERE id = p_trigger_id;
    -- Only the URL path is returned; the base URL is configured elsewhere
    RETURN QUERY
    SELECT TRUE::BOOLEAN,
           v_key,
           '/api/v1/webhooks/' || v_key;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION enable_trigger_webhook(BIGINT) IS
'Enables webhooks for a trigger. Generates a new webhook key if one does not exist. Returns webhook details.';
-- Function to disable webhooks for a trigger
-- Turns webhook delivery off for the given trigger; the webhook key is
-- deliberately retained for audit purposes. Raises if the id is unknown.
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    -- Flip the flag off; FOUND tells us whether the trigger existed at all
    UPDATE trigger
       SET webhook_enabled = FALSE,
           updated = NOW()
     WHERE id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
'Disables webhooks for a trigger. Webhook key is retained for audit purposes.';
-- Function to regenerate webhook key for a trigger
-- Rotates the trigger's webhook key; the previous key (if any) stops working
-- the moment the UPDATE commits. Raises if the trigger id is unknown.
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(64),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_prev VARCHAR(64);
    v_next VARCHAR(64);
BEGIN
    -- Capture the key being replaced; FOUND doubles as the existence check
    SELECT t.webhook_key
      INTO v_prev
      FROM trigger t
     WHERE t.id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    v_next := generate_webhook_key();
    UPDATE trigger
       SET webhook_key = v_next,
           updated = NOW()
     WHERE id = p_trigger_id;
    RETURN QUERY
    SELECT v_next,
           (v_prev IS NOT NULL)::BOOLEAN;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
'Regenerates the webhook key for a trigger. The old key is immediately revoked.';
-- Create a view for webhook statistics
-- NOTE(review): webhook_created_at is aliased from t.created, i.e. the
-- trigger row's creation time, not the moment webhooks were enabled --
-- consider renaming or tracking enablement time separately.
CREATE OR REPLACE VIEW webhook_stats AS
SELECT
t.id as trigger_id,
t.ref as trigger_ref,
t.webhook_enabled,
t.webhook_key,
t.created as webhook_created_at,
COUNT(e.id) as total_events,
MAX(e.created) as last_event_at,
MIN(e.created) as first_event_at
FROM trigger t
LEFT JOIN event e ON
e.trigger = t.id
AND (e.config->>'source') = 'webhook'
WHERE t.webhook_enabled = TRUE
GROUP BY t.id, t.ref, t.webhook_enabled, t.webhook_key, t.created;
COMMENT ON VIEW webhook_stats IS
'Statistics for webhook-enabled triggers including event counts and timestamps.';
-- Grant permissions (adjust as needed for your RBAC setup)
-- GRANT SELECT ON webhook_stats TO attune_api;
-- GRANT EXECUTE ON FUNCTION generate_webhook_key() TO attune_api;
-- GRANT EXECUTE ON FUNCTION enable_trigger_webhook(BIGINT) TO attune_api;
-- GRANT EXECUTE ON FUNCTION disable_trigger_webhook(BIGINT) TO attune_api;
-- GRANT EXECUTE ON FUNCTION regenerate_trigger_webhook_key(BIGINT) TO attune_api;
-- Trigger update timestamp is already handled by existing triggers
-- No need to add it again
-- Migration complete messages
DO $$
BEGIN
RAISE NOTICE 'Webhook support migration completed successfully';
RAISE NOTICE 'Webhook-enabled triggers can now receive events via POST /api/v1/webhooks/:webhook_key';
END $$;

View File

@@ -1,362 +0,0 @@
-- Migration: Add advanced webhook features (HMAC, rate limiting, IP whitelist)
-- Created: 2026-01-20
-- Phase: 3 - Advanced Security Features

-- Add every advanced webhook configuration column in one statement.
-- ADD COLUMN IF NOT EXISTS keeps the migration safe to re-run.
ALTER TABLE trigger
    ADD COLUMN IF NOT EXISTS webhook_hmac_enabled BOOLEAN NOT NULL DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS webhook_hmac_secret VARCHAR(128),
    ADD COLUMN IF NOT EXISTS webhook_hmac_algorithm VARCHAR(32) DEFAULT 'sha256',
    ADD COLUMN IF NOT EXISTS webhook_rate_limit_enabled BOOLEAN NOT NULL DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS webhook_rate_limit_requests INTEGER DEFAULT 100,
    ADD COLUMN IF NOT EXISTS webhook_rate_limit_window_seconds INTEGER DEFAULT 60,
    ADD COLUMN IF NOT EXISTS webhook_ip_whitelist_enabled BOOLEAN NOT NULL DEFAULT FALSE,
    ADD COLUMN IF NOT EXISTS webhook_ip_whitelist TEXT[],
    ADD COLUMN IF NOT EXISTS webhook_payload_size_limit_kb INTEGER DEFAULT 1024;

COMMENT ON COLUMN trigger.webhook_hmac_enabled IS 'Whether HMAC signature verification is required';
COMMENT ON COLUMN trigger.webhook_hmac_secret IS 'Secret key for HMAC signature verification';
COMMENT ON COLUMN trigger.webhook_hmac_algorithm IS 'HMAC algorithm (sha256, sha512, etc.)';
COMMENT ON COLUMN trigger.webhook_rate_limit_enabled IS 'Whether rate limiting is enabled';
COMMENT ON COLUMN trigger.webhook_rate_limit_requests IS 'Max requests allowed per window';
COMMENT ON COLUMN trigger.webhook_rate_limit_window_seconds IS 'Rate limit time window in seconds';
COMMENT ON COLUMN trigger.webhook_ip_whitelist_enabled IS 'Whether IP whitelist is enabled';
COMMENT ON COLUMN trigger.webhook_ip_whitelist IS 'Array of allowed IP addresses/CIDR blocks';
COMMENT ON COLUMN trigger.webhook_payload_size_limit_kb IS 'Maximum webhook payload size in KB';
-- Create webhook event log table for auditing and analytics.
-- Each row records one inbound webhook request, whether or not it produced
-- an event (event_id is NULL for rejected requests).
CREATE TABLE IF NOT EXISTS webhook_event_log (
    id BIGSERIAL PRIMARY KEY,
    trigger_id BIGINT NOT NULL REFERENCES trigger(id) ON DELETE CASCADE,
    trigger_ref VARCHAR(255) NOT NULL,
    webhook_key VARCHAR(64) NOT NULL,
    event_id BIGINT REFERENCES event(id) ON DELETE SET NULL,
    source_ip INET,
    user_agent TEXT,
    payload_size_bytes INTEGER,
    headers JSONB,
    status_code INTEGER NOT NULL,
    error_message TEXT,
    processing_time_ms INTEGER,
    hmac_verified BOOLEAN,
    rate_limited BOOLEAN DEFAULT FALSE,
    ip_allowed BOOLEAN,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- IF NOT EXISTS on the indexes matches the idempotent CREATE TABLE above;
-- without it, re-running this migration aborts on a duplicate index.
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_trigger_id ON webhook_event_log(trigger_id);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_webhook_key ON webhook_event_log(webhook_key);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_created ON webhook_event_log(created DESC);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_status ON webhook_event_log(status_code);
CREATE INDEX IF NOT EXISTS idx_webhook_event_log_source_ip ON webhook_event_log(source_ip);

COMMENT ON TABLE webhook_event_log IS 'Audit log of all webhook requests';
COMMENT ON COLUMN webhook_event_log.status_code IS 'HTTP status code returned (200, 400, 403, 429, etc.)';
COMMENT ON COLUMN webhook_event_log.error_message IS 'Error message if request failed';
COMMENT ON COLUMN webhook_event_log.processing_time_ms IS 'Time taken to process webhook in milliseconds';
COMMENT ON COLUMN webhook_event_log.hmac_verified IS 'Whether HMAC signature was verified successfully';
COMMENT ON COLUMN webhook_event_log.rate_limited IS 'Whether request was rate limited';
COMMENT ON COLUMN webhook_event_log.ip_allowed IS 'Whether source IP was in whitelist (if enabled)';

-- Create webhook rate limit tracking table. One row per (key, window);
-- check_webhook_rate_limit() upserts into this table on every request.
CREATE TABLE IF NOT EXISTS webhook_rate_limit (
    id BIGSERIAL PRIMARY KEY,
    webhook_key VARCHAR(64) NOT NULL,
    window_start TIMESTAMPTZ NOT NULL,
    request_count INTEGER NOT NULL DEFAULT 1,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(webhook_key, window_start)
);

-- NOTE(review): idx_webhook_rate_limit_key duplicates the leading column of
-- the UNIQUE(webhook_key, window_start) constraint's index; it is kept for
-- compatibility but could be dropped in a follow-up migration.
CREATE INDEX IF NOT EXISTS idx_webhook_rate_limit_key ON webhook_rate_limit(webhook_key);
CREATE INDEX IF NOT EXISTS idx_webhook_rate_limit_window ON webhook_rate_limit(window_start DESC);

COMMENT ON TABLE webhook_rate_limit IS 'Tracks webhook request counts for rate limiting';
COMMENT ON COLUMN webhook_rate_limit.window_start IS 'Start of the rate limit time window';
COMMENT ON COLUMN webhook_rate_limit.request_count IS 'Number of requests in this window';
-- Function to generate HMAC secret
CREATE OR REPLACE FUNCTION generate_webhook_hmac_secret()
RETURNS VARCHAR(128) AS $$
BEGIN
    -- 64 random bytes from pgcrypto, hex-encoded -> exactly 128 characters.
    RETURN encode(gen_random_bytes(64), 'hex');
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION generate_webhook_hmac_secret() IS 'Generate a secure random HMAC secret';
-- Function to enable HMAC for a trigger.
-- Generates a fresh secret, stores it with the chosen algorithm, and returns
-- the resulting configuration. Raises if the trigger is missing, if webhooks
-- are not yet enabled on it, or if the algorithm is unsupported.
CREATE OR REPLACE FUNCTION enable_trigger_webhook_hmac(
    p_trigger_id BIGINT,
    p_algorithm VARCHAR(32) DEFAULT 'sha256'
)
RETURNS TABLE(
    webhook_hmac_enabled BOOLEAN,
    webhook_hmac_secret VARCHAR(128),
    webhook_hmac_algorithm VARCHAR(32)
) AS $$
DECLARE
    v_enabled BOOLEAN;
    v_new_secret VARCHAR(128);
BEGIN
    -- Webhooks must already be switched on for this trigger.
    SELECT t.webhook_enabled
      INTO v_enabled
      FROM trigger t
     WHERE t.id = p_trigger_id;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % not found', p_trigger_id;
    END IF;

    IF NOT v_enabled THEN
        RAISE EXCEPTION 'Webhooks must be enabled before enabling HMAC verification';
    END IF;

    -- Only a fixed set of digest algorithms is accepted.
    IF p_algorithm NOT IN ('sha256', 'sha512', 'sha1') THEN
        RAISE EXCEPTION 'Invalid HMAC algorithm. Supported: sha256, sha512, sha1';
    END IF;

    v_new_secret := generate_webhook_hmac_secret();

    UPDATE trigger
       SET webhook_hmac_enabled = TRUE,
           webhook_hmac_secret = v_new_secret,
           webhook_hmac_algorithm = p_algorithm,
           updated = NOW()
     WHERE id = p_trigger_id;

    RETURN QUERY
    SELECT TRUE, v_new_secret, p_algorithm;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION enable_trigger_webhook_hmac(BIGINT, VARCHAR) IS 'Enable HMAC signature verification for a trigger';
-- Function to disable HMAC for a trigger.
-- Clears the stored secret as well so a disabled trigger retains no key
-- material; the algorithm column is deliberately left untouched.
CREATE OR REPLACE FUNCTION disable_trigger_webhook_hmac(p_trigger_id BIGINT)
RETURNS BOOLEAN AS $$
BEGIN
    UPDATE trigger
       SET webhook_hmac_enabled = FALSE,
           webhook_hmac_secret = NULL,
           updated = NOW()
     WHERE id = p_trigger_id;

    -- TRUE when a row was updated, FALSE when the trigger id was unknown.
    RETURN FOUND;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION disable_trigger_webhook_hmac(BIGINT) IS 'Disable HMAC verification for a trigger';
-- Function to configure rate limiting.
-- Stores the limiter settings on the trigger and echoes them back.
-- Raises on out-of-range values or an unknown trigger id.
CREATE OR REPLACE FUNCTION configure_trigger_webhook_rate_limit(
    p_trigger_id BIGINT,
    p_enabled BOOLEAN,
    p_requests INTEGER DEFAULT 100,
    p_window_seconds INTEGER DEFAULT 60
)
RETURNS TABLE(
    rate_limit_enabled BOOLEAN,
    rate_limit_requests INTEGER,
    rate_limit_window_seconds INTEGER
) AS $$
BEGIN
    -- Bounds are checked even when disabling, so the stored settings are
    -- always sane if the limiter is later switched back on.
    IF p_requests NOT BETWEEN 1 AND 10000 THEN
        RAISE EXCEPTION 'Rate limit requests must be between 1 and 10000';
    END IF;
    IF p_window_seconds NOT BETWEEN 1 AND 3600 THEN
        RAISE EXCEPTION 'Rate limit window must be between 1 and 3600 seconds';
    END IF;

    UPDATE trigger
       SET webhook_rate_limit_enabled = p_enabled,
           webhook_rate_limit_requests = p_requests,
           webhook_rate_limit_window_seconds = p_window_seconds,
           updated = NOW()
     WHERE id = p_trigger_id;

    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % not found', p_trigger_id;
    END IF;

    RETURN QUERY
    SELECT p_enabled, p_requests, p_window_seconds;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION configure_trigger_webhook_rate_limit(BIGINT, BOOLEAN, INTEGER, INTEGER) IS 'Configure rate limiting for a trigger webhook';
-- Function to configure IP whitelist.
-- Validates every entry up front: check_webhook_ip_whitelist() casts each
-- entry to INET at request time, so a malformed value stored here would make
-- every webhook request for the trigger fail instead of being denied cleanly.
CREATE OR REPLACE FUNCTION configure_trigger_webhook_ip_whitelist(
    p_trigger_id BIGINT,
    p_enabled BOOLEAN,
    p_ip_list TEXT[] DEFAULT ARRAY[]::TEXT[]
)
RETURNS TABLE(
    ip_whitelist_enabled BOOLEAN,
    ip_whitelist TEXT[]
) AS $$
DECLARE
    v_entry TEXT;
BEGIN
    -- Reject malformed IP/CIDR entries before persisting anything.
    FOREACH v_entry IN ARRAY COALESCE(p_ip_list, ARRAY[]::TEXT[])
    LOOP
        BEGIN
            PERFORM v_entry::INET;
        EXCEPTION
            WHEN OTHERS THEN
                RAISE EXCEPTION 'Invalid IP/CIDR whitelist entry: %', v_entry;
        END;
    END LOOP;
    -- Update trigger
    UPDATE trigger
    SET
        webhook_ip_whitelist_enabled = p_enabled,
        webhook_ip_whitelist = p_ip_list,
        updated = NOW()
    WHERE id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % not found', p_trigger_id;
    END IF;
    -- Return configuration
    RETURN QUERY
    SELECT
        p_enabled AS ip_whitelist_enabled,
        p_ip_list AS ip_whitelist;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION configure_trigger_webhook_ip_whitelist(BIGINT, BOOLEAN, TEXT[]) IS 'Configure IP whitelist for a trigger webhook';
-- Function to check rate limit (call before processing webhook).
-- Counts the request against the current fixed window and returns TRUE while
-- the caller is still within p_max_requests for that window.
CREATE OR REPLACE FUNCTION check_webhook_rate_limit(
    p_webhook_key VARCHAR(64),
    p_max_requests INTEGER,
    p_window_seconds INTEGER
)
RETURNS BOOLEAN AS $$
DECLARE
    v_window_start TIMESTAMPTZ;
    v_request_count INTEGER;
BEGIN
    -- Fixed-window boundary computed directly from the epoch:
    -- floor(epoch / window) * window. The previous minute-anchored formula
    -- (date_trunc('minute') minus an epoch remainder) produced wrong
    -- boundaries for windows that do not divide evenly into 60 seconds.
    v_window_start := to_timestamp(
        floor(EXTRACT(EPOCH FROM NOW()) / p_window_seconds) * p_window_seconds
    );
    -- Atomically create or bump the counter for this (key, window) pair.
    INSERT INTO webhook_rate_limit (webhook_key, window_start, request_count)
    VALUES (p_webhook_key, v_window_start, 1)
    ON CONFLICT (webhook_key, window_start)
    DO UPDATE SET
        request_count = webhook_rate_limit.request_count + 1,
        updated = NOW()
    RETURNING request_count INTO v_request_count;
    -- Opportunistic cleanup of stale windows (older than 1 hour).
    DELETE FROM webhook_rate_limit
    WHERE window_start < NOW() - INTERVAL '1 hour';
    -- TRUE if within limit, FALSE if exceeded.
    RETURN v_request_count <= p_max_requests;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION check_webhook_rate_limit(VARCHAR, INTEGER, INTEGER) IS 'Check if webhook request is within rate limit';
-- Function to check if IP is in whitelist (supports CIDR notation).
-- Fail-closed: returns FALSE for an empty/NULL whitelist and ignores entries
-- that cannot be parsed as INET rather than raising — previously a single
-- malformed entry made every request error out instead of being denied.
CREATE OR REPLACE FUNCTION check_webhook_ip_whitelist(
    p_source_ip INET,
    p_whitelist TEXT[]
)
RETURNS BOOLEAN AS $$
DECLARE
    v_allowed_cidr TEXT;
BEGIN
    -- If whitelist is empty, deny access.
    IF p_whitelist IS NULL OR array_length(p_whitelist, 1) IS NULL THEN
        RETURN FALSE;
    END IF;
    -- Check if source IP matches any entry in whitelist.
    FOREACH v_allowed_cidr IN ARRAY p_whitelist
    LOOP
        BEGIN
            -- <<= matches both single addresses and CIDR ranges.
            IF p_source_ip <<= v_allowed_cidr::INET THEN
                RETURN TRUE;
            END IF;
        EXCEPTION
            WHEN OTHERS THEN
                -- Malformed entry: skip it and keep scanning the rest.
                NULL;
        END;
    END LOOP;
    RETURN FALSE;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION check_webhook_ip_whitelist(INET, TEXT[]) IS 'Check if source IP is in whitelist (supports CIDR notation)';
-- View for webhook statistics.
-- One row per webhook-enabled trigger, joining the audit log to produce
-- request/outcome counters. DISTINCT inside each COUNT guards against row
-- multiplication from the LEFT JOIN. Triggers with no logged requests still
-- appear (counts 0, NULL timestamps/averages).
-- NOTE(review): this view exposes webhook_key; restrict SELECT on it to
-- trusted roles.
CREATE OR REPLACE VIEW webhook_stats_detailed AS
SELECT
    t.id AS trigger_id,
    t.ref AS trigger_ref,
    t.label AS trigger_label,
    t.webhook_enabled,
    t.webhook_key,
    t.webhook_hmac_enabled,
    t.webhook_rate_limit_enabled,
    t.webhook_rate_limit_requests,
    t.webhook_rate_limit_window_seconds,
    t.webhook_ip_whitelist_enabled,
    COUNT(DISTINCT wel.id) AS total_requests,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.status_code = 200) AS successful_requests,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.status_code >= 400) AS failed_requests,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.rate_limited = TRUE) AS rate_limited_requests,
    -- HMAC/IP counters only count failures while the corresponding feature is on.
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.hmac_verified = FALSE AND t.webhook_hmac_enabled = TRUE) AS hmac_failures,
    COUNT(DISTINCT wel.id) FILTER (WHERE wel.ip_allowed = FALSE AND t.webhook_ip_whitelist_enabled = TRUE) AS ip_blocked_requests,
    COUNT(DISTINCT wel.event_id) AS events_created,
    AVG(wel.processing_time_ms) AS avg_processing_time_ms,
    MAX(wel.created) AS last_request_at,
    -- t.created is the trigger's creation time, used as a proxy here.
    t.created AS webhook_enabled_at
FROM trigger t
LEFT JOIN webhook_event_log wel ON wel.trigger_id = t.id
WHERE t.webhook_enabled = TRUE
GROUP BY t.id, t.ref, t.label, t.webhook_enabled, t.webhook_key,
         t.webhook_hmac_enabled, t.webhook_rate_limit_enabled,
         t.webhook_rate_limit_requests, t.webhook_rate_limit_window_seconds,
         t.webhook_ip_whitelist_enabled, t.created;
COMMENT ON VIEW webhook_stats_detailed IS 'Detailed statistics for webhook-enabled triggers';
-- Grant permissions (adjust as needed for your security model).
-- Guarded by a role-existence check: test databases are provisioned without
-- the attune_api role (role creation is commented out in the base migration),
-- and an unconditional GRANT would abort the whole migration there.
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = 'attune_api') THEN
        GRANT SELECT, INSERT ON webhook_event_log TO attune_api;
        GRANT SELECT, INSERT, UPDATE, DELETE ON webhook_rate_limit TO attune_api;
        GRANT SELECT ON webhook_stats_detailed TO attune_api;
        GRANT USAGE, SELECT ON SEQUENCE webhook_event_log_id_seq TO attune_api;
        GRANT USAGE, SELECT ON SEQUENCE webhook_rate_limit_id_seq TO attune_api;
    END IF;
END $$;

View File

@@ -1,59 +0,0 @@
-- Migration: Pack Installation Metadata
-- Description: Tracks pack installation sources, checksums, and metadata
-- Created: 2026-01-22

-- Pack installation metadata table (one row per pack; see UNIQUE constraint).
CREATE TABLE IF NOT EXISTS pack_installation (
    id BIGSERIAL PRIMARY KEY,
    pack_id BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    -- Installation source information
    source_type VARCHAR(50) NOT NULL CHECK (source_type IN ('git', 'archive', 'local_directory', 'local_archive', 'registry')),
    source_url TEXT,
    source_ref TEXT, -- git ref (branch/tag/commit) or registry version
    -- Verification
    checksum VARCHAR(64), -- SHA256 checksum of installed pack
    checksum_verified BOOLEAN DEFAULT FALSE,
    -- Installation metadata
    installed_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    installed_by BIGINT REFERENCES identity(id) ON DELETE SET NULL,
    installation_method VARCHAR(50) DEFAULT 'manual' CHECK (installation_method IN ('manual', 'api', 'cli', 'auto')),
    -- Storage information
    storage_path TEXT NOT NULL,
    -- Additional metadata
    meta JSONB DEFAULT '{}'::jsonb,
    created TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    -- Constraints
    CONSTRAINT pack_installation_unique_pack UNIQUE (pack_id)
);

-- Indexes. IF NOT EXISTS keeps re-runs idempotent, matching the table above.
-- NOTE(review): idx_pack_installation_pack_id duplicates the index backing
-- UNIQUE (pack_id); kept for compatibility, removable in a follow-up.
CREATE INDEX IF NOT EXISTS idx_pack_installation_pack_id ON pack_installation(pack_id);
CREATE INDEX IF NOT EXISTS idx_pack_installation_source_type ON pack_installation(source_type);
CREATE INDEX IF NOT EXISTS idx_pack_installation_installed_at ON pack_installation(installed_at);
CREATE INDEX IF NOT EXISTS idx_pack_installation_installed_by ON pack_installation(installed_by);

-- Trigger for updated timestamp. DROP first so re-running the migration does
-- not fail on a duplicate trigger.
DROP TRIGGER IF EXISTS pack_installation_updated_trigger ON pack_installation;
CREATE TRIGGER pack_installation_updated_trigger
    BEFORE UPDATE ON pack_installation
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE pack_installation IS 'Tracks pack installation metadata including source, checksum, and storage location';
COMMENT ON COLUMN pack_installation.source_type IS 'Type of installation source (git, archive, local_directory, local_archive, registry)';
COMMENT ON COLUMN pack_installation.source_url IS 'URL or path of the installation source';
COMMENT ON COLUMN pack_installation.source_ref IS 'Git reference (branch/tag/commit) or registry version';
COMMENT ON COLUMN pack_installation.checksum IS 'SHA256 checksum of the installed pack contents';
COMMENT ON COLUMN pack_installation.checksum_verified IS 'Whether the checksum was verified during installation';
COMMENT ON COLUMN pack_installation.installed_by IS 'Identity that installed the pack';
COMMENT ON COLUMN pack_installation.installation_method IS 'Method used to install (manual, api, cli, auto)';
COMMENT ON COLUMN pack_installation.storage_path IS 'File system path where pack is stored';
COMMENT ON COLUMN pack_installation.meta IS 'Additional installation metadata (dependencies resolved, warnings, etc.)';

View File

@@ -1,249 +0,0 @@
-- Migration: Consolidate Webhook Configuration
-- Date: 2026-01-27
-- Description: Consolidates multiple webhook_* columns into a single webhook_config JSONB column
--              for cleaner schema and better flexibility. Keeps webhook_enabled and webhook_key
--              as separate columns for indexing and quick filtering.

-- Step 1: Add new webhook_config column
ALTER TABLE trigger
ADD COLUMN IF NOT EXISTS webhook_config JSONB DEFAULT '{}'::jsonb;

COMMENT ON COLUMN trigger.webhook_config IS
    'Webhook configuration as JSON. Contains settings like secret, HMAC config, rate limits, IP whitelist, etc.';

-- Step 2: Migrate existing data to webhook_config.
-- jsonb_build_object maps SQL NULL to JSON null, so nullable columns can be
-- passed directly — the previous COALESCE(col, NULL) wrappers were no-ops.
-- to_jsonb() converts the TEXT[] whitelist in one call; COALESCE supplies []
-- when the column itself is NULL.
UPDATE trigger
SET webhook_config = jsonb_build_object(
    'secret', webhook_secret,
    'hmac', jsonb_build_object(
        'enabled', COALESCE(webhook_hmac_enabled, false),
        'secret', webhook_hmac_secret,
        'algorithm', COALESCE(webhook_hmac_algorithm, 'sha256')
    ),
    'rate_limit', jsonb_build_object(
        'enabled', COALESCE(webhook_rate_limit_enabled, false),
        'requests', webhook_rate_limit_requests,
        'window_seconds', webhook_rate_limit_window_seconds
    ),
    'ip_whitelist', jsonb_build_object(
        'enabled', COALESCE(webhook_ip_whitelist_enabled, false),
        'ips', COALESCE(to_jsonb(webhook_ip_whitelist), '[]'::jsonb)
    ),
    'payload_size_limit_kb', webhook_payload_size_limit_kb
)
WHERE webhook_enabled = true OR webhook_key IS NOT NULL;
-- Step 3: Drop the views that still reference the soon-to-be-removed columns.
DROP VIEW IF EXISTS webhook_stats;
DROP VIEW IF EXISTS webhook_stats_detailed;

-- Step 4: Remove any named NOT NULL constraints on the doomed columns.
ALTER TABLE trigger
    DROP CONSTRAINT IF EXISTS trigger_webhook_hmac_enabled_not_null,
    DROP CONSTRAINT IF EXISTS trigger_webhook_rate_limit_enabled_not_null,
    DROP CONSTRAINT IF EXISTS trigger_webhook_ip_whitelist_enabled_not_null;

-- Step 5: Drop the old per-setting webhook columns; webhook_enabled and
-- webhook_key stay as real columns for indexing and fast filtering.
ALTER TABLE trigger
    DROP COLUMN IF EXISTS webhook_secret,
    DROP COLUMN IF EXISTS webhook_hmac_enabled,
    DROP COLUMN IF EXISTS webhook_hmac_secret,
    DROP COLUMN IF EXISTS webhook_hmac_algorithm,
    DROP COLUMN IF EXISTS webhook_rate_limit_enabled,
    DROP COLUMN IF EXISTS webhook_rate_limit_requests,
    DROP COLUMN IF EXISTS webhook_rate_limit_window_seconds,
    DROP COLUMN IF EXISTS webhook_ip_whitelist_enabled,
    DROP COLUMN IF EXISTS webhook_ip_whitelist,
    DROP COLUMN IF EXISTS webhook_payload_size_limit_kb;

-- Step 6: Drop old indexes that referenced removed columns.
DROP INDEX IF EXISTS idx_trigger_webhook_enabled;

-- Step 7: Recreate the partial index on webhook_enabled.
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_enabled
    ON trigger(webhook_enabled)
    WHERE webhook_enabled = TRUE;

-- Index on webhook_key already exists from previous migration
-- CREATE INDEX IF NOT EXISTS idx_trigger_webhook_key ON trigger(webhook_key) WHERE webhook_key IS NOT NULL;

-- Step 8: GIN index so JSONB containment queries on webhook_config are cheap.
CREATE INDEX IF NOT EXISTS idx_trigger_webhook_config
    ON trigger USING gin(webhook_config)
    WHERE webhook_config IS NOT NULL AND webhook_config != '{}'::jsonb;

-- Step 9: Recreate the stats view against the new schema (adds webhook_config).
CREATE OR REPLACE VIEW webhook_stats AS
SELECT
    t.id AS trigger_id,
    t.ref AS trigger_ref,
    t.webhook_enabled,
    t.webhook_key,
    t.webhook_config,
    t.created AS webhook_created_at,
    COUNT(e.id) AS total_events,
    MAX(e.created) AS last_event_at,
    MIN(e.created) AS first_event_at
FROM trigger t
LEFT JOIN event e
    ON e.trigger = t.id
    AND (e.config->>'source') = 'webhook'
WHERE t.webhook_enabled = TRUE
GROUP BY t.id, t.ref, t.webhook_enabled, t.webhook_key, t.webhook_config, t.created;

COMMENT ON VIEW webhook_stats IS
    'Statistics for webhook-enabled triggers including event counts and timestamps.';
-- Step 10: Update helper functions to work with webhook_config

-- Update enable_trigger_webhook to work with new schema.
-- Reuses an existing key when present, otherwise generates one, and stores
-- the caller's config with per-section defaults filled in.
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT,
    p_config JSONB DEFAULT '{}'::jsonb
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(64),
    webhook_url TEXT,
    webhook_config JSONB
) AS $$
DECLARE
    v_key VARCHAR(64);
    v_base_url TEXT;
    v_config JSONB;
BEGIN
    -- Fetch the existing key and verify the trigger exists in one query
    -- (previously a separate EXISTS probe ran before this SELECT).
    SELECT t.webhook_key INTO v_key
    FROM trigger t
    WHERE t.id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    -- Keep an existing key; only generate when none was ever assigned.
    IF v_key IS NULL THEN
        v_key := generate_webhook_key();
    END IF;
    -- Merge defaults per section: defaults || user-object, so a partial
    -- section (e.g. hmac with only "enabled") still picks up the remaining
    -- defaults. The previous shallow merge dropped nested defaults such as
    -- hmac.algorithm whenever a partial section was supplied.
    v_config := p_config || jsonb_build_object(
        'hmac', jsonb_build_object('enabled', false, 'algorithm', 'sha256')
                || COALESCE(p_config->'hmac', '{}'::jsonb),
        'rate_limit', jsonb_build_object('enabled', false)
                || COALESCE(p_config->'rate_limit', '{}'::jsonb),
        'ip_whitelist', jsonb_build_object('enabled', false, 'ips', '[]'::jsonb)
                || COALESCE(p_config->'ip_whitelist', '{}'::jsonb)
    );
    -- Update trigger to enable webhooks
    UPDATE trigger
    SET
        webhook_enabled = TRUE,
        webhook_key = v_key,
        webhook_config = v_config,
        updated = NOW()
    WHERE id = p_trigger_id;
    -- Construct webhook URL (path only; host is added by the API layer).
    v_base_url := '/api/v1/webhooks/' || v_key;
    -- Return result
    RETURN QUERY
    SELECT
        TRUE::BOOLEAN as webhook_enabled,
        v_key as webhook_key,
        v_base_url as webhook_url,
        v_config as webhook_config;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION enable_trigger_webhook(BIGINT, JSONB) IS
    'Enables webhooks for a trigger with optional configuration. Generates a new webhook key if one does not exist. Returns webhook details.';
-- Update disable_trigger_webhook.
-- Single UPDATE + FOUND instead of an EXISTS probe followed by the UPDATE:
-- one round trip to the table, same exception for an unknown trigger id.
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    -- Update trigger to disable webhooks.
    -- Note: We keep the webhook_key and webhook_config for audit purposes.
    UPDATE trigger
    SET
        webhook_enabled = FALSE,
        updated = NOW()
    WHERE id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
    'Disables webhooks for a trigger. Webhook key and config are retained for audit purposes.';
-- Update regenerate_trigger_webhook_key.
-- The existence check and the old-key fetch are combined into one SELECT
-- (previously two separate queries hit the trigger table).
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(64),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_old_key VARCHAR(64);
    v_new_key VARCHAR(64);
BEGIN
    -- Fetch the current key; FOUND doubles as the existence check.
    SELECT t.webhook_key INTO v_old_key
    FROM trigger t
    WHERE t.id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    -- Generate new key
    v_new_key := generate_webhook_key();
    -- Update trigger with new key; the old key stops working immediately.
    UPDATE trigger
    SET
        webhook_key = v_new_key,
        updated = NOW()
    WHERE id = p_trigger_id;
    -- Return result
    RETURN QUERY
    SELECT
        v_new_key as webhook_key,
        (v_old_key IS NOT NULL)::BOOLEAN as previous_key_revoked;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
    'Regenerates the webhook key for a trigger. The old key is immediately revoked.';
-- Drop old webhook-specific functions that are no longer needed: HMAC
-- settings now live inside trigger.webhook_config, so the dedicated
-- enable/disable helpers have no columns left to operate on.
DROP FUNCTION IF EXISTS enable_trigger_webhook_hmac(BIGINT, VARCHAR);
DROP FUNCTION IF EXISTS disable_trigger_webhook_hmac(BIGINT);
-- Migration complete messages (NOTICE only; visible in migration logs).
DO $$
BEGIN
    RAISE NOTICE 'Webhook configuration consolidation completed successfully';
    RAISE NOTICE 'Webhook settings now stored in webhook_config JSONB column';
    RAISE NOTICE 'Kept separate columns: webhook_enabled (indexed), webhook_key (indexed)';
END $$;

View File

@@ -1,97 +0,0 @@
-- Migration: Consolidate workflow_task_execution into execution table
-- Description: Adds workflow_task JSONB column to execution table and migrates data from workflow_task_execution
-- Version: 20260127212500

-- ============================================================================
-- STEP 1: Add workflow_task column to execution table
-- ============================================================================
-- IF NOT EXISTS keeps this migration idempotent, matching the convention used
-- by the other migrations in this series.
ALTER TABLE execution ADD COLUMN IF NOT EXISTS workflow_task JSONB;

COMMENT ON COLUMN execution.workflow_task IS 'Workflow task metadata (only populated for workflow task executions)';

-- ============================================================================
-- STEP 2: Migrate existing workflow_task_execution data to execution.workflow_task
-- ============================================================================
-- Timestamps are serialized as ISO-8601 text; to_char(NULL, ...) yields NULL,
-- so absent timestamps become JSON null rather than failing.
UPDATE execution e
SET workflow_task = jsonb_build_object(
    'workflow_execution', wte.workflow_execution,
    'task_name', wte.task_name,
    'task_index', wte.task_index,
    'task_batch', wte.task_batch,
    'retry_count', wte.retry_count,
    'max_retries', wte.max_retries,
    'next_retry_at', to_char(wte.next_retry_at, 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"'),
    'timeout_seconds', wte.timeout_seconds,
    'timed_out', wte.timed_out,
    'duration_ms', wte.duration_ms,
    'started_at', to_char(wte.started_at, 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"'),
    'completed_at', to_char(wte.completed_at, 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"')
)
FROM workflow_task_execution wte
WHERE e.id = wte.execution;

-- ============================================================================
-- STEP 3: Create indexes for efficient JSONB queries
-- ============================================================================
-- All partial (workflow_task IS NOT NULL) so non-workflow executions cost
-- nothing; IF NOT EXISTS for idempotent re-runs.

-- General GIN index for JSONB operations
CREATE INDEX IF NOT EXISTS idx_execution_workflow_task_gin ON execution USING GIN (workflow_task)
    WHERE workflow_task IS NOT NULL;

-- Specific index for workflow_execution lookups (most common query)
CREATE INDEX IF NOT EXISTS idx_execution_workflow_execution ON execution ((workflow_task->>'workflow_execution'))
    WHERE workflow_task IS NOT NULL;

-- Index for task name lookups
CREATE INDEX IF NOT EXISTS idx_execution_task_name ON execution ((workflow_task->>'task_name'))
    WHERE workflow_task IS NOT NULL;

-- Index for retry queries (text comparison avoids the IMMUTABLE-cast issue)
CREATE INDEX IF NOT EXISTS idx_execution_pending_retries ON execution ((workflow_task->>'next_retry_at'))
    WHERE workflow_task IS NOT NULL
    AND workflow_task->>'next_retry_at' IS NOT NULL;

-- Index for timeout queries
CREATE INDEX IF NOT EXISTS idx_execution_timed_out ON execution ((workflow_task->>'timed_out'))
    WHERE workflow_task IS NOT NULL;

-- Index for workflow task status queries (combined with execution status)
CREATE INDEX IF NOT EXISTS idx_execution_workflow_status ON execution (status, (workflow_task->>'workflow_execution'))
    WHERE workflow_task IS NOT NULL;

-- ============================================================================
-- STEP 4: Drop the workflow_task_execution table
-- ============================================================================
-- CASCADE also removes any dependent objects (views, FKs) — destructive and
-- intentional: the data now lives in execution.workflow_task.
DROP TABLE IF EXISTS workflow_task_execution CASCADE;

-- ============================================================================
-- STEP 5: Update comments and documentation
-- ============================================================================
COMMENT ON INDEX idx_execution_workflow_task_gin IS 'GIN index for general JSONB queries on workflow_task';
COMMENT ON INDEX idx_execution_workflow_execution IS 'Index for finding tasks by workflow execution ID';
COMMENT ON INDEX idx_execution_task_name IS 'Index for finding tasks by name';
COMMENT ON INDEX idx_execution_pending_retries IS 'Index for finding tasks pending retry';
COMMENT ON INDEX idx_execution_timed_out IS 'Index for finding timed out tasks';
COMMENT ON INDEX idx_execution_workflow_status IS 'Index for workflow task status queries';

-- ============================================================================
-- VERIFICATION QUERIES (for manual testing)
-- ============================================================================
-- Verify migration: Count workflow task executions
-- SELECT COUNT(*) FROM execution WHERE workflow_task IS NOT NULL;
-- Verify indexes exist
-- SELECT indexname, indexdef FROM pg_indexes WHERE tablename = 'execution' AND indexname LIKE '%workflow%';
-- Test workflow task queries
-- SELECT * FROM execution WHERE workflow_task->>'workflow_execution' = '1';
-- SELECT * FROM execution WHERE workflow_task->>'task_name' = 'example_task';
-- SELECT * FROM execution WHERE (workflow_task->>'timed_out')::boolean = true;

View File

@@ -1,42 +0,0 @@
-- Migration: Fix webhook function overload issue
-- Description: Drop the old enable_trigger_webhook(bigint) signature to resolve
--              "function is not unique" error when the newer version with config
--              parameter is present.
-- Date: 2026-01-29

-- Drop the old 1-arg signature from 20260120000001_add_webhook_support.sql.
-- With both enable_trigger_webhook(BIGINT) and
-- enable_trigger_webhook(BIGINT, JSONB DEFAULT ...) present, a one-argument
-- call is ambiguous; only the JSONB version should remain.
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT);

-- NOTE: disable_trigger_webhook(BIGINT) and regenerate_trigger_webhook_key(BIGINT)
-- are intentionally NOT dropped. Each has only ever had this single signature
-- (no overload exists), so dropping them would remove the functions entirely
-- rather than resolve any ambiguity.

-- Verify functions exist after cleanup
DO $$
BEGIN
    -- Check that enable_trigger_webhook exists with correct signature
    -- Use current_schema() to work with both production (attune) and test schemas
    IF NOT EXISTS (
        SELECT 1 FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = current_schema()
        AND p.proname = 'enable_trigger_webhook'
        AND pg_get_function_arguments(p.oid) LIKE '%jsonb%'
    ) THEN
        RAISE EXCEPTION 'enable_trigger_webhook function with JSONB config not found after migration';
    END IF;
    -- Also confirm the single-signature helpers survived this migration.
    IF NOT EXISTS (
        SELECT 1 FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = current_schema()
        AND p.proname = 'disable_trigger_webhook'
    ) THEN
        RAISE EXCEPTION 'disable_trigger_webhook function not found after migration';
    END IF;
    IF NOT EXISTS (
        SELECT 1 FROM pg_proc p
        JOIN pg_namespace n ON p.pronamespace = n.oid
        WHERE n.nspname = current_schema()
        AND p.proname = 'regenerate_trigger_webhook_key'
    ) THEN
        RAISE EXCEPTION 'regenerate_trigger_webhook_key function not found after migration';
    END IF;
END $$;

View File

@@ -1,43 +0,0 @@
-- Migration: Add is_adhoc flag to action, rule, and trigger tables
-- Description: Distinguishes between pack-installed components (is_adhoc=false) and manually created ad-hoc components (is_adhoc=true)
-- Version: 20260129140130
-- ============================================================================
-- Add is_adhoc column to action table
-- ============================================================================
ALTER TABLE action ADD COLUMN is_adhoc BOOLEAN DEFAULT false NOT NULL;
-- Index for filtering ad-hoc actions
CREATE INDEX idx_action_is_adhoc ON action(is_adhoc) WHERE is_adhoc = true;
COMMENT ON COLUMN action.is_adhoc IS 'True if action was manually created (ad-hoc), false if installed from pack';
-- ============================================================================
-- Add is_adhoc column to rule table
-- ============================================================================
ALTER TABLE rule ADD COLUMN is_adhoc BOOLEAN DEFAULT false NOT NULL;
-- Index for filtering ad-hoc rules
CREATE INDEX idx_rule_is_adhoc ON rule(is_adhoc) WHERE is_adhoc = true;
COMMENT ON COLUMN rule.is_adhoc IS 'True if rule was manually created (ad-hoc), false if installed from pack';
-- ============================================================================
-- Add is_adhoc column to trigger table
-- ============================================================================
ALTER TABLE trigger ADD COLUMN is_adhoc BOOLEAN DEFAULT false NOT NULL;
-- Index for filtering ad-hoc triggers
CREATE INDEX idx_trigger_is_adhoc ON trigger(is_adhoc) WHERE is_adhoc = true;
COMMENT ON COLUMN trigger.is_adhoc IS 'True if trigger was manually created (ad-hoc), false if installed from pack';
-- ============================================================================
-- Notes
-- ============================================================================
-- - Default is false (not ad-hoc) for backward compatibility with existing pack-installed components
-- - Ad-hoc components are eligible for deletion by users with appropriate permissions
-- - Pack-installed components (is_adhoc=false) should not be deletable directly, only via pack uninstallation

View File

@@ -1,43 +0,0 @@
-- Migration: Add NOTIFY trigger for event creation
-- This enables real-time notifications when events are created
-- Function to send notifications on event creation.
-- Publishes the freshly inserted event row on the 'event_created' channel
-- so listeners receive it without polling.
CREATE OR REPLACE FUNCTION notify_event_created()
RETURNS TRIGGER AS $$
BEGIN
    -- Build and send the payload in a single expression; no local needed.
    PERFORM pg_notify(
        'event_created',
        jsonb_build_object(
            'entity_type', 'event',
            'entity_id', NEW.id,
            'timestamp', NOW(),
            'data', jsonb_build_object(
                'id', NEW.id,
                'trigger', NEW.trigger,
                'trigger_ref', NEW.trigger_ref,
                'source', NEW.source,
                'source_ref', NEW.source_ref,
                'payload', NEW.payload,
                'created', NEW.created
            )
        )::text
    );
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to send pg_notify on event insert.
-- Fix: drop first so the migration can be re-run safely (CREATE TRIGGER has
-- no IF NOT EXISTS form); same pattern the execution-notification migration
-- in this series uses.
DROP TRIGGER IF EXISTS notify_event_created ON event;
CREATE TRIGGER notify_event_created
AFTER INSERT ON event
FOR EACH ROW
EXECUTE FUNCTION notify_event_created();
-- Add comments
COMMENT ON FUNCTION notify_event_created() IS
'Sends PostgreSQL NOTIFY for event creation to enable real-time notifications';
COMMENT ON TRIGGER notify_event_created ON event IS
'Broadcasts event creation via pg_notify for real-time updates';

View File

@@ -1,61 +0,0 @@
-- Migration: Add rule association to event table
-- This enables events to be directly associated with specific rules,
-- improving query performance and enabling rule-specific event filtering.
-- Add rule and rule_ref columns to event table.
-- IF NOT EXISTS keeps the migration idempotent (re-runnable after a partial
-- failure), consistent with other migrations in this series.
ALTER TABLE event
ADD COLUMN IF NOT EXISTS rule BIGINT,
ADD COLUMN IF NOT EXISTS rule_ref TEXT;
-- Add foreign key constraint. ADD CONSTRAINT has no IF NOT EXISTS form, so
-- the duplicate_object guard (same pattern used for enum creation elsewhere
-- in this file) makes re-runs a no-op instead of an error.
DO $$ BEGIN
ALTER TABLE event
ADD CONSTRAINT event_rule_fkey
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
-- Add indexes for efficient querying
CREATE INDEX IF NOT EXISTS idx_event_rule ON event(rule);
CREATE INDEX IF NOT EXISTS idx_event_rule_ref ON event(rule_ref);
CREATE INDEX IF NOT EXISTS idx_event_rule_created ON event(rule, created DESC);
CREATE INDEX IF NOT EXISTS idx_event_trigger_rule ON event(trigger, rule);
-- Add comments
COMMENT ON COLUMN event.rule IS
'Optional reference to the specific rule that generated this event. Used by sensors that emit events for specific rule instances (e.g., timer sensors with multiple interval rules).';
COMMENT ON COLUMN event.rule_ref IS
'Human-readable reference to the rule (e.g., "core.echo_every_second"). Denormalized for query convenience.';
-- Update the notify trigger function to include rule information if present.
-- Same contract as before: publish the new event row on 'event_created',
-- now carrying the optional rule / rule_ref association.
CREATE OR REPLACE FUNCTION notify_event_created()
RETURNS TRIGGER AS $$
BEGIN
    -- Payload is assembled inline; rule fields are NULL when unset, which
    -- jsonb_build_object serializes as JSON null.
    PERFORM pg_notify(
        'event_created',
        jsonb_build_object(
            'entity_type', 'event',
            'entity_id', NEW.id,
            'timestamp', NOW(),
            'data', jsonb_build_object(
                'id', NEW.id,
                'trigger', NEW.trigger,
                'trigger_ref', NEW.trigger_ref,
                'rule', NEW.rule,
                'rule_ref', NEW.rule_ref,
                'source', NEW.source,
                'source_ref', NEW.source_ref,
                'payload', NEW.payload,
                'created', NEW.created
            )
        )::text
    );
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Add comment on updated function
COMMENT ON FUNCTION notify_event_created() IS
'Sends PostgreSQL NOTIFY for event creation with optional rule association';

View File

@@ -1,32 +0,0 @@
-- Migration: Add Worker Role
-- Description: Adds worker_role field to distinguish between action workers and sensor workers
-- Version: 20260131000001
-- ============================================================================
-- WORKER ROLE ENUM
-- ============================================================================
-- Idempotent enum creation: duplicate_object is swallowed when the type
-- already exists, so the migration can be re-run.
DO $$ BEGIN
CREATE TYPE worker_role_enum AS ENUM ('action', 'sensor', 'hybrid');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
COMMENT ON TYPE worker_role_enum IS 'Worker role type: action (executes actions), sensor (monitors triggers), or hybrid (both)';
-- ============================================================================
-- ADD WORKER ROLE COLUMN
-- ============================================================================
-- NOT NULL DEFAULT 'action' means existing rows get 'action' automatically.
ALTER TABLE worker
ADD COLUMN IF NOT EXISTS worker_role worker_role_enum NOT NULL DEFAULT 'action';
-- Create index for efficient role-based queries
CREATE INDEX IF NOT EXISTS idx_worker_role ON worker(worker_role);
CREATE INDEX IF NOT EXISTS idx_worker_role_status ON worker(worker_role, status);
-- Comments
COMMENT ON COLUMN worker.worker_role IS 'Worker role: action (executes actions), sensor (monitors for triggers), or hybrid (both capabilities)';
-- Update existing workers to be action workers (backward compatibility).
-- On a fresh run this is a no-op (the column above is NOT NULL with a
-- default); it only matters if a worker_role column pre-existed as nullable,
-- in which case ADD COLUMN IF NOT EXISTS skips and NULLs may remain.
-- NOTE(review): in that pre-existing case the column is still left nullable
-- afterwards — confirm whether a SET NOT NULL step is also wanted.
UPDATE worker SET worker_role = 'action' WHERE worker_role IS NULL;

View File

@@ -1,204 +0,0 @@
-- Migration: Add Sensor Runtimes
-- Description: Adds common sensor runtimes (Python, Node.js, Shell, Native) with verification metadata
-- Version: 20260202000001
-- ============================================================================
-- SENSOR RUNTIMES
-- ============================================================================
-- Insert Python sensor runtime.
-- Upsert keyed on ref: on conflict only distributions/installation/updated
-- are refreshed, so an operator-edited description is preserved.
-- NOTE(review): the pack subselect yields NULL if no pack with ref = 'core'
-- exists, silently inserting a runtime with no pack id — confirm the core
-- pack is seeded before this migration runs.
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
'core.sensor.python',
(SELECT id FROM pack WHERE ref = 'core'),
'core',
'Python 3 sensor runtime with automatic environment management',
'sensor',
'Python',
jsonb_build_object(
'verification', jsonb_build_object(
'commands', jsonb_build_array(
jsonb_build_object(
'binary', 'python3',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'pattern', 'Python 3\.',
'priority', 1
),
jsonb_build_object(
'binary', 'python',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'pattern', 'Python 3\.',
'priority', 2
)
)
),
'min_version', '3.8',
'recommended_version', '3.11'
),
jsonb_build_object(
'package_managers', jsonb_build_array('pip', 'pipenv', 'poetry'),
'virtual_env_support', true
)
)
ON CONFLICT (ref) DO UPDATE SET
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW();
-- Insert Node.js sensor runtime (same upsert pattern as the Python runtime:
-- only distributions/installation/updated are refreshed on conflict).
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
'core.sensor.nodejs',
(SELECT id FROM pack WHERE ref = 'core'),
'core',
'Node.js sensor runtime for JavaScript-based sensors',
'sensor',
'Node.js',
jsonb_build_object(
'verification', jsonb_build_object(
'commands', jsonb_build_array(
jsonb_build_object(
'binary', 'node',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'pattern', 'v\d+\.\d+\.\d+',
'priority', 1
)
)
),
'min_version', '16.0.0',
'recommended_version', '20.0.0'
),
jsonb_build_object(
'package_managers', jsonb_build_array('npm', 'yarn', 'pnpm'),
'module_support', true
)
)
ON CONFLICT (ref) DO UPDATE SET
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW();
-- Insert Shell sensor runtime.
-- Both verification commands are marked optional and always_available is
-- true, so verification failure does not mark the runtime unavailable.
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
'core.sensor.shell',
(SELECT id FROM pack WHERE ref = 'core'),
'core',
'Shell (bash/sh) sensor runtime - always available',
'sensor',
'Shell',
jsonb_build_object(
'verification', jsonb_build_object(
'commands', jsonb_build_array(
jsonb_build_object(
'binary', 'sh',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'optional', true,
'priority', 1
),
jsonb_build_object(
'binary', 'bash',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'optional', true,
'priority', 2
)
),
'always_available', true
)
),
jsonb_build_object(
'interpreters', jsonb_build_array('sh', 'bash', 'dash'),
'portable', true
)
)
ON CONFLICT (ref) DO UPDATE SET
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW();
-- Insert Native sensor runtime (no verification commands: compiled sensors
-- are self-contained, so always_available/check_required short-circuit the
-- availability check).
INSERT INTO runtime (ref, pack, pack_ref, description, runtime_type, name, distributions, installation)
VALUES (
'core.sensor.native',
(SELECT id FROM pack WHERE ref = 'core'),
'core',
'Native compiled sensor runtime (Rust, Go, C, etc.) - always available',
'sensor',
'Native',
jsonb_build_object(
'verification', jsonb_build_object(
'always_available', true,
'check_required', false
),
'languages', jsonb_build_array('rust', 'go', 'c', 'c++')
),
jsonb_build_object(
'build_required', false,
'system_native', true
)
)
ON CONFLICT (ref) DO UPDATE SET
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW();
-- Update existing builtin sensor runtime with verification metadata.
-- This overwrites distributions/installation wholesale (jsonb_build_object
-- replaces the column value; it does not merge with existing keys).
UPDATE runtime
SET distributions = jsonb_build_object(
'verification', jsonb_build_object(
'always_available', true,
'check_required', false
),
'type', 'builtin'
),
installation = jsonb_build_object(
'method', 'builtin',
'included_with_service', true
),
updated = NOW()
WHERE ref = 'core.sensor.builtin';
-- Add comments
COMMENT ON COLUMN runtime.distributions IS 'Runtime distribution metadata including verification commands, version requirements, and capabilities';
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions including package managers and setup steps';
-- Create partial index for efficient runtime verification queries
CREATE INDEX IF NOT EXISTS idx_runtime_type_sensor ON runtime(runtime_type) WHERE runtime_type = 'sensor';
-- Verification metadata structure documentation
/*
VERIFICATION METADATA STRUCTURE:
distributions->verification = {
"commands": [ // Array of verification commands to try (in priority order)
{
"binary": "python3", // Binary name to execute
"args": ["--version"], // Arguments to pass
"exit_code": 0, // Expected exit code (0 = success)
"pattern": "Python 3\.", // Optional regex pattern to match in output
"priority": 1, // Lower = higher priority (try first)
"optional": false // If true, failure doesn't mean runtime unavailable
}
],
"always_available": false, // If true, skip verification (shell, native)
"check_required": true // If false, assume available without checking
}
USAGE EXAMPLE:
To verify Python runtime availability:
1. Query: SELECT distributions->'verification'->'commands' FROM runtime WHERE ref = 'core.sensor.python'
2. Parse commands array
3. Try each command in priority order
4. If any command succeeds with expected exit_code and matches pattern (if provided), runtime is available
5. If all commands fail, runtime is not available
For always_available runtimes (shell, native):
1. Check distributions->'verification'->'always_available'
2. If true, skip verification and report as available
*/

View File

@@ -1,351 +0,0 @@
-- Migration: Unify Runtimes (Remove runtime_type distinction)
-- Description: Removes the runtime_type field and consolidates sensor/action runtimes
-- into a single unified runtime system. Both sensors and actions use the
-- same binaries and verification logic, so the distinction is redundant.
-- Version: 20260203000001
-- ============================================================================
-- STEP 0: Drop constraints that prevent unified runtime format
-- ============================================================================
-- Drop NOT NULL constraint from runtime_type to allow inserting unified runtimes
ALTER TABLE runtime ALTER COLUMN runtime_type DROP NOT NULL;
-- Drop the runtime_ref_format constraint (expects pack.type.name, we want pack.name)
ALTER TABLE runtime DROP CONSTRAINT IF EXISTS runtime_ref_format;
-- Drop the runtime_ref_lowercase constraint
-- NOTE(review): the original comment said "will recreate after migration",
-- but nothing in this migration recreates it — confirm whether the
-- lowercase invariant is intentionally abandoned or a recreate step is missing.
ALTER TABLE runtime DROP CONSTRAINT IF EXISTS runtime_ref_lowercase;
-- ============================================================================
-- STEP 1: Consolidate duplicate runtimes
-- ============================================================================
-- Consolidate Python runtimes (merge action and sensor into unified Python runtime).
-- Order matters: insert/upsert the unified row, retarget references, then
-- delete the old rows — deleting first would break action/sensor FKs.
DO $$
DECLARE
v_pack_id BIGINT;
v_python_runtime_id BIGINT;
BEGIN
-- NOTE(review): v_pack_id stays NULL if no pack with ref = 'core' exists;
-- the insert below would then carry a NULL pack id — confirm seeding order.
SELECT id INTO v_pack_id FROM pack WHERE ref = 'core';
-- Insert or update unified Python runtime.
-- RETURNING ... INTO captures the surviving row's id in both the insert
-- and the ON CONFLICT DO UPDATE path.
INSERT INTO runtime (ref, pack, pack_ref, description, name, distributions, installation)
VALUES (
'core.python',
v_pack_id,
'core',
'Python 3 runtime for actions and sensors with automatic environment management',
'Python',
jsonb_build_object(
'verification', jsonb_build_object(
'commands', jsonb_build_array(
jsonb_build_object(
'binary', 'python3',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'pattern', 'Python 3\.',
'priority', 1
),
jsonb_build_object(
'binary', 'python',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'pattern', 'Python 3\.',
'priority', 2
)
)
),
'min_version', '3.8',
'recommended_version', '3.11'
),
jsonb_build_object(
'package_managers', jsonb_build_array('pip', 'pipenv', 'poetry'),
'virtual_env_support', true
)
)
ON CONFLICT (ref) DO UPDATE SET
description = EXCLUDED.description,
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW()
RETURNING id INTO v_python_runtime_id;
-- Migrate any references from old Python runtimes.
-- NOTE(review): only action and sensor are retargeted — confirm no other
-- table references runtime before the DELETE below.
UPDATE action SET runtime = v_python_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.python', 'core.sensor.python')
);
UPDATE sensor SET runtime = v_python_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.python', 'core.sensor.python')
);
-- Delete old Python runtime entries
DELETE FROM runtime WHERE ref IN ('core.action.python', 'core.sensor.python');
END $$;
-- Consolidate Node.js runtimes (same insert -> retarget -> delete pattern
-- as the Python consolidation above; also sweeps the legacy
-- 'core.action.node' alias).
DO $$
DECLARE
v_pack_id BIGINT;
v_nodejs_runtime_id BIGINT;
BEGIN
SELECT id INTO v_pack_id FROM pack WHERE ref = 'core';
-- Insert or update the unified Node.js runtime; id captured for retargeting.
INSERT INTO runtime (ref, pack, pack_ref, description, name, distributions, installation)
VALUES (
'core.nodejs',
v_pack_id,
'core',
'Node.js runtime for JavaScript-based actions and sensors',
'Node.js',
jsonb_build_object(
'verification', jsonb_build_object(
'commands', jsonb_build_array(
jsonb_build_object(
'binary', 'node',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'pattern', 'v\d+\.\d+\.\d+',
'priority', 1
)
)
),
'min_version', '16.0.0',
'recommended_version', '20.0.0'
),
jsonb_build_object(
'package_managers', jsonb_build_array('npm', 'yarn', 'pnpm'),
'module_support', true
)
)
ON CONFLICT (ref) DO UPDATE SET
description = EXCLUDED.description,
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW()
RETURNING id INTO v_nodejs_runtime_id;
-- Migrate references before deleting the old rows
UPDATE action SET runtime = v_nodejs_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.nodejs', 'core.sensor.nodejs', 'core.action.node')
);
UPDATE sensor SET runtime = v_nodejs_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.nodejs', 'core.sensor.nodejs', 'core.action.node')
);
-- Delete old Node.js entries
DELETE FROM runtime WHERE ref IN ('core.action.nodejs', 'core.sensor.nodejs', 'core.action.node');
END $$;
-- Consolidate Shell runtimes (same insert -> retarget -> delete pattern as
-- the consolidations above).
DO $$
DECLARE
v_pack_id BIGINT;
v_shell_runtime_id BIGINT;
BEGIN
SELECT id INTO v_pack_id FROM pack WHERE ref = 'core';
-- Insert or update the unified Shell runtime; verification commands are
-- optional and always_available is set, so shell is treated as present.
INSERT INTO runtime (ref, pack, pack_ref, description, name, distributions, installation)
VALUES (
'core.shell',
v_pack_id,
'core',
'Shell (bash/sh) runtime for script execution - always available',
'Shell',
jsonb_build_object(
'verification', jsonb_build_object(
'commands', jsonb_build_array(
jsonb_build_object(
'binary', 'sh',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'optional', true,
'priority', 1
),
jsonb_build_object(
'binary', 'bash',
'args', jsonb_build_array('--version'),
'exit_code', 0,
'optional', true,
'priority', 2
)
),
'always_available', true
)
),
jsonb_build_object(
'interpreters', jsonb_build_array('sh', 'bash', 'dash'),
'portable', true
)
)
ON CONFLICT (ref) DO UPDATE SET
description = EXCLUDED.description,
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW()
RETURNING id INTO v_shell_runtime_id;
-- Migrate references before deleting the old rows
UPDATE action SET runtime = v_shell_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.shell', 'core.sensor.shell')
);
UPDATE sensor SET runtime = v_shell_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.shell', 'core.sensor.shell')
);
-- Delete old Shell entries
DELETE FROM runtime WHERE ref IN ('core.action.shell', 'core.sensor.shell');
END $$;
-- Consolidate Native runtimes (same insert -> retarget -> delete pattern as
-- the consolidations above; native binaries need no verification commands).
DO $$
DECLARE
v_pack_id BIGINT;
v_native_runtime_id BIGINT;
BEGIN
SELECT id INTO v_pack_id FROM pack WHERE ref = 'core';
INSERT INTO runtime (ref, pack, pack_ref, description, name, distributions, installation)
VALUES (
'core.native',
v_pack_id,
'core',
'Native compiled runtime (Rust, Go, C, etc.) - always available',
'Native',
jsonb_build_object(
'verification', jsonb_build_object(
'always_available', true,
'check_required', false
),
'languages', jsonb_build_array('rust', 'go', 'c', 'c++')
),
jsonb_build_object(
'build_required', false,
'system_native', true
)
)
ON CONFLICT (ref) DO UPDATE SET
description = EXCLUDED.description,
distributions = EXCLUDED.distributions,
installation = EXCLUDED.installation,
updated = NOW()
RETURNING id INTO v_native_runtime_id;
-- Migrate references before deleting the old rows
UPDATE action SET runtime = v_native_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.native', 'core.sensor.native')
);
UPDATE sensor SET runtime = v_native_runtime_id
WHERE runtime IN (
SELECT id FROM runtime WHERE ref IN ('core.action.native', 'core.sensor.native')
);
-- Delete old Native entries
DELETE FROM runtime WHERE ref IN ('core.action.native', 'core.sensor.native');
END $$;
-- Handle builtin sensor runtime (keep as-is, it's truly sensor-specific).
-- Overwrites distributions/installation wholesale (no merge with existing keys).
UPDATE runtime
SET distributions = jsonb_build_object(
'verification', jsonb_build_object(
'always_available', true,
'check_required', false
),
'type', 'builtin'
),
installation = jsonb_build_object(
'method', 'builtin',
'included_with_service', true
),
-- Fix: refresh the audit timestamp, consistent with every other runtime
-- write in this migration (all ON CONFLICT branches set updated = NOW())
-- and with the earlier builtin update in migration 20260202000001.
updated = NOW()
WHERE ref = 'core.sensor.builtin';
-- ============================================================================
-- STEP 2: Drop runtime_type column and related objects
-- ============================================================================
-- Drop indexes that reference runtime_type (must go before the column drop)
DROP INDEX IF EXISTS idx_runtime_type;
DROP INDEX IF EXISTS idx_runtime_pack_type;
DROP INDEX IF EXISTS idx_runtime_type_created;
DROP INDEX IF EXISTS idx_runtime_type_sensor;
-- Drop the runtime_type column
ALTER TABLE runtime DROP COLUMN IF EXISTS runtime_type;
-- Drop the enum type.
-- NOTE(review): DROP TYPE fails if any other column still uses
-- runtime_type_enum — presumably runtime.runtime_type was the only user;
-- confirm before running in production.
DROP TYPE IF EXISTS runtime_type_enum;
-- ============================================================================
-- STEP 3: Update comments and create new indexes
-- ============================================================================
COMMENT ON TABLE runtime IS 'Runtime environments for executing actions and sensors (unified)';
COMMENT ON COLUMN runtime.ref IS 'Unique runtime reference (format: pack.name, e.g., core.python)';
COMMENT ON COLUMN runtime.name IS 'Runtime name (e.g., "Python", "Node.js", "Shell")';
-- Fix: removed a short COMMENT ON COLUMN runtime.distributions that used to
-- sit here — it was dead code, immediately overwritten by the structured
-- comment below (COMMENT ON replaces, it does not append).
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions including package managers and setup steps';
-- Create new indexes for efficient queries
CREATE INDEX IF NOT EXISTS idx_runtime_name ON runtime(name);
-- GIN index over the verification sub-document for containment queries
CREATE INDEX IF NOT EXISTS idx_runtime_verification ON runtime USING gin ((distributions->'verification'));
-- ============================================================================
-- VERIFICATION METADATA STRUCTURE DOCUMENTATION
-- ============================================================================
-- This is the authoritative comment for runtime.distributions.
COMMENT ON COLUMN runtime.distributions IS 'Runtime verification and capability metadata. Structure:
{
"verification": {
"commands": [ // Array of verification commands (in priority order)
{
"binary": "python3", // Binary name to execute
"args": ["--version"], // Arguments to pass
"exit_code": 0, // Expected exit code
"pattern": "Python 3\\.", // Optional regex pattern to match in output
"priority": 1, // Lower = higher priority
"optional": false // If true, failure is non-fatal
}
],
"always_available": false, // If true, skip verification (shell, native)
"check_required": true // If false, assume available without checking
},
"min_version": "3.8", // Minimum supported version
"recommended_version": "3.11" // Recommended version
}';
-- ============================================================================
-- SUMMARY
-- ============================================================================
-- Final runtime records (expected):
-- 1. core.python - Python 3 runtime (unified)
-- 2. core.nodejs - Node.js runtime (unified)
-- 3. core.shell - Shell runtime (unified)
-- 4. core.native - Native runtime (unified)
-- 5. core.sensor.builtin - Built-in sensor runtime (sensor-specific timers, etc.)
-- Display final state.
-- Fix: the migration previously ended with a bare SELECT, whose result set
-- is discarded (or rejected outright) by most migration runners, so the
-- summary was never visible. Emit it via RAISE NOTICE instead — same query,
-- same availability classification, but routed to the server/client log.
DO $$
DECLARE
    v_rec RECORD;
BEGIN
    RAISE NOTICE 'Runtime unification complete. Current runtimes:';
    FOR v_rec IN
        SELECT ref, name,
               CASE
                   WHEN distributions->'verification'->>'always_available' = 'true' THEN 'Always Available'
                   WHEN jsonb_array_length(distributions->'verification'->'commands') > 0 THEN 'Requires Verification'
                   ELSE 'Unknown'
               END AS availability_check
        FROM runtime
        ORDER BY ref
    LOOP
        RAISE NOTICE '  % (%) - %', v_rec.ref, v_rec.name, v_rec.availability_check;
    END LOOP;
END $$;

View File

@@ -1,58 +0,0 @@
-- Migration: Add rule_ref and trigger_ref to execution notification payload
-- This includes enforcement information in real-time notifications to avoid additional API calls
-- Drop the existing trigger first so it can be recreated against the
-- replaced function below.
DROP TRIGGER IF EXISTS notify_execution_change ON execution;
-- Replace the notification function to include enforcement details
-- (rule_ref / trigger_ref) so subscribers need no follow-up lookup.
CREATE OR REPLACE FUNCTION notify_execution_change()
RETURNS TRIGGER AS $$
DECLARE
    v_rule_ref TEXT;
    v_trigger_ref TEXT;
BEGIN
    -- Resolve enforcement context, if this execution is linked to one;
    -- both locals stay NULL otherwise and serialize as JSON null.
    IF NEW.enforcement IS NOT NULL THEN
        SELECT e.rule_ref, e.trigger_ref
        INTO v_rule_ref, v_trigger_ref
        FROM enforcement AS e
        WHERE e.id = NEW.enforcement;
    END IF;
    -- Publish the execution snapshot on the shared notification channel.
    PERFORM pg_notify(
        'attune_notifications',
        jsonb_build_object(
            'entity_type', 'execution',
            'entity_id', NEW.id,
            'timestamp', NOW(),
            'data', jsonb_build_object(
                'id', NEW.id,
                'status', NEW.status,
                'action_id', NEW.action,
                'action_ref', NEW.action_ref,
                'enforcement', NEW.enforcement,
                'rule_ref', v_rule_ref,
                'trigger_ref', v_trigger_ref,
                'parent', NEW.parent,
                'result', NEW.result,
                'created', NEW.created,
                'updated', NEW.updated
            )
        )::text
    );
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Recreate the trigger (safe without IF NOT EXISTS: the matching
-- DROP TRIGGER IF EXISTS ran at the top of this migration)
CREATE TRIGGER notify_execution_change
AFTER INSERT OR UPDATE ON execution
FOR EACH ROW
EXECUTE FUNCTION notify_execution_change();
-- Update comment
COMMENT ON FUNCTION notify_execution_change() IS
'Sends PostgreSQL NOTIFY for execution changes with enforcement details (rule_ref, trigger_ref) to enable real-time SSE streaming without additional API calls';

View File

@@ -1,59 +0,0 @@
-- Migration: Add NOTIFY trigger for enforcement creation
-- This enables real-time notifications when enforcements are created or updated
-- Function to send notifications on enforcement changes.
-- Publishes a JSON snapshot of the affected enforcement row on the shared
-- 'attune_notifications' channel with an operation tag
-- (created/updated/deleted).
CREATE OR REPLACE FUNCTION notify_enforcement_change()
RETURNS TRIGGER AS $$
DECLARE
    payload JSONB;
    operation TEXT;
    affected RECORD;
BEGIN
    -- Determine operation type and which row image to report.
    -- Fix: the previous version had a 'deleted' branch but always
    -- dereferenced NEW, which is NULL in DELETE triggers and would raise.
    -- Use OLD for DELETE so the function is correct if the trigger is ever
    -- extended to fire on deletes; INSERT/UPDATE behavior is unchanged.
    IF TG_OP = 'INSERT' THEN
        operation := 'created';
        affected := NEW;
    ELSIF TG_OP = 'UPDATE' THEN
        operation := 'updated';
        affected := NEW;
    ELSE
        operation := 'deleted';
        affected := OLD;
    END IF;
    -- Build JSON payload with enforcement details
    payload := jsonb_build_object(
        'entity_type', 'enforcement',
        'entity_id', affected.id,
        'operation', operation,
        'timestamp', NOW(),
        'data', jsonb_build_object(
            'id', affected.id,
            'rule', affected.rule,
            'rule_ref', affected.rule_ref,
            'trigger_ref', affected.trigger_ref,
            'event', affected.event,
            'status', affected.status,
            'condition', affected.condition,
            'conditions', affected.conditions,
            'config', affected.config,
            'payload', affected.payload,
            'created', affected.created,
            'updated', affected.updated
        )
    );
    -- Send notification to the attune_notifications channel
    PERFORM pg_notify('attune_notifications', payload::text);
    -- Return value is ignored for AFTER triggers; return the surviving image.
    RETURN COALESCE(NEW, OLD);
END;
$$ LANGUAGE plpgsql;
-- Trigger to send pg_notify on enforcement insert/update.
-- Fix: drop first so the migration is re-runnable (CREATE TRIGGER has no
-- IF NOT EXISTS form); consistent with the execution-notification migration.
DROP TRIGGER IF EXISTS notify_enforcement_change ON enforcement;
CREATE TRIGGER notify_enforcement_change
AFTER INSERT OR UPDATE ON enforcement
FOR EACH ROW
EXECUTE FUNCTION notify_enforcement_change();
-- Add comments
COMMENT ON FUNCTION notify_enforcement_change() IS
'Sends PostgreSQL NOTIFY for enforcement changes to enable real-time notifications';
COMMENT ON TRIGGER notify_enforcement_change ON enforcement IS
'Broadcasts enforcement changes via pg_notify for real-time updates';