change capture

This commit is contained in:
2026-02-26 14:34:02 -06:00
parent 7ee3604eb1
commit b43495b26d
47 changed files with 5785 additions and 1525 deletions

View File

@@ -1,5 +1,5 @@
-- Migration: Pack System
-- Description: Creates pack and runtime tables
-- Description: Creates pack, runtime, and runtime_version tables
-- Version: 20250101000002
-- ============================================================================
@@ -160,3 +160,85 @@ COMMENT ON COLUMN runtime.distributions IS 'Runtime distribution metadata includ
-- Column documentation for the runtime table (table defined earlier in this migration).
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions including package managers and setup steps';
COMMENT ON COLUMN runtime.installers IS 'Array of installer actions to create pack-specific runtime environments. Each installer defines commands to set up isolated environments (e.g., Python venv, npm install).';
COMMENT ON COLUMN runtime.execution_config IS 'Execution configuration: interpreter, environment setup, and dependency management. Drives how the worker executes actions and how pack install sets up environments.';
-- ============================================================================
-- RUNTIME VERSION TABLE
-- ============================================================================
-- Concrete versions of a runtime (e.g., Python 3.12.1). Each row carries a full,
-- standalone execution config so a version can replace its parent's config wholesale.
CREATE TABLE runtime_version (
id BIGSERIAL PRIMARY KEY,
-- Parent runtime; versions are deleted with their runtime.
runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
-- Denormalized parent ref (e.g., "core.python") for display/filtering without a join.
runtime_ref TEXT NOT NULL,
-- Semantic version string (e.g., "3.12.1", "20.11.0")
version TEXT NOT NULL,
-- Individual version components for efficient range queries.
-- Nullable because some runtimes may use non-numeric versioning.
version_major INT,
version_minor INT,
version_patch INT,
-- Complete execution configuration for this specific version.
-- This is NOT a diff/override — it is a full standalone config that can
-- replace the parent runtime's execution_config when this version is selected.
-- Structure is identical to runtime.execution_config (RuntimeExecutionConfig).
execution_config JSONB NOT NULL DEFAULT '{}'::jsonb,
-- Version-specific distribution/verification metadata.
-- Structure mirrors runtime.distributions but with version-specific commands.
-- Example: verification commands that check for a specific binary like python3.12.
distributions JSONB NOT NULL DEFAULT '{}'::jsonb,
-- Whether this version is the default for the parent runtime.
-- At most one version per runtime should be marked as default.
-- NOTE(review): that invariant is not enforced by any constraint in this table;
-- a UNIQUE partial index on (runtime) WHERE is_default = TRUE would enforce it — confirm.
is_default BOOLEAN NOT NULL DEFAULT FALSE,
-- Whether this version has been verified as available on the current system.
available BOOLEAN NOT NULL DEFAULT TRUE,
-- When this version was last verified (via running verification commands).
verified_at TIMESTAMPTZ,
-- Arbitrary version-specific metadata (e.g., EOL date, release notes URL,
-- feature flags, platform-specific notes).
meta JSONB NOT NULL DEFAULT '{}'::jsonb,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints
CONSTRAINT runtime_version_unique UNIQUE(runtime, version)
);
-- Indexes
CREATE INDEX idx_runtime_version_runtime ON runtime_version(runtime);
CREATE INDEX idx_runtime_version_runtime_ref ON runtime_version(runtime_ref);
CREATE INDEX idx_runtime_version_version ON runtime_version(version);
CREATE INDEX idx_runtime_version_available ON runtime_version(available) WHERE available = TRUE;
-- FIX: was a plain partial index on (is_default). A UNIQUE partial index on
-- (runtime) WHERE is_default = TRUE still serves default-version lookups AND
-- enforces the documented invariant "at most one default version per runtime"
-- (see is_default column comment), which was previously unenforced.
CREATE UNIQUE INDEX idx_runtime_version_is_default ON runtime_version(runtime) WHERE is_default = TRUE;
CREATE INDEX idx_runtime_version_components ON runtime_version(runtime, version_major, version_minor, version_patch);
CREATE INDEX idx_runtime_version_created ON runtime_version(created DESC);
CREATE INDEX idx_runtime_version_execution_config ON runtime_version USING GIN (execution_config);
CREATE INDEX idx_runtime_version_meta ON runtime_version USING GIN (meta);
-- Trigger: keeps runtime_version.updated current on every UPDATE.
CREATE TRIGGER update_runtime_version_updated
BEFORE UPDATE ON runtime_version
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE runtime_version IS 'Specific versions of a runtime (e.g., Python 3.11, 3.12) with version-specific execution configuration';
COMMENT ON COLUMN runtime_version.runtime IS 'Parent runtime this version belongs to';
COMMENT ON COLUMN runtime_version.runtime_ref IS 'Parent runtime ref (e.g., core.python) for display/filtering';
COMMENT ON COLUMN runtime_version.version IS 'Semantic version string (e.g., "3.12.1", "20.11.0")';
COMMENT ON COLUMN runtime_version.version_major IS 'Major version component for efficient range queries';
COMMENT ON COLUMN runtime_version.version_minor IS 'Minor version component for efficient range queries';
COMMENT ON COLUMN runtime_version.version_patch IS 'Patch version component for efficient range queries';
COMMENT ON COLUMN runtime_version.execution_config IS 'Complete execution configuration for this version (same structure as runtime.execution_config)';
COMMENT ON COLUMN runtime_version.distributions IS 'Version-specific distribution/verification metadata';
COMMENT ON COLUMN runtime_version.is_default IS 'Whether this is the default version for the parent runtime (at most one per runtime)';
COMMENT ON COLUMN runtime_version.available IS 'Whether this version has been verified as available on the system';
COMMENT ON COLUMN runtime_version.verified_at IS 'Timestamp of last availability verification';
COMMENT ON COLUMN runtime_version.meta IS 'Arbitrary version-specific metadata';

View File

@@ -1,6 +1,23 @@
-- Migration: Event System
-- Description: Creates trigger, sensor, event, and enforcement tables (with webhook_config, is_adhoc from start)
-- Version: 20250101000003
-- Migration: Event System and Actions
-- Description: Creates trigger, sensor, event, enforcement, and action tables
-- with runtime version constraint support. Includes webhook key
-- generation function used by webhook management functions in 000007.
-- Version: 20250101000004
-- ============================================================================
-- WEBHOOK KEY GENERATION
-- ============================================================================
-- Generates a unique webhook key in the format: wh_<32 random hex chars>
-- Used by enable_trigger_webhook() and regenerate_trigger_webhook_key() in 000007.
-- Returns a fresh webhook key: "wh_" + 32 hex chars (35 chars total, fits VARCHAR(64)).
-- NOTE(review): gen_random_bytes() is provided by the pgcrypto extension —
-- confirm an earlier migration runs CREATE EXTENSION pgcrypto.
CREATE OR REPLACE FUNCTION generate_webhook_key()
RETURNS VARCHAR(64) AS $$
BEGIN
-- 16 random bytes hex-encoded -> exactly 32 lowercase hex characters.
RETURN 'wh_' || encode(gen_random_bytes(16), 'hex');
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION generate_webhook_key() IS 'Generates a unique webhook key (format: wh_<32 hex chars>) for trigger webhook authentication';
-- ============================================================================
-- TRIGGER TABLE
@@ -74,6 +91,7 @@ CREATE TABLE sensor (
is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
param_schema JSONB,
config JSONB,
runtime_version_constraint TEXT,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
@@ -106,6 +124,7 @@ COMMENT ON COLUMN sensor.runtime IS 'Runtime environment for execution';
COMMENT ON COLUMN sensor.trigger IS 'Trigger type this sensor creates events for';
COMMENT ON COLUMN sensor.enabled IS 'Whether this sensor is active';
COMMENT ON COLUMN sensor.is_adhoc IS 'True if sensor was manually created (ad-hoc), false if installed from pack';
COMMENT ON COLUMN sensor.runtime_version_constraint IS 'Semver version constraint for the runtime (e.g., ">=3.12", ">=3.12,<4.0", "~18.0"). NULL means any version.';
-- ============================================================================
-- EVENT TABLE
@@ -155,7 +174,7 @@ COMMENT ON COLUMN event.source IS 'Sensor that generated this event';
CREATE TABLE enforcement (
id BIGSERIAL PRIMARY KEY,
rule BIGINT, -- Forward reference to rule table, will add constraint in next migration
rule BIGINT, -- Forward reference to rule table, will add constraint after rule is created
rule_ref TEXT NOT NULL,
trigger_ref TEXT NOT NULL,
config JSONB,
@@ -200,5 +219,78 @@ COMMENT ON COLUMN enforcement.payload IS 'Event payload for rule evaluation';
COMMENT ON COLUMN enforcement.condition IS 'Logical operator for conditions (any=OR, all=AND)';
COMMENT ON COLUMN enforcement.conditions IS 'Condition expressions to evaluate';
-- Note: Rule table will be created in migration 20250101000006 after action table exists
-- Note: Foreign key constraints for enforcement.rule and event.rule will be added in that migration
-- ============================================================================
-- ACTION TABLE
-- ============================================================================
-- Actions: executable tasks installed from packs (or created ad-hoc) and run by workers.
CREATE TABLE action (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref TEXT NOT NULL,
label TEXT NOT NULL,
description TEXT NOT NULL,
entrypoint TEXT NOT NULL,
runtime BIGINT REFERENCES runtime(id),
param_schema JSONB,
out_schema JSONB,
parameter_delivery TEXT NOT NULL DEFAULT 'stdin' CHECK (parameter_delivery IN ('stdin', 'file')),
parameter_format TEXT NOT NULL DEFAULT 'json' CHECK (parameter_format IN ('dotenv', 'json', 'yaml')),
output_format TEXT NOT NULL DEFAULT 'text' CHECK (output_format IN ('text', 'json', 'yaml', 'jsonl')),
is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
timeout_seconds INTEGER,
max_retries INTEGER DEFAULT 0,
runtime_version_constraint TEXT,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints
CONSTRAINT action_ref_lowercase CHECK (ref = LOWER(ref)),
CONSTRAINT action_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$'),
-- FIX: reject meaningless tuning values. The column comments define
-- timeout_seconds as a TTL override (must be positive when set) and
-- max_retries as a retry count (must be non-negative); previously a
-- negative value was silently accepted.
CONSTRAINT action_timeout_positive CHECK (timeout_seconds IS NULL OR timeout_seconds > 0),
CONSTRAINT action_max_retries_nonneg CHECK (max_retries IS NULL OR max_retries >= 0)
);
-- Indexes
CREATE INDEX idx_action_ref ON action(ref);
CREATE INDEX idx_action_pack ON action(pack);
CREATE INDEX idx_action_runtime ON action(runtime);
CREATE INDEX idx_action_parameter_delivery ON action(parameter_delivery);
CREATE INDEX idx_action_parameter_format ON action(parameter_format);
CREATE INDEX idx_action_output_format ON action(output_format);
CREATE INDEX idx_action_is_adhoc ON action(is_adhoc) WHERE is_adhoc = true;
CREATE INDEX idx_action_created ON action(created DESC);
-- Trigger: keeps action.updated current on every UPDATE.
CREATE TRIGGER update_action_updated
BEFORE UPDATE ON action
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE action IS 'Actions are executable tasks that can be triggered';
COMMENT ON COLUMN action.ref IS 'Unique action reference (format: pack.name)';
COMMENT ON COLUMN action.pack IS 'Pack this action belongs to';
COMMENT ON COLUMN action.label IS 'Human-readable action name';
COMMENT ON COLUMN action.entrypoint IS 'Script or command to execute';
COMMENT ON COLUMN action.runtime IS 'Runtime environment for execution';
COMMENT ON COLUMN action.param_schema IS 'JSON schema for action parameters';
COMMENT ON COLUMN action.out_schema IS 'JSON schema for action output';
COMMENT ON COLUMN action.parameter_delivery IS 'How parameters are delivered: stdin (standard input - secure), file (temporary file - secure for large payloads). Environment variables are set separately via execution.env_vars.';
COMMENT ON COLUMN action.parameter_format IS 'Parameter serialization format: json (JSON object - default), dotenv (KEY=''VALUE''), yaml (YAML format)';
COMMENT ON COLUMN action.output_format IS 'Output parsing format: text (no parsing - raw stdout), json (parse stdout as JSON), yaml (parse stdout as YAML), jsonl (parse each line as JSON, collect into array)';
COMMENT ON COLUMN action.is_adhoc IS 'True if action was manually created (ad-hoc), false if installed from pack';
COMMENT ON COLUMN action.timeout_seconds IS 'Worker queue TTL override in seconds. If NULL, uses global worker_queue_ttl_ms config. Allows per-action timeout tuning.';
COMMENT ON COLUMN action.max_retries IS 'Maximum number of automatic retry attempts for failed executions. 0 = no retries (default).';
COMMENT ON COLUMN action.runtime_version_constraint IS 'Semver version constraint for the runtime (e.g., ">=3.12", ">=3.12,<4.0", "~18.0"). NULL means any version.';
-- ============================================================================
-- Add foreign key constraint for policy table
-- (policy was created in an earlier migration, before action existed, so its
-- FK to action must be added here.)
ALTER TABLE policy
ADD CONSTRAINT policy_action_fkey
FOREIGN KEY (action) REFERENCES action(id) ON DELETE CASCADE;
-- Note: Foreign key constraints for key table (key_owner_action_fkey, key_owner_sensor_fkey)
-- will be added in migration 000007_supporting_systems.sql after the key table is created
-- Note: Rule table will be created in migration 000005 after execution table exists
-- Note: Foreign key constraints for enforcement.rule and event.rule will be added there

View File

@@ -1,70 +0,0 @@
-- Migration: Action
-- Description: Creates action table (with is_adhoc from start)
-- Version: 20250101000005
-- ============================================================================
-- ACTION TABLE
-- ============================================================================
CREATE TABLE action (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref TEXT NOT NULL,
label TEXT NOT NULL,
description TEXT NOT NULL,
entrypoint TEXT NOT NULL,
runtime BIGINT REFERENCES runtime(id),
param_schema JSONB,
out_schema JSONB,
parameter_delivery TEXT NOT NULL DEFAULT 'stdin' CHECK (parameter_delivery IN ('stdin', 'file')),
parameter_format TEXT NOT NULL DEFAULT 'json' CHECK (parameter_format IN ('dotenv', 'json', 'yaml')),
output_format TEXT NOT NULL DEFAULT 'text' CHECK (output_format IN ('text', 'json', 'yaml', 'jsonl')),
is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints
CONSTRAINT action_ref_lowercase CHECK (ref = LOWER(ref)),
CONSTRAINT action_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
CREATE INDEX idx_action_ref ON action(ref);
CREATE INDEX idx_action_pack ON action(pack);
CREATE INDEX idx_action_runtime ON action(runtime);
CREATE INDEX idx_action_parameter_delivery ON action(parameter_delivery);
CREATE INDEX idx_action_parameter_format ON action(parameter_format);
CREATE INDEX idx_action_output_format ON action(output_format);
CREATE INDEX idx_action_is_adhoc ON action(is_adhoc) WHERE is_adhoc = true;
CREATE INDEX idx_action_created ON action(created DESC);
-- Trigger
CREATE TRIGGER update_action_updated
BEFORE UPDATE ON action
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE action IS 'Actions are executable tasks that can be triggered';
COMMENT ON COLUMN action.ref IS 'Unique action reference (format: pack.name)';
COMMENT ON COLUMN action.pack IS 'Pack this action belongs to';
COMMENT ON COLUMN action.label IS 'Human-readable action name';
COMMENT ON COLUMN action.entrypoint IS 'Script or command to execute';
COMMENT ON COLUMN action.runtime IS 'Runtime environment for execution';
COMMENT ON COLUMN action.param_schema IS 'JSON schema for action parameters';
COMMENT ON COLUMN action.out_schema IS 'JSON schema for action output';
COMMENT ON COLUMN action.parameter_delivery IS 'How parameters are delivered: stdin (standard input - secure), file (temporary file - secure for large payloads). Environment variables are set separately via execution.env_vars.';
COMMENT ON COLUMN action.parameter_format IS 'Parameter serialization format: json (JSON object - default), dotenv (KEY=''VALUE''), yaml (YAML format)';
COMMENT ON COLUMN action.output_format IS 'Output parsing format: text (no parsing - raw stdout), json (parse stdout as JSON), yaml (parse stdout as YAML), jsonl (parse each line as JSON, collect into array)';
COMMENT ON COLUMN action.is_adhoc IS 'True if action was manually created (ad-hoc), false if installed from pack';
-- ============================================================================
-- Add foreign key constraint for policy table
ALTER TABLE policy
ADD CONSTRAINT policy_action_fkey
FOREIGN KEY (action) REFERENCES action(id) ON DELETE CASCADE;
-- Note: Foreign key constraints for key table (key_owner_action_fkey, key_owner_sensor_fkey)
-- will be added in migration 20250101000009_keys_artifacts.sql after the key table is created

View File

@@ -0,0 +1,397 @@
-- Migration: Execution and Operations
-- Description: Creates execution, inquiry, rule, worker, and notification tables.
-- Includes retry tracking, worker health views, and helper functions.
-- Consolidates former migrations: 000006 (execution_system), 000008
-- (worker_notification), 000014 (worker_table), and 20260209 (phase3).
-- Version: 20250101000005
-- ============================================================================
-- EXECUTION TABLE
-- ============================================================================
-- Executions: one row per action run. Self-referencing FKs model workflow
-- nesting (parent) and retry chains (original_execution).
CREATE TABLE execution (
id BIGSERIAL PRIMARY KEY,
action BIGINT REFERENCES action(id) ON DELETE SET NULL,
action_ref TEXT NOT NULL,
config JSONB,
env_vars JSONB,
parent BIGINT REFERENCES execution(id) ON DELETE SET NULL,
enforcement BIGINT REFERENCES enforcement(id) ON DELETE SET NULL,
executor BIGINT REFERENCES identity(id) ON DELETE SET NULL,
status execution_status_enum NOT NULL DEFAULT 'requested',
result JSONB,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
is_workflow BOOLEAN DEFAULT false NOT NULL,
-- NOTE(review): workflow_def has no FK — presumably references a workflow
-- definition table created elsewhere; confirm and add the constraint there.
workflow_def BIGINT,
workflow_task JSONB,
-- Retry tracking (baked in from phase 3)
retry_count INTEGER NOT NULL DEFAULT 0,
max_retries INTEGER,
retry_reason TEXT,
original_execution BIGINT REFERENCES execution(id) ON DELETE SET NULL,
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_execution_action ON execution(action);
CREATE INDEX idx_execution_action_ref ON execution(action_ref);
CREATE INDEX idx_execution_parent ON execution(parent);
CREATE INDEX idx_execution_enforcement ON execution(enforcement);
CREATE INDEX idx_execution_executor ON execution(executor);
CREATE INDEX idx_execution_status ON execution(status);
CREATE INDEX idx_execution_created ON execution(created DESC);
CREATE INDEX idx_execution_updated ON execution(updated DESC);
CREATE INDEX idx_execution_status_created ON execution(status, created DESC);
CREATE INDEX idx_execution_status_updated ON execution(status, updated DESC);
CREATE INDEX idx_execution_action_status ON execution(action, status);
CREATE INDEX idx_execution_executor_created ON execution(executor, created DESC);
CREATE INDEX idx_execution_parent_created ON execution(parent, created DESC);
CREATE INDEX idx_execution_result_gin ON execution USING GIN (result);
CREATE INDEX idx_execution_env_vars_gin ON execution USING GIN (env_vars);
CREATE INDEX idx_execution_original_execution ON execution(original_execution) WHERE original_execution IS NOT NULL;
-- Partial index for the retry scanner: failed executions that still have retry budget.
CREATE INDEX idx_execution_status_retry ON execution(status, retry_count) WHERE status = 'failed' AND retry_count < COALESCE(max_retries, 0);
-- Trigger: keeps execution.updated current on every UPDATE.
CREATE TRIGGER update_execution_updated
BEFORE UPDATE ON execution
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE execution IS 'Executions represent action runs, supports nested workflows';
COMMENT ON COLUMN execution.action IS 'Action being executed (may be null if action deleted)';
COMMENT ON COLUMN execution.action_ref IS 'Action reference (preserved even if action deleted)';
COMMENT ON COLUMN execution.config IS 'Snapshot of action configuration at execution time';
COMMENT ON COLUMN execution.env_vars IS 'Environment variables for this execution as key-value pairs (string -> string). These are set in the execution environment and are separate from action parameters. Used for execution context, configuration, and non-sensitive metadata.';
COMMENT ON COLUMN execution.parent IS 'Parent execution ID for workflow hierarchies';
COMMENT ON COLUMN execution.enforcement IS 'Enforcement that triggered this execution (if rule-driven)';
COMMENT ON COLUMN execution.executor IS 'Identity that initiated the execution';
COMMENT ON COLUMN execution.status IS 'Current execution lifecycle status';
COMMENT ON COLUMN execution.result IS 'Execution output/results';
COMMENT ON COLUMN execution.retry_count IS 'Current retry attempt number (0 = first attempt, 1 = first retry, etc.)';
COMMENT ON COLUMN execution.max_retries IS 'Maximum retries for this execution. Copied from action.max_retries at creation time.';
COMMENT ON COLUMN execution.retry_reason IS 'Reason for retry (e.g., "worker_unavailable", "transient_error", "manual_retry")';
COMMENT ON COLUMN execution.original_execution IS 'ID of the original execution if this is a retry. Forms a retry chain.';
-- ============================================================================
-- ============================================================================
-- INQUIRY TABLE
-- ============================================================================
-- Human-in-the-loop prompts; an execution blocks on an inquiry until a
-- response arrives or timeout_at passes.
CREATE TABLE inquiry (
id BIGSERIAL PRIMARY KEY,
execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
prompt TEXT NOT NULL,
response_schema JSONB,
assigned_to BIGINT REFERENCES identity(id) ON DELETE SET NULL,
status inquiry_status_enum NOT NULL DEFAULT 'pending',
response JSONB,
timeout_at TIMESTAMPTZ,
responded_at TIMESTAMPTZ,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_inquiry_execution ON inquiry(execution);
CREATE INDEX idx_inquiry_assigned_to ON inquiry(assigned_to);
CREATE INDEX idx_inquiry_status ON inquiry(status);
CREATE INDEX idx_inquiry_timeout_at ON inquiry(timeout_at) WHERE timeout_at IS NOT NULL;
CREATE INDEX idx_inquiry_created ON inquiry(created DESC);
CREATE INDEX idx_inquiry_status_created ON inquiry(status, created DESC);
CREATE INDEX idx_inquiry_assigned_status ON inquiry(assigned_to, status);
CREATE INDEX idx_inquiry_execution_status ON inquiry(execution, status);
CREATE INDEX idx_inquiry_response_gin ON inquiry USING GIN (response);
-- Trigger: keeps inquiry.updated current on every UPDATE.
CREATE TRIGGER update_inquiry_updated
BEFORE UPDATE ON inquiry
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE inquiry IS 'Inquiries enable human-in-the-loop workflows with async user interactions';
COMMENT ON COLUMN inquiry.execution IS 'Execution that is waiting on this inquiry';
COMMENT ON COLUMN inquiry.prompt IS 'Question or prompt text for the user';
COMMENT ON COLUMN inquiry.response_schema IS 'JSON schema defining expected response format';
COMMENT ON COLUMN inquiry.assigned_to IS 'Identity who should respond to this inquiry';
COMMENT ON COLUMN inquiry.status IS 'Current inquiry lifecycle status';
COMMENT ON COLUMN inquiry.response IS 'User response data';
COMMENT ON COLUMN inquiry.timeout_at IS 'When this inquiry expires';
COMMENT ON COLUMN inquiry.responded_at IS 'When the response was received';
-- ============================================================================
-- ============================================================================
-- RULE TABLE
-- ============================================================================
-- Rules bind a trigger to an action, guarded by conditions. Created after
-- action/trigger exist so the FKs can be declared inline.
CREATE TABLE rule (
id BIGSERIAL PRIMARY KEY,
ref TEXT NOT NULL UNIQUE,
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
pack_ref TEXT NOT NULL,
label TEXT NOT NULL,
description TEXT NOT NULL,
action BIGINT REFERENCES action(id) ON DELETE SET NULL,
action_ref TEXT NOT NULL,
trigger BIGINT REFERENCES trigger(id) ON DELETE SET NULL,
trigger_ref TEXT NOT NULL,
conditions JSONB NOT NULL DEFAULT '[]'::jsonb,
action_params JSONB DEFAULT '{}'::jsonb,
trigger_params JSONB DEFAULT '{}'::jsonb,
-- NOTE(review): no DEFAULT here, so every INSERT must set enabled explicitly
-- (unlike is_adhoc below). Confirm this is intentional rather than an oversight.
enabled BOOLEAN NOT NULL,
is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Constraints
CONSTRAINT rule_ref_lowercase CHECK (ref = LOWER(ref)),
CONSTRAINT rule_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
CREATE INDEX idx_rule_ref ON rule(ref);
CREATE INDEX idx_rule_pack ON rule(pack);
CREATE INDEX idx_rule_action ON rule(action);
CREATE INDEX idx_rule_trigger ON rule(trigger);
CREATE INDEX idx_rule_enabled ON rule(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_rule_is_adhoc ON rule(is_adhoc) WHERE is_adhoc = true;
CREATE INDEX idx_rule_created ON rule(created DESC);
CREATE INDEX idx_rule_trigger_enabled ON rule(trigger, enabled);
CREATE INDEX idx_rule_action_enabled ON rule(action, enabled);
CREATE INDEX idx_rule_pack_enabled ON rule(pack, enabled);
CREATE INDEX idx_rule_action_params_gin ON rule USING GIN (action_params);
CREATE INDEX idx_rule_trigger_params_gin ON rule USING GIN (trigger_params);
-- Trigger: keeps rule.updated current on every UPDATE.
CREATE TRIGGER update_rule_updated
BEFORE UPDATE ON rule
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE rule IS 'Rules link triggers to actions with conditions';
COMMENT ON COLUMN rule.ref IS 'Unique rule reference (format: pack.name)';
COMMENT ON COLUMN rule.label IS 'Human-readable rule name';
COMMENT ON COLUMN rule.action IS 'Action to execute when rule triggers (null if action deleted)';
COMMENT ON COLUMN rule.trigger IS 'Trigger that activates this rule (null if trigger deleted)';
COMMENT ON COLUMN rule.conditions IS 'Condition expressions to evaluate before executing action';
COMMENT ON COLUMN rule.action_params IS 'Parameter overrides for the action';
COMMENT ON COLUMN rule.trigger_params IS 'Parameter overrides for the trigger';
COMMENT ON COLUMN rule.enabled IS 'Whether this rule is active';
COMMENT ON COLUMN rule.is_adhoc IS 'True if rule was manually created (ad-hoc), false if installed from pack';
-- ============================================================================
-- Add foreign key constraints now that rule table exists
-- (enforcement.rule and event.rule were created as bare BIGINTs earlier.)
ALTER TABLE enforcement
ADD CONSTRAINT enforcement_rule_fkey
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
ALTER TABLE event
ADD CONSTRAINT event_rule_fkey
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
-- ============================================================================
-- WORKER TABLE
-- ============================================================================
-- Registry of action/sensor worker processes; liveness is tracked via
-- last_heartbeat and health metadata nested under capabilities -> 'health'.
CREATE TABLE worker (
id BIGSERIAL PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
worker_type worker_type_enum NOT NULL,
worker_role worker_role_enum NOT NULL,
runtime BIGINT REFERENCES runtime(id) ON DELETE SET NULL,
host TEXT,
port INTEGER,
status worker_status_enum NOT NULL DEFAULT 'active',
capabilities JSONB,
meta JSONB,
last_heartbeat TIMESTAMPTZ,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_worker_name ON worker(name);
CREATE INDEX idx_worker_type ON worker(worker_type);
CREATE INDEX idx_worker_role ON worker(worker_role);
CREATE INDEX idx_worker_runtime ON worker(runtime);
CREATE INDEX idx_worker_status ON worker(status);
CREATE INDEX idx_worker_last_heartbeat ON worker(last_heartbeat DESC) WHERE last_heartbeat IS NOT NULL;
CREATE INDEX idx_worker_created ON worker(created DESC);
CREATE INDEX idx_worker_status_role ON worker(status, worker_role);
CREATE INDEX idx_worker_capabilities_gin ON worker USING GIN (capabilities);
CREATE INDEX idx_worker_meta_gin ON worker USING GIN (meta);
-- Expression GIN index supporting health-status lookups used by healthy_workers view.
CREATE INDEX idx_worker_capabilities_health_status ON worker USING GIN ((capabilities -> 'health' -> 'status'));
-- Trigger: keeps worker.updated current on every UPDATE.
CREATE TRIGGER update_worker_updated
BEFORE UPDATE ON worker
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE worker IS 'Worker registration and tracking table for action and sensor workers';
COMMENT ON COLUMN worker.name IS 'Unique worker identifier (typically hostname-based)';
COMMENT ON COLUMN worker.worker_type IS 'Worker deployment type (local or remote)';
COMMENT ON COLUMN worker.worker_role IS 'Worker role (action or sensor)';
COMMENT ON COLUMN worker.runtime IS 'Runtime environment this worker supports (optional)';
COMMENT ON COLUMN worker.host IS 'Worker host address';
COMMENT ON COLUMN worker.port IS 'Worker port number';
COMMENT ON COLUMN worker.status IS 'Worker operational status';
COMMENT ON COLUMN worker.capabilities IS 'Worker capabilities (e.g., max_concurrent_executions, supported runtimes)';
COMMENT ON COLUMN worker.meta IS 'Additional worker metadata';
COMMENT ON COLUMN worker.last_heartbeat IS 'Timestamp of last heartbeat from worker';
-- ============================================================================
-- NOTIFICATION TABLE
-- ============================================================================
-- Outbox-style table: each insert is also pushed to LISTENers via pg_notify
-- (see notify_on_notification_insert trigger below).
CREATE TABLE notification (
id BIGSERIAL PRIMARY KEY,
channel TEXT NOT NULL,
entity_type TEXT NOT NULL,
entity TEXT NOT NULL,
activity TEXT NOT NULL,
state notification_status_enum NOT NULL DEFAULT 'created',
content JSONB,
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_notification_channel ON notification(channel);
CREATE INDEX idx_notification_entity_type ON notification(entity_type);
CREATE INDEX idx_notification_entity ON notification(entity);
CREATE INDEX idx_notification_state ON notification(state);
CREATE INDEX idx_notification_created ON notification(created DESC);
CREATE INDEX idx_notification_channel_state ON notification(channel, state);
CREATE INDEX idx_notification_entity_type_entity ON notification(entity_type, entity);
CREATE INDEX idx_notification_state_created ON notification(state, created DESC);
CREATE INDEX idx_notification_content_gin ON notification USING GIN (content);
-- Trigger: keeps notification.updated current on every UPDATE.
CREATE TRIGGER update_notification_updated
BEFORE UPDATE ON notification
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Function for pg_notify on notification insert
CREATE OR REPLACE FUNCTION notify_on_insert()
RETURNS TRIGGER AS $$
BEGIN
    -- Publish {id, entity_type, entity, activity} as JSON text on the
    -- channel named by the inserted row itself.
    PERFORM pg_notify(
        NEW.channel,
        json_build_object(
            'id', NEW.id,
            'entity_type', NEW.entity_type,
            'entity', NEW.entity,
            'activity', NEW.activity
        )::text
    );
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to send pg_notify on notification insert
-- (fires AFTER INSERT so listeners only see committed-bound rows).
CREATE TRIGGER notify_on_notification_insert
AFTER INSERT ON notification
FOR EACH ROW
EXECUTE FUNCTION notify_on_insert();
-- Comments
COMMENT ON TABLE notification IS 'System notifications about entity changes for real-time updates';
COMMENT ON COLUMN notification.channel IS 'Notification channel (typically table name)';
COMMENT ON COLUMN notification.entity_type IS 'Type of entity (table name)';
COMMENT ON COLUMN notification.entity IS 'Entity identifier (typically ID or ref)';
COMMENT ON COLUMN notification.activity IS 'Activity type (e.g., "created", "updated", "completed")';
COMMENT ON COLUMN notification.state IS 'Processing state of notification';
COMMENT ON COLUMN notification.content IS 'Optional notification payload data';
-- ============================================================================
-- WORKER HEALTH VIEWS AND FUNCTIONS
-- ============================================================================
-- View for healthy workers (convenience for queries)
-- NOTE(review): the 30-second heartbeat freshness window is hard-coded here;
-- confirm it matches the workers' configured heartbeat interval.
CREATE OR REPLACE VIEW healthy_workers AS
SELECT
w.id,
w.name,
w.worker_type,
w.worker_role,
w.runtime,
w.status,
w.capabilities,
w.last_heartbeat,
(w.capabilities -> 'health' ->> 'status')::TEXT as health_status,
(w.capabilities -> 'health' ->> 'queue_depth')::INTEGER as queue_depth,
(w.capabilities -> 'health' ->> 'consecutive_failures')::INTEGER as consecutive_failures
FROM worker w
WHERE
w.status = 'active'
AND w.last_heartbeat > NOW() - INTERVAL '30 seconds'
AND (
-- Healthy if no health info (backward compatible)
w.capabilities -> 'health' IS NULL
OR
-- Or explicitly marked healthy
w.capabilities -> 'health' ->> 'status' IN ('healthy', 'degraded')
);
COMMENT ON VIEW healthy_workers IS 'Workers that are active, have fresh heartbeat, and are healthy or degraded (not unhealthy)';
-- Reads the reported queue depth for one worker from its health metadata.
-- Returns NULL when the worker row or the health field is absent.
CREATE OR REPLACE FUNCTION get_worker_queue_depth(worker_id_param BIGINT)
RETURNS INTEGER AS $$
DECLARE
    depth INTEGER;
BEGIN
    SELECT (capabilities -> 'health' ->> 'queue_depth')::INTEGER
    INTO depth
    FROM worker
    WHERE id = worker_id_param;
    RETURN depth;
END;
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION get_worker_queue_depth IS 'Extract current queue depth from worker health metadata';
-- Determines whether a failed execution still has retry budget.
-- TRUE only when the row exists, status = 'failed', max_retries is a
-- positive number, and retry_count has not yet reached it; FALSE otherwise
-- (including when the execution id does not exist).
CREATE OR REPLACE FUNCTION is_execution_retriable(execution_id_param BIGINT)
RETURNS BOOLEAN AS $$
BEGIN
    RETURN EXISTS (
        SELECT 1
        FROM execution e
        WHERE e.id = execution_id_param
          AND e.status = 'failed'
          AND e.max_retries IS NOT NULL
          AND e.max_retries > 0
          AND e.retry_count < e.max_retries
    );
END;
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION is_execution_retriable IS 'Check if a failed execution can be automatically retried based on retry limits';

View File

@@ -1,183 +0,0 @@
-- Migration: Execution System
-- Description: Creates execution (with workflow columns), inquiry, and rule tables
-- Version: 20250101000006
-- ============================================================================
-- EXECUTION TABLE
-- ============================================================================
-- One row per action run. Workflow runs reuse this table: is_workflow marks
-- the parent run, and child task runs point back to it via the parent column.
CREATE TABLE execution (
    id BIGSERIAL PRIMARY KEY,
    -- SET NULL so history survives action deletion; action_ref keeps the name
    action BIGINT REFERENCES action(id) ON DELETE SET NULL,
    action_ref TEXT NOT NULL,
    config JSONB,                -- snapshot of action configuration at run time
    env_vars JSONB,              -- string->string environment for the run
    parent BIGINT REFERENCES execution(id) ON DELETE SET NULL,
    enforcement BIGINT REFERENCES enforcement(id) ON DELETE SET NULL,
    executor BIGINT REFERENCES identity(id) ON DELETE SET NULL,
    status execution_status_enum NOT NULL DEFAULT 'requested',
    result JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    is_workflow BOOLEAN DEFAULT false NOT NULL,
    -- NOTE(review): workflow_def carries no FK here (workflow tables are
    -- created in a later migration) — confirm integrity is enforced there.
    workflow_def BIGINT,
    workflow_task JSONB,
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_execution_action ON execution(action);
CREATE INDEX idx_execution_action_ref ON execution(action_ref);
CREATE INDEX idx_execution_parent ON execution(parent);
CREATE INDEX idx_execution_enforcement ON execution(enforcement);
CREATE INDEX idx_execution_executor ON execution(executor);
CREATE INDEX idx_execution_status ON execution(status);
CREATE INDEX idx_execution_created ON execution(created DESC);
CREATE INDEX idx_execution_updated ON execution(updated DESC);
CREATE INDEX idx_execution_status_created ON execution(status, created DESC);
CREATE INDEX idx_execution_status_updated ON execution(status, updated DESC);
CREATE INDEX idx_execution_action_status ON execution(action, status);
CREATE INDEX idx_execution_executor_created ON execution(executor, created DESC);
CREATE INDEX idx_execution_parent_created ON execution(parent, created DESC);
CREATE INDEX idx_execution_result_gin ON execution USING GIN (result);
CREATE INDEX idx_execution_env_vars_gin ON execution USING GIN (env_vars);
-- Trigger
CREATE TRIGGER update_execution_updated
BEFORE UPDATE ON execution
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE execution IS 'Executions represent action runs, supports nested workflows';
COMMENT ON COLUMN execution.action IS 'Action being executed (may be null if action deleted)';
COMMENT ON COLUMN execution.action_ref IS 'Action reference (preserved even if action deleted)';
COMMENT ON COLUMN execution.config IS 'Snapshot of action configuration at execution time';
COMMENT ON COLUMN execution.env_vars IS 'Environment variables for this execution as key-value pairs (string -> string). These are set in the execution environment and are separate from action parameters. Used for execution context, configuration, and non-sensitive metadata.';
COMMENT ON COLUMN execution.parent IS 'Parent execution ID for workflow hierarchies';
COMMENT ON COLUMN execution.enforcement IS 'Enforcement that triggered this execution (if rule-driven)';
COMMENT ON COLUMN execution.executor IS 'Identity that initiated the execution';
COMMENT ON COLUMN execution.status IS 'Current execution lifecycle status';
COMMENT ON COLUMN execution.result IS 'Execution output/results';
-- ============================================================================
-- ============================================================================
-- INQUIRY TABLE
-- ============================================================================
-- Inquiries pause an execution until a human responds (human-in-the-loop).
CREATE TABLE inquiry (
    id BIGSERIAL PRIMARY KEY,
    -- CASCADE: an inquiry is meaningless without its owning execution
    execution BIGINT NOT NULL REFERENCES execution(id) ON DELETE CASCADE,
    prompt TEXT NOT NULL,
    response_schema JSONB,       -- JSON schema the response must satisfy
    assigned_to BIGINT REFERENCES identity(id) ON DELETE SET NULL,
    status inquiry_status_enum NOT NULL DEFAULT 'pending',
    response JSONB,
    timeout_at TIMESTAMPTZ,      -- NULL = never expires
    responded_at TIMESTAMPTZ,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_inquiry_execution ON inquiry(execution);
CREATE INDEX idx_inquiry_assigned_to ON inquiry(assigned_to);
CREATE INDEX idx_inquiry_status ON inquiry(status);
CREATE INDEX idx_inquiry_timeout_at ON inquiry(timeout_at) WHERE timeout_at IS NOT NULL;
CREATE INDEX idx_inquiry_created ON inquiry(created DESC);
CREATE INDEX idx_inquiry_status_created ON inquiry(status, created DESC);
CREATE INDEX idx_inquiry_assigned_status ON inquiry(assigned_to, status);
CREATE INDEX idx_inquiry_execution_status ON inquiry(execution, status);
CREATE INDEX idx_inquiry_response_gin ON inquiry USING GIN (response);
-- Trigger
CREATE TRIGGER update_inquiry_updated
BEFORE UPDATE ON inquiry
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE inquiry IS 'Inquiries enable human-in-the-loop workflows with async user interactions';
COMMENT ON COLUMN inquiry.execution IS 'Execution that is waiting on this inquiry';
COMMENT ON COLUMN inquiry.prompt IS 'Question or prompt text for the user';
COMMENT ON COLUMN inquiry.response_schema IS 'JSON schema defining expected response format';
COMMENT ON COLUMN inquiry.assigned_to IS 'Identity who should respond to this inquiry';
COMMENT ON COLUMN inquiry.status IS 'Current inquiry lifecycle status';
COMMENT ON COLUMN inquiry.response IS 'User response data';
COMMENT ON COLUMN inquiry.timeout_at IS 'When this inquiry expires';
COMMENT ON COLUMN inquiry.responded_at IS 'When the response was received';
-- ============================================================================
-- ============================================================================
-- RULE TABLE
-- ============================================================================
-- Rules bind a trigger to an action. Both FKs are SET NULL so a rule row
-- survives deletion of either side; the *_ref columns preserve the names.
CREATE TABLE rule (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT NOT NULL,
    label TEXT NOT NULL,
    description TEXT NOT NULL,
    action BIGINT REFERENCES action(id) ON DELETE SET NULL,
    action_ref TEXT NOT NULL,
    trigger BIGINT REFERENCES trigger(id) ON DELETE SET NULL,
    trigger_ref TEXT NOT NULL,
    conditions JSONB NOT NULL DEFAULT '[]'::jsonb,
    action_params JSONB DEFAULT '{}'::jsonb,
    trigger_params JSONB DEFAULT '{}'::jsonb,
    enabled BOOLEAN NOT NULL,        -- no default: callers must decide explicitly
    is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT rule_ref_lowercase CHECK (ref = LOWER(ref)),
    -- ref must be exactly two dot-separated segments: "<pack>.<name>"
    CONSTRAINT rule_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
);
-- Indexes
CREATE INDEX idx_rule_ref ON rule(ref);
CREATE INDEX idx_rule_pack ON rule(pack);
CREATE INDEX idx_rule_action ON rule(action);
CREATE INDEX idx_rule_trigger ON rule(trigger);
CREATE INDEX idx_rule_enabled ON rule(enabled) WHERE enabled = TRUE;
CREATE INDEX idx_rule_is_adhoc ON rule(is_adhoc) WHERE is_adhoc = true;
CREATE INDEX idx_rule_created ON rule(created DESC);
CREATE INDEX idx_rule_trigger_enabled ON rule(trigger, enabled);
CREATE INDEX idx_rule_action_enabled ON rule(action, enabled);
CREATE INDEX idx_rule_pack_enabled ON rule(pack, enabled);
CREATE INDEX idx_rule_action_params_gin ON rule USING GIN (action_params);
CREATE INDEX idx_rule_trigger_params_gin ON rule USING GIN (trigger_params);
-- Trigger
CREATE TRIGGER update_rule_updated
BEFORE UPDATE ON rule
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE rule IS 'Rules link triggers to actions with conditions';
COMMENT ON COLUMN rule.ref IS 'Unique rule reference (format: pack.name)';
COMMENT ON COLUMN rule.label IS 'Human-readable rule name';
COMMENT ON COLUMN rule.action IS 'Action to execute when rule triggers (null if action deleted)';
COMMENT ON COLUMN rule.trigger IS 'Trigger that activates this rule (null if trigger deleted)';
COMMENT ON COLUMN rule.conditions IS 'Condition expressions to evaluate before executing action';
COMMENT ON COLUMN rule.action_params IS 'Parameter overrides for the action';
COMMENT ON COLUMN rule.trigger_params IS 'Parameter overrides for the trigger';
COMMENT ON COLUMN rule.enabled IS 'Whether this rule is active';
COMMENT ON COLUMN rule.is_adhoc IS 'True if rule was manually created (ad-hoc), false if installed from pack';
-- ============================================================================
-- Add foreign key constraints now that rule table exists
ALTER TABLE enforcement
ADD CONSTRAINT enforcement_rule_fkey
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
ALTER TABLE event
ADD CONSTRAINT event_rule_fkey
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
-- ============================================================================

View File

@@ -1,6 +1,7 @@
-- Migration: Workflow System
-- Description: Creates workflow_definition and workflow_execution tables (workflow_task_execution consolidated into execution.workflow_task JSONB)
-- Version: 20250101000007
-- Description: Creates workflow_definition and workflow_execution tables
-- (workflow_task_execution consolidated into execution.workflow_task JSONB)
-- Version: 20250101000006
-- ============================================================================
-- WORKFLOW DEFINITION TABLE

View File

@@ -0,0 +1,775 @@
-- Migration: Supporting Systems
-- Description: Creates keys, artifacts, queue_stats, pack_environment, pack_testing,
-- and webhook function tables.
-- Consolidates former migrations: 000009 (keys_artifacts), 000010 (webhook_system),
-- 000011 (pack_environments), and 000012 (pack_testing).
-- Version: 20250101000007
-- ============================================================================
-- KEY TABLE
-- ============================================================================
-- Keys hold configuration values and secrets scoped to an owner. The action
-- and sensor FKs are added later in this migration (forward references).
CREATE TABLE key (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    owner_type owner_type_enum NOT NULL,
    owner TEXT,                      -- denormalized; auto-populated by trigger below
    owner_identity BIGINT REFERENCES identity(id),
    owner_pack BIGINT REFERENCES pack(id),
    owner_pack_ref TEXT,
    owner_action BIGINT, -- Forward reference to action table
    owner_action_ref TEXT,
    owner_sensor BIGINT, -- Forward reference to sensor table
    owner_sensor_ref TEXT,
    name TEXT NOT NULL,
    encrypted BOOLEAN NOT NULL,
    encryption_key_hash TEXT,        -- hash of the encryption key, when encrypted
    value TEXT NOT NULL,             -- ciphertext when encrypted = TRUE
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT key_ref_lowercase CHECK (ref = LOWER(ref)),
    -- one or more non-empty dot-separated segments
    CONSTRAINT key_ref_format CHECK (ref ~ '^[^.]+(\.[^.]+)*$')
);
-- Unique index on owner_type, owner, name
CREATE UNIQUE INDEX idx_key_unique ON key(owner_type, owner, name);
-- Indexes
CREATE INDEX idx_key_ref ON key(ref);
CREATE INDEX idx_key_owner_type ON key(owner_type);
CREATE INDEX idx_key_owner_identity ON key(owner_identity);
CREATE INDEX idx_key_owner_pack ON key(owner_pack);
CREATE INDEX idx_key_owner_action ON key(owner_action);
CREATE INDEX idx_key_owner_sensor ON key(owner_sensor);
CREATE INDEX idx_key_created ON key(created DESC);
CREATE INDEX idx_key_owner_type_owner ON key(owner_type, owner);
CREATE INDEX idx_key_owner_identity_name ON key(owner_identity, name);
CREATE INDEX idx_key_owner_pack_name ON key(owner_pack, name);
-- Function to validate and set owner fields
-- BEFORE INSERT/UPDATE trigger on key: enforces that exactly one owner FK is
-- populated (none for 'system'), that the populated FK matches owner_type,
-- and derives the denormalized text `owner` column from the winning FK.
CREATE OR REPLACE FUNCTION validate_key_owner()
RETURNS TRIGGER AS $$
DECLARE
    owner_count INTEGER := 0;
BEGIN
    -- Count how many owner fields are set
    IF NEW.owner_identity IS NOT NULL THEN owner_count := owner_count + 1; END IF;
    IF NEW.owner_pack IS NOT NULL THEN owner_count := owner_count + 1; END IF;
    IF NEW.owner_action IS NOT NULL THEN owner_count := owner_count + 1; END IF;
    IF NEW.owner_sensor IS NOT NULL THEN owner_count := owner_count + 1; END IF;
    -- System owner should have no owner fields set
    IF NEW.owner_type = 'system' THEN
        IF owner_count > 0 THEN
            RAISE EXCEPTION 'System owner cannot have specific owner fields set';
        END IF;
        NEW.owner := 'system';
    -- All other types must have exactly one owner field set
    ELSIF owner_count != 1 THEN
        RAISE EXCEPTION 'Exactly one owner field must be set for owner_type %', NEW.owner_type;
    -- Validate owner_type matches the populated field and set owner.
    -- The per-branch IS NULL checks are NOT redundant with owner_count = 1:
    -- they catch the case where the single populated field is the wrong one
    -- for the declared owner_type.
    ELSIF NEW.owner_type = 'identity' THEN
        IF NEW.owner_identity IS NULL THEN
            RAISE EXCEPTION 'owner_identity must be set for owner_type identity';
        END IF;
        NEW.owner := NEW.owner_identity::TEXT;
    ELSIF NEW.owner_type = 'pack' THEN
        IF NEW.owner_pack IS NULL THEN
            RAISE EXCEPTION 'owner_pack must be set for owner_type pack';
        END IF;
        NEW.owner := NEW.owner_pack::TEXT;
    ELSIF NEW.owner_type = 'action' THEN
        IF NEW.owner_action IS NULL THEN
            RAISE EXCEPTION 'owner_action must be set for owner_type action';
        END IF;
        NEW.owner := NEW.owner_action::TEXT;
    ELSIF NEW.owner_type = 'sensor' THEN
        IF NEW.owner_sensor IS NULL THEN
            RAISE EXCEPTION 'owner_sensor must be set for owner_type sensor';
        END IF;
        NEW.owner := NEW.owner_sensor::TEXT;
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to validate owner fields
CREATE TRIGGER validate_key_owner_trigger
BEFORE INSERT OR UPDATE ON key
FOR EACH ROW
EXECUTE FUNCTION validate_key_owner();
-- Trigger for updated timestamp
CREATE TRIGGER update_key_updated
BEFORE UPDATE ON key
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE key IS 'Keys store configuration values and secrets with ownership scoping';
COMMENT ON COLUMN key.ref IS 'Unique key reference (format: [owner.]name)';
COMMENT ON COLUMN key.owner_type IS 'Type of owner (system, identity, pack, action, sensor)';
COMMENT ON COLUMN key.owner IS 'Owner identifier (auto-populated by trigger)';
COMMENT ON COLUMN key.owner_identity IS 'Identity owner (if owner_type=identity)';
COMMENT ON COLUMN key.owner_pack IS 'Pack owner (if owner_type=pack)';
COMMENT ON COLUMN key.owner_pack_ref IS 'Pack reference for owner_pack';
COMMENT ON COLUMN key.owner_action IS 'Action owner (if owner_type=action)';
COMMENT ON COLUMN key.owner_sensor IS 'Sensor owner (if owner_type=sensor)';
COMMENT ON COLUMN key.name IS 'Key name within owner scope';
COMMENT ON COLUMN key.encrypted IS 'Whether the value is encrypted';
COMMENT ON COLUMN key.encryption_key_hash IS 'Hash of encryption key used';
COMMENT ON COLUMN key.value IS 'The actual value (encrypted if encrypted=true)';
-- Add foreign key constraints for action and sensor references
ALTER TABLE key
ADD CONSTRAINT key_owner_action_fkey
FOREIGN KEY (owner_action) REFERENCES action(id) ON DELETE CASCADE;
ALTER TABLE key
ADD CONSTRAINT key_owner_sensor_fkey
FOREIGN KEY (owner_sensor) REFERENCES sensor(id) ON DELETE CASCADE;
-- ============================================================================
-- ARTIFACT TABLE
-- ============================================================================
-- Artifact registry: rows track ownership and retention policy; the actual
-- content presumably lives outside the database (ref is a path/URL).
CREATE TABLE artifact (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL,               -- not UNIQUE: multiple versions may share a ref
    scope owner_type_enum NOT NULL DEFAULT 'system',
    owner TEXT NOT NULL DEFAULT '',
    type artifact_type_enum NOT NULL,
    retention_policy artifact_retention_enum NOT NULL DEFAULT 'versions',
    retention_limit INTEGER NOT NULL DEFAULT 1,  -- count or duration, per policy
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_artifact_ref ON artifact(ref);
CREATE INDEX idx_artifact_scope ON artifact(scope);
CREATE INDEX idx_artifact_owner ON artifact(owner);
CREATE INDEX idx_artifact_type ON artifact(type);
CREATE INDEX idx_artifact_created ON artifact(created DESC);
CREATE INDEX idx_artifact_scope_owner ON artifact(scope, owner);
CREATE INDEX idx_artifact_type_created ON artifact(type, created DESC);
-- Trigger
CREATE TRIGGER update_artifact_updated
BEFORE UPDATE ON artifact
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE artifact IS 'Artifacts track files, logs, and outputs from executions';
COMMENT ON COLUMN artifact.ref IS 'Artifact reference/path';
COMMENT ON COLUMN artifact.scope IS 'Owner type (system, identity, pack, action, sensor)';
COMMENT ON COLUMN artifact.owner IS 'Owner identifier';
COMMENT ON COLUMN artifact.type IS 'Artifact type (file, url, progress, etc.)';
COMMENT ON COLUMN artifact.retention_policy IS 'How to retain artifacts (versions, days, hours, minutes)';
COMMENT ON COLUMN artifact.retention_limit IS 'Numeric limit for retention policy';
-- ============================================================================
-- QUEUE_STATS TABLE
-- ============================================================================
-- One stats row per action (PK doubles as the FK); counters are maintained
-- externally and this table is read for queue-ordering decisions.
CREATE TABLE queue_stats (
    action_id BIGINT PRIMARY KEY REFERENCES action(id) ON DELETE CASCADE,
    queue_length INTEGER NOT NULL DEFAULT 0,
    active_count INTEGER NOT NULL DEFAULT 0,
    max_concurrent INTEGER NOT NULL DEFAULT 1,
    oldest_enqueued_at TIMESTAMPTZ,           -- NULL when the queue is empty
    total_enqueued BIGINT NOT NULL DEFAULT 0,  -- monotonic lifetime counter
    total_completed BIGINT NOT NULL DEFAULT 0, -- monotonic lifetime counter
    last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_queue_stats_last_updated ON queue_stats(last_updated);
-- Comments
COMMENT ON TABLE queue_stats IS 'Real-time queue statistics for action execution ordering';
COMMENT ON COLUMN queue_stats.action_id IS 'Foreign key to action table';
COMMENT ON COLUMN queue_stats.queue_length IS 'Number of executions waiting in queue';
COMMENT ON COLUMN queue_stats.active_count IS 'Number of currently running executions';
COMMENT ON COLUMN queue_stats.max_concurrent IS 'Maximum concurrent executions allowed';
COMMENT ON COLUMN queue_stats.oldest_enqueued_at IS 'Timestamp of oldest queued execution (NULL if queue empty)';
COMMENT ON COLUMN queue_stats.total_enqueued IS 'Total executions enqueued since queue creation';
COMMENT ON COLUMN queue_stats.total_completed IS 'Total executions completed since queue creation';
COMMENT ON COLUMN queue_stats.last_updated IS 'Timestamp of last statistics update';
-- ============================================================================
-- PACK ENVIRONMENT TABLE
-- ============================================================================
-- One row per (pack, runtime) pair: tracks the isolated dependency
-- environment (e.g. Python venv, node_modules) provisioned for that pair.
CREATE TABLE IF NOT EXISTS pack_environment (
    id BIGSERIAL PRIMARY KEY,
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT NOT NULL,
    runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
    runtime_ref TEXT NOT NULL,
    env_path TEXT NOT NULL,          -- filesystem location of the environment
    status pack_environment_status_enum NOT NULL DEFAULT 'pending',
    installed_at TIMESTAMPTZ,
    last_verified TIMESTAMPTZ,
    install_log TEXT,
    install_error TEXT,
    metadata JSONB DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(pack, runtime)            -- at most one environment per pack/runtime
);
-- Indexes
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack ON pack_environment(pack);
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime ON pack_environment(runtime);
CREATE INDEX IF NOT EXISTS idx_pack_environment_status ON pack_environment(status);
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack_ref ON pack_environment(pack_ref);
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime_ref ON pack_environment(runtime_ref);
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack_runtime ON pack_environment(pack, runtime);
-- Trigger for updated timestamp
CREATE TRIGGER update_pack_environment_updated
BEFORE UPDATE ON pack_environment
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE pack_environment IS 'Tracks pack-specific runtime environments for dependency isolation';
COMMENT ON COLUMN pack_environment.pack IS 'Pack that owns this environment';
COMMENT ON COLUMN pack_environment.pack_ref IS 'Pack reference for quick lookup';
COMMENT ON COLUMN pack_environment.runtime IS 'Runtime used for this environment';
COMMENT ON COLUMN pack_environment.runtime_ref IS 'Runtime reference for quick lookup';
COMMENT ON COLUMN pack_environment.env_path IS 'Filesystem path to the environment directory (e.g., /opt/attune/packenvs/mypack/python)';
COMMENT ON COLUMN pack_environment.status IS 'Current installation status';
COMMENT ON COLUMN pack_environment.installed_at IS 'When the environment was successfully installed';
COMMENT ON COLUMN pack_environment.last_verified IS 'Last time the environment was verified as working';
COMMENT ON COLUMN pack_environment.install_log IS 'Installation output logs';
COMMENT ON COLUMN pack_environment.install_error IS 'Error message if installation failed';
COMMENT ON COLUMN pack_environment.metadata IS 'Additional metadata (installed packages, versions, etc.)';
-- ============================================================================
-- PACK ENVIRONMENT: Update existing runtimes with installer metadata
-- ============================================================================
-- Python runtime installers
UPDATE runtime
SET installers = jsonb_build_object(
'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
'installers', jsonb_build_array(
jsonb_build_object(
'name', 'create_venv',
'description', 'Create Python virtual environment',
'command', 'python3',
'args', jsonb_build_array('-m', 'venv', '{env_path}'),
'cwd', '{pack_path}',
'env', jsonb_build_object(),
'order', 1,
'optional', false
),
jsonb_build_object(
'name', 'upgrade_pip',
'description', 'Upgrade pip to latest version',
'command', '{env_path}/bin/pip',
'args', jsonb_build_array('install', '--upgrade', 'pip'),
'cwd', '{pack_path}',
'env', jsonb_build_object(),
'order', 2,
'optional', true
),
jsonb_build_object(
'name', 'install_requirements',
'description', 'Install pack Python dependencies',
'command', '{env_path}/bin/pip',
'args', jsonb_build_array('install', '-r', '{pack_path}/requirements.txt'),
'cwd', '{pack_path}',
'env', jsonb_build_object(),
'order', 3,
'optional', false,
'condition', jsonb_build_object(
'file_exists', '{pack_path}/requirements.txt'
)
)
),
'executable_templates', jsonb_build_object(
'python', '{env_path}/bin/python',
'pip', '{env_path}/bin/pip'
)
)
WHERE ref = 'core.python';
-- Node.js runtime installers
UPDATE runtime
SET installers = jsonb_build_object(
'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
'installers', jsonb_build_array(
jsonb_build_object(
'name', 'npm_install',
'description', 'Install Node.js dependencies',
'command', 'npm',
'args', jsonb_build_array('install', '--prefix', '{env_path}'),
'cwd', '{pack_path}',
'env', jsonb_build_object(
'NODE_PATH', '{env_path}/node_modules'
),
'order', 1,
'optional', false,
'condition', jsonb_build_object(
'file_exists', '{pack_path}/package.json'
)
)
),
'executable_templates', jsonb_build_object(
'node', 'node',
'npm', 'npm'
),
'env_vars', jsonb_build_object(
'NODE_PATH', '{env_path}/node_modules'
)
)
WHERE ref = 'core.nodejs';
-- Shell runtime (no environment needed, uses system shell)
-- requires_environment=false: actions run directly with the system sh/bash.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    'executable_templates', jsonb_build_object(
        'sh', 'sh',
        'bash', 'bash'
    ),
    'requires_environment', false
)
WHERE ref = 'core.shell';
-- Native runtime (no environment needed, binaries are standalone)
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    'executable_templates', jsonb_build_object(),
    'requires_environment', false
)
WHERE ref = 'core.native';
-- Built-in sensor runtime (internal, no environment)
-- Minimal object: no base path or executables; nothing is ever installed.
UPDATE runtime
SET installers = jsonb_build_object(
    'installers', jsonb_build_array(),
    'requires_environment', false
)
WHERE ref = 'core.sensor.builtin';
-- ============================================================================
-- PACK ENVIRONMENT: Helper functions
-- ============================================================================
-- Function to get environment path for a pack/runtime combination
-- Expands the runtime's base_path_template (or the conventional default) by
-- substituting {pack_ref}, {runtime_ref}, and {runtime_name_lower}.
CREATE OR REPLACE FUNCTION get_pack_environment_path(p_pack_ref TEXT, p_runtime_ref TEXT)
RETURNS TEXT AS $$
DECLARE
    v_runtime_name TEXT;
    v_base_template TEXT;
    v_result TEXT;
BEGIN
    -- Get runtime name and base path template
    SELECT
        LOWER(name),
        installers->>'base_path_template'
    INTO v_runtime_name, v_base_template
    FROM runtime
    WHERE ref = p_runtime_ref;
    -- Fall back to the conventional layout when the runtime defines none
    IF v_base_template IS NULL THEN
        v_base_template := '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}';
    END IF;
    -- Replace template variables.
    -- NOTE: if p_runtime_ref does not exist, v_runtime_name is NULL and the
    -- final REPLACE propagates NULL — callers must handle a NULL result.
    v_result := v_base_template;
    v_result := REPLACE(v_result, '{pack_ref}', p_pack_ref);
    v_result := REPLACE(v_result, '{runtime_ref}', p_runtime_ref);
    v_result := REPLACE(v_result, '{runtime_name_lower}', v_runtime_name);
    RETURN v_result;
END;
-- BUG FIX: was declared IMMUTABLE, but the function reads the runtime table,
-- so its result can change whenever that table changes. IMMUTABLE permits the
-- planner to pre-evaluate and cache results (e.g. in prepared plans or index
-- expressions), returning stale paths. STABLE is the correct volatility for
-- a read-only function whose result depends on database contents.
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION get_pack_environment_path IS 'Calculate the filesystem path for a pack runtime environment';
-- Function to check if a runtime requires an environment
-- TRUE by default for known runtimes (missing key = needs an environment);
-- FALSE when the runtime ref is unknown or explicitly opts out.
CREATE OR REPLACE FUNCTION runtime_requires_environment(p_runtime_ref TEXT)
RETURNS BOOLEAN AS $$
BEGIN
    -- Inner COALESCE: missing 'requires_environment' key defaults to TRUE.
    -- Outer COALESCE: unknown runtime ref (scalar subquery -> NULL) gives FALSE.
    RETURN COALESCE(
        (SELECT COALESCE((installers->>'requires_environment')::boolean, true)
         FROM runtime
         WHERE ref = p_runtime_ref),
        false
    );
END;
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION runtime_requires_environment IS 'Check if a runtime needs a pack-specific environment';
-- ============================================================================
-- PACK ENVIRONMENT: Status view
-- ============================================================================
-- Joins pack_environment to pack and runtime, adding two derived columns:
-- needs_verification (ready but not verified in 7 days) and a coarse
-- health_status bucket derived from the environment status enum.
CREATE OR REPLACE VIEW v_pack_environment_status AS
SELECT
    pe.id,
    pe.pack,
    p.ref AS pack_ref,
    p.label AS pack_name,
    pe.runtime,
    r.ref AS runtime_ref,
    r.name AS runtime_name,
    pe.env_path,
    pe.status,
    pe.installed_at,
    pe.last_verified,
    -- NOTE: NULL last_verified yields false here (never-verified environments
    -- are not flagged) — the comparison with NULL is not true.
    CASE
        WHEN pe.status = 'ready' AND pe.last_verified < NOW() - INTERVAL '7 days' THEN true
        ELSE false
    END AS needs_verification,
    CASE
        WHEN pe.status = 'ready' THEN 'healthy'
        WHEN pe.status = 'failed' THEN 'unhealthy'
        WHEN pe.status IN ('pending', 'installing') THEN 'provisioning'
        WHEN pe.status = 'outdated' THEN 'needs_update'
        ELSE 'unknown'
    END AS health_status,
    pe.install_error,
    pe.created,
    pe.updated
FROM pack_environment pe
JOIN pack p ON pe.pack = p.id
JOIN runtime r ON pe.runtime = r.id;
COMMENT ON VIEW v_pack_environment_status IS 'Consolidated view of pack environment status with health indicators';
-- ============================================================================
-- PACK TEST EXECUTION TABLE
-- ============================================================================
-- Append-only audit log of pack test runs; one row per run, with the full
-- structured result kept in JSONB alongside denormalized summary counters.
CREATE TABLE IF NOT EXISTS pack_test_execution (
    id BIGSERIAL PRIMARY KEY,
    pack_id BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_version VARCHAR(50) NOT NULL,
    execution_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    trigger_reason VARCHAR(50) NOT NULL, -- 'install', 'update', 'manual', 'validation'
    total_tests INT NOT NULL,
    passed INT NOT NULL,
    failed INT NOT NULL,
    skipped INT NOT NULL,
    pass_rate DECIMAL(5,4) NOT NULL, -- 0.0000 to 1.0000
    duration_ms BIGINT NOT NULL,
    result JSONB NOT NULL, -- Full test result structure
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- NOTE(review): counts are individually non-negative but passed+failed+
    -- skipped = total_tests is not enforced — confirm writers guarantee it.
    CONSTRAINT valid_test_counts CHECK (total_tests >= 0 AND passed >= 0 AND failed >= 0 AND skipped >= 0),
    CONSTRAINT valid_pass_rate CHECK (pass_rate >= 0.0 AND pass_rate <= 1.0),
    CONSTRAINT valid_trigger_reason CHECK (trigger_reason IN ('install', 'update', 'manual', 'validation'))
);
-- Indexes for efficient queries
CREATE INDEX idx_pack_test_execution_pack_id ON pack_test_execution(pack_id);
CREATE INDEX idx_pack_test_execution_time ON pack_test_execution(execution_time DESC);
CREATE INDEX idx_pack_test_execution_pass_rate ON pack_test_execution(pass_rate);
CREATE INDEX idx_pack_test_execution_trigger ON pack_test_execution(trigger_reason);
-- Comments for documentation
COMMENT ON TABLE pack_test_execution IS 'Tracks pack test execution results for validation and auditing';
COMMENT ON COLUMN pack_test_execution.pack_id IS 'Reference to the pack being tested';
COMMENT ON COLUMN pack_test_execution.pack_version IS 'Version of the pack at test time';
COMMENT ON COLUMN pack_test_execution.trigger_reason IS 'What triggered the test: install, update, manual, validation';
COMMENT ON COLUMN pack_test_execution.pass_rate IS 'Percentage of tests passed (0.0 to 1.0)';
COMMENT ON COLUMN pack_test_execution.result IS 'Full JSON structure with detailed test results';
-- Pack test result summary view (all test executions with pack info)
-- One row per test execution, annotated with pack identity and a per-pack
-- recency rank (rn = 1 is the most recent run; used by pack_latest_test).
CREATE OR REPLACE VIEW pack_test_summary AS
SELECT
    p.id AS pack_id,
    p.ref AS pack_ref,
    p.label AS pack_label,
    pte.id AS test_execution_id,
    pte.pack_version,
    pte.execution_time AS test_time,
    pte.trigger_reason,
    pte.total_tests,
    pte.passed,
    pte.failed,
    pte.skipped,
    pte.pass_rate,
    pte.duration_ms,
    ROW_NUMBER() OVER (PARTITION BY p.id ORDER BY pte.execution_time DESC) AS rn
FROM pack p
-- FIX: was LEFT JOIN + "WHERE pte.id IS NOT NULL", which silently converts
-- the outer join into an inner join. An explicit INNER JOIN produces the
-- identical row set and states the intent (packs without runs are excluded).
INNER JOIN pack_test_execution pte ON p.id = pte.pack_id;
COMMENT ON VIEW pack_test_summary IS 'Summary of all pack test executions with pack details';
-- Latest test results per pack view
-- Keeps only the most recent run per pack (rn = 1 from pack_test_summary).
CREATE OR REPLACE VIEW pack_latest_test AS
SELECT
    pack_id,
    pack_ref,
    pack_label,
    test_execution_id,
    pack_version,
    test_time,
    trigger_reason,
    total_tests,
    passed,
    failed,
    skipped,
    pass_rate,
    duration_ms
FROM pack_test_summary
WHERE rn = 1;
COMMENT ON VIEW pack_latest_test IS 'Latest test results for each pack';
-- Function to get pack test statistics
-- Aggregates all test runs for one pack into a single summary row. For a pack
-- with no runs: counts are 0, averages/timestamps/last_test_passed are NULL.
CREATE OR REPLACE FUNCTION get_pack_test_stats(p_pack_id BIGINT)
RETURNS TABLE (
    total_executions BIGINT,
    successful_executions BIGINT,
    failed_executions BIGINT,
    avg_pass_rate DECIMAL,
    avg_duration_ms BIGINT,
    last_test_time TIMESTAMPTZ,
    last_test_passed BOOLEAN
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        COUNT(*)::BIGINT AS total_executions,
        -- "successful" = every test in the run passed (skips count against it)
        COUNT(*) FILTER (WHERE passed = total_tests)::BIGINT AS successful_executions,
        COUNT(*) FILTER (WHERE failed > 0)::BIGINT AS failed_executions,
        AVG(pass_rate) AS avg_pass_rate,
        AVG(duration_ms)::BIGINT AS avg_duration_ms,
        MAX(execution_time) AS last_test_time,
        -- Scalar subquery: outcome of the most recent run (NULL when none)
        (SELECT failed = 0 FROM pack_test_execution
         WHERE pack_id = p_pack_id
         ORDER BY execution_time DESC
         LIMIT 1) AS last_test_passed
    FROM pack_test_execution
    WHERE pack_id = p_pack_id;
END;
-- FIX: declared STABLE (was the VOLATILE default). The function only reads
-- pack_test_execution, and correct volatility lets the planner optimize calls.
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION get_pack_test_stats IS 'Get statistical summary of test executions for a pack';
-- Function to check if pack has recent passing tests
-- Returns TRUE when the pack has at least one run within the last
-- p_hours_ago hours that executed at least one test and had zero failures.
CREATE OR REPLACE FUNCTION pack_has_passing_tests(
    p_pack_id BIGINT,
    p_hours_ago INT DEFAULT 24
)
RETURNS BOOLEAN AS $$
BEGIN
    -- EXISTS stops at the first qualifying row; packs with no runs in the
    -- window yield FALSE. Runs with zero tests never count as passing.
    RETURN EXISTS(
        SELECT 1
        FROM pack_test_execution
        WHERE pack_id = p_pack_id
          -- make_interval replaces the string-concat-and-cast idiom
          -- ((p_hours_ago || ' hours')::INTERVAL): type-safe and clearer.
          AND execution_time > NOW() - make_interval(hours => p_hours_ago)
          AND failed = 0
          AND total_tests > 0
    );
END;
-- FIX: declared STABLE (was the VOLATILE default) — the function is read-only
-- and NOW() is itself stable within a transaction.
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION pack_has_passing_tests IS 'Check if pack has recent passing test executions';
-- Add trigger to update pack metadata on test execution
-- Intentional no-op hook: fires AFTER INSERT so future logic (e.g. stamping
-- a last_tested column on pack) can be added without a schema migration.
CREATE OR REPLACE FUNCTION update_pack_test_metadata()
RETURNS TRIGGER AS $$
BEGIN
    -- Could update pack table with last_tested timestamp if we add that column
    -- For now, just a placeholder for future functionality
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trigger_update_pack_test_metadata
    AFTER INSERT ON pack_test_execution
    FOR EACH ROW
    EXECUTE FUNCTION update_pack_test_metadata();
COMMENT ON TRIGGER trigger_update_pack_test_metadata ON pack_test_execution IS 'Updates pack metadata when tests are executed';
-- ============================================================================
-- WEBHOOK FUNCTIONS
-- ============================================================================
-- Drop existing functions to avoid signature conflicts
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT, JSONB);
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS disable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS regenerate_trigger_webhook_key(BIGINT);
-- Function to enable webhooks for a trigger
-- Enables webhook delivery on a trigger: reuses an existing webhook key or
-- generates one on first enable, stores the given config, and returns the
-- enabled flag, the key, and the callable webhook URL.
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT,
    p_config JSONB DEFAULT '{}'::jsonb
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(255),
    webhook_url TEXT
) AS $$
DECLARE
    v_webhook_key VARCHAR(255);
    -- NOTE(review): base URL is hardcoded, so the returned webhook_url is only
    -- valid for local deployments — confirm callers substitute the real host.
    v_api_base_url TEXT := 'http://localhost:8080'; -- Default, should be configured
BEGIN
    -- Check if trigger exists
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    -- Generate webhook key if one doesn't exist.
    -- NOTE(review): read-then-update is not race-safe under concurrent calls;
    -- acceptable for admin/migration use, but worth confirming.
    SELECT t.webhook_key INTO v_webhook_key
    FROM trigger t
    WHERE t.id = p_trigger_id;
    IF v_webhook_key IS NULL THEN
        -- generate_webhook_key() is defined elsewhere in the schema
        v_webhook_key := generate_webhook_key();
    END IF;
    -- Update trigger to enable webhooks
    UPDATE trigger
    SET
        webhook_enabled = TRUE,
        webhook_key = v_webhook_key,
        webhook_config = p_config,
        updated = NOW()
    WHERE id = p_trigger_id;
    -- Return webhook details
    RETURN QUERY SELECT
        TRUE,
        v_webhook_key,
        v_api_base_url || '/api/v1/webhooks/' || v_webhook_key;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION enable_trigger_webhook(BIGINT, JSONB) IS
'Enables webhooks for a trigger with optional configuration. Generates a new webhook key if one does not exist. Returns webhook details.';
-- Function to disable webhooks for a trigger
-- Turns webhook delivery off and clears the key so it no longer appears in
-- API responses. Raises if the trigger does not exist; otherwise returns TRUE.
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    UPDATE trigger
    SET
        webhook_enabled = FALSE,
        webhook_key = NULL,
        updated = NOW()
    WHERE id = p_trigger_id;
    -- Zero rows touched means the trigger id is unknown — same error as the
    -- previous explicit existence pre-check, without the extra query.
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
'Disables webhooks for a trigger. Webhook key is removed when disabled.';
-- Function to regenerate webhook key for a trigger
-- Rotates the webhook key on a webhook-enabled trigger, invalidating the old
-- key. Returns the new key plus whether a previous key existed.
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(255),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_new_key VARCHAR(255);
    v_old_key VARCHAR(255);
    v_webhook_enabled BOOLEAN;
BEGIN
    -- Load current state; a missing row doubles as the existence check.
    SELECT t.webhook_key, t.webhook_enabled
    INTO v_old_key, v_webhook_enabled
    FROM trigger t
    WHERE t.id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    -- Rotation only makes sense when webhooks are active.
    IF NOT v_webhook_enabled THEN
        RAISE EXCEPTION 'Webhooks are not enabled for trigger %', p_trigger_id;
    END IF;
    -- Swap in a freshly generated key.
    v_new_key := generate_webhook_key();
    UPDATE trigger
    SET webhook_key = v_new_key,
        updated = NOW()
    WHERE id = p_trigger_id;
    RETURN QUERY SELECT
        v_new_key,
        (v_old_key IS NOT NULL);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
'Regenerates webhook key for a trigger. Returns new key and whether a previous key was revoked.';
-- Verify all webhook functions exist
-- Post-migration sanity check: fail loudly if any expected webhook function
-- is missing from the current schema.
DO $$
DECLARE
    v_fn TEXT;
BEGIN
    FOREACH v_fn IN ARRAY ARRAY[
        'enable_trigger_webhook',
        'disable_trigger_webhook',
        'regenerate_trigger_webhook_key'
    ] LOOP
        IF NOT EXISTS (
            SELECT 1 FROM pg_proc p
            JOIN pg_namespace n ON p.pronamespace = n.oid
            WHERE n.nspname = current_schema()
            AND p.proname = v_fn
        ) THEN
            RAISE EXCEPTION '% function not found after migration', v_fn;
        END IF;
    END LOOP;
    RAISE NOTICE 'All webhook functions successfully created';
END $$;

View File

@@ -1,6 +1,6 @@
-- Migration: LISTEN/NOTIFY Triggers
-- Description: Consolidated PostgreSQL LISTEN/NOTIFY triggers for real-time event notifications
-- Version: 20250101000013
-- Version: 20250101000008
-- ============================================================================
-- EXECUTION CHANGE NOTIFICATION

View File

@@ -1,75 +0,0 @@
-- Migration: Supporting Tables and Indexes
-- Description: Creates notification and artifact tables plus performance optimization indexes
-- Version: 20250101000005
-- ============================================================================
-- NOTIFICATION TABLE
-- ============================================================================
-- Generic notification outbox: one row per entity change. An AFTER INSERT
-- trigger on this table (notify_on_insert, defined below in this file) fans
-- each row out to LISTEN/NOTIFY subscribers on `channel`.
CREATE TABLE notification (
    id BIGSERIAL PRIMARY KEY,
    channel TEXT NOT NULL,        -- pg_notify channel to publish on (typically a table name)
    entity_type TEXT NOT NULL,    -- type of the changed entity (table name)
    entity TEXT NOT NULL,         -- entity identifier (id or ref)
    activity TEXT NOT NULL,       -- e.g. 'created', 'updated', 'completed'
    state notification_status_enum NOT NULL DEFAULT 'created',
    content JSONB,                -- optional payload; NOT included in the pg_notify message
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
-- Single-column lookups plus composite paths for channel+state,
-- entity_type+entity, and state+newest-first access.
CREATE INDEX idx_notification_channel ON notification(channel);
CREATE INDEX idx_notification_entity_type ON notification(entity_type);
CREATE INDEX idx_notification_entity ON notification(entity);
CREATE INDEX idx_notification_state ON notification(state);
CREATE INDEX idx_notification_created ON notification(created DESC);
CREATE INDEX idx_notification_channel_state ON notification(channel, state);
CREATE INDEX idx_notification_entity_type_entity ON notification(entity_type, entity);
CREATE INDEX idx_notification_state_created ON notification(state, created DESC);
CREATE INDEX idx_notification_content_gin ON notification USING GIN (content);
-- Trigger
-- Keeps `updated` current on every row update; update_updated_column() is a
-- shared helper, presumably defined in an earlier migration.
CREATE TRIGGER update_notification_updated
    BEFORE UPDATE ON notification
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Function for pg_notify on notification insert.
-- The notification payload intentionally carries only identifying fields
-- (id, entity_type, entity, activity); the `content` column is not included,
-- so listeners must read the row if they need the payload.
CREATE OR REPLACE FUNCTION notify_on_insert()
RETURNS TRIGGER AS $$
BEGIN
    -- Publish directly; no intermediate variable needed.
    PERFORM pg_notify(
        NEW.channel,
        json_build_object(
            'id', NEW.id,
            'entity_type', NEW.entity_type,
            'entity', NEW.entity,
            'activity', NEW.activity
        )::text
    );
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to send pg_notify on notification insert.
-- AFTER INSERT so the row is already committed-in-progress when listeners
-- are signalled; fires once per inserted row.
CREATE TRIGGER notify_on_notification_insert
    AFTER INSERT ON notification
    FOR EACH ROW
    EXECUTE FUNCTION notify_on_insert();
-- Comments
COMMENT ON TABLE notification IS 'System notifications about entity changes for real-time updates';
COMMENT ON COLUMN notification.channel IS 'Notification channel (typically table name)';
COMMENT ON COLUMN notification.entity_type IS 'Type of entity (table name)';
COMMENT ON COLUMN notification.entity IS 'Entity identifier (typically ID or ref)';
COMMENT ON COLUMN notification.activity IS 'Activity type (e.g., "created", "updated", "completed")';
COMMENT ON COLUMN notification.state IS 'Processing state of notification';
COMMENT ON COLUMN notification.content IS 'Optional notification payload data';

View File

@@ -1,200 +0,0 @@
-- Migration: Keys and Artifacts
-- Description: Creates key table for secrets management and artifact table for execution outputs
-- Version: 20250101000009
-- ============================================================================
-- KEY TABLE
-- ============================================================================
-- Keys store configuration values and secrets, scoped to exactly one owner.
-- The denormalized `owner` column is auto-populated by validate_key_owner()
-- (defined later in this file) from whichever owner_* column is set.
CREATE TABLE key (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    owner_type owner_type_enum NOT NULL,
    owner TEXT,                                   -- derived by trigger; nullable only pre-trigger
    owner_identity BIGINT REFERENCES identity(id),
    owner_pack BIGINT REFERENCES pack(id),
    owner_pack_ref TEXT,
    owner_action BIGINT, -- Forward reference to action table (FK added below via ALTER)
    owner_action_ref TEXT,
    owner_sensor BIGINT, -- Forward reference to sensor table (FK added below via ALTER)
    owner_sensor_ref TEXT,
    name TEXT NOT NULL,
    encrypted BOOLEAN NOT NULL,                   -- whether `value` is stored encrypted
    encryption_key_hash TEXT,
    value TEXT NOT NULL,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    CONSTRAINT key_ref_lowercase CHECK (ref = LOWER(ref)),
    CONSTRAINT key_ref_format CHECK (ref ~ '^[^.]+(\.[^.]+)*$')
);
-- Unique index on owner_type, owner, name: one key name per owner scope.
-- Relies on `owner` being populated consistently by the trigger.
CREATE UNIQUE INDEX idx_key_unique ON key(owner_type, owner, name);
-- Indexes
CREATE INDEX idx_key_ref ON key(ref);
CREATE INDEX idx_key_owner_type ON key(owner_type);
CREATE INDEX idx_key_owner_identity ON key(owner_identity);
CREATE INDEX idx_key_owner_pack ON key(owner_pack);
CREATE INDEX idx_key_owner_action ON key(owner_action);
CREATE INDEX idx_key_owner_sensor ON key(owner_sensor);
CREATE INDEX idx_key_created ON key(created DESC);
CREATE INDEX idx_key_owner_type_owner ON key(owner_type, owner);
CREATE INDEX idx_key_owner_identity_name ON key(owner_identity, name);
CREATE INDEX idx_key_owner_pack_name ON key(owner_pack, name);
-- Function to validate and set owner fields.
-- Enforces that exactly one owner_* column matches owner_type (none for
-- 'system') and derives the denormalized `owner` text column from it.
CREATE OR REPLACE FUNCTION validate_key_owner()
RETURNS TRIGGER AS $$
DECLARE
    populated INTEGER;
BEGIN
    -- How many specific owner columns are set on this row.
    populated :=
          (NEW.owner_identity IS NOT NULL)::int
        + (NEW.owner_pack IS NOT NULL)::int
        + (NEW.owner_action IS NOT NULL)::int
        + (NEW.owner_sensor IS NOT NULL)::int;

    -- 'system' keys carry no specific owner; `owner` is the literal 'system'.
    IF NEW.owner_type = 'system' THEN
        IF populated > 0 THEN
            RAISE EXCEPTION 'System owner cannot have specific owner fields set';
        END IF;
        NEW.owner := 'system';
        RETURN NEW;
    END IF;

    -- Every non-system owner type requires exactly one owner column.
    IF populated != 1 THEN
        RAISE EXCEPTION 'Exactly one owner field must be set for owner_type %', NEW.owner_type;
    END IF;

    -- The populated column must be the one matching owner_type; copy its id
    -- into the denormalized `owner` column.
    CASE NEW.owner_type
        WHEN 'identity' THEN
            IF NEW.owner_identity IS NULL THEN
                RAISE EXCEPTION 'owner_identity must be set for owner_type identity';
            END IF;
            NEW.owner := NEW.owner_identity::TEXT;
        WHEN 'pack' THEN
            IF NEW.owner_pack IS NULL THEN
                RAISE EXCEPTION 'owner_pack must be set for owner_type pack';
            END IF;
            NEW.owner := NEW.owner_pack::TEXT;
        WHEN 'action' THEN
            IF NEW.owner_action IS NULL THEN
                RAISE EXCEPTION 'owner_action must be set for owner_type action';
            END IF;
            NEW.owner := NEW.owner_action::TEXT;
        WHEN 'sensor' THEN
            IF NEW.owner_sensor IS NULL THEN
                RAISE EXCEPTION 'owner_sensor must be set for owner_type sensor';
            END IF;
            NEW.owner := NEW.owner_sensor::TEXT;
        ELSE
            NULL;  -- any other enum value: original code also left `owner` untouched
    END CASE;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to validate owner fields (BEFORE so it can rewrite NEW.owner).
CREATE TRIGGER validate_key_owner_trigger
    BEFORE INSERT OR UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION validate_key_owner();
-- Trigger for updated timestamp
CREATE TRIGGER update_key_updated
    BEFORE UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE key IS 'Keys store configuration values and secrets with ownership scoping';
COMMENT ON COLUMN key.ref IS 'Unique key reference (format: [owner.]name)';
COMMENT ON COLUMN key.owner_type IS 'Type of owner (system, identity, pack, action, sensor)';
COMMENT ON COLUMN key.owner IS 'Owner identifier (auto-populated by trigger)';
COMMENT ON COLUMN key.owner_identity IS 'Identity owner (if owner_type=identity)';
COMMENT ON COLUMN key.owner_pack IS 'Pack owner (if owner_type=pack)';
COMMENT ON COLUMN key.owner_pack_ref IS 'Pack reference for owner_pack';
COMMENT ON COLUMN key.owner_action IS 'Action owner (if owner_type=action)';
COMMENT ON COLUMN key.owner_sensor IS 'Sensor owner (if owner_type=sensor)';
COMMENT ON COLUMN key.name IS 'Key name within owner scope';
COMMENT ON COLUMN key.encrypted IS 'Whether the value is encrypted';
COMMENT ON COLUMN key.encryption_key_hash IS 'Hash of encryption key used';
COMMENT ON COLUMN key.value IS 'The actual value (encrypted if encrypted=true)';
-- Add foreign key constraints for action and sensor references.
-- Done via ALTER because action/sensor are created after key in migration
-- order (the columns above are declared as forward references).
ALTER TABLE key
    ADD CONSTRAINT key_owner_action_fkey
    FOREIGN KEY (owner_action) REFERENCES action(id) ON DELETE CASCADE;
ALTER TABLE key
    ADD CONSTRAINT key_owner_sensor_fkey
    FOREIGN KEY (owner_sensor) REFERENCES sensor(id) ON DELETE CASCADE;
-- ============================================================================
-- ARTIFACT TABLE
-- ============================================================================
-- Artifacts track files, logs, and other outputs; retention is expressed as
-- a policy kind plus a numeric limit (see retention_* columns).
CREATE TABLE artifact (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL,                 -- artifact reference/path; not unique here
    scope owner_type_enum NOT NULL DEFAULT 'system',
    owner TEXT NOT NULL DEFAULT '',    -- empty string (not NULL) means system scope
    type artifact_type_enum NOT NULL,
    retention_policy artifact_retention_enum NOT NULL DEFAULT 'versions',
    retention_limit INTEGER NOT NULL DEFAULT 1,  -- count or duration, per policy
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_artifact_ref ON artifact(ref);
CREATE INDEX idx_artifact_scope ON artifact(scope);
CREATE INDEX idx_artifact_owner ON artifact(owner);
CREATE INDEX idx_artifact_type ON artifact(type);
CREATE INDEX idx_artifact_created ON artifact(created DESC);
CREATE INDEX idx_artifact_scope_owner ON artifact(scope, owner);
CREATE INDEX idx_artifact_type_created ON artifact(type, created DESC);
-- Trigger
CREATE TRIGGER update_artifact_updated
    BEFORE UPDATE ON artifact
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE artifact IS 'Artifacts track files, logs, and outputs from executions';
COMMENT ON COLUMN artifact.ref IS 'Artifact reference/path';
COMMENT ON COLUMN artifact.scope IS 'Owner type (system, identity, pack, action, sensor)';
COMMENT ON COLUMN artifact.owner IS 'Owner identifier';
COMMENT ON COLUMN artifact.type IS 'Artifact type (file, url, progress, etc.)';
COMMENT ON COLUMN artifact.retention_policy IS 'How to retain artifacts (versions, days, hours, minutes)';
COMMENT ON COLUMN artifact.retention_limit IS 'Numeric limit for retention policy';
-- ============================================================================
-- QUEUE_STATS TABLE
-- ============================================================================
-- One stats row per action (PK doubles as the FK), removed with the action.
-- NOTE(review): there is no BEFORE UPDATE trigger here, so last_updated is
-- presumably maintained explicitly by whatever code updates these rows —
-- confirm against the queue functions.
CREATE TABLE queue_stats (
    action_id BIGINT PRIMARY KEY REFERENCES action(id) ON DELETE CASCADE,
    queue_length INTEGER NOT NULL DEFAULT 0,      -- executions currently waiting
    active_count INTEGER NOT NULL DEFAULT 0,      -- executions currently running
    max_concurrent INTEGER NOT NULL DEFAULT 1,
    oldest_enqueued_at TIMESTAMPTZ,               -- NULL when the queue is empty
    total_enqueued BIGINT NOT NULL DEFAULT 0,     -- lifetime counter
    total_completed BIGINT NOT NULL DEFAULT 0,    -- lifetime counter
    last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
CREATE INDEX idx_queue_stats_last_updated ON queue_stats(last_updated);
-- Comments
COMMENT ON TABLE queue_stats IS 'Real-time queue statistics for action execution ordering';
COMMENT ON COLUMN queue_stats.action_id IS 'Foreign key to action table';
COMMENT ON COLUMN queue_stats.queue_length IS 'Number of executions waiting in queue';
COMMENT ON COLUMN queue_stats.active_count IS 'Number of currently running executions';
COMMENT ON COLUMN queue_stats.max_concurrent IS 'Maximum concurrent executions allowed';
COMMENT ON COLUMN queue_stats.oldest_enqueued_at IS 'Timestamp of oldest queued execution (NULL if queue empty)';
COMMENT ON COLUMN queue_stats.total_enqueued IS 'Total executions enqueued since queue creation';
COMMENT ON COLUMN queue_stats.total_completed IS 'Total executions completed since queue creation';
COMMENT ON COLUMN queue_stats.last_updated IS 'Timestamp of last statistics update';

View File

@@ -1,8 +1,11 @@
-- Migration: TimescaleDB Entity History Tracking
-- Migration: TimescaleDB Entity History and Analytics
-- Description: Creates append-only history hypertables for execution, worker, enforcement,
-- and event tables. Uses JSONB diff format to track field-level changes via
-- PostgreSQL triggers. See docs/plans/timescaledb-entity-history.md for full design.
-- Version: 20260226100000
-- PostgreSQL triggers. Includes continuous aggregates for dashboard analytics.
-- Consolidates former migrations: 20260226100000 (entity_history_timescaledb),
-- 20260226200000 (continuous_aggregates), and 20260226300000 (fix + result digest).
-- See docs/plans/timescaledb-entity-history.md for full design.
-- Version: 20250101000009
-- ============================================================================
-- EXTENSION
@@ -10,6 +13,31 @@
CREATE EXTENSION IF NOT EXISTS timescaledb;
-- ============================================================================
-- HELPER FUNCTIONS
-- ============================================================================
-- Returns a small {digest, size, type} object instead of the full JSONB value.
-- Used in history triggers for columns that can be arbitrarily large (e.g. result).
-- The full value is always available on the live row.
CREATE OR REPLACE FUNCTION _jsonb_digest_summary(val JSONB)
RETURNS JSONB AS $$
BEGIN
    -- NULL in, NULL out; otherwise summarize the serialized form.
    RETURN CASE
        WHEN val IS NULL THEN NULL
        ELSE jsonb_build_object(
            'digest', 'md5:' || md5(val::text),
            'size', octet_length(val::text),
            'type', jsonb_typeof(val)
        )
    END;
END;
$$ LANGUAGE plpgsql IMMUTABLE;
COMMENT ON FUNCTION _jsonb_digest_summary(JSONB) IS
'Returns a compact {digest, size, type} summary of a JSONB value for use in history tables. '
'The digest is md5 of the text representation — sufficient for change-detection, not for security.';
-- ============================================================================
-- HISTORY TABLES
-- ============================================================================
@@ -155,6 +183,7 @@ COMMENT ON COLUMN event_history.entity_ref IS 'Denormalized trigger_ref for JOIN
-- ----------------------------------------------------------------------------
-- execution history trigger
-- Tracked fields: status, result, executor, workflow_task, env_vars
-- Note: result uses _jsonb_digest_summary() to avoid storing large payloads
-- ----------------------------------------------------------------------------
CREATE OR REPLACE FUNCTION record_execution_history()
@@ -184,32 +213,35 @@ BEGIN
END IF;
-- UPDATE: detect which fields changed
IF OLD.status IS DISTINCT FROM NEW.status THEN
changed := changed || 'status';
changed := array_append(changed, 'status');
old_vals := old_vals || jsonb_build_object('status', OLD.status);
new_vals := new_vals || jsonb_build_object('status', NEW.status);
END IF;
-- Result: store a compact digest instead of the full JSONB to avoid bloat.
-- The live execution row always has the complete result.
IF OLD.result IS DISTINCT FROM NEW.result THEN
changed := changed || 'result';
old_vals := old_vals || jsonb_build_object('result', OLD.result);
new_vals := new_vals || jsonb_build_object('result', NEW.result);
changed := array_append(changed, 'result');
old_vals := old_vals || jsonb_build_object('result', _jsonb_digest_summary(OLD.result));
new_vals := new_vals || jsonb_build_object('result', _jsonb_digest_summary(NEW.result));
END IF;
IF OLD.executor IS DISTINCT FROM NEW.executor THEN
changed := changed || 'executor';
changed := array_append(changed, 'executor');
old_vals := old_vals || jsonb_build_object('executor', OLD.executor);
new_vals := new_vals || jsonb_build_object('executor', NEW.executor);
END IF;
IF OLD.workflow_task IS DISTINCT FROM NEW.workflow_task THEN
changed := changed || 'workflow_task';
changed := array_append(changed, 'workflow_task');
old_vals := old_vals || jsonb_build_object('workflow_task', OLD.workflow_task);
new_vals := new_vals || jsonb_build_object('workflow_task', NEW.workflow_task);
END IF;
IF OLD.env_vars IS DISTINCT FROM NEW.env_vars THEN
changed := changed || 'env_vars';
changed := array_append(changed, 'env_vars');
old_vals := old_vals || jsonb_build_object('env_vars', OLD.env_vars);
new_vals := new_vals || jsonb_build_object('env_vars', NEW.env_vars);
END IF;
@@ -261,37 +293,37 @@ BEGIN
-- UPDATE: detect which fields changed
IF OLD.name IS DISTINCT FROM NEW.name THEN
changed := changed || 'name';
changed := array_append(changed, 'name');
old_vals := old_vals || jsonb_build_object('name', OLD.name);
new_vals := new_vals || jsonb_build_object('name', NEW.name);
END IF;
IF OLD.status IS DISTINCT FROM NEW.status THEN
changed := changed || 'status';
changed := array_append(changed, 'status');
old_vals := old_vals || jsonb_build_object('status', OLD.status);
new_vals := new_vals || jsonb_build_object('status', NEW.status);
END IF;
IF OLD.capabilities IS DISTINCT FROM NEW.capabilities THEN
changed := changed || 'capabilities';
changed := array_append(changed, 'capabilities');
old_vals := old_vals || jsonb_build_object('capabilities', OLD.capabilities);
new_vals := new_vals || jsonb_build_object('capabilities', NEW.capabilities);
END IF;
IF OLD.meta IS DISTINCT FROM NEW.meta THEN
changed := changed || 'meta';
changed := array_append(changed, 'meta');
old_vals := old_vals || jsonb_build_object('meta', OLD.meta);
new_vals := new_vals || jsonb_build_object('meta', NEW.meta);
END IF;
IF OLD.host IS DISTINCT FROM NEW.host THEN
changed := changed || 'host';
changed := array_append(changed, 'host');
old_vals := old_vals || jsonb_build_object('host', OLD.host);
new_vals := new_vals || jsonb_build_object('host', NEW.host);
END IF;
IF OLD.port IS DISTINCT FROM NEW.port THEN
changed := changed || 'port';
changed := array_append(changed, 'port');
old_vals := old_vals || jsonb_build_object('port', OLD.port);
new_vals := new_vals || jsonb_build_object('port', NEW.port);
END IF;
@@ -342,13 +374,13 @@ BEGIN
-- UPDATE: detect which fields changed
IF OLD.status IS DISTINCT FROM NEW.status THEN
changed := changed || 'status';
changed := array_append(changed, 'status');
old_vals := old_vals || jsonb_build_object('status', OLD.status);
new_vals := new_vals || jsonb_build_object('status', NEW.status);
END IF;
IF OLD.payload IS DISTINCT FROM NEW.payload THEN
changed := changed || 'payload';
changed := array_append(changed, 'payload');
old_vals := old_vals || jsonb_build_object('payload', OLD.payload);
new_vals := new_vals || jsonb_build_object('payload', NEW.payload);
END IF;
@@ -398,13 +430,13 @@ BEGIN
-- UPDATE: detect which fields changed
IF OLD.config IS DISTINCT FROM NEW.config THEN
changed := changed || 'config';
changed := array_append(changed, 'config');
old_vals := old_vals || jsonb_build_object('config', OLD.config);
new_vals := new_vals || jsonb_build_object('config', NEW.config);
END IF;
IF OLD.payload IS DISTINCT FROM NEW.payload THEN
changed := changed || 'payload';
changed := array_append(changed, 'payload');
old_vals := old_vals || jsonb_build_object('payload', OLD.payload);
new_vals := new_vals || jsonb_build_object('payload', NEW.payload);
END IF;
@@ -485,3 +517,150 @@ SELECT add_retention_policy('execution_history', INTERVAL '90 days');
SELECT add_retention_policy('enforcement_history', INTERVAL '90 days');
SELECT add_retention_policy('event_history', INTERVAL '30 days');
SELECT add_retention_policy('worker_history', INTERVAL '180 days');
-- ============================================================================
-- CONTINUOUS AGGREGATES
-- ============================================================================
-- Drop existing continuous aggregates if they exist, so this migration can be
-- re-run safely after a partial failure. (TimescaleDB continuous aggregates
-- must be dropped with CASCADE to remove their associated policies.)
DROP MATERIALIZED VIEW IF EXISTS execution_status_hourly CASCADE;
DROP MATERIALIZED VIEW IF EXISTS execution_throughput_hourly CASCADE;
DROP MATERIALIZED VIEW IF EXISTS event_volume_hourly CASCADE;
DROP MATERIALIZED VIEW IF EXISTS worker_status_hourly CASCADE;
DROP MATERIALIZED VIEW IF EXISTS enforcement_volume_hourly CASCADE;
-- ----------------------------------------------------------------------------
-- execution_status_hourly
-- Tracks execution status transitions per hour, grouped by action_ref and new status.
-- Powers: execution throughput chart, failure rate widget, status breakdown over time.
-- Only history rows whose changed_fields array contains 'status' contribute.
-- ----------------------------------------------------------------------------
CREATE MATERIALIZED VIEW execution_status_hourly
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) AS bucket,
    entity_ref AS action_ref,
    new_values->>'status' AS new_status,
    COUNT(*) AS transition_count
FROM execution_history
WHERE 'status' = ANY(changed_fields)
GROUP BY bucket, entity_ref, new_values->>'status'
WITH NO DATA;
-- Refresh window: from 7 days back up to 1 hour ago, every 30 minutes.
SELECT add_continuous_aggregate_policy('execution_status_hourly',
    start_offset => INTERVAL '7 days',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '30 minutes'
);
-- ----------------------------------------------------------------------------
-- execution_throughput_hourly
-- Tracks total execution creation volume per hour, regardless of status.
-- Powers: execution throughput sparkline on the dashboard.
-- Counts INSERT history rows only (one per created execution).
-- ----------------------------------------------------------------------------
CREATE MATERIALIZED VIEW execution_throughput_hourly
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) AS bucket,
    entity_ref AS action_ref,
    COUNT(*) AS execution_count
FROM execution_history
WHERE operation = 'INSERT'
GROUP BY bucket, entity_ref
WITH NO DATA;
-- Refresh window: from 7 days back up to 1 hour ago, every 30 minutes.
SELECT add_continuous_aggregate_policy('execution_throughput_hourly',
    start_offset => INTERVAL '7 days',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '30 minutes'
);
-- ----------------------------------------------------------------------------
-- event_volume_hourly
-- Tracks event creation volume per hour by trigger ref.
-- Powers: event throughput monitoring widget.
-- ----------------------------------------------------------------------------
CREATE MATERIALIZED VIEW event_volume_hourly
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) AS bucket,
    entity_ref AS trigger_ref,
    COUNT(*) AS event_count
FROM event_history
WHERE operation = 'INSERT'
GROUP BY bucket, entity_ref
WITH NO DATA;
-- Refresh window: from 7 days back up to 1 hour ago, every 30 minutes.
SELECT add_continuous_aggregate_policy('event_volume_hourly',
    start_offset => INTERVAL '7 days',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '30 minutes'
);
-- ----------------------------------------------------------------------------
-- worker_status_hourly
-- Tracks worker status changes per hour (online/offline/draining transitions).
-- Powers: worker health trends widget.
-- ----------------------------------------------------------------------------
CREATE MATERIALIZED VIEW worker_status_hourly
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) AS bucket,
    entity_ref AS worker_name,
    new_values->>'status' AS new_status,
    COUNT(*) AS transition_count
FROM worker_history
WHERE 'status' = ANY(changed_fields)
GROUP BY bucket, entity_ref, new_values->>'status'
WITH NO DATA;
-- Longer lookback (30 days) and slower cadence than the execution aggregates.
SELECT add_continuous_aggregate_policy('worker_status_hourly',
    start_offset => INTERVAL '30 days',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '1 hour'
);
-- ----------------------------------------------------------------------------
-- enforcement_volume_hourly
-- Tracks enforcement creation volume per hour by rule ref.
-- Powers: rule activation rate monitoring.
-- ----------------------------------------------------------------------------
CREATE MATERIALIZED VIEW enforcement_volume_hourly
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) AS bucket,
    entity_ref AS rule_ref,
    COUNT(*) AS enforcement_count
FROM enforcement_history
WHERE operation = 'INSERT'
GROUP BY bucket, entity_ref
WITH NO DATA;
-- Refresh window: from 7 days back up to 1 hour ago, every 30 minutes.
SELECT add_continuous_aggregate_policy('enforcement_volume_hourly',
    start_offset => INTERVAL '7 days',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '30 minutes'
);
-- ============================================================================
-- INITIAL REFRESH NOTE
-- ============================================================================
-- NOTE: refresh_continuous_aggregate() cannot run inside a transaction block,
-- and the migration runner wraps each file in BEGIN/COMMIT. The continuous
-- aggregate policies configured above will automatically backfill data within
-- their first scheduled interval (30 min – 1 hour). On a fresh database there
-- is no history data to backfill anyway.
--
-- If you need an immediate manual refresh after migration, run outside a
-- transaction:
-- CALL refresh_continuous_aggregate('execution_status_hourly', NULL, NOW());
-- CALL refresh_continuous_aggregate('execution_throughput_hourly', NULL, NOW());
-- CALL refresh_continuous_aggregate('event_volume_hourly', NULL, NOW());
-- CALL refresh_continuous_aggregate('worker_status_hourly', NULL, NOW());
-- CALL refresh_continuous_aggregate('enforcement_volume_hourly', NULL, NOW());

View File

@@ -1,168 +0,0 @@
-- Migration: Restore webhook functions
-- Description: Recreate webhook functions that were accidentally dropped in 20260129000001
-- Date: 2026-02-04
-- Drop existing functions to avoid signature conflicts
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT, JSONB);
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS disable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS regenerate_trigger_webhook_key(BIGINT);
-- Function to enable webhooks for a trigger.
-- Enables webhook delivery, reusing the existing key when present, and
-- returns (enabled, key, full webhook URL).
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT,
    p_config JSONB DEFAULT '{}'::jsonb
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(255),
    webhook_url TEXT
) AS $$
DECLARE
    v_webhook_key VARCHAR(255);
    -- Base URL is configurable per-database/session via
    --   SET app.api_base_url = 'https://…';
    -- current_setting(..., true) returns NULL when the setting is absent,
    -- so we fall back to the historical localhost default.
    v_api_base_url TEXT := COALESCE(
        NULLIF(current_setting('app.api_base_url', true), ''),
        'http://localhost:8080'
    );
BEGIN
    -- Check if trigger exists
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    -- Reuse the existing webhook key if one is set; generate otherwise so
    -- re-enabling does not invalidate previously shared URLs.
    SELECT t.webhook_key INTO v_webhook_key
    FROM trigger t
    WHERE t.id = p_trigger_id;
    IF v_webhook_key IS NULL THEN
        v_webhook_key := generate_webhook_key();
    END IF;
    -- Update trigger to enable webhooks
    UPDATE trigger
    SET
        webhook_enabled = TRUE,
        webhook_key = v_webhook_key,
        webhook_config = p_config,
        updated = NOW()
    WHERE id = p_trigger_id;
    -- Return webhook details
    RETURN QUERY SELECT
        TRUE,
        v_webhook_key,
        v_api_base_url || '/api/v1/webhooks/' || v_webhook_key;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION enable_trigger_webhook(BIGINT, JSONB) IS
'Enables webhooks for a trigger with optional configuration. Generates a new webhook key if one does not exist. Returns webhook details.';
-- Function to disable webhooks for a trigger.
-- Disables delivery and clears webhook_key so the key disappears from API
-- responses. Raises if the trigger does not exist; otherwise returns TRUE.
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    -- Disable in a single statement; FOUND reports whether the row existed.
    UPDATE trigger
    SET
        webhook_enabled = FALSE,
        webhook_key = NULL,
        updated = NOW()
    WHERE id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
'Disables webhooks for a trigger. Webhook key is removed when disabled.';
-- Function to regenerate webhook key for a trigger.
-- Replaces the current key with a freshly generated one. Raises if the
-- trigger is missing or webhooks are disabled; reports whether an old key
-- existed (and was therefore revoked).
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(255),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_new_key VARCHAR(255);
    v_old_key VARCHAR(255);
    v_webhook_enabled BOOLEAN;
BEGIN
    -- Single fetch of current state; FOUND doubles as the existence check.
    SELECT t.webhook_key, t.webhook_enabled
    INTO v_old_key, v_webhook_enabled
    FROM trigger t
    WHERE t.id = p_trigger_id;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;
    IF NOT v_webhook_enabled THEN
        RAISE EXCEPTION 'Webhooks are not enabled for trigger %', p_trigger_id;
    END IF;
    -- Install the replacement key.
    v_new_key := generate_webhook_key();
    UPDATE trigger
    SET
        webhook_key = v_new_key,
        updated = NOW()
    WHERE id = p_trigger_id;
    RETURN QUERY SELECT
        v_new_key,
        (v_old_key IS NOT NULL);
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
'Regenerates webhook key for a trigger. Returns new key and whether a previous key was revoked.';
-- Verify all functions exist.
-- Same checks as before, but driven by a list of expected function names so
-- each name is spelled out exactly once instead of three copy-pasted blocks.
DO $$
DECLARE
    fn TEXT;
BEGIN
    FOREACH fn IN ARRAY ARRAY[
        'enable_trigger_webhook',
        'disable_trigger_webhook',
        'regenerate_trigger_webhook_key'
    ]
    LOOP
        IF NOT EXISTS (
            SELECT 1 FROM pg_proc p
            JOIN pg_namespace n ON p.pronamespace = n.oid
            WHERE n.nspname = current_schema()
              AND p.proname = fn
        ) THEN
            -- Produces the same text as the original per-function messages:
            -- '<name> function not found after migration'
            RAISE EXCEPTION '% function not found after migration', fn;
        END IF;
    END LOOP;
    RAISE NOTICE 'All webhook functions successfully restored';
END $$;

View File

@@ -1,274 +0,0 @@
-- Migration: Add Pack Runtime Environments
-- Description: Adds support for per-pack isolated runtime environments with installer metadata
-- Version: 20260203000002
-- Note: runtime.installers column is defined in migration 20250101000002_pack_system.sql
-- ============================================================================
-- PART 1: Create pack_environment table
-- ============================================================================
-- Pack environment table
-- One row per (pack, runtime) pair, tracking the lifecycle of an isolated,
-- pack-specific runtime environment (e.g., a Python venv) on disk.
CREATE TABLE IF NOT EXISTS pack_environment (
    id BIGSERIAL PRIMARY KEY,
    -- Owning pack/runtime, plus denormalized refs for lookup without a join.
    -- Rows disappear with their pack or runtime (ON DELETE CASCADE).
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT NOT NULL,
    runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
    runtime_ref TEXT NOT NULL,
    -- Absolute filesystem location of the environment directory.
    env_path TEXT NOT NULL,
    -- Lifecycle state; enum type is declared elsewhere in the schema.
    status pack_environment_status_enum NOT NULL DEFAULT 'pending',
    installed_at TIMESTAMPTZ,
    last_verified TIMESTAMPTZ,
    -- Captured installer output / failure details for debugging.
    install_log TEXT,
    install_error TEXT,
    metadata JSONB DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- A pack gets at most one environment per runtime.
    UNIQUE(pack, runtime)
);
-- Indexes
-- NOTE: the UNIQUE(pack, runtime) constraint on the table already creates a
-- composite btree index on (pack, runtime). That index also serves lookups on
-- its leading column (pack), so the previously defined
-- idx_pack_environment_pack and idx_pack_environment_pack_runtime indexes were
-- fully redundant and have been dropped to save write overhead and storage.
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime ON pack_environment(runtime);
CREATE INDEX IF NOT EXISTS idx_pack_environment_status ON pack_environment(status);
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack_ref ON pack_environment(pack_ref);
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime_ref ON pack_environment(runtime_ref);
-- Trigger for updated timestamp
-- Keeps the `updated` column current on every row update (shared helper
-- function defined elsewhere in the schema).
CREATE TRIGGER update_pack_environment_updated
    BEFORE UPDATE ON pack_environment
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE pack_environment IS 'Tracks pack-specific runtime environments for dependency isolation';
COMMENT ON COLUMN pack_environment.pack IS 'Pack that owns this environment';
COMMENT ON COLUMN pack_environment.pack_ref IS 'Pack reference for quick lookup';
COMMENT ON COLUMN pack_environment.runtime IS 'Runtime used for this environment';
COMMENT ON COLUMN pack_environment.runtime_ref IS 'Runtime reference for quick lookup';
COMMENT ON COLUMN pack_environment.env_path IS 'Filesystem path to the environment directory (e.g., /opt/attune/packenvs/mypack/python)';
COMMENT ON COLUMN pack_environment.status IS 'Current installation status';
COMMENT ON COLUMN pack_environment.installed_at IS 'When the environment was successfully installed';
COMMENT ON COLUMN pack_environment.last_verified IS 'Last time the environment was verified as working';
COMMENT ON COLUMN pack_environment.install_log IS 'Installation output logs';
COMMENT ON COLUMN pack_environment.install_error IS 'Error message if installation failed';
COMMENT ON COLUMN pack_environment.metadata IS 'Additional metadata (installed packages, versions, etc.)';
-- ============================================================================
-- PART 2: Update existing runtimes with installer metadata
-- ============================================================================
-- Python runtime installers
-- Seeds runtime.installers for core.python: a venv is created per pack, pip is
-- upgraded (best-effort), and requirements.txt is installed when present.
-- Placeholders such as {env_path} and {pack_path} are template variables —
-- presumably expanded by the worker at install time; confirm against the
-- installer consumer.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(
        jsonb_build_object(
            'name', 'create_venv',
            'description', 'Create Python virtual environment',
            'command', 'python3',
            'args', jsonb_build_array('-m', 'venv', '{env_path}'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(),
            'order', 1,
            'optional', false
        ),
        jsonb_build_object(
            'name', 'upgrade_pip',
            'description', 'Upgrade pip to latest version',
            'command', '{env_path}/bin/pip',
            'args', jsonb_build_array('install', '--upgrade', 'pip'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(),
            'order', 2,
            -- Best-effort step: marked optional so a pip upgrade failure does
            -- not abort the install.
            'optional', true
        ),
        jsonb_build_object(
            'name', 'install_requirements',
            'description', 'Install pack Python dependencies',
            'command', '{env_path}/bin/pip',
            'args', jsonb_build_array('install', '-r', '{pack_path}/requirements.txt'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(),
            'order', 3,
            'optional', false,
            -- Conditional step: skipped when the pack ships no requirements.txt.
            'condition', jsonb_build_object(
                'file_exists', '{pack_path}/requirements.txt'
            )
        )
    ),
    'executable_templates', jsonb_build_object(
        'python', '{env_path}/bin/python',
        'pip', '{env_path}/bin/pip'
    )
)
WHERE ref = 'core.python';
-- Node.js runtime installers
-- Seeds runtime.installers for core.nodejs: a single conditional npm install
-- into the pack's environment directory. Node/npm binaries come from the
-- system PATH (plain 'node'/'npm' executable templates), with NODE_PATH
-- pointing module resolution at the per-pack node_modules.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(
        jsonb_build_object(
            'name', 'npm_install',
            'description', 'Install Node.js dependencies',
            'command', 'npm',
            'args', jsonb_build_array('install', '--prefix', '{env_path}'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(
                'NODE_PATH', '{env_path}/node_modules'
            ),
            'order', 1,
            'optional', false,
            -- Conditional step: skipped when the pack ships no package.json.
            'condition', jsonb_build_object(
                'file_exists', '{pack_path}/package.json'
            )
        )
    ),
    'executable_templates', jsonb_build_object(
        'node', 'node',
        'npm', 'npm'
    ),
    'env_vars', jsonb_build_object(
        'NODE_PATH', '{env_path}/node_modules'
    )
)
WHERE ref = 'core.nodejs';
-- Shell runtime (no environment needed, uses system shell)
-- Empty installer list plus requires_environment=false: the
-- runtime_requires_environment() helper reads this key to skip environment
-- provisioning for shell actions.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    'executable_templates', jsonb_build_object(
        'sh', 'sh',
        'bash', 'bash'
    ),
    'requires_environment', false
)
WHERE ref = 'core.shell';
-- Native runtime (no environment needed, binaries are standalone)
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    'executable_templates', jsonb_build_object(),
    'requires_environment', false
)
WHERE ref = 'core.native';
-- Built-in sensor runtime (internal, no environment)
-- No base_path_template here: this runtime never materializes on disk, so no
-- path template is needed.
UPDATE runtime
SET installers = jsonb_build_object(
    'installers', jsonb_build_array(),
    'requires_environment', false
)
WHERE ref = 'core.sensor.builtin';
-- ============================================================================
-- PART 3: Add helper functions
-- ============================================================================
-- Function to get environment path for a pack/runtime combination
-- Expands the runtime's installers->>'base_path_template' (falling back to the
-- default template when the runtime has none) by substituting {pack_ref},
-- {runtime_ref} and {runtime_name_lower}.
-- Returns NULL when p_runtime_ref matches no runtime row: v_runtime_name is
-- then NULL and the final REPLACE propagates it.
-- FIX: volatility changed from IMMUTABLE to STABLE. The function reads the
-- runtime table, so its result can change whenever runtime rows change;
-- IMMUTABLE would let the planner constant-fold calls and cache stale paths.
CREATE OR REPLACE FUNCTION get_pack_environment_path(p_pack_ref TEXT, p_runtime_ref TEXT)
RETURNS TEXT AS $$
DECLARE
    v_runtime_name TEXT;
    v_base_template TEXT;
    v_result TEXT;
BEGIN
    -- Get runtime name and base path template
    SELECT
        LOWER(name),
        installers->>'base_path_template'
    INTO v_runtime_name, v_base_template
    FROM runtime
    WHERE ref = p_runtime_ref;
    -- Fall back to the conventional layout when the runtime row is missing or
    -- carries no template.
    IF v_base_template IS NULL THEN
        v_base_template := '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}';
    END IF;
    -- Replace template variables
    v_result := v_base_template;
    v_result := REPLACE(v_result, '{pack_ref}', p_pack_ref);
    v_result := REPLACE(v_result, '{runtime_ref}', p_runtime_ref);
    v_result := REPLACE(v_result, '{runtime_name_lower}', v_runtime_name);
    RETURN v_result;
END;
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION get_pack_environment_path IS 'Calculate the filesystem path for a pack runtime environment';
-- Function to check if a runtime requires an environment
-- Reads installers->>'requires_environment' for the given runtime ref.
-- Semantics: key absent on an existing runtime => TRUE (environments are
-- opt-out); runtime ref unknown => FALSE (nothing to provision).
CREATE OR REPLACE FUNCTION runtime_requires_environment(p_runtime_ref TEXT)
RETURNS BOOLEAN AS $$
BEGIN
    RETURN COALESCE(
        (
            -- Inner COALESCE: existing runtime with no explicit flag defaults
            -- to requiring an environment.
            SELECT COALESCE((installers->>'requires_environment')::boolean, TRUE)
            FROM runtime
            WHERE ref = p_runtime_ref
        ),
        -- Outer COALESCE: no matching runtime row at all defaults to FALSE.
        FALSE
    );
END;
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION runtime_requires_environment IS 'Check if a runtime needs a pack-specific environment';
-- ============================================================================
-- PART 4: Create view for environment status
-- ============================================================================
-- Joins each environment to its pack and runtime and derives two computed
-- columns: a staleness flag and a coarse health label for dashboards.
CREATE OR REPLACE VIEW v_pack_environment_status AS
SELECT
    pe.id,
    pe.pack,
    p.ref AS pack_ref,
    p.label AS pack_name,
    pe.runtime,
    r.ref AS runtime_ref,
    r.name AS runtime_name,
    pe.env_path,
    pe.status,
    pe.installed_at,
    pe.last_verified,
    -- Ready environments are considered stale after 7 days without
    -- re-verification.
    -- NOTE(review): when last_verified IS NULL the comparison is unknown and
    -- the CASE falls through to false, so a ready-but-never-verified
    -- environment is NOT flagged — confirm that is intended.
    CASE
        WHEN pe.status = 'ready' AND pe.last_verified < NOW() - INTERVAL '7 days' THEN true
        ELSE false
    END AS needs_verification,
    -- Collapse the status enum into a coarse health label.
    CASE
        WHEN pe.status = 'ready' THEN 'healthy'
        WHEN pe.status = 'failed' THEN 'unhealthy'
        WHEN pe.status IN ('pending', 'installing') THEN 'provisioning'
        WHEN pe.status = 'outdated' THEN 'needs_update'
        ELSE 'unknown'
    END AS health_status,
    pe.install_error,
    pe.created,
    pe.updated
FROM pack_environment pe
JOIN pack p ON pe.pack = p.id
JOIN runtime r ON pe.runtime = r.id;
COMMENT ON VIEW v_pack_environment_status IS 'Consolidated view of pack environment status with health indicators';
-- ============================================================================
-- SUMMARY
-- ============================================================================
-- Display summary of changes
-- Informational only: RAISE NOTICE prints to the client/log and has no effect
-- on the migration transaction.
DO $$
BEGIN
    RAISE NOTICE 'Pack environment system migration complete.';
    RAISE NOTICE '';
    RAISE NOTICE 'New table: pack_environment (tracks installed environments)';
    RAISE NOTICE 'New column: runtime.installers (environment setup instructions)';
    RAISE NOTICE 'New functions: get_pack_environment_path, runtime_requires_environment';
    RAISE NOTICE 'New view: v_pack_environment_status';
    RAISE NOTICE '';
    RAISE NOTICE 'Environment paths will be: /opt/attune/packenvs/{pack_ref}/{runtime}';
END $$;

View File

@@ -1,154 +0,0 @@
-- Migration: Add Pack Test Results Tracking
-- Created: 2026-01-20
-- Description: Add tables and views for tracking pack test execution results
-- Pack test execution tracking table
-- Append-only log: one row per full test run of a pack, with both summary
-- counters (for queries) and the complete result payload (for drill-down).
CREATE TABLE IF NOT EXISTS pack_test_execution (
    id BIGSERIAL PRIMARY KEY,
    -- History is deleted along with its pack.
    pack_id BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_version VARCHAR(50) NOT NULL,
    execution_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    trigger_reason VARCHAR(50) NOT NULL, -- 'install', 'update', 'manual', 'validation'
    total_tests INT NOT NULL,
    passed INT NOT NULL,
    failed INT NOT NULL,
    skipped INT NOT NULL,
    pass_rate DECIMAL(5,4) NOT NULL, -- 0.0000 to 1.0000
    duration_ms BIGINT NOT NULL,
    result JSONB NOT NULL, -- Full test result structure
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Guard rails: non-negative counters, pass_rate bounded to [0, 1], and
    -- trigger_reason whitelisted to the four known sources.
    CONSTRAINT valid_test_counts CHECK (total_tests >= 0 AND passed >= 0 AND failed >= 0 AND skipped >= 0),
    CONSTRAINT valid_pass_rate CHECK (pass_rate >= 0.0 AND pass_rate <= 1.0),
    CONSTRAINT valid_trigger_reason CHECK (trigger_reason IN ('install', 'update', 'manual', 'validation'))
);
-- Indexes for efficient queries
-- FIX: added IF NOT EXISTS to match the CREATE TABLE IF NOT EXISTS above —
-- otherwise re-running this migration fails on the first duplicate index
-- instead of staying idempotent.
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_pack_id ON pack_test_execution(pack_id);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_time ON pack_test_execution(execution_time DESC);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_pass_rate ON pack_test_execution(pass_rate);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_trigger ON pack_test_execution(trigger_reason);
-- Comments for documentation
COMMENT ON TABLE pack_test_execution IS 'Tracks pack test execution results for validation and auditing';
COMMENT ON COLUMN pack_test_execution.pack_id IS 'Reference to the pack being tested';
COMMENT ON COLUMN pack_test_execution.pack_version IS 'Version of the pack at test time';
COMMENT ON COLUMN pack_test_execution.trigger_reason IS 'What triggered the test: install, update, manual, validation';
COMMENT ON COLUMN pack_test_execution.pass_rate IS 'Percentage of tests passed (0.0 to 1.0)';
COMMENT ON COLUMN pack_test_execution.result IS 'Full JSON structure with detailed test results';
-- Pack test result summary view (all test executions with pack info)
-- FIX: the original LEFT JOIN was immediately filtered by
-- WHERE pte.id IS NOT NULL, which discards every unmatched pack row — i.e. a
-- roundabout INNER JOIN. Written as an explicit INNER JOIN so the intent
-- ("only packs with at least one test execution") is visible; result set is
-- unchanged.
CREATE OR REPLACE VIEW pack_test_summary AS
SELECT
    p.id AS pack_id,
    p.ref AS pack_ref,
    p.label AS pack_label,
    pte.id AS test_execution_id,
    pte.pack_version,
    pte.execution_time AS test_time,
    pte.trigger_reason,
    pte.total_tests,
    pte.passed,
    pte.failed,
    pte.skipped,
    pte.pass_rate,
    pte.duration_ms,
    -- rn = 1 marks the most recent execution per pack; consumed by the
    -- pack_latest_test view.
    ROW_NUMBER() OVER (PARTITION BY p.id ORDER BY pte.execution_time DESC) AS rn
FROM pack p
INNER JOIN pack_test_execution pte ON p.id = pte.pack_id;
COMMENT ON VIEW pack_test_summary IS 'Summary of all pack test executions with pack details';
-- Latest test results per pack view
-- Thin projection over pack_test_summary keeping only the newest execution per
-- pack (rn = 1) and dropping the window-function bookkeeping column.
CREATE OR REPLACE VIEW pack_latest_test AS
SELECT
    pack_id,
    pack_ref,
    pack_label,
    test_execution_id,
    pack_version,
    test_time,
    trigger_reason,
    total_tests,
    passed,
    failed,
    skipped,
    pass_rate,
    duration_ms
FROM pack_test_summary
WHERE rn = 1;
COMMENT ON VIEW pack_latest_test IS 'Latest test results for each pack';
-- Function to get pack test statistics
-- Aggregates the full pack_test_execution history for one pack into a single
-- row. With zero executions the counts are 0 and the averages/timestamps and
-- last_test_passed are NULL (aggregates over an empty set).
CREATE OR REPLACE FUNCTION get_pack_test_stats(p_pack_id BIGINT)
RETURNS TABLE (
    total_executions BIGINT,
    successful_executions BIGINT,
    failed_executions BIGINT,
    avg_pass_rate DECIMAL,
    avg_duration_ms BIGINT,
    last_test_time TIMESTAMPTZ,
    last_test_passed BOOLEAN
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        COUNT(*)::BIGINT AS total_executions,
        -- NOTE(review): "successful" here requires every test to have passed
        -- (skipped tests count against it), while last_test_passed below only
        -- requires failed = 0 — confirm the asymmetry is intentional.
        COUNT(*) FILTER (WHERE passed = total_tests)::BIGINT AS successful_executions,
        COUNT(*) FILTER (WHERE failed > 0)::BIGINT AS failed_executions,
        AVG(pass_rate) AS avg_pass_rate,
        AVG(duration_ms)::BIGINT AS avg_duration_ms,
        MAX(execution_time) AS last_test_time,
        -- Outcome of the most recent execution for this pack (NULL when the
        -- pack has never been tested).
        (SELECT failed = 0 FROM pack_test_execution
         WHERE pack_id = p_pack_id
         ORDER BY execution_time DESC
         LIMIT 1) AS last_test_passed
    FROM pack_test_execution
    WHERE pack_id = p_pack_id;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION get_pack_test_stats IS 'Get statistical summary of test executions for a pack';
-- Function to check if pack has recent passing tests
-- A pack "has passing tests" when at least one execution inside the lookback
-- window (default: the last 24 hours) ran at least one test and recorded zero
-- failures.
CREATE OR REPLACE FUNCTION pack_has_passing_tests(
    p_pack_id BIGINT,
    p_hours_ago INT DEFAULT 24
)
RETURNS BOOLEAN AS $$
BEGIN
    -- EXISTS yields TRUE/FALSE (never NULL), so it can be returned directly
    -- without an intermediate variable.
    RETURN EXISTS(
        SELECT 1
        FROM pack_test_execution
        WHERE pack_id = p_pack_id
          AND execution_time > NOW() - (p_hours_ago || ' hours')::INTERVAL
          AND failed = 0
          AND total_tests > 0
    );
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION pack_has_passing_tests IS 'Check if pack has recent passing test executions';
-- Add trigger to update pack metadata on test execution
-- Intentionally a no-op today: the trigger wiring is created now so that
-- future pack-metadata updates (e.g., a last_tested column) only need to
-- change the function body, not the schema.
CREATE OR REPLACE FUNCTION update_pack_test_metadata()
RETURNS TRIGGER AS $$
BEGIN
    -- Could update pack table with last_tested timestamp if we add that column
    -- For now, just a placeholder for future functionality
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trigger_update_pack_test_metadata
    AFTER INSERT ON pack_test_execution
    FOR EACH ROW
    EXECUTE FUNCTION update_pack_test_metadata();
COMMENT ON TRIGGER trigger_update_pack_test_metadata ON pack_test_execution IS 'Updates pack metadata when tests are executed';

View File

@@ -1,56 +0,0 @@
-- Migration: Worker Table
-- Description: Creates worker table for tracking worker registration and heartbeat
-- Version: 20250101000014
-- ============================================================================
-- WORKER TABLE
-- ============================================================================
-- One row per registered worker process; liveness is tracked through
-- last_heartbeat updates.
CREATE TABLE worker (
    id BIGSERIAL PRIMARY KEY,
    -- Unique registration key (COMMENT below: typically hostname-based).
    name TEXT NOT NULL UNIQUE,
    worker_type worker_type_enum NOT NULL,
    worker_role worker_role_enum NOT NULL,
    -- Optional runtime binding; worker survives runtime deletion (SET NULL).
    runtime BIGINT REFERENCES runtime(id) ON DELETE SET NULL,
    host TEXT,
    port INTEGER,
    status worker_status_enum NOT NULL DEFAULT 'active',
    capabilities JSONB,
    meta JSONB,
    last_heartbeat TIMESTAMPTZ,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Indexes
-- NOTE: the UNIQUE constraint on worker.name already creates a btree index on
-- name, so the previously defined idx_worker_name duplicated it and has been
-- dropped.
CREATE INDEX idx_worker_type ON worker(worker_type);
CREATE INDEX idx_worker_role ON worker(worker_role);
CREATE INDEX idx_worker_runtime ON worker(runtime);
CREATE INDEX idx_worker_status ON worker(status);
-- Partial index: heartbeat scans never care about workers that have not
-- reported yet.
CREATE INDEX idx_worker_last_heartbeat ON worker(last_heartbeat DESC) WHERE last_heartbeat IS NOT NULL;
CREATE INDEX idx_worker_created ON worker(created DESC);
CREATE INDEX idx_worker_status_role ON worker(status, worker_role);
CREATE INDEX idx_worker_capabilities_gin ON worker USING GIN (capabilities);
CREATE INDEX idx_worker_meta_gin ON worker USING GIN (meta);
-- Trigger
-- Keeps `updated` current on every row update (shared helper defined elsewhere
-- in the schema).
CREATE TRIGGER update_worker_updated
    BEFORE UPDATE ON worker
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE worker IS 'Worker registration and tracking table for action and sensor workers';
COMMENT ON COLUMN worker.name IS 'Unique worker identifier (typically hostname-based)';
COMMENT ON COLUMN worker.worker_type IS 'Worker deployment type (local or remote)';
COMMENT ON COLUMN worker.worker_role IS 'Worker role (action or sensor)';
COMMENT ON COLUMN worker.runtime IS 'Runtime environment this worker supports (optional)';
COMMENT ON COLUMN worker.host IS 'Worker host address';
COMMENT ON COLUMN worker.port IS 'Worker port number';
COMMENT ON COLUMN worker.status IS 'Worker operational status';
COMMENT ON COLUMN worker.capabilities IS 'Worker capabilities (e.g., max_concurrent_executions, supported runtimes)';
COMMENT ON COLUMN worker.meta IS 'Additional worker metadata';
COMMENT ON COLUMN worker.last_heartbeat IS 'Timestamp of last heartbeat from worker';
-- ============================================================================

View File

@@ -1,127 +0,0 @@
-- Phase 3: Retry Tracking and Action Timeout Configuration
-- This migration adds support for:
-- 1. Retry tracking on executions (attempt count, max attempts, retry reason)
-- 2. Action-level timeout configuration
-- 3. Worker health metrics
-- Add retry tracking fields to execution table
-- retry_count/max_retries drive is_execution_retriable(); original_execution
-- is a self-reference linking a retry back to the run it repeats.
ALTER TABLE execution
ADD COLUMN retry_count INTEGER NOT NULL DEFAULT 0,
ADD COLUMN max_retries INTEGER,
ADD COLUMN retry_reason TEXT,
ADD COLUMN original_execution BIGINT REFERENCES execution(id) ON DELETE SET NULL;
-- Add index for finding retry chains
-- Partial: most executions are not retries, so NULLs are excluded.
CREATE INDEX idx_execution_original_execution ON execution(original_execution) WHERE original_execution IS NOT NULL;
-- Add timeout configuration to action table
ALTER TABLE action
ADD COLUMN timeout_seconds INTEGER,
ADD COLUMN max_retries INTEGER DEFAULT 0;
-- Add comment explaining timeout behavior
COMMENT ON COLUMN action.timeout_seconds IS 'Worker queue TTL override in seconds. If NULL, uses global worker_queue_ttl_ms config. Allows per-action timeout tuning.';
COMMENT ON COLUMN action.max_retries IS 'Maximum number of automatic retry attempts for failed executions. 0 = no retries (default).';
COMMENT ON COLUMN execution.retry_count IS 'Current retry attempt number (0 = first attempt, 1 = first retry, etc.)';
COMMENT ON COLUMN execution.max_retries IS 'Maximum retries for this execution. Copied from action.max_retries at creation time.';
COMMENT ON COLUMN execution.retry_reason IS 'Reason for retry (e.g., "worker_unavailable", "transient_error", "manual_retry")';
COMMENT ON COLUMN execution.original_execution IS 'ID of the original execution if this is a retry. Forms a retry chain.';
-- Add worker health tracking fields
-- These are stored in the capabilities JSONB field as a "health" object:
-- {
-- "runtimes": [...],
-- "health": {
-- "status": "healthy|degraded|unhealthy",
-- "last_check": "2026-02-09T12:00:00Z",
-- "consecutive_failures": 0,
-- "total_executions": 100,
-- "failed_executions": 2,
-- "average_execution_time_ms": 1500,
-- "queue_depth": 5
-- }
-- }
-- Add index for health-based queries
-- FIX: the healthy_workers view filters on the *text* expression
-- capabilities -> 'health' ->> 'status', so index that exact expression with a
-- default btree index. The previous GIN index over the extracted jsonb scalar
-- (-> instead of ->>) could not serve those equality/IN predicates: GIN
-- jsonb_ops only supports containment/existence operators, and the indexed
-- expression did not match the one used in queries.
CREATE INDEX idx_worker_capabilities_health_status ON worker
    ((capabilities -> 'health' ->> 'status'));
-- Add view for healthy workers (convenience for queries)
-- A worker qualifies when it is active, heartbeated within the last 30
-- seconds, and either reports no health block (backward compatible) or reports
-- a status of healthy/degraded.
CREATE OR REPLACE VIEW healthy_workers AS
SELECT
    w.id,
    w.name,
    w.worker_type,
    w.worker_role,
    w.runtime,
    w.status,
    w.capabilities,
    w.last_heartbeat,
    -- FIX: dropped the redundant ::TEXT cast — the ->> operator already
    -- returns text.
    w.capabilities -> 'health' ->> 'status' as health_status,
    (w.capabilities -> 'health' ->> 'queue_depth')::INTEGER as queue_depth,
    (w.capabilities -> 'health' ->> 'consecutive_failures')::INTEGER as consecutive_failures
FROM worker w
WHERE
    w.status = 'active'
    -- 30s freshness window — presumably longer than the heartbeat interval;
    -- confirm against worker configuration.
    AND w.last_heartbeat > NOW() - INTERVAL '30 seconds'
    AND (
        -- Healthy if no health info (backward compatible)
        w.capabilities -> 'health' IS NULL
        OR
        -- Or explicitly marked healthy
        w.capabilities -> 'health' ->> 'status' IN ('healthy', 'degraded')
    );
COMMENT ON VIEW healthy_workers IS 'Workers that are active, have fresh heartbeat, and are healthy or degraded (not unhealthy)';
-- Add function to get worker queue depth estimate
-- Extracts capabilities.health.queue_depth for one worker; returns NULL when
-- the worker row or the JSON path is missing (same as the original plpgsql
-- version).
-- FIX: rewritten as a plain SQL function — the plpgsql wrapper added no logic,
-- and LANGUAGE sql functions are eligible for inlining by the planner.
CREATE OR REPLACE FUNCTION get_worker_queue_depth(worker_id_param BIGINT)
RETURNS INTEGER AS $$
    SELECT (capabilities -> 'health' ->> 'queue_depth')::INTEGER
    FROM worker
    WHERE id = worker_id_param;
$$ LANGUAGE sql STABLE;
COMMENT ON FUNCTION get_worker_queue_depth IS 'Extract current queue depth from worker health metadata';
-- Add function to check if execution is retriable
-- An execution can be retried when it exists, its status is 'failed', a
-- positive max_retries is configured, and the retry budget is not exhausted.
CREATE OR REPLACE FUNCTION is_execution_retriable(execution_id_param BIGINT)
RETURNS BOOLEAN AS $$
DECLARE
    v_exec RECORD;
BEGIN
    -- Pull the retry bookkeeping for the requested execution.
    SELECT
        e.retry_count,
        e.max_retries,
        e.status
    INTO v_exec
    FROM execution e
    WHERE e.id = execution_id_param;

    -- Unknown execution id: nothing to retry.
    IF NOT FOUND THEN
        RETURN FALSE;
    END IF;

    -- Retriable only when the run failed, retries are configured
    -- (max_retries set and positive), and attempts remain.
    RETURN v_exec.status = 'failed'
        AND v_exec.max_retries IS NOT NULL
        AND v_exec.max_retries > 0
        AND v_exec.retry_count < v_exec.max_retries;
END;
$$ LANGUAGE plpgsql STABLE;
COMMENT ON FUNCTION is_execution_retriable IS 'Check if a failed execution can be automatically retried based on retry limits';
-- Add indexes for retry queries
-- Partial index covering the retry scanner's lookup: failed executions that
-- still have retry budget. COALESCE(max_retries, 0) treats an unset limit as
-- "no retries", matching the max_retries checks in is_execution_retriable.
CREATE INDEX idx_execution_status_retry ON execution(status, retry_count) WHERE status = 'failed' AND retry_count < COALESCE(max_retries, 0);

View File

@@ -1,105 +0,0 @@
-- Migration: Runtime Versions
-- Description: Adds support for multiple versions of the same runtime (e.g., Python 3.11, 3.12, 3.14).
-- - New `runtime_version` table to store version-specific execution configurations
-- - New `runtime_version_constraint` columns on action and sensor tables
-- Version: 20260226000000
-- ============================================================================
-- RUNTIME VERSION TABLE
-- ============================================================================
-- One row per concrete version of a runtime (e.g., Python 3.12.1), carrying a
-- full standalone execution config that can substitute for the parent's.
CREATE TABLE runtime_version (
    id BIGSERIAL PRIMARY KEY,
    -- Parent runtime; versions are deleted along with it. runtime_ref is
    -- denormalized for display/filtering without a join.
    runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
    runtime_ref TEXT NOT NULL,
    -- Semantic version string (e.g., "3.12.1", "20.11.0")
    version TEXT NOT NULL,
    -- Individual version components for efficient range queries.
    -- Nullable because some runtimes may use non-numeric versioning.
    version_major INT,
    version_minor INT,
    version_patch INT,
    -- Complete execution configuration for this specific version.
    -- This is NOT a diff/override — it is a full standalone config that can
    -- replace the parent runtime's execution_config when this version is selected.
    -- Structure is identical to runtime.execution_config (RuntimeExecutionConfig).
    execution_config JSONB NOT NULL DEFAULT '{}'::jsonb,
    -- Version-specific distribution/verification metadata.
    -- Structure mirrors runtime.distributions but with version-specific commands.
    -- Example: verification commands that check for a specific binary like python3.12.
    distributions JSONB NOT NULL DEFAULT '{}'::jsonb,
    -- Whether this version is the default for the parent runtime.
    -- At most one version per runtime should be marked as default.
    is_default BOOLEAN NOT NULL DEFAULT FALSE,
    -- Whether this version has been verified as available on the current system.
    available BOOLEAN NOT NULL DEFAULT TRUE,
    -- When this version was last verified (via running verification commands).
    verified_at TIMESTAMPTZ,
    -- Arbitrary version-specific metadata (e.g., EOL date, release notes URL,
    -- feature flags, platform-specific notes).
    meta JSONB NOT NULL DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Constraints
    -- A runtime cannot list the same version string twice.
    CONSTRAINT runtime_version_unique UNIQUE(runtime, version)
);
-- Indexes
CREATE INDEX idx_runtime_version_runtime ON runtime_version(runtime);
CREATE INDEX idx_runtime_version_runtime_ref ON runtime_version(runtime_ref);
CREATE INDEX idx_runtime_version_version ON runtime_version(version);
CREATE INDEX idx_runtime_version_available ON runtime_version(available) WHERE available = TRUE;
-- FIX: made the default-version partial index UNIQUE on (runtime) so the
-- invariant documented on the table ("at most one version per runtime should
-- be marked as default") is actually enforced by the database rather than
-- only stated in a comment. It still accelerates default-version lookups.
CREATE UNIQUE INDEX idx_runtime_version_is_default ON runtime_version(runtime) WHERE is_default = TRUE;
CREATE INDEX idx_runtime_version_components ON runtime_version(runtime, version_major, version_minor, version_patch);
CREATE INDEX idx_runtime_version_created ON runtime_version(created DESC);
CREATE INDEX idx_runtime_version_execution_config ON runtime_version USING GIN (execution_config);
CREATE INDEX idx_runtime_version_meta ON runtime_version USING GIN (meta);
-- Trigger
-- Keeps `updated` current on every row update (shared helper defined elsewhere
-- in the schema).
CREATE TRIGGER update_runtime_version_updated
    BEFORE UPDATE ON runtime_version
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();
-- Comments
COMMENT ON TABLE runtime_version IS 'Specific versions of a runtime (e.g., Python 3.11, 3.12) with version-specific execution configuration';
COMMENT ON COLUMN runtime_version.runtime IS 'Parent runtime this version belongs to';
COMMENT ON COLUMN runtime_version.runtime_ref IS 'Parent runtime ref (e.g., core.python) for display/filtering';
COMMENT ON COLUMN runtime_version.version IS 'Semantic version string (e.g., "3.12.1", "20.11.0")';
COMMENT ON COLUMN runtime_version.version_major IS 'Major version component for efficient range queries';
COMMENT ON COLUMN runtime_version.version_minor IS 'Minor version component for efficient range queries';
COMMENT ON COLUMN runtime_version.version_patch IS 'Patch version component for efficient range queries';
COMMENT ON COLUMN runtime_version.execution_config IS 'Complete execution configuration for this version (same structure as runtime.execution_config)';
COMMENT ON COLUMN runtime_version.distributions IS 'Version-specific distribution/verification metadata';
COMMENT ON COLUMN runtime_version.is_default IS 'Whether this is the default version for the parent runtime (at most one per runtime)';
COMMENT ON COLUMN runtime_version.available IS 'Whether this version has been verified as available on the system';
COMMENT ON COLUMN runtime_version.verified_at IS 'Timestamp of last availability verification';
COMMENT ON COLUMN runtime_version.meta IS 'Arbitrary version-specific metadata';
-- ============================================================================
-- ACTION TABLE: ADD RUNTIME VERSION CONSTRAINT
-- ============================================================================
-- Nullable TEXT column: NULL preserves the pre-migration behavior (action may
-- run on any version of its runtime).
ALTER TABLE action
ADD COLUMN runtime_version_constraint TEXT;
COMMENT ON COLUMN action.runtime_version_constraint IS 'Semver version constraint for the runtime (e.g., ">=3.12", ">=3.12,<4.0", "~18.0"). NULL means any version.';
-- ============================================================================
-- SENSOR TABLE: ADD RUNTIME VERSION CONSTRAINT
-- ============================================================================
-- Mirrors the action column above so sensors can pin runtime versions too.
ALTER TABLE sensor
ADD COLUMN runtime_version_constraint TEXT;
COMMENT ON COLUMN sensor.runtime_version_constraint IS 'Semver version constraint for the runtime (e.g., ">=3.12", ">=3.12,<4.0", "~18.0"). NULL means any version.';