re-uploading work

This commit is contained in:
2026-02-04 17:46:30 -06:00
commit 3b14c65998
1388 changed files with 381262 additions and 0 deletions

138
scripts/check-workspace-deps.sh Executable file
View File

@@ -0,0 +1,138 @@
#!/bin/bash
# Check that all dependencies use workspace = true
# This ensures consistent dependency versions across the workspace
set -e
# ANSI color codes for terminal output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo "Checking workspace dependency compliance..."
echo ""
# Counters for the final summary.
# NOTE(review): WARNINGS is initialized but never incremented or printed
# anywhere in this script — confirm whether it can be removed.
ERRORS=0
WARNINGS=0
# List of allowed exceptions (crate-specific dependencies that don't need workspace versions)
ALLOWED_EXCEPTIONS=(
# Executor-specific
"tera"
"serde_yaml"
# API-specific
"jsonwebtoken"
"hmac"
"sha1"
"hex"
"utoipa"
"utoipa-swagger-ui"
"argon2"
"rand"
# CLI-specific
"comfy-table"
"dialoguer"
"indicatif"
"dirs"
"urlencoding"
"colored"
# Sensor-specific
"cron"
# Worker-specific
"hostname"
# Common-specific
"async-recursion"
# Dev/test dependencies (crate-specific)
"mockito"
"wiremock"
"criterion"
"assert_cmd"
"predicates"
"tokio-test"
)
# Return 0 (success) when the given dependency name appears in the
# ALLOWED_EXCEPTIONS list defined above, 1 otherwise.
is_allowed_exception() {
  local candidate="$1"
  local allowed
  for allowed in "${ALLOWED_EXCEPTIONS[@]}"; do
    [[ "$candidate" == "$allowed" ]] && return 0
  done
  return 1
}
# Check each crate's Cargo.toml
for crate in crates/*/Cargo.toml; do
crate_name=$(basename $(dirname "$crate"))
# Find dependencies that specify version directly (not using workspace)
# Pattern: dep_name = "version" OR dep_name = { version = "..." } without workspace = true
# Only look in [dependencies], [dev-dependencies], and [build-dependencies] sections
#
# awk state machine: any section header resets in_deps; the three dependency
# section headers set it. While inside a dependency section, a line that
# names a dep with an inline version (and neither `workspace = true` nor a
# `path =` entry) has its leading dep name printed as a violation.
# NOTE(review): the dep-name regex requires at least two characters
# ([a-z][a-z0-9_-]+), so a single-letter crate name would be skipped.
violations=$(awk '
/^\[/ { in_deps=0 }
/^\[(dependencies|dev-dependencies|build-dependencies)\]/ { in_deps=1; next }
in_deps && /^[a-z][a-z0-9_-]+ = / && !/workspace = true/ && !/path = / && /(= "|version = ")/ {
match($0, /^[a-z][a-z0-9_-]+/);
print substr($0, RSTART, RLENGTH)
}
' "$crate" || true)
if [ -n "$violations" ]; then
has_real_violation=false
# Walk the violation list line by line; print the crate header lazily,
# only once the first non-exempt dependency is found.
while IFS= read -r dep; do
dep=$(echo "$dep" | xargs) # trim whitespace
if [ -n "$dep" ]; then
if is_allowed_exception "$dep"; then
# Skip allowed exceptions
continue
else
if [ "$has_real_violation" = false ]; then
echo -e "${YELLOW}Checking $crate_name...${NC}"
has_real_violation=true
fi
# Show the actual line
# NOTE(review): "${RED}${NC}" resets the color immediately — a status
# glyph between the two codes may have been lost; confirm intent.
line=$(grep "^$dep = " "$crate")
echo -e " ${RED}${NC} $dep"
echo -e " ${RED}$line${NC}"
ERRORS=$((ERRORS + 1))
fi
fi
done <<< "$violations"
if [ "$has_real_violation" = true ]; then
echo ""
fi
fi
done
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Final summary: exit non-zero so CI fails when any violation was counted.
if [ $ERRORS -gt 0 ]; then
echo -e "${RED}✗ Found $ERRORS dependency version violation(s)${NC}"
echo ""
echo "All dependencies should use 'workspace = true' unless they are:"
echo " 1. Crate-specific dependencies not used elsewhere"
echo " 2. Listed in the allowed exceptions"
echo ""
echo "To fix:"
echo " 1. Add the dependency to [workspace.dependencies] in Cargo.toml"
echo " 2. Update the crate to use: dep_name = { workspace = true }"
echo ""
echo "Or add to ALLOWED_EXCEPTIONS in this script if it's crate-specific."
exit 1
else
echo -e "${GREEN}✓ All crates use workspace dependencies correctly${NC}"
echo ""
echo "Allowed exceptions: ${#ALLOWED_EXCEPTIONS[@]} crate-specific dependencies"
fi

129
scripts/cleanup-test-schemas.sh Executable file
View File

@@ -0,0 +1,129 @@
#!/bin/bash
set -e
# Cleanup orphaned test schemas
# Run this periodically in development or CI to remove leftover test schemas
# Default to attune_test database, can be overridden with DATABASE_URL env var
DATABASE_URL="${DATABASE_URL:-postgresql://postgres:postgres@localhost:5432/attune_test}"
echo "============================================="
echo "Attune Test Schema Cleanup Utility"
echo "============================================="
echo "Target database: $DATABASE_URL"
echo ""
# Check if psql is available
if ! command -v psql &> /dev/null; then
echo "ERROR: psql command not found. Please install PostgreSQL client."
exit 1
fi
# Count schemas before cleanup
# (|| echo "0" keeps the script alive under `set -e` if the query fails)
BEFORE_COUNT=$(psql "$DATABASE_URL" -t -c "SELECT COUNT(*) FROM pg_namespace WHERE nspname LIKE 'test_%';" 2>/dev/null || echo "0")
BEFORE_COUNT=$(echo "$BEFORE_COUNT" | xargs) # trim whitespace
echo "Found $BEFORE_COUNT test schema(s) to clean up"
echo ""
if [ "$BEFORE_COUNT" = "0" ]; then
echo "No test schemas to clean up. Exiting."
exit 0
fi
# Confirm cleanup in interactive mode (skip if CI or --force flag)
# -t 0 means stdin is a terminal; CI=true or --force bypasses the prompt.
if [ -t 0 ] && [ "$1" != "--force" ] && [ "$CI" != "true" ]; then
read -p "Do you want to proceed with cleanup? (y/N) " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Cleanup cancelled."
exit 0
fi
fi
echo "Starting cleanup..."
echo ""
# Process schemas in batches to avoid PostgreSQL shared-memory issues when
# dropping many schemas at once.
BATCH_SIZE=50
TOTAL_DROPPED=0
BATCH_NUM=1
REMAINING=$BEFORE_COUNT
while true; do
    # Get the next batch of schema names (empty when none remain).
    SCHEMAS=$(psql "$DATABASE_URL" -t -c "SELECT nspname FROM pg_namespace WHERE nspname LIKE 'test_%' ORDER BY nspname LIMIT $BATCH_SIZE;" 2>/dev/null | xargs)
    if [ -z "$SCHEMAS" ]; then
        echo "No more schemas to clean up"
        break
    fi
    echo "Processing batch $BATCH_NUM (up to $BATCH_SIZE schemas)..."
    # Drop schemas in this batch. Individual failures raise a WARNING inside
    # the DO block instead of aborting the whole run; the output is captured
    # so NOTICE/WARNING chatter does not clutter the console, and `|| true`
    # keeps a psql failure from killing the script under `set -e`.
    BATCH_OUTPUT=$(psql "$DATABASE_URL" -t <<EOF 2>&1
DO \$\$
DECLARE
    schema_name TEXT;
    schema_count INTEGER := 0;
BEGIN
    FOR schema_name IN
        SELECT nspname
        FROM pg_namespace
        WHERE nspname LIKE 'test_%'
        ORDER BY nspname
        LIMIT $BATCH_SIZE
    LOOP
        BEGIN
            EXECUTE format('DROP SCHEMA IF EXISTS %I CASCADE', schema_name);
            schema_count := schema_count + 1;
        EXCEPTION WHEN OTHERS THEN
            RAISE WARNING 'Failed to drop schema %: %', schema_name, SQLERRM;
        END;
    END LOOP;
    RAISE NOTICE 'Batch complete: % schemas dropped', schema_count;
END \$\$;
EOF
    ) || true
    # BUG FIX: re-count the remaining schemas after each batch. Previously
    # the loop keyed only on "any schemas left", so a schema that could not
    # be dropped made the loop spin forever, and TOTAL_DROPPED assumed every
    # batch dropped a full BATCH_SIZE. Now we count exactly and stop when a
    # batch makes no progress.
    AFTER=$(psql "$DATABASE_URL" -t -c "SELECT COUNT(*) FROM pg_namespace WHERE nspname LIKE 'test_%';" 2>/dev/null | xargs)
    AFTER=${AFTER:-$REMAINING}
    if [ "$AFTER" -ge "$REMAINING" ]; then
        echo "WARNING: batch $BATCH_NUM made no progress; stopping to avoid an infinite loop"
        echo "$BATCH_OUTPUT"
        break
    fi
    TOTAL_DROPPED=$((TOTAL_DROPPED + REMAINING - AFTER))
    REMAINING=$AFTER
    echo " Batch $BATCH_NUM complete"
    BATCH_NUM=$((BATCH_NUM + 1))
    # Brief pause to let PostgreSQL clean up
    sleep 0.5
done
echo ""
echo "============================================"
echo "Cleanup Summary"
echo "============================================"
echo "Total batches processed: $((BATCH_NUM - 1))"
echo "Estimated schemas processed: ~$TOTAL_DROPPED"
echo ""
# BUG FIX: the previous version read EXIT_CODE=$? here, but $? referred to
# the preceding `echo` (always 0), and under `set -e` a failed psql would
# have terminated the script long before this point — the failure branch
# was dead code. Success is now determined by re-counting the remaining
# schemas instead.
AFTER_COUNT=$(psql "$DATABASE_URL" -t -c "SELECT COUNT(*) FROM pg_namespace WHERE nspname LIKE 'test_%';" 2>/dev/null || echo "0")
AFTER_COUNT=$(echo "$AFTER_COUNT" | xargs) # trim whitespace
echo "Remaining test schemas: $AFTER_COUNT"
echo ""
if [ "$AFTER_COUNT" != "0" ]; then
    echo "WARNING: Some test schemas were not cleaned up. Please investigate."
    exit 1
fi
echo "✓ Cleanup completed successfully"
echo "All test schemas have been removed."
135
scripts/create-test-user.sh Executable file
View File

@@ -0,0 +1,135 @@
#!/bin/bash
# Create Test User Account
# This script creates a test user in the Attune database
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values (all overridable via ATTUNE_DB_* environment variables)
DB_NAME="${ATTUNE_DB_NAME:-attune}"
DB_USER="${ATTUNE_DB_USER:-postgres}"
DB_HOST="${ATTUNE_DB_HOST:-localhost}"
DB_PORT="${ATTUNE_DB_PORT:-5432}"
DB_PASSWORD="${ATTUNE_DB_PASSWORD:-postgres}"
# Test user defaults (overridable via TEST_* environment variables)
TEST_LOGIN="${TEST_LOGIN:-test@attune.local}"
TEST_DISPLAY_NAME="${TEST_DISPLAY_NAME:-Test User}"
TEST_PASSWORD="${TEST_PASSWORD:-TestPass123!}"
# Pre-generated hash for default password "TestPass123!"
# If you change TEST_PASSWORD, you need to regenerate this with:
#   cargo run --example hash_password "YourPassword"
# (single-quoted so the $ characters in the hash are taken literally)
DEFAULT_PASSWORD_HASH='$argon2id$v=19$m=19456,t=2,p=1$F0UlGNd21LBXF7TWmpD93w$F65DKRjPU6japrzYv3ZcddnMFCtjVIBDWIkiLbkqt2I'
echo -e "${BLUE}╔════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ Attune Test User Setup ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════════════╝${NC}"
echo ""
# Check PostgreSQL connection
# NOTE(review): "${YELLOW}${NC}" resets the color immediately — a status
# glyph between the two codes may have been lost in this file; confirm.
echo -e "${YELLOW}${NC} Checking database connection..."
if ! PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c '\q' 2>/dev/null; then
echo -e "${RED}${NC} Cannot connect to database $DB_NAME"
echo ""
echo "Please check:"
echo " • Database exists: $DB_NAME"
echo " • PostgreSQL is running on $DB_HOST:$DB_PORT"
echo " • Credentials are correct"
exit 1
fi
echo -e "${GREEN}${NC} Database connection successful"
echo ""
# Determine password hash to use: the pre-generated hash covers the default
# password; any custom TEST_PASSWORD is hashed on the fly via cargo.
PASSWORD_HASH="$DEFAULT_PASSWORD_HASH"
# If custom password, generate hash
if [ "$TEST_PASSWORD" != "TestPass123!" ]; then
echo -e "${YELLOW}${NC} Generating password hash for custom password..."
# Run cargo from the project root (parent of scripts/)
cd "$(dirname "$0")/.."
PASSWORD_HASH=$(cargo run --quiet --example hash_password "$TEST_PASSWORD" 2>/dev/null || echo "")
if [ -z "$PASSWORD_HASH" ]; then
echo -e "${RED}${NC} Failed to generate password hash"
echo ""
echo "Please ensure Rust toolchain is installed, or use default password."
echo "To manually hash a password:"
echo " cargo run --example hash_password \"YourPassword\""
exit 1
fi
echo -e "${GREEN}${NC} Password hash generated"
echo ""
else
echo -e "${GREEN}${NC} Using default password hash"
echo ""
fi
# Check if user already exists
echo -e "${YELLOW}${NC} Checking if user exists..."
# -tAc: tuples only, unaligned, single command — yields a bare count.
# NOTE(review): $TEST_LOGIN (and below $TEST_DISPLAY_NAME/$PASSWORD_HASH)
# are interpolated directly into SQL. Acceptable for a dev-only helper,
# but a value containing a single quote will break the statement.
USER_EXISTS=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -tAc "SELECT COUNT(*) FROM identity WHERE login='$TEST_LOGIN'")
if [ "$USER_EXISTS" -gt 0 ]; then
echo -e "${YELLOW}!${NC} User '$TEST_LOGIN' already exists"
echo ""
read -p "Do you want to update the password? (y/N): " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo -e "${YELLOW}${NC} Updating user password..."
PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << EOF
UPDATE identity
SET password_hash = '$PASSWORD_HASH',
display_name = '$TEST_DISPLAY_NAME',
updated = NOW()
WHERE login = '$TEST_LOGIN';
EOF
echo -e "${GREEN}${NC} User password updated"
else
echo -e "${BLUE}${NC} User not modified"
exit 0
fi
else
# Create new user
echo -e "${YELLOW}${NC} Creating user..."
PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << EOF
INSERT INTO identity (login, display_name, password_hash, attributes)
VALUES ('$TEST_LOGIN', '$TEST_DISPLAY_NAME', '$PASSWORD_HASH', '{}');
EOF
echo -e "${GREEN}${NC} User created"
fi
echo ""
# Final summary: echo the credentials, connection details, and copy-paste
# examples for logging in and creating custom users.
echo -e "${GREEN}╔════════════════════════════════════════════════╗${NC}"
echo -e "${GREEN}║ Test User Setup Complete! ║${NC}"
echo -e "${GREEN}╚════════════════════════════════════════════════╝${NC}"
echo ""
echo -e "${BLUE}User Credentials:${NC}"
echo -e " Login: ${YELLOW}$TEST_LOGIN${NC}"
echo -e " Password: ${YELLOW}$TEST_PASSWORD${NC}"
echo -e " Name: ${YELLOW}$TEST_DISPLAY_NAME${NC}"
echo ""
echo -e "${BLUE}Database:${NC}"
echo -e " Host: ${YELLOW}$DB_HOST:$DB_PORT${NC}"
echo -e " Database: ${YELLOW}$DB_NAME${NC}"
echo ""
echo -e "${BLUE}Test Login:${NC}"
echo -e " ${YELLOW}curl -X POST http://localhost:8080/auth/login \\${NC}"
echo -e " ${YELLOW} -H 'Content-Type: application/json' \\${NC}"
echo -e " ${YELLOW} -d '{\"login\":\"$TEST_LOGIN\",\"password\":\"$TEST_PASSWORD\"}'${NC}"
echo ""
echo -e "${BLUE}Custom User:${NC}"
echo -e " You can create a custom user by setting environment variables:"
echo -e " ${YELLOW}TEST_LOGIN=myuser@example.com TEST_PASSWORD=MyPass123! ./scripts/create-test-user.sh${NC}"
echo ""
echo -e "${BLUE}Generate Password Hash:${NC}"
echo -e " To generate a hash for a custom password:"
echo -e " ${YELLOW}cargo run --example hash_password \"YourPassword\"${NC}"
echo ""

181
scripts/create_test_user.sh Executable file
View File

@@ -0,0 +1,181 @@
#!/bin/bash
# Create or reset test admin user for local development
# Login: admin, Password: admin
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Default values (overridable via ATTUNE_DB_* environment variables)
DB_NAME="${ATTUNE_DB_NAME:-attune}"
DB_USER="${ATTUNE_DB_USER:-postgres}"
DB_HOST="${ATTUNE_DB_HOST:-localhost}"
DB_PORT="${ATTUNE_DB_PORT:-5432}"
DB_PASSWORD="${ATTUNE_DB_PASSWORD:-postgres}"
# Admin user credentials, taken from positional arguments with defaults:
#   $1 login, $2 password, $3 display name
ADMIN_LOGIN="${1:-admin}"
ADMIN_PASSWORD="${2:-admin}"
ADMIN_DISPLAY_NAME="${3:-Administrator}"
# Colorized log-level helpers (color variables defined above).
print_info()  { echo -e "${GREEN}[INFO]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
print_warn()  { echo -e "${YELLOW}[WARN]${NC} $1"; }
# Check PostgreSQL connection
# Runs a no-op psql session (\q) with the configured credentials and
# aborts the script when the database is unreachable.
check_postgres() {
if ! PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c '\q' 2>/dev/null; then
print_error "Cannot connect to database '$DB_NAME' at $DB_HOST:$DB_PORT"
exit 1
fi
}
# Generate Argon2id hash for password
hash_password() {
local password="$1"
# Check if we can use Python with argon2-cffi
if command -v python3 &> /dev/null; then
python3 -c "
import sys
try:
from argon2 import PasswordHasher
ph = PasswordHasher()
print(ph.hash('$password'))
sys.exit(0)
except ImportError:
sys.exit(1)
" 2>/dev/null && return 0
fi
# Fallback: Use a pre-generated hash for 'admin' password
if [ "$password" = "admin" ]; then
# This is the Argon2id hash for password 'admin'
# Generated with: argon2-cffi default parameters
echo '$argon2id$v=19$m=19456,t=2,p=1$9Z0VWE8xbJMGPJ8kQ3qRmA$iGBqNEdvklvGLJH8TdUv6u+5c8WU8P9v7UzxQXmkFsE'
return 0
fi
print_error "Cannot hash password - Python with argon2-cffi not available"
print_error "Please install with: pip install argon2-cffi"
exit 1
}
# Create or update admin user
# Arguments: $1 login, $2 plaintext password, $3 display name.
# Inserts a new identity row, or updates password/display name when the
# login already exists, then prints the credentials.
# NOTE(review): values are interpolated directly into SQL — fine for a
# dev-only helper, but a single quote in any value breaks the statement.
create_or_update_user() {
local login="$1"
local password="$2"
local display_name="$3"
print_info "Generating password hash..."
local password_hash
# Declaration kept separate from assignment so a hash_password failure
# is not masked by `local`'s own exit status.
password_hash=$(hash_password "$password")
if [ -z "$password_hash" ]; then
print_error "Failed to generate password hash"
exit 1
fi
print_info "Checking if user '$login' exists..."
local user_exists
# -tAc: tuples only, unaligned, single command — yields a bare count.
user_exists=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -tAc \
"SELECT COUNT(*) FROM identity WHERE login='$login'")
if [ "$user_exists" -gt 0 ]; then
print_warn "User '$login' already exists. Updating password..."
PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" <<EOF
UPDATE identity
SET password_hash = '$password_hash',
display_name = '$display_name',
updated = NOW()
WHERE login = '$login';
EOF
print_info "User '$login' password updated successfully!"
else
print_info "Creating new user '$login'..."
PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" <<EOF
INSERT INTO identity (login, display_name, password_hash, attributes)
VALUES ('$login', '$display_name', '$password_hash', '{}');
EOF
print_info "User '$login' created successfully!"
fi
echo ""
print_info "======================================"
print_info "Test User Credentials:"
print_info " Login: $login"
print_info " Password: $password"
print_info "======================================"
echo ""
}
# Print usage information for this script (unquoted heredoc so $0 expands).
show_help() {
cat << EOF
Create or Reset Test Admin User
Usage: $0 [LOGIN] [PASSWORD] [DISPLAY_NAME]
Arguments:
LOGIN User login name (default: admin)
PASSWORD User password (default: admin)
DISPLAY_NAME User display name (default: Administrator)
Environment Variables:
ATTUNE_DB_NAME Database name (default: attune)
ATTUNE_DB_USER Database user (default: postgres)
ATTUNE_DB_HOST Database host (default: localhost)
ATTUNE_DB_PORT Database port (default: 5432)
ATTUNE_DB_PASSWORD Database password (default: postgres)
Examples:
# Create/reset default admin user (admin/admin)
$0
# Create/reset with custom credentials
$0 myuser mypassword "My User"
# Use custom database connection
ATTUNE_DB_PASSWORD=secret $0
Note: If Python with argon2-cffi is not available, only the default
'admin' password can be used (pre-hashed).
EOF
}
# Main script
# Handles -h/--help, verifies connectivity, then creates or updates the
# user taken from the positional arguments parsed at the top of the file.
main() {
if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
show_help
exit 0
fi
print_info "Attune Test User Setup"
print_info "======================"
print_info "Database: $DB_NAME"
print_info "Host: $DB_HOST:$DB_PORT"
echo ""
check_postgres
create_or_update_user "$ADMIN_LOGIN" "$ADMIN_PASSWORD" "$ADMIN_DISPLAY_NAME"
print_info "You can now login to the API:"
echo ""
echo " curl -X POST http://localhost:8080/auth/login \\"
echo " -H 'Content-Type: application/json' \\"
echo " -d '{\"login\":\"$ADMIN_LOGIN\",\"password\":\"$ADMIN_PASSWORD\"}'"
}
main "$@"

196
scripts/generate-python-client.sh Executable file
View File

@@ -0,0 +1,196 @@
#!/bin/bash
# Generate Python client from OpenAPI spec
# This script downloads the OpenAPI spec from the running API server
# and generates a Python client using openapi-python-client
set -e
# Configuration (API URL overridable via ATTUNE_API_URL)
API_URL="${ATTUNE_API_URL:-http://localhost:8080}"
OPENAPI_SPEC_URL="${API_URL}/api-spec/openapi.json"
OUTPUT_DIR="tests/generated_client"
TEMP_SPEC="/tmp/attune-openapi.json"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}=== Attune Python Client Generator ===${NC}"
echo ""
# Check if openapi-python-client is installed in the E2E virtualenv;
# install it there on demand (the venv itself must already exist).
OPENAPI_CLIENT_CMD="tests/venvs/e2e/bin/openapi-python-client"
if [ ! -f "${OPENAPI_CLIENT_CMD}" ]; then
echo -e "${YELLOW}openapi-python-client not found. Installing...${NC}"
if [ -d "tests/venvs/e2e" ]; then
tests/venvs/e2e/bin/pip install openapi-python-client
else
echo -e "${RED}ERROR: E2E venv not found. Please create it first.${NC}"
exit 1
fi
echo ""
fi
# Check if API is running (health endpoint must answer before we can
# download the spec)
echo -e "${BLUE}Checking API availability at ${API_URL}...${NC}"
if ! curl -s -f "${API_URL}/health" > /dev/null; then
echo -e "${RED}ERROR: API is not running at ${API_URL}${NC}"
echo "Please start the API service first:"
echo "  cd tests && ./start_e2e_services.sh"
exit 1
fi
echo -e "${GREEN}✓ API is running${NC}"
echo ""
# Download OpenAPI spec to a temp file (-f makes curl fail on HTTP errors)
echo -e "${BLUE}Downloading OpenAPI spec from ${OPENAPI_SPEC_URL}...${NC}"
if ! curl -s -f "${OPENAPI_SPEC_URL}" -o "${TEMP_SPEC}"; then
echo -e "${RED}ERROR: Failed to download OpenAPI spec${NC}"
echo "Make sure the API is running and the spec endpoint is available"
exit 1
fi
echo -e "${GREEN}✓ OpenAPI spec downloaded${NC}"
echo ""
# Validate JSON (`jq empty` parses without output; non-zero on bad JSON)
echo -e "${BLUE}Validating OpenAPI spec...${NC}"
if ! jq empty "${TEMP_SPEC}" 2>/dev/null; then
echo -e "${RED}ERROR: Invalid JSON in OpenAPI spec${NC}"
cat "${TEMP_SPEC}"
exit 1
fi
echo -e "${GREEN}✓ OpenAPI spec is valid${NC}"
echo ""
# Show spec info (title, version, endpoint count) for a quick sanity check
SPEC_TITLE=$(jq -r '.info.title' "${TEMP_SPEC}")
SPEC_VERSION=$(jq -r '.info.version' "${TEMP_SPEC}")
PATH_COUNT=$(jq '.paths | length' "${TEMP_SPEC}")
echo -e "${BLUE}API Info:${NC}"
echo "  Title: ${SPEC_TITLE}"
echo "  Version: ${SPEC_VERSION}"
echo "  Endpoints: ${PATH_COUNT}"
echo ""
# Remove old generated client if it exists
if [ -d "${OUTPUT_DIR}" ]; then
echo -e "${YELLOW}Removing old generated client...${NC}"
rm -rf "${OUTPUT_DIR}"
fi
# Generate Python client from the downloaded spec.
echo -e "${BLUE}Generating Python client...${NC}"
# BUG FIX: the previous `if [ $? -ne 0 ]` check after the command was dead
# code — with `set -e` a failing generator exits the script before the
# check runs. Guarding the command directly makes the error branch real.
if ! "${OPENAPI_CLIENT_CMD}" generate \
    --path "${TEMP_SPEC}" \
    --output-path "${OUTPUT_DIR}" \
    --overwrite \
    --meta none; then
    echo -e "${RED}ERROR: Client generation failed${NC}"
    exit 1
fi
echo -e "${GREEN}✓ Python client generated${NC}"
echo ""
# Install the generated client into the E2E virtualenv (editable install so
# regeneration is picked up without reinstalling).
echo -e "${BLUE}Installing generated client into E2E venv...${NC}"
if [ -d "tests/venvs/e2e" ]; then
    tests/venvs/e2e/bin/pip install -e "${OUTPUT_DIR}" --quiet
    echo -e "${GREEN}✓ Client installed${NC}"
else
    echo -e "${YELLOW}WARNING: E2E venv not found, skipping installation${NC}"
    echo "Run this to install manually:"
    echo " tests/venvs/e2e/bin/pip install -e ${OUTPUT_DIR}"
fi
echo ""
# Clean up the downloaded spec
rm -f "${TEMP_SPEC}"
# Create a simple usage example alongside the generated client.
# The heredoc delimiter is quoted ('EOF') so nothing inside is expanded —
# the markdown below is written out verbatim.
cat > "${OUTPUT_DIR}/USAGE.md" << 'EOF'
# Attune Python Client Usage
This client was auto-generated from the Attune OpenAPI specification.
## Installation
```bash
pip install -e tests/generated_client
```
## Basic Usage
```python
from attune_client import Client
from attune_client.api.auth import login
from attune_client.models import LoginRequest
# Create client
client = Client(base_url="http://localhost:8080")
# Login
login_request = LoginRequest(
login="test@attune.local",
password="TestPass123!"
)
response = login.sync(client=client, json_body=login_request)
token = response.data.access_token
# Use authenticated client
client = Client(
base_url="http://localhost:8080",
token=token
)
# List packs
from attune_client.api.packs import list_packs
packs = list_packs.sync(client=client)
print(f"Found {len(packs.data)} packs")
```
## Async Usage
All API calls have async equivalents:
```python
import asyncio
from attune_client import Client
from attune_client.api.packs import list_packs
async def main():
client = Client(base_url="http://localhost:8080", token="your-token")
packs = await list_packs.asyncio(client=client)
print(f"Found {len(packs.data)} packs")
asyncio.run(main())
```
## Regenerating
To regenerate the client after API changes:
```bash
./scripts/generate-python-client.sh
```
EOF
# Final summary with pointers to the generated artifacts and next steps.
echo -e "${GREEN}=== Client Generation Complete ===${NC}"
echo ""
echo "Generated client location: ${OUTPUT_DIR}"
echo "Usage guide: ${OUTPUT_DIR}/USAGE.md"
echo ""
echo "To use the client in tests:"
echo "  from attune_client import Client"
echo "  from attune_client.api.packs import list_packs"
echo ""
echo -e "${BLUE}Next steps:${NC}"
echo "  1. Review the generated client in ${OUTPUT_DIR}"
echo "  2. Update test fixtures to use the generated client"
echo "  3. Remove manual client code in tests/helpers/client.py"
echo ""

View File

@@ -0,0 +1,228 @@
#!/usr/bin/env python3
"""
Generate AGENTS.md index file in minified format.
This script scans the docs, scripts, and work-summary directories
and generates a minified index file that helps AI agents quickly
understand the project structure and available documentation.
The script uses AGENTS.md.template as a base and injects the generated
index at the {{DOCUMENTATION_INDEX}} placeholder.
Usage:
python scripts/generate_agents_md_index.py
"""
import os
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Set
def get_project_root() -> Path:
    """Return the project root directory, i.e. the parent of scripts/."""
    return Path(__file__).parent.parent
def scan_directory(
    base_path: Path, extensions: Set[str] = None
) -> Dict[str, List[str]]:
    """Walk ``base_path`` recursively and group the files it contains by
    sub-directory.

    Args:
        base_path: Directory to scan.
        extensions: File suffixes to keep (e.g. {'.md', '.py'}); None keeps
            all files.

    Returns:
        Mapping of directory path relative to ``base_path`` ('' for files
        directly inside it) to the list of file names found there. Empty
        when ``base_path`` does not exist.
    """
    if not base_path.exists():
        return {}
    grouped = defaultdict(list)
    for entry in sorted(base_path.rglob("*")):
        if not entry.is_file():
            continue
        # Apply the optional extension filter.
        if extensions and entry.suffix not in extensions:
            continue
        relative = entry.relative_to(base_path)
        key = "" if relative.parent == Path(".") else str(relative.parent)
        grouped[key].append(entry.name)
    return grouped
def format_directory_entry(
    dir_path: str, files: List[str], max_files: int = None
) -> str:
    """Render one directory as a minified index entry.

    Args:
        dir_path: Directory path ('' means the root, rendered as "root").
        files: File names in the directory (sorted before rendering).
        max_files: When given and exceeded, the list is cut at this many
            names and "..." is appended.

    Returns:
        A string of the form "path:{f1,f2,...}", or "" when files is empty.
    """
    if not files:
        return ""
    names = sorted(files)
    if max_files and len(names) > max_files:
        names = names[:max_files] + ["..."]
    joined = ",".join(names)
    label = dir_path if dir_path else "root"
    return f"{label}:{{{joined}}}"
def generate_index_content(root_dirs: Dict[str, Dict[str, any]]) -> str:
    """Build the minified documentation index.

    Args:
        root_dirs: Maps each top-level directory name to its scan config
            ({"path": Path, "extensions": set|None, "max_files": int}).

    Returns:
        The complete index as a newline-joined string: a fixed header
        followed by one "|path:{files}" entry per scanned sub-directory.
    """
    lines = [
        "[Attune Project Documentation Index]",
        "|root: ./",
        "|IMPORTANT: Prefer retrieval-led reasoning over pre-training-led reasoning",
        "|IMPORTANT: This index provides a quick overview - use grep/read_file for details",
        "|",
        "| Format: path/to/dir:{file1,file2,...}",
        "| '...' indicates truncated file list - use grep/list_directory for full contents",
        "|",
        "| To regenerate this index: make generate-agents-index",
        "|",
    ]
    # Append one entry per sub-directory of each configured root.
    for dir_name, config in root_dirs.items():
        structure = scan_directory(config["path"], config.get("extensions"))
        if not structure:
            lines.append(f"|{dir_name}: (empty)")
            continue
        # Sorted for deterministic output across runs.
        for dir_path in sorted(structure):
            full_path = f"{dir_name}/{dir_path}" if dir_path else dir_name
            entry = format_directory_entry(
                full_path, structure[dir_path], config.get("max_files", 10)
            )
            if entry:
                lines.append(f"|{entry}")
    return "\n".join(lines)
def generate_agents_md(
    template_path: Path, output_path: Path, root_dirs: Dict[str, Dict[str, any]]
) -> None:
    """Generate AGENTS.md by injecting the index into the template.

    Args:
        template_path: Path to the AGENTS.md.template file.
        output_path: Where the generated AGENTS.md is written.
        root_dirs: Directory scan configuration (see generate_index_content).

    When the template is missing, the bare index is written instead. Prints
    a short summary of the written file's line counts.
    """
    index_content = generate_index_content(root_dirs)
    if template_path.exists():
        # Substitute the placeholder in the template with the fresh index.
        content = template_path.read_text().replace(
            "{{DOCUMENTATION_INDEX}}", index_content
        )
    else:
        print(f"⚠️ Template not found at {template_path}")
        print(" Creating AGENTS.md without template...")
        content = index_content + "\n"
    output_path.write_text(content)
    print(f"✓ Generated {output_path}")
    index_lines = index_content.count("\n") + 1
    total_lines = content.count("\n") + 1
    print(f" Index lines: {index_lines}")
    print(f" Total lines: {total_lines}")
def main():
    """Main entry point."""
    project_root = get_project_root()
    # Configuration for directories to scan: each entry maps a top-level
    # directory to its path, the file extensions to index, and how many
    # files to list per directory before truncating with "...".
    root_dirs = {
        "docs": {
            "path": project_root / "docs",
            "extensions": {".md", ".txt", ".yaml", ".yml", ".json", ".sh"},
            "max_files": 15,
        },
        "scripts": {
            "path": project_root / "scripts",
            "extensions": {".sh", ".py", ".sql", ".js", ".html"},
            "max_files": 20,
        },
        "work-summary": {
            "path": project_root / "work-summary",
            "extensions": {".md", ".txt"},
            "max_files": 20,
        },
    }
    template_path = project_root / "AGENTS.md.template"
    output_path = project_root / "AGENTS.md"
    print("Generating AGENTS.md index...")
    print(f"Project root: {project_root}")
    print(f"Template: {template_path}")
    print()
    # Generate the index and write AGENTS.md
    generate_agents_md(template_path, output_path, root_dirs)
    print()
    print("Index generation complete!")
    print(f"Review the generated file at: {output_path}")


if __name__ == "__main__":
    main()

237
scripts/load-core-pack.sh Executable file
View File

@@ -0,0 +1,237 @@
#!/bin/bash
# Wrapper script for loading the core pack into Attune database
# Usage: ./scripts/load-core-pack.sh [options]
set -e
# Resolve this script's directory and the project root so relative paths
# work regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values (overridable via DATABASE_URL / ATTUNE_PACKS_DIR)
DATABASE_URL="${DATABASE_URL:-postgresql://postgres:postgres@localhost:5432/attune}"
PACKS_DIR="${ATTUNE_PACKS_DIR:-$PROJECT_ROOT/packs}"
PYTHON_BIN="python3"
# Colored status-message helpers.
# NOTE(review): each prints "${COLOR}${NC} msg" — the color is reset before
# any text, so nothing is actually colored; a status glyph between the two
# codes may have been lost in this file. Behavior preserved as-is.
info()    { echo -e "${BLUE}${NC} $1"; }
success() { echo -e "${GREEN}${NC} $1"; }
error()   { echo -e "${RED}${NC} $1"; }
warning() { echo -e "${YELLOW}${NC} $1"; }
# Parse command line arguments
DRY_RUN=false
VERBOSE=false
# Each value-taking option consumes two positional args (shift 2).
while [[ $# -gt 0 ]]; do
case $1 in
--database-url)
DATABASE_URL="$2"
shift 2
;;
--pack-dir)
PACKS_DIR="$2"
shift 2
;;
--python)
PYTHON_BIN="$2"
shift 2
;;
--dry-run)
DRY_RUN=true
shift
;;
-v|--verbose)
VERBOSE=true
shift
;;
-h|--help)
# Unquoted heredoc so $0 expands in the usage text below.
cat << EOF
Usage: $0 [options]
Load the Attune core pack into the database.
Options:
--database-url URL PostgreSQL connection string
(default: postgresql://postgres:postgres@localhost:5432/attune)
--pack-dir DIR Base directory for packs (default: ./packs)
--python PATH Path to Python interpreter (default: python3)
--dry-run Show what would be done without making changes
-v, --verbose Show detailed output
-h, --help Show this help message
Environment Variables:
DATABASE_URL PostgreSQL connection string
ATTUNE_PACKS_DIR Base directory for packs
Examples:
# Load core pack with default settings
$0
# Use custom database URL
$0 --database-url "postgresql://user:pass@db:5432/attune"
# Dry run to see what would happen
$0 --dry-run
EOF
exit 0
;;
*)
error "Unknown option: $1"
echo "Use --help for usage information"
exit 1
;;
esac
done
# Print banner
echo ""
echo "═══════════════════════════════════════════════════════════"
echo " Attune Core Pack Loader"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Check prerequisites
info "Checking prerequisites..."
# Check Python interpreter is on PATH (or at the --python path given)
if ! command -v "$PYTHON_BIN" &> /dev/null; then
error "Python 3 is required but not found: $PYTHON_BIN"
echo " Install Python 3.8+ and try again"
exit 1
fi
success "Python 3 found: $($PYTHON_BIN --version)"
# Check required Python packages by attempting to import them
MISSING_PACKAGES=()
if ! "$PYTHON_BIN" -c "import psycopg2" 2>/dev/null; then
MISSING_PACKAGES+=("psycopg2-binary")
fi
if ! "$PYTHON_BIN" -c "import yaml" 2>/dev/null; then
MISSING_PACKAGES+=("pyyaml")
fi
if [ ${#MISSING_PACKAGES[@]} -gt 0 ]; then
warning "Missing required Python packages: ${MISSING_PACKAGES[*]}"
echo ""
echo "Install them with:"
echo " pip install ${MISSING_PACKAGES[*]}"
echo ""
# Interactive prompt: offer to install the missing packages in place.
read -p "Install now? (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
info "Installing packages..."
pip install "${MISSING_PACKAGES[@]}"
success "Packages installed"
else
error "Required packages not installed"
exit 1
fi
fi
success "Python packages installed"
# Check database connectivity
info "Testing database connection..."
# pg_isready is a lightweight ping; fall back to an actual psql query if
# it is unavailable or reports failure.
if ! pg_isready -d "$DATABASE_URL" -q 2>/dev/null; then
# Try psql as fallback
if ! psql "$DATABASE_URL" -c "SELECT 1" >/dev/null 2>&1; then
error "Cannot connect to database"
echo " DATABASE_URL: $DATABASE_URL"
echo ""
echo "Troubleshooting:"
echo " - Check PostgreSQL is running"
echo " - Verify DATABASE_URL is correct"
echo " - Ensure database exists"
exit 1
fi
fi
success "Database connection OK"
# Check packs directory contains a core pack with a pack.yaml manifest
info "Checking packs directory..."
if [ ! -d "$PACKS_DIR/core" ]; then
error "Core pack directory not found: $PACKS_DIR/core"
exit 1
fi
success "Core pack directory found"
# Check pack.yaml exists
if [ ! -f "$PACKS_DIR/core/pack.yaml" ]; then
error "pack.yaml not found in core pack directory"
exit 1
fi
success "pack.yaml found"
echo ""
info "Configuration:"
echo " Database URL: $DATABASE_URL"
echo " Packs Directory: $PACKS_DIR"
echo " Core Pack: $PACKS_DIR/core"
echo ""
# Run the Python loader.
LOADER_SCRIPT="$SCRIPT_DIR/load_core_pack.py"
if [ ! -f "$LOADER_SCRIPT" ]; then
    error "Loader script not found: $LOADER_SCRIPT"
    exit 1
fi
# Argument list is built as an array so values containing spaces survive.
LOADER_ARGS=(
    "--database-url" "$DATABASE_URL"
    "--pack-dir" "$PACKS_DIR"
)
if [ "$DRY_RUN" = true ]; then
    LOADER_ARGS+=("--dry-run")
fi
if [ "$VERBOSE" = true ]; then
    info "Running loader with verbose output..."
fi
# BUG FIX: LOADER_EXIT_CODE=$? was previously read after the if/fi that ran
# the loader, which always yields the if-statement's status (0); worse,
# under `set -e` a loader failure aborted the script before the capture,
# making the failure branch below unreachable. Capture the status directly.
LOADER_EXIT_CODE=0
"$PYTHON_BIN" "$LOADER_SCRIPT" "${LOADER_ARGS[@]}" || LOADER_EXIT_CODE=$?
echo ""
if [ $LOADER_EXIT_CODE -eq 0 ]; then
    echo "═══════════════════════════════════════════════════════════"
    success "Core pack loaded successfully!"
    echo "═══════════════════════════════════════════════════════════"
    echo ""
    echo "Next steps:"
    echo " 1. Verify: attune pack show core"
    echo " 2. List actions: attune action list --pack core"
    echo " 3. Create a rule using core triggers and actions"
    echo ""
else
    echo "═══════════════════════════════════════════════════════════"
    error "Failed to load core pack"
    echo "═══════════════════════════════════════════════════════════"
    echo ""
    echo "Check the error messages above for details"
    exit $LOADER_EXIT_CODE
fi

510
scripts/load_core_pack.py Executable file
View File

@@ -0,0 +1,510 @@
#!/usr/bin/env python3
"""
Core Pack Loader for Attune
This script loads the core pack from the filesystem into the database.
It reads pack.yaml, action definitions, trigger definitions, and sensor definitions
and creates all necessary database entries.
Usage:
python3 scripts/load_core_pack.py [--database-url URL] [--pack-dir DIR]
Environment Variables:
DATABASE_URL: PostgreSQL connection string (default: from config or localhost)
ATTUNE_PACKS_DIR: Base directory for packs (default: ./packs)
"""
import argparse
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional
import psycopg2
import psycopg2.extras
import yaml
# Default configuration
DEFAULT_DATABASE_URL = "postgresql://postgres:postgres@localhost:5432/attune"
DEFAULT_PACKS_DIR = "./packs"
CORE_PACK_REF = "core"
def generate_label(name: str) -> str:
    """Derive a human-readable label from an internal name.

    Underscores are treated as word separators, and every resulting word
    is capitalized (first letter upper-cased, rest lower-cased).

    Examples:
        'crontimer' -> 'Crontimer'
        'http_request' -> 'Http Request'
        'datetime_timer' -> 'Datetime Timer'
    """
    words = name.replace("_", " ").split()
    capitalized = [word.capitalize() for word in words]
    return " ".join(capitalized)
class CorePackLoader:
    """Loads the core pack into the database.

    Reads pack.yaml plus the triggers/, actions/ and sensors/ directories
    under the core pack directory and upserts the corresponding rows
    (pack, runtime, trigger, action, sensor). All work happens in a single
    transaction: one commit at the end, rollback on any error.

    NOTE(review): this class has no dry-run support; callers that pass a
    --dry-run flag must handle it themselves before invoking load_pack().
    """

    def __init__(self, database_url: str, packs_dir: Path, schema: str = "public"):
        # PostgreSQL connection string, e.g. postgresql://user:pass@host/db
        self.database_url = database_url
        # Base directory containing all packs; the core pack lives in
        # <packs_dir>/<CORE_PACK_REF>.
        self.packs_dir = packs_dir
        self.core_pack_dir = packs_dir / CORE_PACK_REF
        # Schema placed first on search_path for all statements.
        self.schema = schema
        # Set by connect(); None until then.
        self.conn = None
        # Set by upsert_pack(); referenced by all subsequent upserts.
        self.pack_id = None

    def connect(self):
        """Connect to the database and pin the session search_path."""
        print(f"Connecting to database...")
        self.conn = psycopg2.connect(self.database_url)
        # Explicit transaction control: everything commits together in
        # load_pack(), or rolls back on error.
        self.conn.autocommit = False
        # Set search_path to use the correct schema
        cursor = self.conn.cursor()
        # NOTE(review): the schema name is interpolated directly into SQL.
        # It comes from a CLI flag / env var, but consider
        # psycopg2.sql.Identifier to quote it safely.
        cursor.execute(f"SET search_path TO {self.schema}, public")
        cursor.close()
        self.conn.commit()
        print(f"✓ Connected to database (schema: {self.schema})")

    def close(self):
        """Close database connection (safe to call if never connected)."""
        if self.conn:
            self.conn.close()

    def load_yaml(self, file_path: Path) -> Dict[str, Any]:
        """Load and parse YAML file."""
        with open(file_path, "r") as f:
            return yaml.safe_load(f)

    def upsert_pack(self) -> int:
        """Create or update the core pack row from pack.yaml.

        Stores the resulting id on self.pack_id and returns it.
        Raises FileNotFoundError if pack.yaml is missing.
        """
        print("\n→ Loading pack metadata...")
        pack_yaml_path = self.core_pack_dir / "pack.yaml"
        if not pack_yaml_path.exists():
            raise FileNotFoundError(f"pack.yaml not found at {pack_yaml_path}")
        pack_data = self.load_yaml(pack_yaml_path)
        cursor = self.conn.cursor()
        # Prepare pack data — only ref/label/version are required keys;
        # everything else falls back to an empty default.
        ref = pack_data["ref"]
        label = pack_data["label"]
        description = pack_data.get("description", "")
        version = pack_data["version"]
        # JSON columns are passed as serialized strings.
        conf_schema = json.dumps(pack_data.get("conf_schema", {}))
        config = json.dumps(pack_data.get("config", {}))
        meta = json.dumps(pack_data.get("meta", {}))
        tags = pack_data.get("tags", [])
        runtime_deps = pack_data.get("runtime_deps", [])
        # pack.yaml uses the key "system" for what the DB calls is_standard.
        is_standard = pack_data.get("system", False)
        # Upsert pack keyed on ref; re-running the loader updates in place.
        cursor.execute(
            """
            INSERT INTO pack (
                ref, label, description, version,
                conf_schema, config, meta, tags, runtime_deps, is_standard
            )
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            ON CONFLICT (ref) DO UPDATE SET
                label = EXCLUDED.label,
                description = EXCLUDED.description,
                version = EXCLUDED.version,
                conf_schema = EXCLUDED.conf_schema,
                config = EXCLUDED.config,
                meta = EXCLUDED.meta,
                tags = EXCLUDED.tags,
                runtime_deps = EXCLUDED.runtime_deps,
                is_standard = EXCLUDED.is_standard,
                updated = NOW()
            RETURNING id
            """,
            (
                ref,
                label,
                description,
                version,
                conf_schema,
                config,
                meta,
                tags,
                runtime_deps,
                is_standard,
            ),
        )
        self.pack_id = cursor.fetchone()[0]
        cursor.close()
        print(f"✓ Pack '{ref}' loaded (ID: {self.pack_id})")
        return self.pack_id

    def upsert_triggers(self) -> Dict[str, int]:
        """Load trigger definitions from triggers/*.yaml.

        Returns a mapping of full trigger ref ("core.<name>") -> row id.
        Missing triggers/ directory is not an error (returns {}).
        """
        print("\n→ Loading triggers...")
        triggers_dir = self.core_pack_dir / "triggers"
        if not triggers_dir.exists():
            print("  No triggers directory found")
            return {}
        trigger_ids = {}
        cursor = self.conn.cursor()
        # Sorted for deterministic load order across runs.
        for yaml_file in sorted(triggers_dir.glob("*.yaml")):
            trigger_data = self.load_yaml(yaml_file)
            ref = f"{CORE_PACK_REF}.{trigger_data['name']}"
            # Fall back to a generated label when the YAML omits one.
            label = trigger_data.get("label") or generate_label(trigger_data["name"])
            description = trigger_data.get("description", "")
            enabled = trigger_data.get("enabled", True)
            param_schema = json.dumps(trigger_data.get("parameters", {}))
            out_schema = json.dumps(trigger_data.get("output", {}))
            cursor.execute(
                """
                INSERT INTO trigger (
                    ref, pack, pack_ref, label, description,
                    enabled, param_schema, out_schema, is_adhoc
                )
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
                ON CONFLICT (ref) DO UPDATE SET
                    label = EXCLUDED.label,
                    description = EXCLUDED.description,
                    enabled = EXCLUDED.enabled,
                    param_schema = EXCLUDED.param_schema,
                    out_schema = EXCLUDED.out_schema,
                    updated = NOW()
                RETURNING id
                """,
                (
                    ref,
                    self.pack_id,
                    CORE_PACK_REF,
                    label,
                    description,
                    enabled,
                    param_schema,
                    out_schema,
                    False,  # Pack-installed triggers are not ad-hoc
                ),
            )
            trigger_id = cursor.fetchone()[0]
            trigger_ids[ref] = trigger_id
            print(f"  ✓ Trigger '{ref}' (ID: {trigger_id})")
        cursor.close()
        return trigger_ids

    def upsert_actions(self) -> Dict[str, int]:
        """Load action definitions from actions/*.yaml.

        Every action is bound to the shell runtime (created on demand).
        Returns a mapping of full action ref -> row id.
        """
        print("\n→ Loading actions...")
        actions_dir = self.core_pack_dir / "actions"
        if not actions_dir.exists():
            print("  No actions directory found")
            return {}
        action_ids = {}
        cursor = self.conn.cursor()
        # First, ensure we have a runtime for actions
        runtime_id = self.ensure_shell_runtime(cursor)
        for yaml_file in sorted(actions_dir.glob("*.yaml")):
            action_data = self.load_yaml(yaml_file)
            ref = f"{CORE_PACK_REF}.{action_data['name']}"
            label = action_data.get("label") or generate_label(action_data["name"])
            description = action_data.get("description", "")
            # Determine entrypoint: either explicit in YAML, or a sibling
            # script file named after the action (.sh preferred over .py).
            entrypoint = action_data.get("entry_point", "")
            if not entrypoint:
                # Try to find corresponding script file
                action_name = action_data["name"]
                for ext in [".sh", ".py"]:
                    script_path = actions_dir / f"{action_name}{ext}"
                    if script_path.exists():
                        # Stored relative to the packs base directory.
                        entrypoint = str(script_path.relative_to(self.packs_dir))
                        break
            param_schema = json.dumps(action_data.get("parameters", {}))
            out_schema = json.dumps(action_data.get("output", {}))
            cursor.execute(
                """
                INSERT INTO action (
                    ref, pack, pack_ref, label, description,
                    entrypoint, runtime, param_schema, out_schema, is_adhoc
                )
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                ON CONFLICT (ref) DO UPDATE SET
                    label = EXCLUDED.label,
                    description = EXCLUDED.description,
                    entrypoint = EXCLUDED.entrypoint,
                    param_schema = EXCLUDED.param_schema,
                    out_schema = EXCLUDED.out_schema,
                    updated = NOW()
                RETURNING id
                """,
                (
                    ref,
                    self.pack_id,
                    CORE_PACK_REF,
                    label,
                    description,
                    entrypoint,
                    runtime_id,
                    param_schema,
                    out_schema,
                    False,  # Pack-installed actions are not ad-hoc
                ),
            )
            action_id = cursor.fetchone()[0]
            action_ids[ref] = action_id
            print(f"  ✓ Action '{ref}' (ID: {action_id})")
        cursor.close()
        return action_ids

    def ensure_shell_runtime(self, cursor) -> int:
        """Upsert the 'core.action.shell' runtime row and return its id.

        Uses the caller's cursor so it participates in the same transaction.
        """
        cursor.execute(
            """
            INSERT INTO runtime (
                ref, pack, pack_ref, name, description, distributions
            )
            VALUES (%s, %s, %s, %s, %s, %s)
            ON CONFLICT (ref) DO UPDATE SET
                name = EXCLUDED.name,
                description = EXCLUDED.description,
                updated = NOW()
            RETURNING id
            """,
            (
                "core.action.shell",
                self.pack_id,
                CORE_PACK_REF,
                "Shell",
                "Shell script runtime",
                json.dumps({"shell": {"command": "sh"}}),
            ),
        )
        return cursor.fetchone()[0]

    def upsert_sensors(self, trigger_ids: Dict[str, int]) -> Dict[str, int]:
        """Load sensor definitions from sensors/*.yaml.

        Args:
            trigger_ids: mapping of trigger ref -> id, as returned by
                upsert_triggers(), used to resolve each sensor's trigger FK.
        Returns a mapping of full sensor ref -> row id.
        """
        print("\n→ Loading sensors...")
        sensors_dir = self.core_pack_dir / "sensors"
        if not sensors_dir.exists():
            print("  No sensors directory found")
            return {}
        sensor_ids = {}
        cursor = self.conn.cursor()
        # Ensure sensor runtime exists
        sensor_runtime_id = self.ensure_sensor_runtime(cursor)
        for yaml_file in sorted(sensors_dir.glob("*.yaml")):
            sensor_data = self.load_yaml(yaml_file)
            ref = f"{CORE_PACK_REF}.{sensor_data['name']}"
            label = sensor_data.get("label") or generate_label(sensor_data["name"])
            description = sensor_data.get("description", "")
            enabled = sensor_data.get("enabled", True)
            # Get trigger reference (handle both trigger_type and trigger_types)
            trigger_types = sensor_data.get("trigger_types", [])
            if not trigger_types:
                # Fallback to singular trigger_type
                trigger_type = sensor_data.get("trigger_type", "")
                trigger_types = [trigger_type] if trigger_type else []
            # Use the first trigger type (sensors currently support one trigger)
            trigger_ref = None
            trigger_id = None
            if trigger_types:
                # Check if it's already a full ref or just the type name
                first_trigger = trigger_types[0]
                if "." in first_trigger:
                    trigger_ref = first_trigger
                else:
                    trigger_ref = f"{CORE_PACK_REF}.{first_trigger}"
                # May be None if the ref was not loaded by upsert_triggers().
                trigger_id = trigger_ids.get(trigger_ref)
            # Determine entrypoint — note: unlike actions, .py is preferred
            # over .sh when searching for a sibling script.
            entry_point = sensor_data.get("entry_point", "")
            if not entry_point:
                sensor_name = sensor_data["name"]
                for ext in [".py", ".sh"]:
                    script_path = sensors_dir / f"{sensor_name}{ext}"
                    if script_path.exists():
                        entry_point = str(script_path.relative_to(self.packs_dir))
                        break
            config = json.dumps(sensor_data.get("config", {}))
            cursor.execute(
                """
                INSERT INTO sensor (
                    ref, pack, pack_ref, label, description,
                    entrypoint, runtime, runtime_ref, trigger, trigger_ref,
                    enabled, config
                )
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                ON CONFLICT (ref) DO UPDATE SET
                    label = EXCLUDED.label,
                    description = EXCLUDED.description,
                    entrypoint = EXCLUDED.entrypoint,
                    trigger = EXCLUDED.trigger,
                    trigger_ref = EXCLUDED.trigger_ref,
                    enabled = EXCLUDED.enabled,
                    config = EXCLUDED.config,
                    updated = NOW()
                RETURNING id
                """,
                (
                    ref,
                    self.pack_id,
                    CORE_PACK_REF,
                    label,
                    description,
                    entry_point,
                    sensor_runtime_id,
                    "core.sensor.builtin",
                    trigger_id,
                    trigger_ref,
                    enabled,
                    config,
                ),
            )
            sensor_id = cursor.fetchone()[0]
            sensor_ids[ref] = sensor_id
            print(f"  ✓ Sensor '{ref}' (ID: {sensor_id})")
        cursor.close()
        return sensor_ids

    def ensure_sensor_runtime(self, cursor) -> int:
        """Upsert the 'core.sensor.builtin' runtime row and return its id."""
        cursor.execute(
            """
            INSERT INTO runtime (
                ref, pack, pack_ref, name, description, distributions
            )
            VALUES (%s, %s, %s, %s, %s, %s)
            ON CONFLICT (ref) DO UPDATE SET
                name = EXCLUDED.name,
                description = EXCLUDED.description,
                updated = NOW()
            RETURNING id
            """,
            (
                "core.sensor.builtin",
                self.pack_id,
                CORE_PACK_REF,
                "Built-in Sensor",
                "Built-in sensor runtime",
                json.dumps([]),
            ),
        )
        return cursor.fetchone()[0]

    def load_pack(self):
        """Main loading process: pack, triggers, actions, sensors.

        Commits everything in one transaction; on any exception, rolls
        back and terminates the process with exit status 1 (this method
        calls sys.exit rather than re-raising).
        """
        print("=" * 60)
        print("Core Pack Loader")
        print("=" * 60)
        if not self.core_pack_dir.exists():
            raise FileNotFoundError(
                f"Core pack directory not found: {self.core_pack_dir}"
            )
        try:
            self.connect()
            # Load pack metadata
            self.upsert_pack()
            # Load triggers
            trigger_ids = self.upsert_triggers()
            # Load actions
            action_ids = self.upsert_actions()
            # Load sensors (needs trigger ids to resolve foreign keys)
            sensor_ids = self.upsert_sensors(trigger_ids)
            # Commit all changes
            self.conn.commit()
            print("\n" + "=" * 60)
            print("✓ Core pack loaded successfully!")
            print("=" * 60)
            print(f"  Pack ID: {self.pack_id}")
            print(f"  Triggers: {len(trigger_ids)}")
            print(f"  Actions: {len(action_ids)}")
            print(f"  Sensors: {len(sensor_ids)}")
            print()
        except Exception as e:
            if self.conn:
                self.conn.rollback()
            print(f"\n✗ Error loading core pack: {e}")
            import traceback
            traceback.print_exc()
            sys.exit(1)
        finally:
            self.close()
def main():
    """CLI entry point: parse arguments and run the core pack loader.

    Configuration precedence: command-line flag, then environment
    variable, then built-in default.
    """
    parser = argparse.ArgumentParser(
        description="Load the core pack into the Attune database"
    )
    parser.add_argument(
        "--database-url",
        default=os.getenv("DATABASE_URL", DEFAULT_DATABASE_URL),
        help=f"PostgreSQL connection string (default: {DEFAULT_DATABASE_URL})",
    )
    parser.add_argument(
        "--pack-dir",
        type=Path,
        default=Path(os.getenv("ATTUNE_PACKS_DIR", DEFAULT_PACKS_DIR)),
        help=f"Base directory for packs (default: {DEFAULT_PACKS_DIR})",
    )
    parser.add_argument(
        "--schema",
        default=os.getenv("DB_SCHEMA", "public"),
        help="Database schema to use (default: public)",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Print what would be done without making changes",
    )
    args = parser.parse_args()
    if args.dry_run:
        # BUG FIX: previously --dry-run printed "No changes will be made"
        # and then ran the loader anyway, which COMMITS changes (the loader
        # has no dry-run support). Validate the inputs and stop before
        # touching the database so the flag keeps its promise.
        print("DRY RUN MODE: No changes will be made")
        print()
        core_pack_dir = args.pack_dir / CORE_PACK_REF
        pack_yaml = core_pack_dir / "pack.yaml"
        if not pack_yaml.exists():
            print(f"✗ pack.yaml not found at {pack_yaml}")
            sys.exit(1)
        print(f"Would load core pack from {core_pack_dir}")
        sys.exit(0)
    loader = CorePackLoader(args.database_url, args.pack_dir, args.schema)
    loader.load_pack()


if __name__ == "__main__":
    main()

223
scripts/quick-test-happy-path.sh Executable file
View File

@@ -0,0 +1,223 @@
#!/bin/bash
# Quick Test: Timer Echo Happy Path
# Tests the complete event flow with unified runtime detection
#
# Requires: curl and jq on PATH, and a running Attune API reachable at
# $ATTUNE_API_URL (defaults below). Exits non-zero on the first failure
# because of `set -e`.
set -e
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Configuration — all overridable via environment variables.
API_URL="${ATTUNE_API_URL:-http://localhost:8080}"
LOGIN="${ATTUNE_LOGIN:-test@attune.local}"
PASSWORD="${ATTUNE_PASSWORD:-TestPass123!}"
echo -e "${BLUE}=== Quick Test: Timer Echo Happy Path ===${NC}\n"
# Step 1: Authenticate and capture a bearer token for all later calls.
echo -e "${YELLOW}Step 1:${NC} Authenticating..."
LOGIN_RESPONSE=$(curl -s -X POST "$API_URL/auth/login" \
    -H "Content-Type: application/json" \
    -d "{\"login\":\"$LOGIN\",\"password\":\"$PASSWORD\"}")
TOKEN=$(echo "$LOGIN_RESPONSE" | jq -r '.data.access_token // empty')
if [ -z "$TOKEN" ]; then
    echo -e "${RED}✗ Authentication failed${NC}"
    echo "Response: $LOGIN_RESPONSE"
    exit 1
fi
echo -e "${GREEN}✓ Authenticated${NC}\n"
# Step 2: Check core pack is loaded (prerequisite for everything below).
echo -e "${YELLOW}Step 2:${NC} Checking core pack..."
PACK=$(curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/api/v1/packs/core")
PACK_ID=$(echo "$PACK" | jq -r '.data.id // empty')
if [ -z "$PACK_ID" ]; then
    echo -e "${RED}✗ Core pack not loaded${NC}"
    echo "Please load core pack first with: docker exec attune-api /opt/attune/scripts/load-core-pack.sh"
    exit 1
fi
echo -e "${GREEN}✓ Core pack loaded (ID: $PACK_ID)${NC}\n"
# Step 3: Check for echo action
echo -e "${YELLOW}Step 3:${NC} Checking echo action..."
ACTIONS=$(curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/api/v1/actions")
ECHO_ACTION=$(echo "$ACTIONS" | jq -r '.data[]? | select(.ref == "core.echo") | .ref')
if [ -z "$ECHO_ACTION" ]; then
    echo -e "${RED}✗ Echo action not found${NC}"
    exit 1
fi
echo -e "${GREEN}✓ Echo action found${NC}\n"
# Step 4: Check interval timer trigger
echo -e "${YELLOW}Step 4:${NC} Checking interval timer trigger..."
TRIGGERS=$(curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/api/v1/triggers")
TIMER_TRIGGER=$(echo "$TRIGGERS" | jq -r '.data[]? | select(.ref == "core.intervaltimer") | .id')
if [ -z "$TIMER_TRIGGER" ]; then
    echo -e "${RED}✗ Interval timer trigger not found${NC}"
    exit 1
fi
echo -e "${GREEN}✓ Interval timer trigger found (ID: $TIMER_TRIGGER)${NC}\n"
# Step 5: Create rule with embedded timer config. The ref embeds a
# timestamp so repeated runs never collide with a previous test rule.
echo -e "${YELLOW}Step 5:${NC} Creating rule with 1-second timer..."
RULE_REF="core.quicktest_echo_$(date +%s)"
RULE_RESPONSE=$(curl -s -X POST "$API_URL/api/v1/rules" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/json" \
    -d "{
    \"ref\": \"$RULE_REF\",
    \"pack_ref\": \"core\",
    \"label\": \"Quick Test Echo\",
    \"description\": \"Quick test - echo every second\",
    \"enabled\": true,
    \"trigger_ref\": \"core.intervaltimer\",
    \"trigger_parameters\": {
    \"unit\": \"seconds\",
    \"interval\": 1
    },
    \"action_ref\": \"core.echo\",
    \"action_parameters\": {
    \"message\": \"Hello, World! (quick test)\"
    }
    }")
RULE_ID=$(echo "$RULE_RESPONSE" | jq -r '.data.id // empty')
if [ -z "$RULE_ID" ]; then
    echo -e "${RED}✗ Failed to create rule${NC}"
    echo "Response: $RULE_RESPONSE"
    exit 1
fi
echo -e "${GREEN}✓ Rule created (ID: $RULE_ID, Ref: $RULE_REF)${NC}\n"
# Step 6: Wait for executions. Polls 5 times at 3-second intervals and
# tracks the highest count of core.echo executions seen so far.
echo -e "${YELLOW}Step 6:${NC} Waiting for executions..."
echo "Waiting 15 seconds for timer to fire and actions to execute..."
EXECUTION_COUNT=0
for i in {1..5}; do
    sleep 3
    EXECS=$(curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/api/v1/executions?limit=50")
    COUNT=$(echo "$EXECS" | jq '[.data[]? | select(.action_ref == "core.echo")] | length')
    if [ "$COUNT" -gt "$EXECUTION_COUNT" ]; then
        EXECUTION_COUNT=$COUNT
        echo -e " ${GREEN}Found $EXECUTION_COUNT execution(s)${NC} (after $((i*3))s)"
    fi
done
echo ""
if [ "$EXECUTION_COUNT" -eq 0 ]; then
    echo -e "${RED}✗ No executions found${NC}\n"
    echo "Checking events..."
    EVENTS=$(curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/api/v1/events?limit=10")
    EVENT_COUNT=$(echo "$EVENTS" | jq '.data | length // 0')
    echo " Events created: $EVENT_COUNT"
    echo "Checking enforcements..."
    ENFORCEMENTS=$(curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/api/v1/enforcements?limit=10")
    ENFORCEMENT_COUNT=$(echo "$ENFORCEMENTS" | jq '.data | length // 0')
    echo " Enforcements created: $ENFORCEMENT_COUNT"
    # BUG FIX: this failure path previously exited without any cleanup,
    # leaving the 1-second test rule created in Step 5 enabled and firing
    # forever. Disable it (best-effort) before bailing out.
    curl -s -X PUT "$API_URL/api/v1/rules/$RULE_REF" \
        -H "Authorization: Bearer $TOKEN" \
        -H "Content-Type: application/json" \
        -d '{"enabled": false}' > /dev/null || true
    echo -e "\n${RED}TEST FAILED - Check service logs:${NC}"
    echo " docker logs attune-sensor --tail 50 | grep -i timer"
    echo " docker logs attune-executor --tail 50"
    echo " docker logs attune-worker-shell --tail 50"
    echo ""
    exit 1
fi
# Step 7: Check execution status — classify core.echo executions by status.
echo -e "${YELLOW}Step 7:${NC} Verifying execution status..."
EXECS=$(curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/api/v1/executions?limit=20")
SUCCESS_COUNT=$(echo "$EXECS" | jq '[.data[]? | select(.action_ref == "core.echo" and .status == "succeeded")] | length')
FAILED_COUNT=$(echo "$EXECS" | jq '[.data[]? | select(.action_ref == "core.echo" and .status == "failed")] | length')
RUNNING_COUNT=$(echo "$EXECS" | jq '[.data[]? | select(.action_ref == "core.echo" and .status == "running")] | length')
echo -e "${GREEN}✓ Total executions: $EXECUTION_COUNT${NC}"
echo -e "${GREEN}✓ Successful: $SUCCESS_COUNT${NC}"
if [ "$FAILED_COUNT" -gt 0 ]; then
    echo -e "${RED}✗ Failed: $FAILED_COUNT${NC}"
fi
if [ "$RUNNING_COUNT" -gt 0 ]; then
    echo -e "${YELLOW}⟳ Running: $RUNNING_COUNT${NC}"
fi
echo ""
# Show sample executions (first three core.echo entries, if any).
echo "Sample executions:"
echo "$EXECS" | jq '.data[0:3] | .[] | select(.action_ref == "core.echo") | {id, status, action_ref, created}' 2>/dev/null || echo " (no execution details available)"
echo ""
# Step 8: Cleanup — disable (not delete) the test rule so the timer stops.
echo -e "${YELLOW}Step 8:${NC} Cleanup..."
curl -s -X PUT "$API_URL/api/v1/rules/$RULE_REF" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/json" \
    -d '{"enabled": false}' > /dev/null
echo -e "${GREEN}✓ Rule disabled${NC}\n"
# Final summary
echo -e "${BLUE}=== Test Summary ===${NC}\n"
echo -e "✓ Core pack loaded"
echo -e "✓ Timer trigger available (interval timer)"
echo -e "✓ Echo action available"
echo -e "✓ Rule created (1-second interval)"
echo -e "✓ Executions found: $EXECUTION_COUNT"
echo -e "✓ Successful executions: $SUCCESS_COUNT"
echo ""
if [ "$SUCCESS_COUNT" -gt 0 ]; then
    echo -e "${GREEN}=== HAPPY PATH TEST PASSED ===${NC}\n"
    echo "Complete event flow working:"
    echo " Timer Sensor → Event → Rule → Enforcement → Execution → Worker → Shell Action"
    echo ""
    echo "The unified runtime detection system is functioning correctly!"
    echo "The worker successfully detected the Shell runtime and executed the echo action."
    echo ""
    exit 0
elif [ "$EXECUTION_COUNT" -gt 0 ] && [ "$RUNNING_COUNT" -gt 0 ]; then
    echo -e "${YELLOW}=== PARTIAL SUCCESS ===${NC}\n"
    echo "Executions created and some are still running."
    echo "This is expected - actions may complete after this script finishes."
    echo ""
    echo "To check final status:"
    # NOTE(review): this prints the live bearer token to stdout as part of
    # the suggested command — acceptable for a local test, but worth noting.
    echo " curl -H 'Authorization: Bearer $TOKEN' $API_URL/api/v1/executions?limit=20 | jq '.data[] | select(.action_ref == \"core.echo\") | {id, status}'"
    echo ""
    exit 0
else
    echo -e "${RED}=== TEST FAILED ===${NC}\n"
    echo "Executions created but none succeeded."
    echo ""
    echo "To debug:"
    echo " 1. Check sensor logs: docker logs attune-sensor --tail 100"
    echo " 2. Check executor logs: docker logs attune-executor --tail 100"
    echo " 3. Check worker logs: docker logs attune-worker-shell --tail 100"
    echo ""
    echo " 4. Check execution details:"
    EXEC_ID=$(echo "$EXECS" | jq -r '.data[0].id // empty')
    if [ -n "$EXEC_ID" ]; then
        echo " curl -H 'Authorization: Bearer $TOKEN' $API_URL/api/v1/executions/$EXEC_ID | jq ."
    fi
    echo ""
    exit 1
fi

431
scripts/seed_core_pack.sql Normal file
View File

@@ -0,0 +1,431 @@
-- Seed Core Pack with Generic Timer Triggers, Sensors, and Basic Actions
-- This script creates the core pack with the new trigger/sensor architecture
-- Idempotent: every statement uses ON CONFLICT (ref) DO UPDATE, so re-running
-- the script refreshes existing rows in place.
SET search_path TO attune, public;
-- Insert core pack
INSERT INTO attune.pack (ref, label, description, version)
VALUES (
    'core',
    'Core Pack',
    'Built-in core functionality including timer triggers and basic actions',
    '1.0.0'
)
ON CONFLICT (ref) DO UPDATE SET
    label = EXCLUDED.label,
    description = EXCLUDED.description,
    version = EXCLUDED.version,
    updated = NOW();
-- Get pack ID for reference, then create runtimes, triggers, actions,
-- sensors and an example rule — all inside one DO block so the captured
-- ids can be threaded through as foreign keys.
DO $$
DECLARE
    v_pack_id BIGINT;
    v_action_runtime_id BIGINT;
    v_sensor_runtime_id BIGINT;
    v_intervaltimer_id BIGINT;
    v_crontimer_id BIGINT;
    v_datetimetimer_id BIGINT;
    v_echo_action_id BIGINT;
    v_sensor_10s_id BIGINT;
BEGIN
    -- Get core pack ID
    SELECT id INTO v_pack_id FROM attune.pack WHERE ref = 'core';
    -- Create shell runtime for actions
    INSERT INTO attune.runtime (ref, pack, pack_ref, name, description, runtime_type, distributions)
    VALUES (
        'core.action.shell',
        v_pack_id,
        'core',
        'shell',
        'Execute shell commands',
        'action',
        '{"shell": {"command": "sh"}}'::jsonb
    )
    ON CONFLICT (ref) DO UPDATE SET
        name = EXCLUDED.name,
        description = EXCLUDED.description,
        updated = NOW()
    RETURNING id INTO v_action_runtime_id;
    -- Create built-in runtime for sensors
    INSERT INTO attune.runtime (ref, pack, pack_ref, name, description, runtime_type, distributions)
    VALUES (
        'core.sensor.builtin',
        v_pack_id,
        'core',
        'Built-in',
        'Built-in runtime for system timers and sensors',
        'sensor',
        '[]'::jsonb
    )
    ON CONFLICT (ref) DO UPDATE SET
        name = EXCLUDED.name,
        description = EXCLUDED.description,
        updated = NOW()
    RETURNING id INTO v_sensor_runtime_id;
    -- Create generic timer triggers (these define trigger types, not instances)
    -- Interval Timer Trigger Type
    INSERT INTO attune.trigger (
        ref,
        pack,
        pack_ref,
        label,
        description,
        enabled,
        param_schema,
        out_schema
    )
    VALUES (
        'core.intervaltimer',
        v_pack_id,
        'core',
        'Interval Timer',
        'Fires at regular intervals based on specified time unit and interval',
        true,
        '{
            "type": "object",
            "properties": {
                "unit": {
                    "type": "string",
                    "enum": ["seconds", "minutes", "hours"],
                    "description": "Time unit for the interval"
                },
                "interval": {
                    "type": "integer",
                    "minimum": 1,
                    "description": "Number of time units between each trigger"
                }
            },
            "required": ["unit", "interval"]
        }'::jsonb,
        '{
            "type": "object",
            "properties": {
                "type": {"type": "string", "const": "interval"},
                "interval_seconds": {"type": "integer"},
                "fired_at": {"type": "string", "format": "date-time"}
            }
        }'::jsonb
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        param_schema = EXCLUDED.param_schema,
        out_schema = EXCLUDED.out_schema,
        updated = NOW()
    RETURNING id INTO v_intervaltimer_id;
    -- Cron Timer Trigger Type
    INSERT INTO attune.trigger (
        ref,
        pack,
        pack_ref,
        label,
        description,
        enabled,
        param_schema,
        out_schema
    )
    VALUES (
        'core.crontimer',
        v_pack_id,
        'core',
        'Cron Timer',
        'Fires based on a cron schedule expression',
        true,
        '{
            "type": "object",
            "properties": {
                "expression": {
                    "type": "string",
                    "description": "Cron expression (e.g., \"0 0 * * * *\" for every hour)"
                }
            },
            "required": ["expression"]
        }'::jsonb,
        '{
            "type": "object",
            "properties": {
                "type": {"type": "string", "const": "cron"},
                "fired_at": {"type": "string", "format": "date-time"},
                "scheduled_at": {"type": "string", "format": "date-time"}
            }
        }'::jsonb
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        param_schema = EXCLUDED.param_schema,
        out_schema = EXCLUDED.out_schema,
        updated = NOW()
    RETURNING id INTO v_crontimer_id;
    -- Datetime Timer Trigger Type (one-shot: fires once at a fixed time)
    INSERT INTO attune.trigger (
        ref,
        pack,
        pack_ref,
        label,
        description,
        enabled,
        param_schema,
        out_schema
    )
    VALUES (
        'core.datetimetimer',
        v_pack_id,
        'core',
        'Datetime Timer',
        'Fires once at a specific date and time',
        true,
        '{
            "type": "object",
            "properties": {
                "fire_at": {
                    "type": "string",
                    "format": "date-time",
                    "description": "ISO 8601 timestamp when the timer should fire"
                }
            },
            "required": ["fire_at"]
        }'::jsonb,
        '{
            "type": "object",
            "properties": {
                "type": {"type": "string", "const": "one_shot"},
                "fire_at": {"type": "string", "format": "date-time"},
                "fired_at": {"type": "string", "format": "date-time"}
            }
        }'::jsonb
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        param_schema = EXCLUDED.param_schema,
        out_schema = EXCLUDED.out_schema,
        updated = NOW()
    RETURNING id INTO v_datetimetimer_id;
    -- Create actions
    -- Echo action
    INSERT INTO attune.action (
        ref,
        pack,
        pack_ref,
        label,
        description,
        entrypoint,
        runtime,
        param_schema,
        out_schema
    )
    VALUES (
        'core.echo',
        v_pack_id,
        'core',
        'Echo',
        'Echo a message to stdout',
        'echo "${message}"',
        v_action_runtime_id,
        jsonb_build_object(
            'type', 'object',
            'properties', jsonb_build_object(
                'message', jsonb_build_object(
                    'type', 'string',
                    'description', 'Message to echo',
                    'default', 'Hello World'
                )
            ),
            'required', jsonb_build_array('message')
        ),
        jsonb_build_object(
            'type', 'object',
            'properties', jsonb_build_object(
                'stdout', jsonb_build_object('type', 'string'),
                'stderr', jsonb_build_object('type', 'string'),
                'exit_code', jsonb_build_object('type', 'integer')
            )
        )
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        entrypoint = EXCLUDED.entrypoint,
        param_schema = EXCLUDED.param_schema,
        out_schema = EXCLUDED.out_schema,
        updated = NOW()
    RETURNING id INTO v_echo_action_id;
    -- Sleep action
    INSERT INTO attune.action (
        ref,
        pack,
        pack_ref,
        label,
        description,
        entrypoint,
        runtime,
        param_schema,
        out_schema
    )
    VALUES (
        'core.sleep',
        v_pack_id,
        'core',
        'Sleep',
        'Sleep for a specified number of seconds',
        'sleep ${seconds}',
        v_action_runtime_id,
        jsonb_build_object(
            'type', 'object',
            'properties', jsonb_build_object(
                'seconds', jsonb_build_object(
                    'type', 'integer',
                    'description', 'Number of seconds to sleep',
                    'default', 1,
                    'minimum', 0
                )
            ),
            'required', jsonb_build_array('seconds')
        ),
        jsonb_build_object(
            'type', 'object',
            'properties', jsonb_build_object(
                'exit_code', jsonb_build_object('type', 'integer')
            )
        )
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        entrypoint = EXCLUDED.entrypoint,
        param_schema = EXCLUDED.param_schema,
        out_schema = EXCLUDED.out_schema,
        updated = NOW();
    -- Noop (no operation) action
    INSERT INTO attune.action (
        ref,
        pack,
        pack_ref,
        label,
        description,
        entrypoint,
        runtime,
        param_schema,
        out_schema
    )
    VALUES (
        'core.noop',
        v_pack_id,
        'core',
        'No Operation',
        'Does nothing - useful for testing',
        'exit 0',
        v_action_runtime_id,
        jsonb_build_object(
            'type', 'object',
            'properties', jsonb_build_object()
        ),
        jsonb_build_object(
            'type', 'object',
            'properties', jsonb_build_object(
                'exit_code', jsonb_build_object('type', 'integer')
            )
        )
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        entrypoint = EXCLUDED.entrypoint,
        param_schema = EXCLUDED.param_schema,
        out_schema = EXCLUDED.out_schema,
        updated = NOW();
    -- Create example sensor: 10-second interval timer
    -- NOTE(review): the conflict branch intentionally refreshes only
    -- label/description/config; runtime and trigger bindings are not
    -- updated on re-run — confirm that is the desired behavior.
    INSERT INTO attune.sensor (
        ref,
        pack,
        pack_ref,
        label,
        description,
        entrypoint,
        runtime,
        runtime_ref,
        trigger,
        trigger_ref,
        enabled,
        config
    )
    VALUES (
        'core.timer_10s_sensor',
        v_pack_id,
        'core',
        '10 Second Timer Sensor',
        'Timer sensor that fires every 10 seconds',
        'builtin:interval_timer',
        v_sensor_runtime_id,
        'core.sensor.builtin',
        v_intervaltimer_id,
        'core.intervaltimer',
        true,
        '{"unit": "seconds", "interval": 10}'::jsonb
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        config = EXCLUDED.config,
        updated = NOW()
    RETURNING id INTO v_sensor_10s_id;
    -- Create example rule: 10-second timer triggers echo action with "hello, world"
    INSERT INTO attune.rule (
        ref,
        pack,
        pack_ref,
        label,
        description,
        action,
        action_ref,
        trigger,
        trigger_ref,
        conditions,
        action_params,
        enabled
    )
    VALUES (
        'core.rule.timer_10s_echo',
        v_pack_id,
        'core',
        'Echo Hello World Every 10 Seconds',
        'Example rule that echoes "hello, world" every 10 seconds',
        v_echo_action_id,
        'core.echo',
        v_intervaltimer_id,
        'core.intervaltimer',
        jsonb_build_object(), -- No conditions
        jsonb_build_object(
            'message', 'hello, world'
        ),
        true
    )
    ON CONFLICT (ref) DO UPDATE SET
        label = EXCLUDED.label,
        description = EXCLUDED.description,
        action_params = EXCLUDED.action_params,
        updated = NOW();
    -- Summarize what was seeded (visible in client notices/log output).
    RAISE NOTICE 'Core pack seeded successfully';
    RAISE NOTICE 'Pack ID: %', v_pack_id;
    RAISE NOTICE 'Action Runtime ID: %', v_action_runtime_id;
    RAISE NOTICE 'Sensor Runtime ID: %', v_sensor_runtime_id;
    RAISE NOTICE 'Trigger Types: intervaltimer=%, crontimer=%, datetimetimer=%', v_intervaltimer_id, v_crontimer_id, v_datetimetimer_id;
    RAISE NOTICE 'Actions: core.echo, core.sleep, core.noop';
    RAISE NOTICE 'Sensors: core.timer_10s_sensor (id=%)', v_sensor_10s_id;
    RAISE NOTICE 'Rules: core.rule.timer_10s_echo';
END $$;

256
scripts/seed_runtimes.sql Normal file
View File

@@ -0,0 +1,256 @@
-- Seed Default Runtimes
-- Description: Inserts default runtime configurations for actions and sensors
-- This should be run after migrations to populate the runtime table with core runtimes
-- Idempotent: all inserts use ON CONFLICT (ref) DO UPDATE.
-- NOTE(review): unlike seed_core_pack.sql, these rows set only pack_ref and
-- not the pack id column — confirm the runtime.pack column is nullable or
-- resolved elsewhere.
SET search_path TO attune, public;
-- ============================================================================
-- ACTION RUNTIMES
-- ============================================================================
-- Python 3 Action Runtime
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.action.python3',
    'core',
    'Python 3 Action Runtime',
    'Execute actions using Python 3.x interpreter',
    'action',
    '["python3"]'::jsonb,
    '{
        "method": "system",
        "package_manager": "pip",
        "requirements_file": "requirements.txt"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Shell Action Runtime
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.action.shell',
    'core',
    'Shell Action Runtime',
    'Execute actions using system shell (bash/sh)',
    'action',
    '["bash", "sh"]'::jsonb,
    '{
        "method": "system",
        "shell": "/bin/bash"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Node.js Action Runtime
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.action.nodejs',
    'core',
    'Node.js Action Runtime',
    'Execute actions using Node.js runtime',
    'action',
    '["nodejs", "node"]'::jsonb,
    '{
        "method": "system",
        "package_manager": "npm",
        "requirements_file": "package.json"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Native Action Runtime (for compiled Rust binaries and other native executables)
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.action.native',
    'core',
    'Native Action Runtime',
    'Execute actions as native compiled binaries',
    'action',
    '["native"]'::jsonb,
    '{
        "method": "binary",
        "description": "Native executable - no runtime installation required"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- ============================================================================
-- SENSOR RUNTIMES (mirror the action runtimes, with runtime_type = 'sensor')
-- ============================================================================
-- Python 3 Sensor Runtime
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.sensor.python3',
    'core',
    'Python 3 Sensor Runtime',
    'Execute sensors using Python 3.x interpreter',
    'sensor',
    '["python3"]'::jsonb,
    '{
        "method": "system",
        "package_manager": "pip",
        "requirements_file": "requirements.txt"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Shell Sensor Runtime
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.sensor.shell',
    'core',
    'Shell Sensor Runtime',
    'Execute sensors using system shell (bash/sh)',
    'sensor',
    '["bash", "sh"]'::jsonb,
    '{
        "method": "system",
        "shell": "/bin/bash"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Node.js Sensor Runtime
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.sensor.nodejs',
    'core',
    'Node.js Sensor Runtime',
    'Execute sensors using Node.js runtime',
    'sensor',
    '["nodejs", "node"]'::jsonb,
    '{
        "method": "system",
        "package_manager": "npm",
        "requirements_file": "package.json"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- Native Sensor Runtime (for compiled Rust binaries and other native executables)
INSERT INTO attune.runtime (
    ref,
    pack_ref,
    name,
    description,
    runtime_type,
    distributions,
    installation
) VALUES (
    'core.sensor.native',
    'core',
    'Native Sensor Runtime',
    'Execute sensors as native compiled binaries',
    'sensor',
    '["native"]'::jsonb,
    '{
        "method": "binary",
        "description": "Native executable - no runtime installation required"
    }'::jsonb
) ON CONFLICT (ref) DO UPDATE SET
    name = EXCLUDED.name,
    description = EXCLUDED.description,
    distributions = EXCLUDED.distributions,
    installation = EXCLUDED.installation,
    updated = NOW();
-- ============================================================================
-- VERIFICATION
-- ============================================================================
-- Display seeded runtimes
DO $$
DECLARE
    runtime_count INTEGER;
BEGIN
    SELECT COUNT(*) INTO runtime_count FROM attune.runtime WHERE pack_ref = 'core';
    RAISE NOTICE 'Seeded % core runtime(s)', runtime_count;
END $$;
-- Show summary grouped by runtime_type for a quick visual check.
SELECT
    runtime_type,
    COUNT(*) as count,
    ARRAY_AGG(ref ORDER BY ref) as refs
FROM attune.runtime
WHERE pack_ref = 'core'
GROUP BY runtime_type
ORDER BY runtime_type;
238
scripts/setup-db.sh Executable file
View File

@@ -0,0 +1,238 @@
#!/bin/bash
# Database Setup Script for Attune
# This script creates the database and runs migrations
set -e # Exit on error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Default values
DB_NAME="${ATTUNE_DB_NAME:-attune}"
DB_USER="${ATTUNE_DB_USER:-postgres}"
DB_HOST="${ATTUNE_DB_HOST:-localhost}"
DB_PORT="${ATTUNE_DB_PORT:-5432}"
DB_PASSWORD="${ATTUNE_DB_PASSWORD:-postgres}"
# Functions
# --- Logging helpers -------------------------------------------------------
# Each prints a colour-tagged, single-line message ($1) to stdout using the
# RED/GREEN/YELLOW/NC colour variables defined at the top of this script.
print_info()  { echo -e "${GREEN}[INFO]${NC} $1"; }
print_warn()  { echo -e "${YELLOW}[WARN]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Verify that the psql client is installed and that the configured server
# accepts connections; exits the script (status 1) on either failure.
check_postgres() {
    print_info "Checking PostgreSQL connection..."
    # Fail early when the PostgreSQL client tools are missing entirely.
    if ! command -v psql &> /dev/null; then
        print_error "psql command not found. Please install PostgreSQL client."
        exit 1
    fi
    # '\q' quits immediately, so this only proves a session can be opened
    # against the maintenance database ("postgres"), not the target DB.
    if ! PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres -c '\q' 2>/dev/null; then
        print_error "Cannot connect to PostgreSQL server at $DB_HOST:$DB_PORT"
        print_error "Please check your database connection settings."
        exit 1
    fi
    print_info "PostgreSQL connection successful!"
}
# Return 0 when database $DB_NAME already exists, non-zero otherwise.
# -tAc prints a bare '1' on a catalog hit, which grep -q turns into a status.
check_database_exists() {
    PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres -tAc "SELECT 1 FROM pg_database WHERE datname='$DB_NAME'" | grep -q 1
}
# Create database $DB_NAME, interactively offering to drop and recreate it
# when it already exists. Keeping the existing database returns 0 unchanged.
create_database() {
    if check_database_exists; then
        print_warn "Database '$DB_NAME' already exists."
        # NOTE(review): `read -p` fails when stdin is not a TTY and, since the
        # script runs under `set -e`, that aborts the whole script — confirm
        # this is only ever run interactively.
        read -p "Do you want to drop and recreate it? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            print_info "Dropping database '$DB_NAME'..."
            PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres -c "DROP DATABASE IF EXISTS $DB_NAME;"
        else
            print_info "Keeping existing database."
            return 0
        fi
    fi
    print_info "Creating database '$DB_NAME'..."
    PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres -c "CREATE DATABASE $DB_NAME;"
    print_info "Database created successfully!"
}
# Run the project's migrations with sqlx-cli, installing the tool on demand.
# Exports DATABASE_URL (read by sqlx) and cds to the repository root so the
# default migrations/ directory is found. Exits 1 on migration failure.
run_migrations() {
    print_info "Running migrations..."
    export DATABASE_URL="postgresql://$DB_USER:$DB_PASSWORD@$DB_HOST:$DB_PORT/$DB_NAME"
    # Check if sqlx-cli is installed
    if ! command -v sqlx &> /dev/null; then
        print_warn "sqlx-cli not found. Installing..."
        cargo install sqlx-cli --no-default-features --features postgres
    fi
    # Run migrations
    # The script lives in scripts/, so ".." is the repository root.
    cd "$(dirname "$0")/.."
    if sqlx migrate run; then
        print_info "Migrations completed successfully!"
    else
        print_error "Migration failed!"
        exit 1
    fi
}
# Apply every migrations/*.sql file directly through psql — the fallback path
# for environments without sqlx-cli. Aborts (exit 1) on the first failure.
run_manual_migrations() {
    print_info "Running migrations manually with psql..."
    cd "$(dirname "$0")/.."
    local sql_file
    for sql_file in migrations/*.sql; do
        # When the glob matches nothing it stays literal; skip that case.
        [ -f "$sql_file" ] || continue
        print_info "Applying $(basename "$sql_file")..."
        PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -f "$sql_file" || {
            print_error "Failed to apply $(basename "$sql_file")"
            exit 1
        }
    done
    print_info "All migrations applied successfully!"
}
# Sanity-check the result: require the 'attune' schema to exist, then report
# its table count and list the tables. Exits 1 when the schema is missing.
verify_schema() {
    print_info "Verifying schema..."
    # Check if attune schema exists
    # -tAc returns a bare '1' when the schema row is present.
    schema_exists=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -tAc "SELECT 1 FROM information_schema.schemata WHERE schema_name='attune'")
    if [ "$schema_exists" = "1" ]; then
        print_info "Schema 'attune' exists."
        # Count tables
        table_count=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -tAc "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='attune'")
        print_info "Found $table_count tables in attune schema."
        # List tables
        print_info "Tables:"
        PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "\dt attune.*"
    else
        print_error "Schema 'attune' not found!"
        exit 1
    fi
}
# Print CLI usage, options, and supported environment variables to stdout.
# The heredoc delimiter is unquoted on purpose so $0 expands to the script path.
show_help() {
cat << EOF
Attune Database Setup Script
Usage: $0 [OPTIONS]
Options:
-h, --help Show this help message
-c, --create-only Only create database (don't run migrations)
-m, --migrate-only Only run migrations (don't create database)
-M, --manual Run migrations manually with psql (without sqlx-cli)
-v, --verify Verify schema after setup
Environment Variables:
ATTUNE_DB_NAME Database name (default: attune)
ATTUNE_DB_USER Database user (default: postgres)
ATTUNE_DB_HOST Database host (default: localhost)
ATTUNE_DB_PORT Database port (default: 5432)
ATTUNE_DB_PASSWORD Database password (default: postgres)
Example:
# Full setup
$0
# Create database only
$0 --create-only
# Run migrations only
$0 --migrate-only
# Use custom connection
ATTUNE_DB_NAME=mydb ATTUNE_DB_PASSWORD=secret $0
EOF
}
# Main script
# Entry point: parses flags, then runs create + migrate (each skippable via
# --migrate-only / --create-only), optionally verifying the schema afterwards.
main() {
    # Flag state; all default to the full create-then-migrate flow.
    local create_only=false
    local migrate_only=false
    local manual_migrations=false
    local verify=false
    # Parse arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                show_help
                exit 0
                ;;
            -c|--create-only)
                create_only=true
                shift
                ;;
            -m|--migrate-only)
                migrate_only=true
                shift
                ;;
            -M|--manual)
                manual_migrations=true
                shift
                ;;
            -v|--verify)
                verify=true
                shift
                ;;
            *)
                print_error "Unknown option: $1"
                show_help
                exit 1
                ;;
        esac
    done
    print_info "Attune Database Setup"
    print_info "====================="
    print_info "Database: $DB_NAME"
    print_info "Host: $DB_HOST:$DB_PORT"
    print_info "User: $DB_USER"
    echo
    # Connectivity check always runs, regardless of flags.
    check_postgres
    if [ "$migrate_only" = false ]; then
        create_database
    fi
    if [ "$create_only" = false ]; then
        # --manual bypasses sqlx-cli and feeds the .sql files straight to psql.
        if [ "$manual_migrations" = true ]; then
            run_manual_migrations
        else
            run_migrations
        fi
    fi
    if [ "$verify" = true ]; then
        verify_schema
    fi
    echo
    print_info "Database setup complete!"
    # Password deliberately masked in the printed connection string.
    print_info "Connection string: postgresql://$DB_USER:***@$DB_HOST:$DB_PORT/$DB_NAME"
}
# Run main function
main "$@"

154
scripts/setup-e2e-db.sh Executable file
View File

@@ -0,0 +1,154 @@
#!/bin/bash
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Configuration
DB_HOST="${DB_HOST:-localhost}"
DB_PORT="${DB_PORT:-5432}"
DB_USER="${DB_USER:-postgres}"
DB_PASSWORD="${DB_PASSWORD:-postgres}"
DB_NAME="attune_e2e"
echo -e "${GREEN}=== Attune E2E Database Setup ===${NC}\n"
# Check if PostgreSQL is running
echo -e "${YELLOW}${NC} Checking PostgreSQL connection..."
if ! PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d postgres -c '\q' 2>/dev/null; then
echo -e "${RED}${NC} Cannot connect to PostgreSQL at $DB_HOST:$DB_PORT"
echo " Please ensure PostgreSQL is running and credentials are correct."
exit 1
fi
echo -e "${GREEN}${NC} PostgreSQL is running\n"
# Drop existing E2E database if it exists
echo -e "${YELLOW}${NC} Checking for existing E2E database..."
if PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -p $DB_PORT -U $DB_USER -lqt | cut -d \| -f 1 | grep -qw $DB_NAME; then
echo -e "${YELLOW}!${NC} Found existing database '$DB_NAME', dropping it..."
# Force terminate all connections and drop in single transaction
PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d postgres <<EOF
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname = '$DB_NAME' AND pid <> pg_backend_pid();
DROP DATABASE IF EXISTS $DB_NAME;
EOF
echo -e "${GREEN}${NC} Dropped existing database"
fi
# Create E2E database
echo -e "${YELLOW}${NC} Creating E2E database '$DB_NAME'..."
# Guard the command with `if` directly: this script runs under `set -e`, so
# the previous `cmd; if [ $? -eq 0 ]` pattern could never reach its
# else-branch — a failing psql killed the script before $? was checked.
if PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d postgres -c "CREATE DATABASE $DB_NAME;" 2>&1; then
    echo -e "${GREEN}${NC} Database created\n"
else
    echo -e "${RED}${NC} Failed to create database"
    exit 1
fi
# Create attune schema and set search_path
echo -e "${YELLOW}${NC} Creating attune schema..."
# Test the psql invocation itself: under `set -e` a separate `$?` check after
# the command is dead code (a failure would already have aborted the script).
if PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" <<EOF
CREATE SCHEMA IF NOT EXISTS attune;
ALTER DATABASE $DB_NAME SET search_path TO attune, public;
EOF
then
    echo -e "${GREEN}${NC} Schema created and search_path configured\n"
else
    echo -e "${RED}${NC} Failed to create schema"
    exit 1
fi
# Run migrations (they will use the attune schema via search_path)
echo -e "${YELLOW}${NC} Running database migrations..."
# Under `set -e`, checking `$?` on the next line is unreachable on failure;
# guard the command with `if` so the error message can actually be printed.
if DATABASE_URL="postgresql://$DB_USER:$DB_PASSWORD@$DB_HOST:$DB_PORT/$DB_NAME" sqlx migrate run --source ./migrations; then
    echo -e "${GREEN}${NC} Migrations completed successfully\n"
else
    echo -e "${RED}${NC} Migration failed"
    exit 1
fi
# Seed default runtimes
echo -e "${YELLOW}${NC} Seeding default runtimes..."
# Guard with `if` — the post-hoc `$?` check was unreachable under `set -e`.
if PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -f ./scripts/seed_runtimes.sql > /dev/null; then
    echo -e "${GREEN}${NC} Runtimes seeded successfully\n"
else
    echo -e "${RED}${NC} Runtime seeding failed"
    exit 1
fi
# Verify database schema
echo -e "${YELLOW}${NC} Verifying database schema..."
TABLES=$(PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'attune';")
TABLES=$(echo $TABLES | tr -d ' ')
if [ "$TABLES" -gt 0 ]; then
echo -e "${GREEN}${NC} Found $TABLES tables in 'attune' schema"
else
echo -e "${RED}${NC} No tables found in 'attune' schema"
exit 1
fi
# List all tables
echo -e "\n${YELLOW}Tables in attune schema:${NC}"
PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -c "SELECT table_name FROM information_schema.tables WHERE table_schema = 'attune' ORDER BY table_name;"
# Create default test user
echo -e "\n${YELLOW}${NC} Creating default test user..."
# Generate a fresh password hash
echo -e "${YELLOW} Generating password hash...${NC}"
# The helper prints the hash as its last stdout line; cargo's build chatter
# goes to stderr and is discarded.
HASH=$(cd crates/common && cargo run --example hash_password TestPass123! 2>/dev/null | tail -1)
if [ -z "$HASH" ]; then
    echo -e "${RED}${NC} Failed to generate password hash"
    exit 1
fi
# Upsert keyed on login so reruns refresh the password and attributes.
# Guard the psql call with `if`: the old `$?` check after the heredoc was
# unreachable under `set -e`, which also defeated the intended non-fatal
# "might already exist" fallback — a failure killed the whole script instead.
if PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" <<EOF
INSERT INTO attune.identity (login, display_name, password_hash, attributes)
VALUES (
    'test@attune.local',
    'E2E Test User',
    '$HASH',
    jsonb_build_object(
        'email', 'test@attune.local',
        'is_active', true,
        'is_system', false,
        'type', 'user'
    )
)
ON CONFLICT (login) DO UPDATE SET
    password_hash = EXCLUDED.password_hash,
    attributes = EXCLUDED.attributes;
EOF
then
    echo -e "${GREEN}${NC} Test user created (or already exists)"
else
    echo -e "${YELLOW}!${NC} Could not create test user (might already exist)"
fi
echo -e "\n${GREEN}=== E2E Database Setup Complete ===${NC}"
echo -e "\nDatabase Details:"
echo -e " • Name: ${GREEN}$DB_NAME${NC}"
echo -e " • Host: ${GREEN}$DB_HOST:$DB_PORT${NC}"
echo -e " • User: ${GREEN}$DB_USER${NC}"
echo -e " • URL: ${GREEN}postgresql://$DB_USER:****@$DB_HOST:$DB_PORT/$DB_NAME${NC}"
echo -e "\nTest User:"
echo -e " • Login: ${GREEN}test@attune.local${NC}"
echo -e " • Password: ${GREEN}TestPass123!${NC}"
echo -e "\nNext Steps:"
echo -e " 1. Run ${YELLOW}./scripts/start-e2e-services.sh${NC} to start all services"
echo -e " 2. Run ${YELLOW}cargo test --test integration${NC} to execute E2E tests"
echo ""

160
scripts/setup_timer_echo_rule.sh Executable file
View File

@@ -0,0 +1,160 @@
#!/bin/bash
# Setup Timer Echo Rule
# Creates a rule that runs "echo Hello World" every 10 seconds using the timer trigger
# Requires: curl and jq on PATH; a running API with the core pack seeded.
set -e
# Configuration
# Connection details are overridable via ATTUNE_API_* environment variables.
API_URL="${ATTUNE_API_URL:-http://localhost:8080}"
API_USER="${ATTUNE_API_USER:-admin}"
API_PASSWORD="${ATTUNE_API_PASSWORD:-admin}"
echo "=== Attune Timer Echo Rule Setup ==="
echo "API URL: $API_URL"
echo ""
# Step 1: Login and get JWT token
echo "Step 1: Authenticating..."
# NOTE(review): curl -s hides transport errors; an unreachable API surfaces
# only as the generic "Failed to authenticate" below — confirm acceptable.
LOGIN_RESPONSE=$(curl -s -X POST "$API_URL/auth/login" \
    -H "Content-Type: application/json" \
    -d "{\"username\":\"$API_USER\",\"password\":\"$API_PASSWORD\"}")
# jq prints the literal string "null" when the key is absent.
ACCESS_TOKEN=$(echo "$LOGIN_RESPONSE" | jq -r '.data.access_token')
if [ "$ACCESS_TOKEN" == "null" ] || [ -z "$ACCESS_TOKEN" ]; then
    echo "Error: Failed to authenticate"
    echo "Response: $LOGIN_RESPONSE"
    exit 1
fi
echo "✓ Authentication successful"
echo ""
# Step 2: Check if core pack exists
echo "Step 2: Checking for core pack..."
PACK_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/packs/core" \
-H "Authorization: Bearer $ACCESS_TOKEN")
PACK_ID=$(echo "$PACK_RESPONSE" | jq -r '.data.id')
if [ "$PACK_ID" == "null" ] || [ -z "$PACK_ID" ]; then
echo "Error: Core pack not found. Please run seed_core_pack.sql first"
echo "Response: $PACK_RESPONSE"
exit 1
fi
echo "✓ Core pack found (ID: $PACK_ID)"
echo ""
# Step 3: Check if timer trigger exists
echo "Step 3: Checking for timer trigger..."
TRIGGER_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/triggers/core.timer_10s" \
-H "Authorization: Bearer $ACCESS_TOKEN")
TRIGGER_ID=$(echo "$TRIGGER_RESPONSE" | jq -r '.data.id')
if [ "$TRIGGER_ID" == "null" ] || [ -z "$TRIGGER_ID" ]; then
echo "Error: Timer trigger core.timer_10s not found. Please run seed_core_pack.sql first"
echo "Response: $TRIGGER_RESPONSE"
exit 1
fi
echo "✓ Timer trigger found (ID: $TRIGGER_ID)"
echo ""
# Step 4: Check if echo action exists
echo "Step 4: Checking for echo action..."
ACTION_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/actions/core.echo" \
-H "Authorization: Bearer $ACCESS_TOKEN")
ACTION_ID=$(echo "$ACTION_RESPONSE" | jq -r '.data.id')
if [ "$ACTION_ID" == "null" ] || [ -z "$ACTION_ID" ]; then
echo "Error: Echo action core.echo not found. Please run seed_core_pack.sql first"
echo "Response: $ACTION_RESPONSE"
exit 1
fi
echo "✓ Echo action found (ID: $ACTION_ID)"
echo ""
# Step 5: Create or update the rule
echo "Step 5: Creating timer echo rule..."
RULE_REF="core.timer_echo_10s"
# Check if rule already exists
EXISTING_RULE=$(curl -s -X GET "$API_URL/api/v1/rules/$RULE_REF" \
-H "Authorization: Bearer $ACCESS_TOKEN")
EXISTING_RULE_ID=$(echo "$EXISTING_RULE" | jq -r '.data.id // empty')
if [ -n "$EXISTING_RULE_ID" ]; then
echo "Rule already exists (ID: $EXISTING_RULE_ID), updating..."
UPDATE_RESPONSE=$(curl -s -X PUT "$API_URL/api/v1/rules/$RULE_REF" \
-H "Authorization: Bearer $ACCESS_TOKEN" \
-H "Content-Type: application/json" \
-d '{
"enabled": true,
"label": "Timer Echo Every 10 Seconds",
"description": "Echoes Hello World every 10 seconds using timer trigger"
}')
RULE_ID=$(echo "$UPDATE_RESPONSE" | jq -r '.data.id')
echo "✓ Rule updated (ID: $RULE_ID)"
else
echo "Creating new rule..."
CREATE_RESPONSE=$(curl -s -X POST "$API_URL/api/v1/rules" \
-H "Authorization: Bearer $ACCESS_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"ref\": \"$RULE_REF\",
\"pack\": $PACK_ID,
\"pack_ref\": \"core\",
\"label\": \"Timer Echo Every 10 Seconds\",
\"description\": \"Echoes Hello World every 10 seconds using timer trigger\",
\"enabled\": true,
\"trigger\": $TRIGGER_ID,
\"trigger_ref\": \"core.timer_10s\",
\"action\": $ACTION_ID,
\"action_ref\": \"core.echo\",
\"action_params\": {
\"message\": \"Hello World from timer trigger!\"
}
}")
RULE_ID=$(echo "$CREATE_RESPONSE" | jq -r '.data.id')
if [ "$RULE_ID" == "null" ] || [ -z "$RULE_ID" ]; then
echo "Error: Failed to create rule"
echo "Response: $CREATE_RESPONSE"
exit 1
fi
echo "✓ Rule created (ID: $RULE_ID)"
fi
echo ""
echo "=== Setup Complete ==="
echo ""
echo "Rule Details:"
echo " Ref: $RULE_REF"
echo " ID: $RULE_ID"
echo " Trigger: core.timer_10s (every 10 seconds)"
echo " Action: core.echo"
echo " Message: Hello World from timer trigger!"
echo ""
echo "The rule is now active. The echo action will run every 10 seconds."
echo "Check logs with:"
echo " - Sensor service logs for timer events"
echo " - Executor service logs for enforcement/scheduling"
echo " - Worker service logs for action execution"
echo ""
echo "To monitor executions via API:"
echo " curl -H 'Authorization: Bearer $ACCESS_TOKEN' $API_URL/api/v1/executions"
echo ""
echo "To disable the rule:"
echo " curl -X PUT -H 'Authorization: Bearer $ACCESS_TOKEN' -H 'Content-Type: application/json' \\"
echo " -d '{\"enabled\": false}' $API_URL/api/v1/rules/$RULE_REF"

43
scripts/start-all-services.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Start all Attune services in the background
# Abort immediately if the build (or any later step) fails, instead of
# silently launching stale binaries from a previous build.
set -e
echo "Building services first..."
cargo build
echo "Starting services..."
# Create logs directory if it doesn't exist
mkdir -p logs
# launch NAME BIN SLUG — start one service in the background, logging to
# logs/SLUG.log and recording its PID in logs/SLUG.pid.
launch() {
    local name=$1 bin=$2 slug=$3
    echo "Starting $name service..."
    cargo run --bin "$bin" > "logs/$slug.log" 2>&1 &
    echo $! > "logs/$slug.pid"
}
launch "API" attune-api api
launch "Executor" attune-executor executor
launch "Worker" attune-worker worker
launch "Sensor" attune-sensor sensor
launch "Notifier" attune-notifier notifier
echo ""
echo "All services started!"
echo "Logs are in the logs/ directory"
echo "To stop services, run: ./scripts/stop-all-services.sh"
echo ""
echo "Service PIDs:"
echo " API: $(cat logs/api.pid)"
echo " Executor: $(cat logs/executor.pid)"
echo " Worker: $(cat logs/worker.pid)"
echo " Sensor: $(cat logs/sensor.pid)"
echo " Notifier: $(cat logs/notifier.pid)"

357
scripts/start-e2e-services.sh Executable file
View File

@@ -0,0 +1,357 @@
#!/bin/bash
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
CONFIG_FILE="${CONFIG_FILE:-config.e2e.yaml}"
LOG_DIR="./tests/logs"
PID_DIR="./tests/pids"
# Detect database schema from config file if possible
DETECTED_SCHEMA=""
if [ -f "$CONFIG_FILE" ]; then
DETECTED_SCHEMA=$(grep -E '^\s*schema:' "$CONFIG_FILE" | sed -E 's/^\s*schema:\s*"?([^"]+)"?.*/\1/' | tr -d ' ')
fi
# Service ports (from config.e2e.yaml)
API_PORT=8080
NOTIFIER_WS_PORT=8081
echo -e "${GREEN}=== Attune E2E Services Startup ===${NC}\n"
# Display configuration info
echo -e "${BLUE}Configuration:${NC}"
echo -e " • Config file: ${YELLOW}$CONFIG_FILE${NC}"
if [ -n "$DETECTED_SCHEMA" ]; then
echo -e " • Database schema: ${YELLOW}$DETECTED_SCHEMA${NC}"
else
echo -e " • Database schema: ${YELLOW}attune${NC} (default)"
fi
echo ""
# Create necessary directories
mkdir -p "$LOG_DIR"
mkdir -p "$PID_DIR"
mkdir -p "./tests/artifacts"
mkdir -p "./tests/venvs"
# Function to check if a service is running
# Returns 0 when $PID_DIR/<name>.pid points at a live process; otherwise
# returns 1, removing the PID file when it is stale.
is_service_running() {
    local pidfile="$PID_DIR/$1.pid"
    # No PID file at all -> not running.
    [ -f "$pidfile" ] || return 1
    local pid
    pid=$(cat "$pidfile")
    if ps -p $pid > /dev/null 2>&1; then
        return 0
    fi
    # Process is gone; clean up the stale PID file.
    rm -f "$pidfile"
    return 1
}
# Function to stop a service
# Sends SIGTERM to the PID recorded in $PID_DIR/<name>.pid, waits 2s for a
# graceful shutdown, escalates to SIGKILL if needed, then removes the PID
# file. A missing PID file is a silent no-op.
stop_service() {
    local service_name=$1
    local pid_file="$PID_DIR/$service_name.pid"
    if [ -f "$pid_file" ]; then
        local pid=$(cat "$pid_file")
        echo -e "${YELLOW}${NC} Stopping $service_name (PID: $pid)..."
        # `|| true` keeps `set -e` from aborting when the process is already gone.
        kill $pid 2>/dev/null || true
        sleep 2
        if ps -p $pid > /dev/null 2>&1; then
            echo -e "${YELLOW}!${NC} Forcefully killing $service_name..."
            kill -9 $pid 2>/dev/null || true
        fi
        rm -f "$pid_file"
        echo -e "${GREEN}${NC} $service_name stopped"
    fi
}
# Function to start a service
# $1 = logical service name (used for log/pid file names), $2 = cargo binary.
# Builds the binary on demand, launches it in the background with output
# captured to $LOG_DIR/<name>.log, records the PID, and verifies the process
# survived its first 2 seconds. Returns 1 (with a log tail) on failure.
start_service() {
    local service_name=$1
    local binary_name=$2
    local log_file="$LOG_DIR/$service_name.log"
    local pid_file="$PID_DIR/$service_name.pid"
    echo -e "${YELLOW}${NC} Starting $service_name..."
    # Build the service if not already built
    if [ ! -f "./target/debug/$binary_name" ]; then
        echo -e "${BLUE} Building $binary_name...${NC}"
        cargo build --bin $binary_name 2>&1 | tee "$LOG_DIR/$service_name-build.log"
    fi
    # Start the service
    # Only set ATTUNE__ENVIRONMENT if using default e2e config
    # Otherwise, let the config file determine the environment
    if [ "$CONFIG_FILE" = "config.e2e.yaml" ]; then
        ATTUNE__ENVIRONMENT=e2e ATTUNE_CONFIG="$CONFIG_FILE" ./target/debug/$binary_name > "$log_file" 2>&1 &
    else
        ATTUNE_CONFIG="$CONFIG_FILE" ./target/debug/$binary_name > "$log_file" 2>&1 &
    fi
    local pid=$!
    echo $pid > "$pid_file"
    # Wait a moment and check if it's still running
    # Catches immediate crashes (bad config, port in use) before moving on.
    sleep 2
    if ps -p $pid > /dev/null 2>&1; then
        echo -e "${GREEN}${NC} $service_name started (PID: $pid)"
        echo -e " Log: ${BLUE}$log_file${NC}"
        return 0
    else
        echo -e "${RED}${NC} $service_name failed to start"
        echo -e " Check log: ${RED}$log_file${NC}"
        tail -20 "$log_file"
        return 1
    fi
}
# Function to check service health
# Polls $2 once per second with curl until it answers with a success status
# or $3 attempts (default 30) are exhausted. Returns 0 on healthy, 1 on
# timeout. $1 is only used for the progress messages.
check_service_health() {
    local label=$1
    local url=$2
    local max_tries=${3:-30}
    local try
    echo -e "${YELLOW}${NC} Checking $label health..."
    for (( try = 0; try < max_tries; try++ )); do
        # -f makes curl fail on HTTP >= 400, -s suppresses progress output.
        if curl -s -f "$url" > /dev/null 2>&1; then
            echo -e "${GREEN}${NC} $label is healthy"
            return 0
        fi
        sleep 1
    done
    echo -e "${RED}${NC} $label health check failed after $max_tries attempts"
    return 1
}
# Check for existing services and stop them
echo -e "${YELLOW}Checking for running E2E services...${NC}"
for service in api executor worker sensor notifier; do
if is_service_running $service; then
stop_service $service
fi
done
echo ""
# Check dependencies
echo -e "${YELLOW}Checking dependencies...${NC}"
# Check PostgreSQL
echo -e "${YELLOW}${NC} Checking PostgreSQL..."
echo -e " Attempting connection to: ${BLUE}postgresql://postgres@localhost:5432/attune_e2e${NC}"
# Capture the exit status without tripping `set -e`: a bare failing command
# would terminate the script before the old `PG_EXIT=$?` line ever ran,
# making every diagnostic below unreachable.
PG_EXIT=0
PGPASSWORD=postgres psql -h localhost -p 5432 -U postgres -d attune_e2e -c '\q' 2>/tmp/pg_check_error.txt || PG_EXIT=$?
if [ $PG_EXIT -eq 0 ]; then
    echo -e "${GREEN}${NC} PostgreSQL connection successful"
else
    echo -e "${RED}${NC} Cannot connect to PostgreSQL database 'attune_e2e'"
    echo ""
    echo -e "${YELLOW}Diagnostic Information:${NC}"
    # Check if PostgreSQL is running at all
    if ! pg_isready -h localhost -p 5432 > /dev/null 2>&1; then
        echo -e " ${RED}${NC} PostgreSQL server is not running on localhost:5432"
        echo -e " Start it with: ${YELLOW}sudo systemctl start postgresql${NC}"
    else
        echo -e " ${GREEN}${NC} PostgreSQL server is running"
        # Check if the database exists
        if ! PGPASSWORD=postgres psql -h localhost -p 5432 -U postgres -lqt 2>/dev/null | cut -d \| -f 1 | grep -qw attune_e2e; then
            echo -e " ${RED}${NC} Database 'attune_e2e' does not exist"
            echo -e " Create it with: ${YELLOW}./scripts/setup-e2e-db.sh${NC}"
        else
            echo -e " ${GREEN}${NC} Database 'attune_e2e' exists"
            echo -e " ${RED}${NC} Connection failed for another reason"
            # Show the actual error
            if [ -f /tmp/pg_check_error.txt ] && [ -s /tmp/pg_check_error.txt ]; then
                echo -e "\n${YELLOW}Error details:${NC}"
                cat /tmp/pg_check_error.txt | sed 's/^/ /'
            fi
        fi
    fi
    rm -f /tmp/pg_check_error.txt
    exit 1
fi
# Check RabbitMQ
echo -e "${YELLOW}${NC} Checking RabbitMQ..."
echo -e " Testing connection to: ${BLUE}localhost:5672 (AMQP)${NC}"
if nc -z localhost 5672 2>/dev/null || timeout 1 bash -c 'cat < /dev/null > /dev/tcp/localhost/5672' 2>/dev/null; then
echo -e "${GREEN}${NC} RabbitMQ is running (AMQP port 5672)"
else
echo -e "${RED}${NC} RabbitMQ is not running on port 5672"
echo ""
echo -e "${YELLOW}Diagnostic Information:${NC}"
# Check if RabbitMQ process is running
if pgrep -x rabbitmq-server > /dev/null || pgrep -x beam.smp > /dev/null; then
echo -e " ${YELLOW}!${NC} RabbitMQ process is running but port 5672 is not accessible"
echo -e " The service may still be starting up"
echo -e " Check status: ${YELLOW}sudo rabbitmq-diagnostics status${NC}"
echo -e " View logs: ${YELLOW}sudo journalctl -u rabbitmq-server -n 50${NC}"
else
echo -e " ${RED}${NC} RabbitMQ process is not running"
echo -e " Start it with: ${YELLOW}sudo systemctl start rabbitmq-server${NC}"
echo -e " Or: ${YELLOW}sudo service rabbitmq-server start${NC}"
fi
# Check if port might be in use by something else
if netstat -tuln 2>/dev/null | grep -q :5672 || ss -tuln 2>/dev/null | grep -q :5672; then
echo -e " ${YELLOW}!${NC} Port 5672 appears to be in use"
echo -e " Check what's using it: ${YELLOW}sudo lsof -i :5672${NC}"
fi
exit 1
fi
echo ""
# Build all services first
echo -e "${YELLOW}Building all services...${NC}"
echo -e " Building binaries: ${BLUE}api, executor, worker, sensor, notifier${NC}"
echo -e " This may take a few moments..."
cargo build --bins 2>&1 | tee "$LOG_DIR/build.log"
BUILD_EXIT=${PIPESTATUS[0]}
if [ $BUILD_EXIT -ne 0 ]; then
echo -e "${RED}${NC} Build failed"
echo ""
echo -e "${YELLOW}Last 30 lines of build output:${NC}"
tail -30 "$LOG_DIR/build.log" | sed 's/^/ /'
echo ""
echo -e "Full build log: ${RED}$LOG_DIR/build.log${NC}"
exit 1
fi
echo -e "${GREEN}${NC} All services built successfully\n"
# Start services in order
echo -e "${GREEN}=== Starting Services ===${NC}\n"
# 1. Start API service
if ! start_service "api" "attune-api"; then
echo -e "${RED}Failed to start API service${NC}"
exit 1
fi
sleep 2
# Check API health
if ! check_service_health "API" "http://127.0.0.1:$API_PORT/health"; then
echo -e "${RED}API service is not healthy${NC}"
echo ""
echo -e "${YELLOW}Last 20 lines of API log:${NC}"
tail -20 "$LOG_DIR/api.log" | sed 's/^/ /'
echo ""
echo -e "Full log: ${RED}$LOG_DIR/api.log${NC}"
exit 1
fi
echo ""
# 2. Start Executor service
if ! start_service "executor" "attune-executor"; then
echo -e "${RED}Failed to start Executor service${NC}"
echo ""
echo -e "${YELLOW}Last 20 lines of Executor log:${NC}"
tail -20 "$LOG_DIR/executor.log" | sed 's/^/ /'
echo ""
echo -e "Full log: ${RED}$LOG_DIR/executor.log${NC}"
exit 1
fi
sleep 2
echo ""
# 3. Start Worker service
if ! start_service "worker" "attune-worker"; then
echo -e "${RED}Failed to start Worker service${NC}"
echo ""
echo -e "${YELLOW}Last 20 lines of Worker log:${NC}"
tail -20 "$LOG_DIR/worker.log" | sed 's/^/ /'
echo ""
echo -e "Full log: ${RED}$LOG_DIR/worker.log${NC}"
exit 1
fi
sleep 2
echo ""
# 4. Start Sensor service
if ! start_service "sensor" "attune-sensor"; then
echo -e "${RED}Failed to start Sensor service${NC}"
echo ""
echo -e "${YELLOW}Last 20 lines of Sensor log:${NC}"
tail -20 "$LOG_DIR/sensor.log" | sed 's/^/ /'
echo ""
echo -e "Full log: ${RED}$LOG_DIR/sensor.log${NC}"
exit 1
fi
sleep 2
echo ""
# 5. Start Notifier service
if ! start_service "notifier" "attune-notifier"; then
echo -e "${RED}Failed to start Notifier service${NC}"
echo ""
echo -e "${YELLOW}Last 20 lines of Notifier log:${NC}"
tail -20 "$LOG_DIR/notifier.log" | sed 's/^/ /'
echo ""
echo -e "Full log: ${RED}$LOG_DIR/notifier.log${NC}"
exit 1
fi
sleep 2
echo ""
# Display running services
echo -e "${GREEN}=== All Services Started ===${NC}\n"
echo -e "${GREEN}Running Services:${NC}"
for service in api executor worker sensor notifier; do
pid_file="$PID_DIR/$service.pid"
if [ -f "$pid_file" ]; then
pid=$(cat "$pid_file")
echo -e "${GREEN}$service${NC} (PID: $pid)"
fi
done
echo -e "\n${GREEN}Service Endpoints:${NC}"
echo -e " • API: ${BLUE}http://127.0.0.1:$API_PORT${NC}"
echo -e " • Health: ${BLUE}http://127.0.0.1:$API_PORT/health${NC}"
echo -e " • Docs: ${BLUE}http://127.0.0.1:$API_PORT/docs${NC}"
echo -e " • WebSocket: ${BLUE}ws://127.0.0.1:$NOTIFIER_WS_PORT${NC}"
echo -e "\n${GREEN}Logs:${NC}"
for service in api executor worker sensor notifier; do
echo -e "$service: ${BLUE}$LOG_DIR/$service.log${NC}"
done
echo -e "\n${GREEN}Management:${NC}"
echo -e " • View logs: ${YELLOW}tail -f $LOG_DIR/<service>.log${NC}"
echo -e " • Stop all: ${YELLOW}./scripts/stop-e2e-services.sh${NC}"
echo -e " • Run tests: ${YELLOW}cargo test --test integration${NC}"
echo -e "\n${GREEN}Test User:${NC}"
echo -e " • Login: ${YELLOW}test@attune.local${NC}"
echo -e " • Password: ${YELLOW}TestPass123!${NC}"
echo -e "\n${GREEN}Quick Test:${NC}"
echo -e " ${YELLOW}curl http://127.0.0.1:$API_PORT/health"
echo -e " ${YELLOW}curl -X POST http://127.0.0.1:$API_PORT/auth/login \\"
echo -e " ${YELLOW} -H 'Content-Type: application/json' \\"
echo -e " ${YELLOW} -d '{\"login\":\"test@attune.local\",\"password\":\"TestPass123!\"}'${NC}"
echo ""

106
scripts/start_services_test.sh Executable file
View File

@@ -0,0 +1,106 @@
#!/bin/bash
# Start all Attune services for testing the timer demo
# This script starts API, Sensor, Executor, and Worker services in separate tmux panes
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}=== Attune Service Startup Script ===${NC}"
echo ""
# Check if tmux is available
if ! command -v tmux &> /dev/null; then
echo -e "${RED}Error: tmux is not installed${NC}"
echo "Please install tmux to use this script, or start services manually in separate terminals"
exit 1
fi
# Set environment variables
export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/attune"
export ATTUNE__DATABASE__URL="$DATABASE_URL"
export ATTUNE__MESSAGE_QUEUE__URL="amqp://guest:guest@localhost:5672/%2F"
export ATTUNE__JWT__SECRET="dev-secret-not-for-production"
echo -e "${GREEN}✓ Environment variables set${NC}"
echo ""
# Check if services are already running
# If a previous 'attune' tmux session exists, ask whether to replace it;
# declining attaches to the existing session and exits this script.
if tmux has-session -t attune 2>/dev/null; then
    echo -e "${YELLOW}Attune session already exists${NC}"
    echo "Do you want to kill it and start fresh? (y/n)"
    # NOTE(review): blocks on stdin; `set -e` aborts here when non-interactive.
    read -r response
    if [[ "$response" =~ ^[Yy]$ ]]; then
        tmux kill-session -t attune
        echo -e "${GREEN}✓ Old session killed${NC}"
    else
        echo "Attaching to existing session..."
        tmux attach -t attune
        exit 0
    fi
fi
echo -e "${BLUE}Starting services in tmux session 'attune'...${NC}"
echo ""
# Create new tmux session with 4 panes
tmux new-session -d -s attune -n services
# Split into 4 panes
tmux split-window -h -t attune
tmux split-window -v -t attune:0.0
tmux split-window -v -t attune:0.2
# Set environment in all panes
for pane in 0 1 2 3; do
tmux send-keys -t attune:0.$pane "cd $(pwd)" C-m
tmux send-keys -t attune:0.$pane "export DATABASE_URL='$DATABASE_URL'" C-m
tmux send-keys -t attune:0.$pane "export ATTUNE__DATABASE__URL='$DATABASE_URL'" C-m
tmux send-keys -t attune:0.$pane "export ATTUNE__MESSAGE_QUEUE__URL='amqp://guest:guest@localhost:5672/%2F'" C-m
tmux send-keys -t attune:0.$pane "export ATTUNE__JWT__SECRET='dev-secret-not-for-production'" C-m
done
# Start API service (top-left)
echo -e "${GREEN}Starting API service...${NC}"
tmux send-keys -t attune:0.0 "echo '=== API Service ===' && cargo run --bin attune-api" C-m
# Start Sensor service (top-right)
echo -e "${GREEN}Starting Sensor service...${NC}"
tmux send-keys -t attune:0.1 "echo '=== Sensor Service ===' && sleep 5 && cargo run --bin attune-sensor" C-m
# Start Executor service (bottom-left)
echo -e "${GREEN}Starting Executor service...${NC}"
tmux send-keys -t attune:0.2 "echo '=== Executor Service ===' && sleep 5 && cargo run --bin attune-executor" C-m
# Start Worker service (bottom-right)
echo -e "${GREEN}Starting Worker service...${NC}"
tmux send-keys -t attune:0.3 "echo '=== Worker Service ===' && sleep 5 && cargo run --bin attune-worker" C-m
echo ""
echo -e "${GREEN}✓ All services starting in tmux session 'attune'${NC}"
echo ""
echo -e "${BLUE}Tmux commands:${NC}"
echo " Attach to session: tmux attach -t attune"
echo " Detach from session: Ctrl+b, then d"
echo " Switch panes: Ctrl+b, then arrow keys"
echo " Kill session: tmux kill-session -t attune"
echo ""
echo -e "${BLUE}Service layout:${NC}"
echo " ┌─────────────┬─────────────┐"
echo " │ API │ Sensor │"
echo " ├─────────────┼─────────────┤"
echo " │ Executor │ Worker │"
echo " └─────────────┴─────────────┘"
echo ""
echo -e "${YELLOW}Wait 30-60 seconds for all services to compile and start...${NC}"
echo ""
echo "Attaching to tmux session in 3 seconds..."
sleep 3
# Attach to the session
tmux attach -t attune

30
scripts/status-all-services.sh Executable file
View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Check status of all Attune services
echo "Service Status:"
echo "==============="
# Display the run state of one service.
# $1 - display name (e.g. "API"). start-all-services.sh writes lowercase PID
# files (logs/api.pid), so the name is lowercased before building the path —
# the previous code looked for logs/API.pid and therefore always reported
# NOT RUNNING on case-sensitive filesystems.
check_service() {
    local name=$1
    local pidfile="logs/$(echo "$name" | tr '[:upper:]' '[:lower:]').pid"
    if [ -f "$pidfile" ]; then
        local pid
        pid=$(cat "$pidfile")
        if ps -p "$pid" > /dev/null 2>&1; then
            echo "$name (PID: $pid) - RUNNING"
        else
            echo "$name - NOT RUNNING (stale PID file)"
        fi
    else
        echo "$name - NOT RUNNING"
    fi
}
check_service "API"
check_service "Executor"
check_service "Worker"
check_service "Sensor"
check_service "Notifier"
echo ""
echo "To view logs: tail -f logs/<service>.log"

31
scripts/stop-all-services.sh Executable file
View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Stop all Attune services.
#
# Each service records its PID in logs/<service>.pid when started; this
# script sends SIGTERM to every recorded PID and removes the PID file.
echo "Stopping services..."

# stop_one SERVICE LABEL
#   Kills the process recorded in logs/SERVICE.pid (if the file exists),
#   reports the outcome using LABEL, and removes the PID file either way.
stop_one() {
    local svc=$1
    local label=$2
    local pidfile="logs/${svc}.pid"
    if [ -f "$pidfile" ]; then
        # kill fails when the process already exited (stale PID file).
        kill "$(cat "$pidfile")" 2>/dev/null && echo "Stopped ${label} service" || echo "${label} service not running"
        # -f: the PID file is removed even if it became unreadable meanwhile.
        rm -f "$pidfile"
    fi
}

# Same order as the original per-service blocks.
stop_one api API
stop_one executor Executor
stop_one worker Worker
stop_one sensor Sensor
stop_one notifier Notifier
echo "All services stopped"

74
scripts/stop-e2e-services.sh Executable file
View File

@@ -0,0 +1,74 @@
#!/bin/bash
# Stop the Attune E2E test services started by start-e2e-services.sh.
# Running services are identified by <service>.pid files under $PID_DIR.
set -e
# Colors for output
RED='\033[0;31m'    # NOTE(review): defined but unused in this script
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Configuration
PID_DIR="./tests/pids"  # one <service>.pid per started service
echo -e "${GREEN}=== Stopping Attune E2E Services ===${NC}\n"
# Function to stop a service
# stop_service SERVICE
#   Gracefully stops SERVICE using the PID recorded in $PID_DIR/SERVICE.pid:
#   sends SIGTERM, waits up to 5 seconds for the process to exit, escalates
#   to SIGKILL if it is still alive, and always removes the PID file.
stop_service() {
    local service_name=$1
    local pid_file="$PID_DIR/$service_name.pid"
    if [ -f "$pid_file" ]; then
        # Split declaration from assignment so a cat failure isn't masked
        # by `local`; quote "$pid" everywhere below (SC2086).
        local pid
        pid=$(cat "$pid_file")
        if ps -p "$pid" > /dev/null 2>&1; then
            echo -e "${YELLOW}${NC} Stopping $service_name (PID: $pid)..."
            kill "$pid" 2>/dev/null || true
            # Wait up to 5 seconds for graceful shutdown
            local count=0
            while ps -p "$pid" > /dev/null 2>&1 && [ "$count" -lt 5 ]; do
                sleep 1
                count=$((count + 1))
            done
            # Force kill if still running
            if ps -p "$pid" > /dev/null 2>&1; then
                echo -e "${YELLOW}!${NC} Forcefully killing $service_name..."
                kill -9 "$pid" 2>/dev/null || true
                sleep 1
            fi
            echo -e "${GREEN}${NC} $service_name stopped"
        else
            echo -e "${YELLOW}!${NC} $service_name PID file exists but process not running"
        fi
        rm -f "$pid_file"
    else
        echo -e "${YELLOW}!${NC} No PID file found for $service_name"
    fi
}
# Check if any services are running; bail out early (success) when the PID
# directory is absent or empty - there is nothing to stop.
if [ ! -d "$PID_DIR" ] || [ -z "$(ls -A $PID_DIR 2>/dev/null)" ]; then
    echo -e "${YELLOW}No E2E services appear to be running${NC}"
    exit 0
fi
# Stop services in reverse order
echo -e "${YELLOW}Stopping services...${NC}\n"
# Stop in reverse dependency order (dependents first, the API last)
stop_service "notifier"
stop_service "sensor"
stop_service "worker"
stop_service "executor"
stop_service "api"
echo -e "\n${GREEN}=== All E2E Services Stopped ===${NC}\n"
# Clean up PID directory if empty
if [ -d "$PID_DIR" ] && [ -z "$(ls -A $PID_DIR)" ]; then
    rmdir "$PID_DIR" 2>/dev/null || true
fi
echo -e "To restart services:"
echo -e " ${YELLOW}./scripts/start-e2e-services.sh${NC}\n"

184
scripts/stop-system-services.sh Executable file
View File

@@ -0,0 +1,184 @@
#!/bin/bash
# Script to stop system services that conflict with Docker Compose services
# Run this before starting Docker Compose to avoid port conflicts
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"  # directory containing this script
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"                     # repository root (parent of scripts/)
echo "=========================================="
echo "Stopping System Services for Docker"
echo "=========================================="
echo ""
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Function to check if service is running
# is_service_running UNIT
#   Exit status mirrors `systemctl is-active --quiet`: 0 when the unit is
#   active, non-zero otherwise - so the command's status is returned directly.
is_service_running() {
    systemctl is-active --quiet "$1" 2>/dev/null
}
# Function to stop a system service
# stop_service SERVICE PORT
#   Stops SERVICE via `sudo systemctl stop` (PORT is informational only) and
#   interactively offers to disable the unit at boot. Failures to stop are
#   reported with a manual-remediation hint rather than aborting the script.
stop_service() {
    local service=$1
    local port=$2
    echo -n "Checking $service (port $port)... "
    if is_service_running "$service"; then
        echo -e "${YELLOW}RUNNING${NC}"
        echo -n " Stopping $service... "
        if sudo systemctl stop "$service" 2>/dev/null; then
            echo -e "${GREEN}STOPPED${NC}"
            # Optionally disable to prevent auto-restart on boot
            # (interactive: reads a single keystroke from the terminal).
            read -p " Disable $service on boot? (y/N) " -n 1 -r
            echo
            if [[ $REPLY =~ ^[Yy]$ ]]; then
                sudo systemctl disable "$service" 2>/dev/null
                echo -e " ${GREEN}DISABLED on boot${NC}"
            fi
        else
            echo -e "${RED}FAILED${NC}"
            echo " You may need to stop it manually: sudo systemctl stop $service"
        fi
    else
        echo -e "${GREEN}NOT RUNNING${NC}"
    fi
}
# Function to check if port is in use
# check_port PORT LABEL
#   Reports whether PORT accepts connections on localhost and, when it does,
#   tries to identify the owning process so the user can free the port.
check_port() {
    local port=$1
    local service=$2
    echo -n "Checking port $port ($service)... "
    if nc -z localhost "$port" 2>/dev/null; then
        echo -e "${YELLOW}IN USE${NC}"
        # Try to find what's using it. `lsof -ti` may print SEVERAL PIDs when
        # multiple processes share the port; keep only the first so the
        # `ps -p` lookup and the kill hint below stay well-formed. The braces
        # preserve the original fallback precedence: lsof || (fuser | awk).
        local pid
        pid=$( { lsof -ti tcp:"$port" 2>/dev/null || fuser "$port"/tcp 2>/dev/null | awk '{print $1}'; } | head -n 1)
        if [ -n "$pid" ]; then
            local process
            process=$(ps -p "$pid" -o comm= 2>/dev/null || echo "unknown")
            echo " Process: $process (PID: $pid)"
            echo " To kill: sudo kill $pid"
        fi
    else
        echo -e "${GREEN}FREE${NC}"
    fi
}
# Main flow: stop conflicting system units, verify the ports they held are
# free, then clean up any Docker containers squatting on the same ports.
echo "Step 1: Stopping System Services"
echo "----------------------------------"
# PostgreSQL (port 5432)
stop_service "postgresql" "5432"
# RabbitMQ (ports 5672, 15672)
stop_service "rabbitmq-server" "5672"
# Redis (port 6379) - both unit names are tried because distros differ.
stop_service "redis" "6379"
stop_service "redis-server" "6379"
echo ""
echo "Step 2: Verifying Ports are Free"
echo "----------------------------------"
# Check critical ports
check_port 5432 "PostgreSQL"
check_port 5672 "RabbitMQ AMQP"
check_port 15672 "RabbitMQ Management"
check_port 6379 "Redis"
check_port 8080 "API Service"
check_port 8081 "Notifier Service"
check_port 3000 "Web UI"
echo ""
echo "Step 3: Cleanup Docker Resources"
echo "----------------------------------"
# Check for any existing Attune containers
echo -n "Checking for existing Attune containers... "
if docker ps -a --format '{{.Names}}' | grep -q "attune-"; then
    echo -e "${YELLOW}FOUND${NC}"
    echo " Stopping and removing existing containers..."
    # NOTE(review): this compose file path contains a space
    # ("docker compose.yaml"); it is likely meant to be docker-compose.yaml
    # or compose.yaml - verify against the repository layout.
    docker compose -f "$PROJECT_ROOT/docker compose.yaml" down 2>/dev/null || true
    echo -e " ${GREEN}CLEANED${NC}"
else
    echo -e "${GREEN}NONE${NC}"
fi
# Check for orphaned containers on these ports
echo -n "Checking for orphaned containers on critical ports... "
ORPHANED=$(docker ps --format '{{.ID}} {{.Ports}}' | grep -E '5432|5672|6379|8080|8081|3000' | awk '{print $1}' || true)
if [ -n "$ORPHANED" ]; then
    echo -e "${YELLOW}FOUND${NC}"
    echo " Orphaned container IDs: $ORPHANED"
    # Interactive confirmation before touching containers we don't own.
    read -p " Stop these containers? (y/N) " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        echo "$ORPHANED" | xargs docker stop 2>/dev/null || true
        echo "$ORPHANED" | xargs docker rm 2>/dev/null || true
        echo -e " ${GREEN}REMOVED${NC}"
    fi
else
    echo -e "${GREEN}NONE${NC}"
fi
echo ""
echo "=========================================="
echo "Summary"
echo "=========================================="
echo ""
# Final port check
ALL_CLEAR=true
for port in 5432 5672 6379 8080 8081 3000; do
    if nc -z localhost "$port" 2>/dev/null; then
        echo -e "${RED}${NC} Port $port is still in use"
        ALL_CLEAR=false
    else
        echo -e "${GREEN}${NC} Port $port is free"
    fi
done
echo ""
if $ALL_CLEAR; then
    echo -e "${GREEN}All ports are clear! You can now run:${NC}"
    echo ""
    echo " cd $PROJECT_ROOT"
    echo " docker compose up -d"
    echo ""
    echo "Or use the Makefile:"
    echo ""
    echo " make docker-up"
    echo ""
else
    echo -e "${YELLOW}Some ports are still in use. Please resolve manually.${NC}"
    echo ""
    echo "Helpful commands:"
    echo " lsof -i :PORT # Find process using PORT"
    echo " sudo kill PID # Kill process by PID"
    echo " docker ps -a # List all containers"
    echo " docker stop NAME # Stop container"
    echo ""
fi
echo "To re-enable system services later:"
echo " sudo systemctl start postgresql"
echo " sudo systemctl start rabbitmq-server"
echo " sudo systemctl start redis-server"
echo ""

259
scripts/test-db-setup.sh Executable file
View File

@@ -0,0 +1,259 @@
#!/bin/bash
# Test Database Setup Script
# This script helps set up and manage the test database for Attune
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Configuration (host/port/user overridable via the standard POSTGRES_* vars)
DB_NAME="attune_test"
DB_USER="${POSTGRES_USER:-postgres}"
DB_HOST="${POSTGRES_HOST:-localhost}"
DB_PORT="${POSTGRES_PORT:-5432}"
# Handle password: use env var if set, otherwise prompt once.
# Precedence: POSTGRES_PASSWORD > PGPASSWORD > interactive prompt.
# PGPASSWORD is exported so psql/createdb/dropdb below authenticate silently.
if [ -z "$POSTGRES_PASSWORD" ]; then
    if [ -z "$PGPASSWORD" ]; then
        # Prompt for password once (-s: no echo to the terminal)
        read -sp "Enter PostgreSQL password for user $DB_USER: " DB_PASSWORD
        echo ""
        export PGPASSWORD="$DB_PASSWORD"
    fi
    # else PGPASSWORD is already set, use it
else
    # POSTGRES_PASSWORD was provided, use it
    export PGPASSWORD="$POSTGRES_PASSWORD"
fi
# NOTE(review): the password is embedded in this URL; it is echoed by the
# 'status' command and handed to sqlx via the environment - avoid logging it.
DB_URL="postgresql://${DB_USER}:${PGPASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}"
# Functions
# Logging helpers: colour-coded, single-line status messages on stdout.
print_info() {
    local msg=$1
    echo -e "${GREEN}[INFO]${NC} ${msg}"
}
print_warn() {
    local msg=$1
    echo -e "${YELLOW}[WARN]${NC} ${msg}"
}
print_error() {
    local msg=$1
    echo -e "${RED}[ERROR]${NC} ${msg}"
}
# check_postgres - probe the configured server with pg_isready and abort the
# whole script (exit 1) when it is unreachable.
check_postgres() {
    print_info "Checking PostgreSQL connection..."
    if pg_isready -h "$DB_HOST" -p "$DB_PORT" > /dev/null 2>&1; then
        print_info "PostgreSQL is running"
        return 0
    fi
    print_error "PostgreSQL is not running or not accessible at ${DB_HOST}:${DB_PORT}"
    exit 1
}
# db_exists - exit 0 when $DB_NAME appears in the server's database list.
# `psql -lqt` prints one row per database; column 1 (pipe-separated) is the name.
db_exists() {
    psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -lqt | cut -d \| -f 1 | grep -qw "$DB_NAME"
}
# create_database - create $DB_NAME if missing; warn (not fail) when it exists.
create_database() {
    print_info "Creating test database: $DB_NAME"
    if db_exists; then
        print_warn "Database $DB_NAME already exists"
    else
        createdb -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" "$DB_NAME"
        print_info "Database $DB_NAME created successfully"
    fi
}
# drop_database - drop $DB_NAME if present; warn (not fail) when absent.
drop_database() {
    print_info "Dropping test database: $DB_NAME"
    if db_exists; then
        dropdb -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" "$DB_NAME"
        print_info "Database $DB_NAME dropped successfully"
    else
        print_warn "Database $DB_NAME does not exist"
    fi
}
# run_migrations - apply pending sqlx migrations against $DB_URL.
run_migrations() {
    print_info "Running migrations on test database..."
    DATABASE_URL="$DB_URL" sqlx migrate run
    print_info "Migrations completed successfully"
}
# clean_database - delete all rows from every core table, children first.
clean_database() {
    print_info "Cleaning test database..."
    # Run every statement in ONE psql session. The original invoked psql once
    # per statement, so `SET session_replication_role` never applied to the
    # DELETEs (SET only lasts for its own session/connection).
    #
    # Table names follow the singular, attune-qualified convention used by
    # verify_schema and show_status in this same script; the previous plural,
    # unqualified names (executions, inquiries, ...) matched no table there.
    psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" <<'SQL'
SET session_replication_role = replica;
-- Disable trigger firing above, then delete in reverse dependency order.
DELETE FROM attune.execution;
DELETE FROM attune.inquiry;
DELETE FROM attune.enforcement;
DELETE FROM attune.event;
DELETE FROM attune.rule;
DELETE FROM attune.trigger;
DELETE FROM attune.notification;
DELETE FROM attune.key;
DELETE FROM attune.identity;
DELETE FROM attune.worker;
DELETE FROM attune.runtime;
DELETE FROM attune.action;
DELETE FROM attune.pack;
SET session_replication_role = DEFAULT;
SQL
    print_info "Database cleaned successfully"
}
# verify_schema - confirm every core table exists in the 'attune' schema;
# returns non-zero on the first missing table.
verify_schema() {
    print_info "Verifying database schema..."
    PSQL_CMD="psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -t"
    # Check for essential tables in attune schema
    TABLES=("pack" "action" "runtime" "worker" "trigger" "rule" "event" "enforcement" "execution" "inquiry" "identity" "key" "notification")
    for table in "${TABLES[@]}"; do
        # EXISTS query prints 't' or 'f'; grep -q 't' turns that into a status.
        if $PSQL_CMD -c "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema = 'attune' AND table_name = '$table');" | grep -q 't'; then
            echo -e " ${GREEN}${NC} Table 'attune.$table' exists"
        else
            echo -e " ${RED}${NC} Table 'attune.$table' missing"
            return 1
        fi
    done
    print_info "Schema verification passed"
}
# show_status - print connection info plus table/migration/record counts.
show_status() {
    print_info "Test Database Status"
    echo " Database: $DB_NAME"
    echo " Host: $DB_HOST:$DB_PORT"
    echo " User: $DB_USER"
    echo " URL: $DB_URL"
    echo ""
    if db_exists; then
        echo -e " Status: ${GREEN}EXISTS${NC}"
        PSQL_CMD="psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -t -c"
        # Count tables
        TABLE_COUNT=$($PSQL_CMD "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'attune' AND table_type = 'BASE TABLE';" | tr -d ' ')
        echo " Tables: $TABLE_COUNT"
        # Count migrations ("0" fallback: table absent before first migration)
        MIGRATION_COUNT=$($PSQL_CMD "SELECT COUNT(*) FROM _sqlx_migrations WHERE success = true;" 2>/dev/null | tr -d ' ' || echo "0")
        echo " Migrations: $MIGRATION_COUNT"
        # Count records in each table
        echo ""
        echo " Record counts:"
        for table in pack action runtime worker trigger rule event enforcement execution inquiry identity key notification; do
            COUNT=$($PSQL_CMD "SELECT COUNT(*) FROM attune.$table;" 2>/dev/null | tr -d ' ' || echo "0")
            printf " %-15s %s\n" "$table:" "$COUNT"
        done
    else
        echo -e " Status: ${RED}DOES NOT EXIST${NC}"
    fi
}
# show_help - usage text. The heredoc body is emitted verbatim ($0 expands).
show_help() {
    cat << EOF
Test Database Setup Script for Attune
Usage: $0 [command]
Commands:
setup Create database and run migrations (default)
create Create the test database
drop Drop the test database
reset Drop, create, and migrate the database
migrate Run migrations on existing database
clean Delete all data from tables
verify Verify database schema
status Show database status and record counts
help Show this help message
Environment Variables:
POSTGRES_USER PostgreSQL user (default: postgres)
POSTGRES_PASSWORD PostgreSQL password (prompted if not set)
PGPASSWORD PostgreSQL password (alternative to POSTGRES_PASSWORD)
POSTGRES_HOST PostgreSQL host (default: localhost)
POSTGRES_PORT PostgreSQL port (default: 5432)
Examples:
$0 setup # Create and setup test database
$0 reset # Reset test database
$0 clean # Clean all data
$0 status # Show database status
EOF
}
# Main - dispatch on the first CLI argument (defaults to 'setup').
case "${1:-setup}" in
    setup)
        check_postgres
        create_database
        run_migrations
        verify_schema
        print_info "Test database setup complete!"
        ;;
    create)
        check_postgres
        create_database
        ;;
    drop)
        check_postgres
        drop_database
        ;;
    reset)
        check_postgres
        drop_database
        create_database
        run_migrations
        verify_schema
        print_info "Test database reset complete!"
        ;;
    migrate)
        check_postgres
        run_migrations
        ;;
    clean)
        check_postgres
        # Guard: cleaning only makes sense when the database exists.
        if ! db_exists; then
            print_error "Database $DB_NAME does not exist"
            exit 1
        fi
        clean_database
        ;;
    verify)
        check_postgres
        if ! db_exists; then
            print_error "Database $DB_NAME does not exist"
            exit 1
        fi
        verify_schema
        ;;
    status)
        check_postgres
        show_status
        ;;
    help|--help|-h)
        show_help
        ;;
    *)
        print_error "Unknown command: $1"
        show_help
        exit 1
        ;;
esac

354
scripts/test-end-to-end-flow.sh Executable file
View File

@@ -0,0 +1,354 @@
#!/bin/bash
# End-to-End Flow Test
#
# Tests the complete event lifecycle:
# 1. Sensor generates event
# 2. Rule matcher creates enforcement
# 3. Executor schedules execution
# 4. Worker executes action
# 5. Results are recorded
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Database connection
# NOTE(review): hard-coded local dev credentials; confirm they match the
# development environment before reuse elsewhere.
DB_URL="postgresql://postgres:postgres@localhost:5432/attune"
# Service PIDs (populated as each service is launched further below)
SENSOR_PID=""
EXECUTOR_PID=""
WORKER_PID=""
# Log files - unique temp files, intentionally NOT deleted on exit so they
# can be inspected after a failed run (paths are printed by cleanup()).
SENSOR_LOG=$(mktemp /tmp/attune-sensor-e2e.XXXXXX)
EXECUTOR_LOG=$(mktemp /tmp/attune-executor-e2e.XXXXXX)
WORKER_LOG=$(mktemp /tmp/attune-worker-e2e.XXXXXX)
echo -e "${BLUE}╔════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ Attune End-to-End Flow Test ║${NC}"
echo -e "${BLUE}║ Sensor → Event → Enforcement → Execution ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════════════╝${NC}"
echo ""
# Cleanup function - runs on ANY exit via the trap below; stops whichever
# services were started (in reverse start order) and reports log locations.
cleanup() {
    echo ""
    echo -e "${YELLOW}Cleaning up services...${NC}"
    if [ -n "$WORKER_PID" ]; then
        kill $WORKER_PID 2>/dev/null || true
        wait $WORKER_PID 2>/dev/null || true
        echo " Stopped worker service"
    fi
    if [ -n "$EXECUTOR_PID" ]; then
        kill $EXECUTOR_PID 2>/dev/null || true
        wait $EXECUTOR_PID 2>/dev/null || true
        echo " Stopped executor service"
    fi
    if [ -n "$SENSOR_PID" ]; then
        kill $SENSOR_PID 2>/dev/null || true
        wait $SENSOR_PID 2>/dev/null || true
        echo " Stopped sensor service"
    fi
    echo -e "${GREEN}✓ Cleanup complete${NC}"
    echo ""
    echo "Log files (preserved for inspection):"
    echo " Sensor: $SENSOR_LOG"
    echo " Executor: $EXECUTOR_LOG"
    echo " Worker: $WORKER_LOG"
}
trap cleanup EXIT INT TERM
# Check prerequisites
echo -e "${YELLOW}1. Checking prerequisites...${NC}"
if ! command -v psql &> /dev/null; then
    echo -e "${RED}ERROR: psql not found${NC}"
    exit 1
fi
if ! command -v cargo &> /dev/null; then
    echo -e "${RED}ERROR: cargo not found${NC}"
    exit 1
fi
if ! psql "$DB_URL" -c "SELECT 1" &> /dev/null; then
    echo -e "${RED}ERROR: Cannot connect to database${NC}"
    exit 1
fi
echo -e "${GREEN}✓ Prerequisites OK${NC}"
echo ""
# Get initial counts - baselines; later steps only credit rows created
# after this point (count deltas, not absolute counts).
echo -e "${YELLOW}2. Recording initial state...${NC}"
INITIAL_EVENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM event" | tr -d ' ')
INITIAL_ENFORCEMENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM enforcement" | tr -d ' ')
INITIAL_EXECUTIONS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM execution" | tr -d ' ')
echo " Events: $INITIAL_EVENTS"
echo " Enforcements: $INITIAL_ENFORCEMENTS"
echo " Executions: $INITIAL_EXECUTIONS"
echo ""
# Verify configuration: the timer sensor, at least one enabled timer rule,
# and the echo action must all be present for the flow to run.
echo -e "${YELLOW}3. Verifying configuration...${NC}"
TIMER_SENSOR=$(psql "$DB_URL" -t -c "SELECT ref FROM sensor WHERE ref = 'core.interval_timer_sensor'" | tr -d ' ')
TIMER_RULE=$(psql "$DB_URL" -t -c "SELECT ref FROM rule WHERE trigger_ref = 'core.intervaltimer' AND enabled = true LIMIT 1" | tr -d ' ')
ECHO_ACTION=$(psql "$DB_URL" -t -c "SELECT ref FROM action WHERE ref = 'core.echo'" | tr -d ' ')
if [ -z "$TIMER_SENSOR" ]; then
    echo -e "${RED}ERROR: Timer sensor not found${NC}"
    exit 1
fi
if [ -z "$TIMER_RULE" ]; then
    echo -e "${RED}ERROR: No enabled timer rules found${NC}"
    exit 1
fi
if [ -z "$ECHO_ACTION" ]; then
    echo -e "${RED}ERROR: Echo action not found${NC}"
    exit 1
fi
echo -e "${GREEN}✓ Sensor: $TIMER_SENSOR${NC}"
echo -e "${GREEN}✓ Rule: $TIMER_RULE${NC}"
echo -e "${GREEN}✓ Action: $ECHO_ACTION${NC}"
echo ""
# Start sensor service
echo -e "${YELLOW}4. Starting sensor service...${NC}"
# NOTE(review): `2>&1 > /dev/null` redirects stderr to the terminal and only
# stdout to /dev/null; if the intent was to silence the build entirely it
# should read `> /dev/null 2>&1`. The same pattern repeats for the executor
# and worker builds below - confirm intent before changing.
cargo build --quiet --bin attune-sensor 2>&1 > /dev/null
cargo run --quiet --bin attune-sensor > "$SENSOR_LOG" 2>&1 &
SENSOR_PID=$!
echo " PID: $SENSOR_PID"
sleep 3
# kill -0 probes liveness without delivering a signal.
if ! kill -0 $SENSOR_PID 2>/dev/null; then
    echo -e "${RED}ERROR: Sensor service failed to start${NC}"
    tail -30 "$SENSOR_LOG"
    exit 1
fi
echo -e "${GREEN}✓ Sensor service running${NC}"
echo ""
# Start executor service
echo -e "${YELLOW}5. Starting executor service...${NC}"
cargo build --quiet --bin attune-executor 2>&1 > /dev/null
cargo run --quiet --bin attune-executor > "$EXECUTOR_LOG" 2>&1 &
EXECUTOR_PID=$!
echo " PID: $EXECUTOR_PID"
sleep 3
if ! kill -0 $EXECUTOR_PID 2>/dev/null; then
    echo -e "${RED}ERROR: Executor service failed to start${NC}"
    tail -30 "$EXECUTOR_LOG"
    exit 1
fi
echo -e "${GREEN}✓ Executor service running${NC}"
echo ""
# Start worker service
echo -e "${YELLOW}6. Starting worker service...${NC}"
cargo build --quiet --bin attune-worker 2>&1 > /dev/null
cargo run --quiet --bin attune-worker > "$WORKER_LOG" 2>&1 &
WORKER_PID=$!
echo " PID: $WORKER_PID"
sleep 3
if ! kill -0 $WORKER_PID 2>/dev/null; then
    echo -e "${RED}ERROR: Worker service failed to start${NC}"
    tail -30 "$WORKER_LOG"
    exit 1
fi
echo -e "${GREEN}✓ Worker service running${NC}"
echo ""
# Monitor for events
echo -e "${YELLOW}7. Monitoring for events (max 30 seconds)...${NC}"
TIMEOUT=30
ELAPSED=0
while [ $ELAPSED -lt $TIMEOUT ]; do
CURRENT_EVENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM event" | tr -d ' ')
NEW_EVENTS=$((CURRENT_EVENTS - INITIAL_EVENTS))
if [ $NEW_EVENTS -gt 0 ]; then
echo -e "${GREEN}✓ Generated $NEW_EVENTS new event(s)${NC}"
break
fi
echo -n "."
sleep 1
ELAPSED=$((ELAPSED + 1))
done
echo ""
if [ $NEW_EVENTS -eq 0 ]; then
echo -e "${RED}ERROR: No events generated${NC}"
exit 1
fi
# Monitor for enforcements
echo -e "${YELLOW}8. Monitoring for enforcements (max 10 seconds)...${NC}"
TIMEOUT=10
ELAPSED=0
while [ $ELAPSED -lt $TIMEOUT ]; do
CURRENT_ENFORCEMENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM enforcement" | tr -d ' ')
NEW_ENFORCEMENTS=$((CURRENT_ENFORCEMENTS - INITIAL_ENFORCEMENTS))
if [ $NEW_ENFORCEMENTS -gt 0 ]; then
echo -e "${GREEN}✓ Created $NEW_ENFORCEMENTS enforcement(s)${NC}"
break
fi
echo -n "."
sleep 1
ELAPSED=$((ELAPSED + 1))
done
echo ""
if [ $NEW_ENFORCEMENTS -eq 0 ]; then
echo -e "${RED}ERROR: No enforcements created${NC}"
exit 1
fi
# Monitor for executions
echo -e "${YELLOW}9. Monitoring for executions (max 15 seconds)...${NC}"
TIMEOUT=15
ELAPSED=0
while [ $ELAPSED -lt $TIMEOUT ]; do
CURRENT_EXECUTIONS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM execution" | tr -d ' ')
NEW_EXECUTIONS=$((CURRENT_EXECUTIONS - INITIAL_EXECUTIONS))
if [ $NEW_EXECUTIONS -gt 0 ]; then
echo -e "${GREEN}✓ Created $NEW_EXECUTIONS execution(s)${NC}"
break
fi
echo -n "."
sleep 1
ELAPSED=$((ELAPSED + 1))
done
echo ""
if [ $NEW_EXECUTIONS -eq 0 ]; then
echo -e "${RED}ERROR: No executions created${NC}"
echo -e "${YELLOW}This might indicate executor service is not processing enforcements${NC}"
exit 1
fi
# Check for completed executions
echo -e "${YELLOW}10. Waiting for execution completion (max 15 seconds)...${NC}"
TIMEOUT=15
ELAPSED=0
COMPLETED=0
while [ $ELAPSED -lt $TIMEOUT ]; do
COMPLETED=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM execution WHERE status = 'succeeded' AND created > NOW() - INTERVAL '1 minute'" | tr -d ' ')
if [ $COMPLETED -gt 0 ]; then
echo -e "${GREEN}$COMPLETED execution(s) completed successfully${NC}"
break
fi
echo -n "."
sleep 1
ELAPSED=$((ELAPSED + 1))
done
echo ""
# Display results - recent rows from each table (stderr suppressed to keep
# the summary tidy).
echo -e "${BLUE}╔════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ Test Results ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════════════╝${NC}"
echo ""
echo -e "${YELLOW}Recent Events:${NC}"
psql "$DB_URL" -c "
SELECT id, trigger_ref, LEFT(payload::text, 50) as payload_snippet, created
FROM event
ORDER BY created DESC
LIMIT 3
" 2>/dev/null
echo ""
echo -e "${YELLOW}Recent Enforcements:${NC}"
psql "$DB_URL" -c "
SELECT id, rule_ref, status, created
FROM enforcement
ORDER BY created DESC
LIMIT 3
" 2>/dev/null
echo ""
echo -e "${YELLOW}Recent Executions:${NC}"
psql "$DB_URL" -c "
SELECT id, action_ref, status, LEFT(result::text, 40) as result_snippet, created
FROM execution
ORDER BY created DESC
LIMIT 3
" 2>/dev/null
echo ""
# Final summary
echo -e "${BLUE}╔════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ Summary ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════════════╝${NC}"
echo ""
echo " Events created: $NEW_EVENTS"
echo " Enforcements created: $NEW_ENFORCEMENTS"
echo " Executions created: $NEW_EXECUTIONS"
echo " Executions completed: $COMPLETED"
echo ""
# Determine overall result:
#   full pass     - every stage produced rows AND at least one completion
#   partial pass  - everything up to execution creation (exit 0, warns)
#   failure       - any earlier stage produced nothing
if [ $NEW_EVENTS -gt 0 ] && [ $NEW_ENFORCEMENTS -gt 0 ] && [ $NEW_EXECUTIONS -gt 0 ] && [ $COMPLETED -gt 0 ]; then
    echo -e "${GREEN}╔════════════════════════════════════════════════╗${NC}"
    echo -e "${GREEN}║ ✓ END-TO-END TEST PASSED ║${NC}"
    echo -e "${GREEN}║ ║${NC}"
    echo -e "${GREEN}║ Complete flow verified: ║${NC}"
    echo -e "${GREEN}║ Sensor → Event → Rule → Enforcement → ║${NC}"
    echo -e "${GREEN}║ Execution → Worker → Completion ║${NC}"
    echo -e "${GREEN}╚════════════════════════════════════════════════╝${NC}"
    exit 0
elif [ $NEW_EVENTS -gt 0 ] && [ $NEW_ENFORCEMENTS -gt 0 ] && [ $NEW_EXECUTIONS -gt 0 ]; then
    echo -e "${YELLOW}╔════════════════════════════════════════════════╗${NC}"
    echo -e "${YELLOW}║ ⚠ PARTIAL SUCCESS ║${NC}"
    echo -e "${YELLOW}║ ║${NC}"
    echo -e "${YELLOW}║ Flow works up to execution creation but ║${NC}"
    echo -e "${YELLOW}║ executions haven't completed yet. ║${NC}"
    echo -e "${YELLOW}║ This may be timing - check worker logs. ║${NC}"
    echo -e "${YELLOW}╚════════════════════════════════════════════════╝${NC}"
    exit 0
else
    echo -e "${RED}╔════════════════════════════════════════════════╗${NC}"
    echo -e "${RED}║ ✗ TEST FAILED ║${NC}"
    echo -e "${RED}║ ║${NC}"
    echo -e "${RED}║ Flow did not complete as expected. ║${NC}"
    echo -e "${RED}║ Check service logs for details. ║${NC}"
    echo -e "${RED}╚════════════════════════════════════════════════╝${NC}"
    exit 1
fi

189
scripts/test-sensor-service.sh Executable file
View File

@@ -0,0 +1,189 @@
#!/bin/bash
# Test Sensor Service - Verify end-to-end event flow
#
# This script:
# 1. Starts the sensor service
# 2. Monitors for events being created
# 3. Monitors for enforcements being created
# 4. Verifies the event->enforcement flow works
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Database connection
# NOTE(review): hard-coded local dev credentials; confirm they match the
# development environment before running anywhere else.
DB_URL="postgresql://postgres:postgres@localhost:5432/attune"
echo -e "${GREEN}=== Attune Sensor Service Test ===${NC}"
echo ""
# Check prerequisites
echo -e "${YELLOW}Checking prerequisites...${NC}"
if ! command -v psql &> /dev/null; then
    echo -e "${RED}ERROR: psql not found. Please install PostgreSQL client.${NC}"
    exit 1
fi
if ! command -v cargo &> /dev/null; then
    echo -e "${RED}ERROR: cargo not found. Please install Rust.${NC}"
    exit 1
fi
# Test database connection
if ! psql "$DB_URL" -c "SELECT 1" &> /dev/null; then
    echo -e "${RED}ERROR: Cannot connect to database${NC}"
    exit 1
fi
echo -e "${GREEN}✓ Prerequisites OK${NC}"
echo ""
# Get initial counts - baselines for detecting newly created rows below.
echo -e "${YELLOW}Getting initial event/enforcement counts...${NC}"
INITIAL_EVENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM event" | tr -d ' ')
INITIAL_ENFORCEMENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM enforcement" | tr -d ' ')
echo "Initial events: $INITIAL_EVENTS"
echo "Initial enforcements: $INITIAL_ENFORCEMENTS"
echo ""
# Check if timer sensor and rule exist
echo -e "${YELLOW}Checking sensor and rule configuration...${NC}"
TIMER_SENSOR=$(psql "$DB_URL" -t -c "SELECT ref FROM sensor WHERE ref = 'core.interval_timer_sensor'" | tr -d ' ')
# LIMIT 1 keeps TIMER_RULE single-valued when several timer rules are
# enabled; without it psql returns one ref per line and the variable holds
# embedded newlines (tr -d ' ' strips spaces, not newlines). This matches
# the equivalent query in test-end-to-end-flow.sh.
TIMER_RULE=$(psql "$DB_URL" -t -c "SELECT ref FROM rule WHERE trigger_ref = 'core.intervaltimer' AND enabled = true LIMIT 1" | tr -d ' ')
if [ -z "$TIMER_SENSOR" ]; then
    echo -e "${RED}ERROR: Timer sensor not found in database${NC}"
    exit 1
fi
if [ -z "$TIMER_RULE" ]; then
    echo -e "${RED}ERROR: No enabled timer rules found${NC}"
    exit 1
fi
echo -e "${GREEN}✓ Found sensor: $TIMER_SENSOR${NC}"
echo -e "${GREEN}✓ Found rule: $TIMER_RULE${NC}"
echo ""
# Start sensor service in background
echo -e "${YELLOW}Starting sensor service...${NC}"
SENSOR_LOG=$(mktemp /tmp/attune-sensor-test.XXXXXX)
echo "Logs: $SENSOR_LOG"
cargo run --quiet --bin attune-sensor -- --log-level debug > "$SENSOR_LOG" 2>&1 &
SENSOR_PID=$!
echo "Sensor service PID: $SENSOR_PID"
# Cleanup function - runs on any exit via the trap below.
cleanup() {
    echo ""
    echo -e "${YELLOW}Cleaning up...${NC}"
    if [ -n "$SENSOR_PID" ]; then
        kill $SENSOR_PID 2>/dev/null || true
        wait $SENSOR_PID 2>/dev/null || true
    fi
    echo -e "${GREEN}✓ Cleanup complete${NC}"
}
trap cleanup EXIT INT TERM
# Wait for service to start
echo -e "${YELLOW}Waiting for service to initialize...${NC}"
sleep 5
# Check if process is still running (kill -0 probes without signalling)
if ! kill -0 $SENSOR_PID 2>/dev/null; then
    echo -e "${RED}ERROR: Sensor service failed to start${NC}"
    echo -e "${YELLOW}Last 50 lines of log:${NC}"
    tail -50 "$SENSOR_LOG"
    exit 1
fi
echo -e "${GREEN}✓ Sensor service started${NC}"
echo ""
# Monitor for events (30 second timeout) - poll the count once per second
# until it exceeds the recorded baseline.
echo -e "${YELLOW}Monitoring for events (waiting up to 30 seconds)...${NC}"
TIMEOUT=30
ELAPSED=0
while [ $ELAPSED -lt $TIMEOUT ]; do
    CURRENT_EVENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM event" | tr -d ' ')
    NEW_EVENTS=$((CURRENT_EVENTS - INITIAL_EVENTS))
    if [ $NEW_EVENTS -gt 0 ]; then
        echo -e "${GREEN}✓ Generated $NEW_EVENTS new event(s)!${NC}"
        # Show recent events
        echo -e "${YELLOW}Recent events:${NC}"
        psql "$DB_URL" -c "
SELECT id, trigger_ref, created
FROM event
ORDER BY created DESC
LIMIT 5
"
        break
    fi
    echo -n "."
    sleep 1
    ELAPSED=$((ELAPSED + 1))
done
echo ""
if [ $NEW_EVENTS -eq 0 ]; then
    echo -e "${RED}ERROR: No events generated after $TIMEOUT seconds${NC}"
    echo -e "${YELLOW}Sensor service logs:${NC}"
    tail -100 "$SENSOR_LOG"
    exit 1
fi
# Check for enforcements - short grace period for rule matching to run.
echo -e "${YELLOW}Checking for enforcements...${NC}"
sleep 2
CURRENT_ENFORCEMENTS=$(psql "$DB_URL" -t -c "SELECT COUNT(*) FROM enforcement" | tr -d ' ')
NEW_ENFORCEMENTS=$((CURRENT_ENFORCEMENTS - INITIAL_ENFORCEMENTS))
if [ $NEW_ENFORCEMENTS -gt 0 ]; then
    echo -e "${GREEN}✓ Created $NEW_ENFORCEMENTS enforcement(s)!${NC}"
    # Show recent enforcements
    echo -e "${YELLOW}Recent enforcements:${NC}"
    psql "$DB_URL" -c "
SELECT e.id, e.rule_ref, e.status, e.created
FROM enforcement e
ORDER BY e.created DESC
LIMIT 5
"
else
    # Non-fatal: enforcement creation is asynchronous.
    echo -e "${YELLOW}⚠ No enforcements created yet (this might be OK if rule matching hasn't run)${NC}"
fi
echo ""
# Show sensor service logs
echo -e "${YELLOW}Sensor service logs (last 30 lines):${NC}"
tail -30 "$SENSOR_LOG"
echo ""
# Summary - pass/fail is based solely on event generation.
echo -e "${GREEN}=== Test Summary ===${NC}"
echo "Events generated: $NEW_EVENTS"
echo "Enforcements created: $NEW_ENFORCEMENTS"
if [ $NEW_EVENTS -gt 0 ]; then
    echo -e "${GREEN}✓ TEST PASSED: Sensor service is generating events!${NC}"
    exit 0
else
    echo -e "${RED}✗ TEST FAILED: No events generated${NC}"
    exit 1
fi

386
scripts/test-timer-echo-docker.sh Executable file
View File

@@ -0,0 +1,386 @@
#!/bin/bash
# Test Timer Echo Happy Path (Docker Environment)
# Verifies the complete event flow with unified runtime detection
# Flow exercised: timer sensor -> event -> rule -> enforcement -> execution -> worker.
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration (each value overridable via the corresponding ATTUNE_* env var)
API_URL="${ATTUNE_API_URL:-http://localhost:8080}"
API_USER="${ATTUNE_API_USER:-admin}"
API_PASSWORD="${ATTUNE_API_PASSWORD:-admin}"
WAIT_TIME=15 # Time to wait for executions
POLL_INTERVAL=2 # How often to check for executions
echo -e "${BLUE}=== Attune Timer Echo Happy Path Test (Docker) ===${NC}"
echo "API URL: $API_URL"
echo ""
# Print helpers: a status glyph (colored when the terminal honors ANSI codes)
# followed by the message. The previous bodies emitted only the color escape
# codes with nothing between them, so success/error/info lines were
# indistinguishable on non-color terminals — restore the ✓ / ✗ / ℹ markers.
print_status() {
  echo -e "${GREEN}✓${NC} $1"
}
print_error() {
  echo -e "${RED}✗${NC} $1"
}
print_info() {
  echo -e "${YELLOW}ℹ${NC} $1"
}
# Verify that a Docker container named $1 is both running and reporting a
# "healthy" health-check status. Emits one line via print_status/print_error
# and returns 0 only when the container exists AND is healthy.
check_service() {
  local svc=$1
  # Not running at all?
  if ! docker ps --format '{{.Names}}' | grep -q "^${svc}$"; then
    print_error "Service $svc is not running"
    return 1
  fi
  # Running, but the health probe has not gone green yet?
  if ! docker ps --filter "name=${svc}" --filter "health=healthy" --format '{{.Names}}' | grep -q "^${svc}$"; then
    print_error "Service $svc is not healthy yet"
    return 1
  fi
  print_status "Service $svc is healthy"
  return 0
}
# Step 0: Check Docker services
# All six containers must be up before the API calls below can succeed.
echo -e "${BLUE}Step 0: Checking Docker services...${NC}"
SERVICES=("attune-api" "attune-executor" "attune-worker" "attune-sensor" "postgres" "rabbitmq")
ALL_HEALTHY=true
for service in "${SERVICES[@]}"; do
if ! check_service "$service" 2>/dev/null; then
ALL_HEALTHY=false
print_info "Service $service not ready yet"
fi
done
if [ "$ALL_HEALTHY" = false ]; then
# Best-effort grace period; the test proceeds even if services stay unhealthy,
# relying on the later API calls to fail loudly instead.
print_info "Some services are not ready. Waiting 10 seconds..."
sleep 10
fi
echo ""
# Step 1: Login and get JWT token
echo -e "${BLUE}Step 1: Authenticating...${NC}"
LOGIN_RESPONSE=$(curl -s -X POST "$API_URL/auth/login" \
-H "Content-Type: application/json" \
-d "{\"username\":\"$API_USER\",\"password\":\"$API_PASSWORD\"}")
# jq '// empty' yields an empty string when the field is missing or null.
ACCESS_TOKEN=$(echo "$LOGIN_RESPONSE" | jq -r '.data.access_token // empty')
if [ -z "$ACCESS_TOKEN" ]; then
print_error "Failed to authenticate"
echo "Response: $LOGIN_RESPONSE"
exit 1
fi
print_status "Authentication successful"
echo ""
# Step 2: Verify runtime detection
echo -e "${BLUE}Step 2: Verifying runtime detection...${NC}"
RUNTIMES_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/runtimes" \
-H "Authorization: Bearer $ACCESS_TOKEN")
# Accept either capitalization of the runtime name.
SHELL_RUNTIME=$(echo "$RUNTIMES_RESPONSE" | jq -r '.data[] | select(.name == "shell" or .name == "Shell") | .name')
if [ -n "$SHELL_RUNTIME" ]; then
print_status "Shell runtime detected: $SHELL_RUNTIME"
# Get runtime details
RUNTIME_DETAILS=$(echo "$RUNTIMES_RESPONSE" | jq -r ".data[] | select(.name == \"$SHELL_RUNTIME\")")
echo "$RUNTIME_DETAILS" | jq '{name, enabled, distributions}' || echo "$RUNTIME_DETAILS"
else
print_error "Shell runtime not found"
echo "Available runtimes:"
echo "$RUNTIMES_RESPONSE" | jq '.data[] | {name, enabled}'
exit 1
fi
echo ""
# Step 3: Check if core pack exists
echo -e "${BLUE}Step 3: Checking for core pack...${NC}"
PACK_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/packs/core" \
-H "Authorization: Bearer $ACCESS_TOKEN")
PACK_ID=$(echo "$PACK_RESPONSE" | jq -r '.data.id // empty')
if [ -z "$PACK_ID" ]; then
print_error "Core pack not found"
print_info "Attempting to load core pack..."
# Try to load core pack via docker exec
# NOTE(review): assumes the loader script lives at this path inside the
# attune-api image — confirm against the API container's Dockerfile.
if docker ps --format '{{.Names}}' | grep -q "^attune-api$"; then
docker exec attune-api /opt/attune/scripts/load-core-pack.sh || true
sleep 2
# Retry
PACK_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/packs/core" \
-H "Authorization: Bearer $ACCESS_TOKEN")
PACK_ID=$(echo "$PACK_RESPONSE" | jq -r '.data.id // empty')
if [ -z "$PACK_ID" ]; then
print_error "Failed to load core pack"
exit 1
fi
else
print_error "Cannot load core pack - API container not accessible"
exit 1
fi
fi
print_status "Core pack found (ID: $PACK_ID)"
echo ""
# Step 4: Check interval timer trigger
echo -e "${BLUE}Step 4: Checking for interval timer trigger...${NC}"
TRIGGERS_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/triggers" \
-H "Authorization: Bearer $ACCESS_TOKEN")
INTERVAL_TRIGGER=$(echo "$TRIGGERS_RESPONSE" | jq -r '.data[] | select(.ref == "core.intervaltimer") | .ref')
if [ -z "$INTERVAL_TRIGGER" ]; then
print_error "Interval timer trigger not found"
echo "Available triggers:"
echo "$TRIGGERS_RESPONSE" | jq '.data[] | {ref, name}'
exit 1
fi
print_status "Interval timer trigger found"
echo ""
# Step 5: Check echo action
echo -e "${BLUE}Step 5: Checking for echo action...${NC}"
ACTIONS_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/actions" \
-H "Authorization: Bearer $ACCESS_TOKEN")
ECHO_ACTION=$(echo "$ACTIONS_RESPONSE" | jq -r '.data[] | select(.ref == "core.echo") | .ref')
if [ -z "$ECHO_ACTION" ]; then
print_error "Echo action not found"
echo "Available actions:"
echo "$ACTIONS_RESPONSE" | jq '.data[] | {ref, name, runtime}'
exit 1
fi
print_status "Echo action found"
ACTION_DETAILS=$(echo "$ACTIONS_RESPONSE" | jq -r '.data[] | select(.ref == "core.echo")')
echo "$ACTION_DETAILS" | jq '{ref, name, runtime, entry_point}'
echo ""
# Step 6: Create trigger instance for 1-second interval
echo -e "${BLUE}Step 6: Creating trigger instance...${NC}"
# Epoch-seconds suffix keeps the ref unique across repeated test runs.
TRIGGER_INSTANCE_REF="test.timer_1s_$(date +%s)"
CREATE_TRIGGER_RESPONSE=$(curl -s -X POST "$API_URL/api/v1/trigger-instances" \
-H "Authorization: Bearer $ACCESS_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"trigger_type_ref\": \"core.intervaltimer\",
\"ref\": \"$TRIGGER_INSTANCE_REF\",
\"description\": \"Test timer - 1 second interval\",
\"enabled\": true,
\"parameters\": {
\"unit\": \"seconds\",
\"interval\": 1
}
}")
TRIGGER_INSTANCE_ID=$(echo "$CREATE_TRIGGER_RESPONSE" | jq -r '.data.id // empty')
if [ -z "$TRIGGER_INSTANCE_ID" ]; then
print_error "Failed to create trigger instance"
echo "Response: $CREATE_TRIGGER_RESPONSE"
exit 1
fi
print_status "Trigger instance created (ID: $TRIGGER_INSTANCE_ID, Ref: $TRIGGER_INSTANCE_REF)"
echo ""
# Step 7: Create rule linking timer to echo
echo -e "${BLUE}Step 7: Creating rule...${NC}"
RULE_REF="test.timer_echo_1s_$(date +%s)"
CREATE_RULE_RESPONSE=$(curl -s -X POST "$API_URL/api/v1/rules" \
-H "Authorization: Bearer $ACCESS_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"ref\": \"$RULE_REF\",
\"pack_ref\": \"core\",
\"name\": \"Test Timer Echo 1s\",
\"description\": \"Test rule - echoes Hello World every second\",
\"enabled\": true,
\"trigger_instance_ref\": \"$TRIGGER_INSTANCE_REF\",
\"action_ref\": \"core.echo\",
\"action_parameters\": {
\"message\": \"Hello, World! (from unified runtime detection test)\"
}
}")
RULE_ID=$(echo "$CREATE_RULE_RESPONSE" | jq -r '.data.id // empty')
if [ -z "$RULE_ID" ]; then
print_error "Failed to create rule"
echo "Response: $CREATE_RULE_RESPONSE"
exit 1
fi
print_status "Rule created (ID: $RULE_ID, Ref: $RULE_REF)"
echo ""
# Step 8: Wait for executions
echo -e "${BLUE}Step 8: Waiting for executions...${NC}"
print_info "Waiting $WAIT_TIME seconds for timer to fire and action to execute..."
EXECUTION_COUNT=0
START_TIME=$(date +%s)
MAX_WAIT=$((START_TIME + WAIT_TIME))
# Poll the executions endpoint until the deadline, reporting growth as it happens.
while [ $(date +%s) -lt $MAX_WAIT ]; do
sleep $POLL_INTERVAL
# Check for executions
EXECUTIONS_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/executions?limit=50" \
-H "Authorization: Bearer $ACCESS_TOKEN")
CURRENT_COUNT=$(echo "$EXECUTIONS_RESPONSE" | jq '[.data[] | select(.action_ref == "core.echo")] | length')
if [ "$CURRENT_COUNT" -gt "$EXECUTION_COUNT" ]; then
EXECUTION_COUNT=$CURRENT_COUNT
ELAPSED=$(($(date +%s) - START_TIME))
print_status "Found $EXECUTION_COUNT execution(s) after ${ELAPSED}s"
fi
done
echo ""
# Step 9: Verify executions
echo -e "${BLUE}Step 9: Verifying executions...${NC}"
if [ "$EXECUTION_COUNT" -eq 0 ]; then
print_error "No executions found!"
print_info "Checking system status..."
# Check for events
EVENTS_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/events?limit=10" \
-H "Authorization: Bearer $ACCESS_TOKEN")
EVENT_COUNT=$(echo "$EVENTS_RESPONSE" | jq '.data | length')
echo " Events created: $EVENT_COUNT"
# Check for enforcements
ENFORCEMENTS_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/enforcements?limit=10" \
-H "Authorization: Bearer $ACCESS_TOKEN")
ENFORCEMENT_COUNT=$(echo "$ENFORCEMENTS_RESPONSE" | jq '.data | length')
echo " Enforcements created: $ENFORCEMENT_COUNT"
print_error "Happy path test FAILED - no executions"
exit 1
fi
print_status "Found $EXECUTION_COUNT execution(s)"
# Get execution details
EXECUTIONS_RESPONSE=$(curl -s -X GET "$API_URL/api/v1/executions?limit=5" \
-H "Authorization: Bearer $ACCESS_TOKEN")
echo ""
echo "Recent executions:"
echo "$EXECUTIONS_RESPONSE" | jq '.data[] | select(.action_ref == "core.echo") | {id, status, action_ref, result: .result.stdout // .result}' | head -20
# Check for successful executions
# SUCCESS_COUNT is reused by the final summary below.
SUCCESS_COUNT=$(echo "$EXECUTIONS_RESPONSE" | jq '[.data[] | select(.action_ref == "core.echo" and .status == "succeeded")] | length')
if [ "$SUCCESS_COUNT" -gt 0 ]; then
print_status "$SUCCESS_COUNT execution(s) succeeded"
else
print_error "No successful executions found"
echo ""
echo "Execution statuses:"
echo "$EXECUTIONS_RESPONSE" | jq '.data[] | {id, status, action_ref}'
fi
echo ""
# Step 10: Check worker logs for runtime detection
echo -e "${BLUE}Step 10: Checking worker logs for runtime execution...${NC}"
if docker ps --format '{{.Names}}' | grep -q "^attune-worker$"; then
print_info "Recent worker logs:"
docker logs attune-worker --tail 30 | grep -i "runtime\|shell\|echo\|executing" || echo " (no matching log entries)"
else
print_info "Worker container not accessible for log inspection"
fi
echo ""
# Step 11: Cleanup
echo -e "${BLUE}Step 11: Cleanup...${NC}"
# Disable the rule
print_info "Disabling rule..."
curl -s -X PUT "$API_URL/api/v1/rules/$RULE_REF" \
-H "Authorization: Bearer $ACCESS_TOKEN" \
-H "Content-Type: application/json" \
-d '{"enabled": false}' > /dev/null
print_status "Rule disabled"
# Optionally delete the rule and trigger instance
# NOTE(review): interactive prompt — this will block when run unattended/CI.
read -p "Delete test rule and trigger instance? (y/N) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
curl -s -X DELETE "$API_URL/api/v1/rules/$RULE_REF" \
-H "Authorization: Bearer $ACCESS_TOKEN" > /dev/null
curl -s -X DELETE "$API_URL/api/v1/trigger-instances/$TRIGGER_INSTANCE_REF" \
-H "Authorization: Bearer $ACCESS_TOKEN" > /dev/null
print_status "Test resources deleted"
else
print_info "Test resources left in place (disabled)"
fi
echo ""
# Final summary
echo -e "${BLUE}=== Test Summary ===${NC}"
echo ""
echo "✓ Runtime detection working (Shell runtime detected)"
echo "✓ Core pack loaded with echo action"
echo "✓ Trigger instance created (1-second interval timer)"
echo "✓ Rule created and enabled"
echo "✓ Executions observed: $EXECUTION_COUNT"
echo "✓ Successful executions: $SUCCESS_COUNT"
echo ""
if [ "$SUCCESS_COUNT" -gt 0 ]; then
echo -e "${GREEN}=== HAPPY PATH TEST PASSED ===${NC}"
echo ""
echo "The complete event flow is working:"
echo " Timer Sensor → Event → Rule → Enforcement → Execution → Worker → Shell Action"
echo ""
exit 0
else
echo -e "${RED}=== HAPPY PATH TEST FAILED ===${NC}"
echo ""
echo "Executions were created but none succeeded."
echo "Check service logs for errors:"
echo " docker logs attune-sensor"
echo " docker logs attune-executor"
echo " docker logs attune-worker"
echo ""
exit 1
fi

View File

@@ -0,0 +1,155 @@
#!/bin/bash
# Test script to verify webhook events properly trigger rule processing
# This script demonstrates that webhook events now correctly publish EventCreated messages
set -e
API_URL="${API_URL:-http://localhost:8080}"
WEBHOOK_TRIGGER="${WEBHOOK_TRIGGER:-default.example}"
echo "=================================================="
echo "Webhook Event Processing Test"
echo "=================================================="
echo ""
echo "This script tests that webhook events properly trigger rule processing"
echo "by verifying the EventCreated message is published to the message queue."
echo ""
# Step 1: Check if the trigger exists
echo "Step 1: Checking if trigger '${WEBHOOK_TRIGGER}' exists..."
# curl -w appends the HTTP status on its own line; tail/head split body vs code.
TRIGGER_CHECK=$(curl -s -w "\n%{http_code}" "${API_URL}/api/v1/triggers/${WEBHOOK_TRIGGER}")
HTTP_CODE=$(echo "$TRIGGER_CHECK" | tail -n1)
# NOTE(review): 'head -n-1' (negative count) is a GNU extension and fails on
# BSD/macOS head — confirm the script only targets Linux.
TRIGGER_DATA=$(echo "$TRIGGER_CHECK" | head -n-1)
if [ "$HTTP_CODE" != "200" ]; then
echo "❌ Trigger '${WEBHOOK_TRIGGER}' not found (HTTP ${HTTP_CODE})"
echo " Please create the trigger first or set WEBHOOK_TRIGGER environment variable"
exit 1
fi
echo "✅ Trigger '${WEBHOOK_TRIGGER}' exists"
echo ""
# Step 2: Check if there are any rules for this trigger
echo "Step 2: Checking for rules that subscribe to '${WEBHOOK_TRIGGER}'..."
RULES_CHECK=$(curl -s "${API_URL}/api/v1/rules")
MATCHING_RULES=$(echo "$RULES_CHECK" | jq -r ".data[] | select(.trigger_ref == \"${WEBHOOK_TRIGGER}\") | .ref")
if [ -z "$MATCHING_RULES" ]; then
echo "⚠️ No rules found for trigger '${WEBHOOK_TRIGGER}'"
echo " Events will be created but no enforcements will be generated"
else
echo "✅ Found rules for trigger '${WEBHOOK_TRIGGER}':"
echo "$MATCHING_RULES" | while read -r rule; do
echo " - $rule"
done
fi
echo ""
# Step 3: Send a webhook
echo "Step 3: Sending webhook to trigger '${WEBHOOK_TRIGGER}'..."
WEBHOOK_PAYLOAD='{"test": "data", "timestamp": "'$(date -u +"%Y-%m-%dT%H:%M:%SZ")'"}'
WEBHOOK_RESPONSE=$(curl -s -w "\n%{http_code}" \
-X POST \
-H "Content-Type: application/json" \
-d "$WEBHOOK_PAYLOAD" \
"${API_URL}/api/v1/webhooks/${WEBHOOK_TRIGGER}")
HTTP_CODE=$(echo "$WEBHOOK_RESPONSE" | tail -n1)
RESPONSE_DATA=$(echo "$WEBHOOK_RESPONSE" | head -n-1)
if [ "$HTTP_CODE" != "200" ]; then
echo "❌ Webhook submission failed (HTTP ${HTTP_CODE})"
echo "$RESPONSE_DATA" | jq '.' 2>/dev/null || echo "$RESPONSE_DATA"
exit 1
fi
EVENT_ID=$(echo "$RESPONSE_DATA" | jq -r '.data.event_id')
echo "✅ Webhook received successfully"
echo " Event ID: ${EVENT_ID}"
echo ""
# Step 4: Check the event was created
echo "Step 4: Verifying event was created in database..."
# Brief pause for the API to persist the event before we read it back.
sleep 1
EVENT_CHECK=$(curl -s "${API_URL}/api/v1/events/${EVENT_ID}")
EVENT_TRIGGER=$(echo "$EVENT_CHECK" | jq -r '.data.trigger_ref')
EVENT_RULE=$(echo "$EVENT_CHECK" | jq -r '.data.rule')
echo "✅ Event ${EVENT_ID} exists"
echo " Trigger: ${EVENT_TRIGGER}"
echo " Associated Rule: ${EVENT_RULE}"
echo ""
# Step 5: Check API logs for EventCreated message publishing
echo "Step 5: Checking API logs for EventCreated message..."
echo " (Looking for 'Published EventCreated message for event ${EVENT_ID}')"
echo ""
if command -v docker &> /dev/null; then
# Check if running in Docker
if docker compose ps api &> /dev/null; then
echo " Docker logs from API service:"
docker compose logs api --tail=50 | grep -i "event ${EVENT_ID}" || echo " No logs found (service may not be running in Docker)"
else
echo " ⚠️ Docker Compose not running, skipping log check"
fi
else
echo " ⚠️ Docker not available, skipping log check"
fi
echo ""
# Step 6: Check for enforcements
echo "Step 6: Checking if enforcements were created..."
# Give the executor a moment to match rules and create enforcements.
sleep 2
ENFORCEMENTS_CHECK=$(curl -s "${API_URL}/api/v1/events/${EVENT_ID}/enforcements" 2>/dev/null || echo '{"data": []}')
ENFORCEMENT_COUNT=$(echo "$ENFORCEMENTS_CHECK" | jq -r '.data | length')
if [ "$ENFORCEMENT_COUNT" -gt 0 ]; then
echo "${ENFORCEMENT_COUNT} enforcement(s) created for event ${EVENT_ID}"
echo "$ENFORCEMENTS_CHECK" | jq -r '.data[] | " - Enforcement \(.id): \(.rule_ref) (\(.status))"'
else
if [ -z "$MATCHING_RULES" ]; then
echo " No enforcements created (expected - no rules for this trigger)"
else
echo "⚠️ No enforcements found (unexpected - rules exist for this trigger)"
echo " This may indicate the EventCreated message was not published or processed"
fi
fi
echo ""
# Step 7: Check for executions
echo "Step 7: Checking if executions were created..."
if [ "$ENFORCEMENT_COUNT" -gt 0 ]; then
EXECUTIONS_CHECK=$(curl -s "${API_URL}/api/v1/executions?limit=10")
EVENT_EXECUTIONS=$(echo "$EXECUTIONS_CHECK" | jq -r ".data[] | select(.event == ${EVENT_ID})")
if [ -n "$EVENT_EXECUTIONS" ]; then
echo "✅ Executions created for event ${EVENT_ID}:"
echo "$EVENT_EXECUTIONS" | jq -r '" - Execution \(.id): \(.action_ref) (\(.status))"'
else
echo "⚠️ No executions found yet (may still be processing)"
fi
else
echo " Skipping execution check (no enforcements created)"
fi
echo ""
# Summary
echo "=================================================="
echo "Test Summary"
echo "=================================================="
echo "✅ Webhook received and event created: ${EVENT_ID}"
if [ "$ENFORCEMENT_COUNT" -gt 0 ]; then
echo "✅ Event processing working: ${ENFORCEMENT_COUNT} enforcement(s) created"
echo ""
echo "🎉 SUCCESS: Webhook events are properly triggering rule processing!"
else
if [ -z "$MATCHING_RULES" ]; then
echo " No rules to process (create a rule for '${WEBHOOK_TRIGGER}' to test full flow)"
else
echo "⚠️ Event processing may not be working (check executor logs)"
fi
fi
echo ""

331
scripts/test-websocket.html Normal file
View File

@@ -0,0 +1,331 @@
<!DOCTYPE html>
<!--
Attune WebSocket Test Page
This tool tests WebSocket connectivity to the Attune notifier service.
Usage:
1. Ensure notifier service is running: `make run-notifier`
2. Open this file in a web browser: `open scripts/test-websocket.html`
3. Click "Connect" to establish WebSocket connection
4. Monitor real-time event notifications in the log
Features:
- Live connection status indicator
- Real-time message statistics (total messages, event count, connection time)
- Color-coded message log (info, events, errors)
- Manual connect/disconnect controls
- Subscription to event notifications
Default URL: ws://localhost:8081/ws
Change the URL in the input field before connecting if needed.
-->
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Attune WebSocket Test</title>
<style>
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
max-width: 900px;
margin: 40px auto;
padding: 20px;
background: #f5f5f5;
}
.container {
background: white;
border-radius: 8px;
padding: 30px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
h1 {
color: #333;
margin-top: 0;
}
.status {
padding: 10px 15px;
border-radius: 5px;
margin: 15px 0;
font-weight: bold;
}
.status.disconnected {
background: #fee;
color: #c33;
border-left: 4px solid #c33;
}
.status.connected {
background: #efe;
color: #3a3;
border-left: 4px solid #3a3;
}
.controls {
display: flex;
gap: 10px;
margin: 20px 0;
}
button {
padding: 10px 20px;
border: none;
border-radius: 5px;
cursor: pointer;
font-size: 14px;
transition: background 0.2s;
}
button.primary {
background: #4CAF50;
color: white;
}
button.primary:hover {
background: #45a049;
}
button.danger {
background: #f44336;
color: white;
}
button.danger:hover {
background: #da190b;
}
button:disabled {
background: #ccc;
cursor: not-allowed;
}
.stats {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 15px;
margin: 20px 0;
}
.stat {
background: #f9f9f9;
padding: 15px;
border-radius: 5px;
border-left: 3px solid #4CAF50;
}
.stat-label {
color: #666;
font-size: 12px;
text-transform: uppercase;
}
.stat-value {
font-size: 24px;
font-weight: bold;
color: #333;
margin-top: 5px;
}
.log {
background: #1e1e1e;
color: #d4d4d4;
padding: 15px;
border-radius: 5px;
max-height: 400px;
overflow-y: auto;
font-family: 'Courier New', monospace;
font-size: 13px;
margin-top: 20px;
}
.log-entry {
margin: 5px 0;
padding: 5px;
border-left: 3px solid transparent;
}
.log-entry.info {
border-left-color: #4CAF50;
}
.log-entry.event {
border-left-color: #2196F3;
background: rgba(33, 150, 243, 0.1);
}
.log-entry.error {
border-left-color: #f44336;
background: rgba(244, 67, 54, 0.1);
}
.timestamp {
color: #888;
margin-right: 10px;
}
input {
padding: 8px 12px;
border: 1px solid #ddd;
border-radius: 4px;
font-size: 14px;
flex: 1;
}
</style>
</head>
<body>
<div class="container">
<h1>🔌 Attune WebSocket Tester</h1>
<div id="status" class="status disconnected">
⚫ Disconnected
</div>
<div class="controls">
<input type="text" id="wsUrl" value="ws://localhost:8081/ws" placeholder="WebSocket URL">
<button id="connectBtn" class="primary" onclick="connect()">Connect</button>
<button id="disconnectBtn" class="danger" onclick="disconnect()" disabled>Disconnect</button>
<button onclick="clearLog()">Clear Log</button>
</div>
<div class="stats">
<div class="stat">
<div class="stat-label">Messages Received</div>
<div class="stat-value" id="messageCount">0</div>
</div>
<div class="stat">
<div class="stat-label">Event Notifications</div>
<div class="stat-value" id="eventCount">0</div>
</div>
<div class="stat">
<div class="stat-label">Connection Time</div>
<div class="stat-value" id="connectionTime">--</div>
</div>
</div>
<h3>Message Log</h3>
<div id="log" class="log"></div>
</div>
<script>
let ws = null;
let messageCount = 0;
let eventCount = 0;
let connectionStart = null;
let connectionTimeInterval = null;
// Append one timestamped entry to the on-page log panel and keep it
// scrolled to the newest line. `type` selects the CSS accent class
// ('info', 'event', or 'error').
function log(message, type = 'info') {
    const entry = document.createElement('div');
    entry.className = `log-entry ${type}`;
    const stamp = new Date().toLocaleTimeString();
    entry.innerHTML = `<span class="timestamp">[${stamp}]</span>${message}`;
    const panel = document.getElementById('log');
    panel.appendChild(entry);
    panel.scrollTop = panel.scrollHeight;
}
// Mirror the global message/event counters into their stat widgets.
function updateStats() {
    const show = (id, value) => { document.getElementById(id).textContent = value; };
    show('messageCount', messageCount);
    show('eventCount', eventCount);
}
// Render elapsed connection time as M:SS; no-op while disconnected
// (connectionStart is null then).
function updateConnectionTime() {
    if (!connectionStart) {
        return;
    }
    const totalSeconds = Math.floor((Date.now() - connectionStart) / 1000);
    const mins = Math.floor(totalSeconds / 60);
    const secs = String(totalSeconds % 60).padStart(2, '0');
    document.getElementById('connectionTime').textContent = `${mins}:${secs}`;
}
// Flip the UI between connected and disconnected states: status banner,
// connect/disconnect button enablement, and the once-per-second
// connection-time ticker.
function setStatus(connected) {
    const statusEl = document.getElementById('status');
    const connectBtn = document.getElementById('connectBtn');
    const disconnectBtn = document.getElementById('disconnectBtn');
    if (!connected) {
        statusEl.className = 'status disconnected';
        statusEl.textContent = '⚫ Disconnected';
        connectBtn.disabled = false;
        disconnectBtn.disabled = true;
        document.getElementById('connectionTime').textContent = '--';
        if (connectionTimeInterval) {
            clearInterval(connectionTimeInterval);
            connectionTimeInterval = null;
        }
        return;
    }
    statusEl.className = 'status connected';
    statusEl.textContent = '🟢 Connected';
    connectBtn.disabled = true;
    disconnectBtn.disabled = false;
    connectionStart = Date.now();
    connectionTimeInterval = setInterval(updateConnectionTime, 1000);
}
// Open a WebSocket to the URL in the input field, wire all handlers,
// and subscribe to event notifications once the socket opens.
function connect() {
    const url = document.getElementById('wsUrl').value;
    log(`Connecting to ${url}...`, 'info');
    try {
        ws = new WebSocket(url);
        ws.onopen = () => {
            log('✅ Connected to notifier service', 'info');
            setStatus(true);
            // Subscribe to event notifications
            const subscribeMsg = JSON.stringify({
                type: 'subscribe',
                filter: 'entity_type:event'
            });
            ws.send(subscribeMsg);
            log('📡 Subscribed to entity_type:event', 'info');
        };
        ws.onmessage = (event) => {
            messageCount++;
            updateStats();
            try {
                const message = JSON.parse(event.data);
                if (message.type === 'welcome') {
                    // Handshake message sent by the notifier on connect.
                    log(`👋 Welcome: ${message.message} (Client ID: ${message.client_id})`, 'info');
                } else if (message.notification_type) {
                    // This is a notification
                    eventCount++;
                    updateStats();
                    const data = message.payload?.data || {};
                    log(
                        `🔔 Event #${message.entity_id}: ${message.notification_type} | ` +
                        `Trigger: ${data.trigger_ref || 'N/A'} | ` +
                        `Source: ${data.source_ref || 'N/A'}`,
                        'event'
                    );
                } else {
                    log(`📨 Message: ${JSON.stringify(message)}`, 'info');
                }
            } catch (error) {
                log(`❌ Failed to parse message: ${error.message}`, 'error');
            }
        };
        ws.onerror = (error) => {
            // NOTE(review): browser 'error' events are plain Events without a
            // .message property, so this typically logs 'Unknown error'.
            log(`❌ WebSocket error: ${error.message || 'Unknown error'}`, 'error');
        };
        ws.onclose = () => {
            log('🔌 Connection closed', 'info');
            setStatus(false);
        };
    } catch (error) {
        // new WebSocket() throws synchronously on a malformed URL.
        log(`❌ Failed to connect: ${error.message}`, 'error');
        setStatus(false);
    }
}
// Close the active WebSocket, if any; the onclose handler updates the UI.
function disconnect() {
    if (!ws) {
        return;
    }
    log('Disconnecting...', 'info');
    ws.close();
    ws = null;
}
// Empty the log panel, zero both counters, then record the reset itself.
function clearLog() {
    const panel = document.getElementById('log');
    panel.innerHTML = '';
    messageCount = 0;
    eventCount = 0;
    updateStats();
    log('Log cleared', 'info');
}
// Initialize
log('WebSocket tester ready. Click Connect to start.', 'info');
</script>
</body>
</html>

99
scripts/test-websocket.js Normal file
View File

@@ -0,0 +1,99 @@
#!/usr/bin/env node
/**
* Test script to verify WebSocket event notifications from the Attune notifier service
*
* Usage: node scripts/test-websocket.js
*/
const WebSocket = require('ws');
const WS_URL = process.env.NOTIFIER_URL || 'ws://localhost:8081/ws';
const RECONNECT_DELAY = 3000;
console.log('🔌 Connecting to Attune Notifier Service...');
console.log(` URL: ${WS_URL}\n`);
let ws;
let messageCount = 0;
let eventCount = 0;
// Establish the WebSocket connection and install all handlers.
// On close, schedules an automatic reconnect after RECONNECT_DELAY ms.
function connect() {
    ws = new WebSocket(WS_URL);
    ws.on('open', () => {
        console.log('✅ Connected to notifier service');
        console.log('📡 Subscribing to event notifications...\n');
        // Subscribe to all event notifications
        ws.send(JSON.stringify({
            type: 'subscribe',
            filter: 'entity_type:event'
        }));
    });
    ws.on('message', (data) => {
        messageCount++;
        try {
            const message = JSON.parse(data.toString());
            if (message.type === 'welcome') {
                // Handshake message sent by the notifier on connect.
                console.log('👋 Welcome message received');
                console.log(` Client ID: ${message.client_id}`);
                console.log(` Message: ${message.message}\n`);
            } else if (message.notification_type) {
                // This is a notification
                eventCount++;
                const timestamp = new Date(message.timestamp).toLocaleTimeString();
                console.log(`🔔 [${timestamp}] Event notification #${eventCount}`);
                console.log(` Type: ${message.notification_type}`);
                console.log(` Entity: ${message.entity_type} (ID: ${message.entity_id})`);
                if (message.payload && message.payload.data) {
                    const data = message.payload.data;
                    console.log(` Trigger: ${data.trigger_ref || 'N/A'}`);
                    console.log(` Source: ${data.source_ref || 'N/A'}`);
                }
                console.log('');
            } else {
                console.log('📨 Unknown message format:', message);
            }
        } catch (error) {
            console.error('❌ Failed to parse message:', error.message);
            console.error(' Raw data:', data.toString());
        }
    });
    ws.on('error', (error) => {
        console.error('❌ WebSocket error:', error.message);
    });
    ws.on('close', () => {
        console.log('\n🔌 Connection closed');
        console.log(` Total messages: ${messageCount}`);
        console.log(` Event notifications: ${eventCount}`);
        console.log(`\n⏳ Reconnecting in ${RECONNECT_DELAY}ms...`);
        setTimeout(connect, RECONNECT_DELAY);
    });
}
// Handle graceful shutdown
// process.exit(0) terminates before the 'close' handler's reconnect timer
// can fire, so Ctrl+C does not trigger a reconnect loop.
process.on('SIGINT', () => {
    console.log('\n\n👋 Shutting down...');
    console.log(` Total messages received: ${messageCount}`);
    console.log(` Event notifications: ${eventCount}`);
    if (ws) {
        ws.close();
    }
    process.exit(0);
});
// Start connection
connect();
console.log('⏱️ Waiting for event notifications... (Press Ctrl+C to exit)\n');

107
scripts/test-websocket.py Normal file
View File

@@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""
Simple WebSocket test script for Attune Notifier Service
Usage: python3 scripts/test-websocket.py
"""
import asyncio
import json
import sys
from datetime import datetime
try:
import websockets
except ImportError:
print("❌ Error: websockets library not installed")
print(" Install with: pip3 install websockets")
sys.exit(1)
WS_URL = "ws://localhost:8081/ws"
RECONNECT_DELAY = 3 # seconds
async def test_websocket():
    """Connect to the notifier WebSocket, subscribe to event notifications,
    and print every message received until the connection drops or the user
    interrupts.

    Local counters track total messages vs. event notifications so the
    shutdown path can report both.
    """
    print(f"🔌 Connecting to Attune Notifier Service...")
    print(f" URL: {WS_URL}\n")
    message_count = 0
    event_count = 0
    try:
        async with websockets.connect(WS_URL) as websocket:
            print("✅ Connected to notifier service\n")
            # Subscribe to event notifications
            subscribe_msg = {"type": "subscribe", "filter": "entity_type:event"}
            await websocket.send(json.dumps(subscribe_msg))
            print("📡 Subscribed to entity_type:event\n")
            print("⏱️ Waiting for notifications... (Press Ctrl+C to exit)\n")
            # Listen for messages
            async for message in websocket:
                message_count += 1
                try:
                    data = json.loads(message)
                    if data.get("type") == "welcome":
                        # Handshake message sent by the notifier on connect.
                        timestamp = datetime.now().strftime("%H:%M:%S")
                        print(f"[{timestamp}] 👋 Welcome message received")
                        print(f" Client ID: {data.get('client_id')}")
                        print(f" Message: {data.get('message')}\n")
                    elif "notification_type" in data:
                        # This is a notification
                        event_count += 1
                        timestamp = datetime.now().strftime("%H:%M:%S")
                        print(f"[{timestamp}] 🔔 Event notification #{event_count}")
                        print(f" Type: {data.get('notification_type')}")
                        print(
                            f" Entity: {data.get('entity_type')} (ID: {data.get('entity_id')})"
                        )
                        payload_data = data.get("payload", {}).get("data", {})
                        if payload_data:
                            print(
                                f" Trigger: {payload_data.get('trigger_ref', 'N/A')}"
                            )
                            print(
                                f" Source: {payload_data.get('source_ref', 'N/A')}"
                            )
                        print()
                    else:
                        print(f"📨 Unknown message format: {data}\n")
                except json.JSONDecodeError as e:
                    # Bad payload: report it and keep listening.
                    print(f"❌ Failed to parse message: {e}")
                    print(f" Raw data: {message}\n")
    except websockets.exceptions.WebSocketException as e:
        print(f"\n❌ WebSocket error: {e}")
    except KeyboardInterrupt:
        # Report the session counters on Ctrl+C. (KeyboardInterrupt derives
        # from BaseException, so the generic handler below never masks it.)
        print(f"\n\n👋 Shutting down...")
        print(f" Total messages received: {message_count}")
        print(f" Event notifications: {event_count}")
    except Exception as e:
        print(f"\n❌ Unexpected error: {e}")
        import traceback

        traceback.print_exc()
def main():
    """CLI entry point: drive the async test loop; exit 0 on Ctrl+C."""
    try:
        asyncio.run(test_websocket())
    except KeyboardInterrupt:
        print("\n\nExiting...")
        sys.exit(0)
if __name__ == "__main__":
main()

108
scripts/test_timer_echo.sh Executable file
View File

@@ -0,0 +1,108 @@
#!/bin/bash
# Test script for timer-driven echo action
# This script starts the sensor, executor, and worker services to test the happy path
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Attune Timer Echo Test${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
# Set environment variables
# NOTE(review): hard-coded local dev credentials/connection string.
export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/attune"
export RUST_LOG="info,attune_sensor=debug,attune_executor=debug,attune_worker=debug"
# Check if services are built (build each missing debug binary on demand)
if [ ! -f "target/debug/attune-sensor" ]; then
echo -e "${YELLOW}Building sensor service...${NC}"
cargo build --bin attune-sensor
fi
if [ ! -f "target/debug/attune-executor" ]; then
echo -e "${YELLOW}Building executor service...${NC}"
cargo build --bin attune-executor
fi
if [ ! -f "target/debug/attune-worker" ]; then
echo -e "${YELLOW}Building worker service...${NC}"
cargo build --bin attune-worker
fi
# Create log directory
mkdir -p logs
# Verify database has the rule and action parameters
echo -e "${BLUE}Checking database setup...${NC}"
# '|| echo ""' keeps set -e from aborting when psql cannot connect.
RULE_CHECK=$(PGPASSWORD=postgres psql -h localhost -U postgres -d attune -t -c "SELECT action_params::text FROM attune.rule WHERE ref = 'core.timer_echo_10s';" 2>/dev/null || echo "")
if [ -z "$RULE_CHECK" ]; then
echo -e "${RED}ERROR: Rule 'core.timer_echo_10s' not found!${NC}"
echo -e "${YELLOW}Please ensure the database is seeded properly.${NC}"
exit 1
fi
echo -e "${GREEN}✓ Rule found with action_params: $RULE_CHECK${NC}"
echo ""
# Function to cleanup on exit
# Kills the three background service PIDs started below; errors are
# suppressed because the processes may already have exited.
cleanup() {
echo -e "\n${YELLOW}Stopping services...${NC}"
kill $SENSOR_PID $EXECUTOR_PID $WORKER_PID 2>/dev/null || true
wait 2>/dev/null || true
echo -e "${GREEN}Services stopped${NC}"
}
trap cleanup EXIT INT TERM
# Start services
# Each service runs in the background with stdout+stderr captured to its
# own file under logs/; the 2s sleeps give each a moment to initialize.
echo -e "${BLUE}Starting services...${NC}"
echo ""
echo -e "${GREEN}Starting Sensor Service...${NC}"
./target/debug/attune-sensor > logs/sensor.log 2>&1 &
SENSOR_PID=$!
sleep 2
echo -e "${GREEN}Starting Executor Service...${NC}"
./target/debug/attune-executor > logs/executor.log 2>&1 &
EXECUTOR_PID=$!
sleep 2
echo -e "${GREEN}Starting Worker Service...${NC}"
./target/debug/attune-worker > logs/worker.log 2>&1 &
WORKER_PID=$!
sleep 2
echo ""
echo -e "${GREEN}✓ All services started${NC}"
echo -e "${BLUE} Sensor PID: $SENSOR_PID${NC}"
echo -e "${BLUE} Executor PID: $EXECUTOR_PID${NC}"
echo -e "${BLUE} Worker PID: $WORKER_PID${NC}"
echo ""
echo -e "${YELLOW}Monitoring logs for 'hello, world' message...${NC}"
echo -e "${YELLOW}Press Ctrl+C to stop${NC}"
echo ""
echo -e "${BLUE}========================================${NC}"
echo ""
# Monitor logs for the expected output.
# Use `IFS= read -r` so log lines keep their leading whitespace and
# backslash sequences are not interpreted (a bare `read line` mangles both).
tail -f logs/sensor.log logs/executor.log logs/worker.log | while IFS= read -r line; do
  # Highlight "hello, world" in the output
  if echo "$line" | grep -qi "hello.*world"; then
    echo -e "${GREEN}>>> $line${NC}"
  elif echo "$line" | grep -qi "error\|failed"; then
    echo -e "${RED}$line${NC}"
  elif echo "$line" | grep -qi "event.*created\|enforcement.*created\|execution.*created"; then
    echo -e "${YELLOW}$line${NC}"
  else
    echo "$line"
  fi
done

100
scripts/verify-schema-cleanup.sh Executable file
View File

@@ -0,0 +1,100 @@
#!/bin/bash
set -e
# Verification script to demonstrate automatic schema cleanup in tests
# This script runs a test and verifies the schema is cleaned up automatically
#
# Method: count 'test_%' schemas, run one fast test, count again; equal
# counts prove the TestContext Drop cleanup ran. Exits 0 on success/decrease,
# 1 when new schemas are left behind.
echo "============================================="
echo "Schema Cleanup Verification Script"
echo "============================================="
echo ""
# Connection string; override via the DATABASE_URL environment variable.
DATABASE_URL="${DATABASE_URL:-postgresql://postgres:postgres@localhost:5432/attune_test}"
# Check if psql is available
if ! command -v psql &> /dev/null; then
echo "ERROR: psql command not found. Please install PostgreSQL client."
exit 1
fi
# Check if database is accessible
if ! psql "$DATABASE_URL" -c "SELECT 1" > /dev/null 2>&1; then
echo "ERROR: Cannot connect to database: $DATABASE_URL"
exit 1
fi
echo "✓ Database connection verified"
echo ""
# Count schemas before test
# '-t' gives tuples-only output; 'xargs' trims surrounding whitespace.
BEFORE_COUNT=$(psql "$DATABASE_URL" -t -c "SELECT COUNT(*) FROM pg_namespace WHERE nspname LIKE 'test_%';" 2>/dev/null | xargs)
echo "Test schemas before test: $BEFORE_COUNT"
# Get list of schemas before
SCHEMAS_BEFORE=$(psql "$DATABASE_URL" -t -c "SELECT nspname FROM pg_namespace WHERE nspname LIKE 'test_%' ORDER BY nspname;" 2>/dev/null | xargs)
echo ""
echo "Running a single test to verify cleanup..."
echo ""
# Run a single test (health check is fast and simple)
# Work from the repo root so 'cargo test' resolves the workspace.
cd "$(dirname "$0")/.."
# '|| true' keeps set -e from aborting if grep matches nothing or the test
# command itself fails; the schema-count comparison below is the real check.
cargo test --package attune-api --test health_and_auth_tests test_health_check -- --test-threads=1 2>&1 | grep -E "(running|test result)" || true
echo ""
echo "Test completed. Checking cleanup..."
echo ""
# Give a moment for cleanup to complete
sleep 2
# Count schemas after test
AFTER_COUNT=$(psql "$DATABASE_URL" -t -c "SELECT COUNT(*) FROM pg_namespace WHERE nspname LIKE 'test_%';" 2>/dev/null | xargs)
echo "Test schemas after test: $AFTER_COUNT"
# Get list of schemas after
SCHEMAS_AFTER=$(psql "$DATABASE_URL" -t -c "SELECT nspname FROM pg_namespace WHERE nspname LIKE 'test_%' ORDER BY nspname;" 2>/dev/null | xargs)
echo ""
echo "============================================="
echo "Verification Results"
echo "============================================="
if [ "$BEFORE_COUNT" -eq "$AFTER_COUNT" ]; then
echo "✓ SUCCESS: Schema count unchanged ($BEFORE_COUNT$AFTER_COUNT)"
echo "✓ Test schemas were automatically cleaned up via Drop trait"
echo ""
echo "This demonstrates that:"
echo "  1. Each test creates a unique schema (test_<uuid>)"
echo "  2. Schema is automatically dropped when TestContext goes out of scope"
echo "  3. No manual cleanup needed in test code"
echo "  4. No schemas accumulate during normal test execution"
echo ""
exit 0
else
echo "⚠ WARNING: Schema count changed ($BEFORE_COUNT$AFTER_COUNT)"
echo ""
if [ "$AFTER_COUNT" -gt "$BEFORE_COUNT" ]; then
echo "New schemas detected (cleanup may have failed):"
# Show new schemas
for schema in $SCHEMAS_AFTER; do
# Quoted RHS of =~ is a literal substring match (not a regex) — the
# spaces around both sides make it a whole-word containment test.
if [[ ! " $SCHEMAS_BEFORE " =~ " $schema " ]]; then
echo "  - $schema (NEW)"
fi
done
echo ""
echo "This could indicate:"
echo "  1. Test was interrupted (Ctrl+C, crash, panic)"
echo "  2. Drop trait not executing properly"
echo "  3. Async cleanup not completing"
echo ""
echo "Run cleanup script to remove orphaned schemas:"
echo "  ./scripts/cleanup-test-schemas.sh --force"
exit 1
else
# Count decreased: previously-orphaned schemas were removed.
echo "Schemas were cleaned up (count decreased)"
echo "This is actually good - leftover schemas were removed!"
exit 0
fi
fi

220
scripts/verify_migrations.sh Executable file
View File

@@ -0,0 +1,220 @@
#!/bin/bash
# Migration Verification Script
# Tests the new consolidated migrations on a fresh database
set -e
echo "=========================================="
echo "Attune Migration Verification Script"
echo "=========================================="
echo ""
# Configuration
# Connection settings are overridable via environment variables; the test
# database name is fixed and is dropped/recreated on every run.
TEST_DB="attune_migration_test"
POSTGRES_USER="${POSTGRES_USER:-postgres}"
POSTGRES_HOST="${POSTGRES_HOST:-localhost}"
POSTGRES_PORT="${POSTGRES_PORT:-5432}"
# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Helper functions
# Print a green success-status line for $1.
print_success() {
  printf '%b%b %b\n' "$GREEN" "$NC" "$1"
}
# Print a red error-status line for $1.
print_error() {
  printf '%b%b %b\n' "$RED" "$NC" "$1"
}
# Print a yellow informational line for $1.
print_info() {
  printf '%b%b %b\n' "$YELLOW" "$NC" "$1"
}
# Step 1: Drop test database if exists
# NOTE(review): stderr is hidden here; under 'set -e' a connection failure
# still aborts the script, but without the psql error message — confirm
# that is intended.
echo "Step 1: Cleaning up existing test database..."
psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d postgres -c "DROP DATABASE IF EXISTS $TEST_DB;" 2>/dev/null
print_success "Cleaned up existing test database"
# Step 2: Create fresh test database
echo ""
echo "Step 2: Creating fresh test database..."
psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d postgres -c "CREATE DATABASE $TEST_DB;" 2>/dev/null
print_success "Created test database: $TEST_DB"
# Step 3: Run migrations
echo ""
echo "Step 3: Running consolidated migrations..."
# sqlx-cli reads DATABASE_URL; no password component — presumably trust/peer
# auth or PGPASSWORD is expected in the environment. TODO confirm.
export DATABASE_URL="postgresql://$POSTGRES_USER@$POSTGRES_HOST:$POSTGRES_PORT/$TEST_DB"
if command -v sqlx &> /dev/null; then
sqlx migrate run --source migrations
print_success "Migrations applied successfully via sqlx"
else
# Fallback to psql if sqlx not available
# Applies only files matching migrations/202501*.sql, in glob (name) order.
print_info "sqlx-cli not found, using psql..."
for migration in migrations/202501*.sql; do
if [ -f "$migration" ]; then
echo " Applying $(basename $migration)..."
psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -f "$migration" > /dev/null
print_success " Applied $(basename $migration)"
fi
done
fi
# Step 4: Verify schema
# Table count in the 'attune' schema must match the expected 18 exactly;
# this is a hard failure (exit 1).
echo ""
echo "Step 4: Verifying schema..."
TABLE_COUNT=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'attune';")
TABLE_COUNT=$(echo $TABLE_COUNT | xargs) # Trim whitespace
if [ "$TABLE_COUNT" -eq "18" ]; then
print_success "Correct number of tables: $TABLE_COUNT"
else
print_error "Expected 18 tables, found $TABLE_COUNT"
exit 1
fi
# Step 5: Verify all expected tables
# Checks each named table individually and collects any that are missing.
echo ""
echo "Step 5: Verifying all expected tables exist..."
EXPECTED_TABLES=(
"pack" "runtime" "worker" "identity" "permission_set" "permission_assignment" "policy" "key"
"trigger" "sensor" "event" "enforcement"
"action" "rule" "execution" "inquiry"
"notification" "artifact"
)
MISSING_TABLES=()
for table in "${EXPECTED_TABLES[@]}"; do
EXISTS=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema = 'attune' AND table_name = '$table');")
EXISTS=$(echo $EXISTS | xargs)
if [ "$EXISTS" = "t" ]; then
# NOTE(review): success and failure branches print the bare table name
# identically — a pass/fail status marker appears to be missing here.
echo "$table"
else
MISSING_TABLES+=("$table")
echo "$table"
fi
done
if [ ${#MISSING_TABLES[@]} -eq 0 ]; then
print_success "All 18 tables exist"
else
print_error "Missing tables: ${MISSING_TABLES[*]}"
exit 1
fi
# Step 6: Verify enum types
# typtype = 'e' selects enum types only. Mismatch is reported but (unlike
# steps 4/5) does not exit.
echo ""
echo "Step 6: Verifying enum types..."
ENUM_COUNT=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT COUNT(*) FROM pg_type WHERE typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'attune') AND typtype = 'e';")
ENUM_COUNT=$(echo $ENUM_COUNT | xargs)
if [ "$ENUM_COUNT" -eq "12" ]; then
print_success "Correct number of enum types: $ENUM_COUNT"
else
print_error "Expected 12 enum types, found $ENUM_COUNT"
fi
# Step 7: Verifying indexes (threshold check, non-fatal)
echo ""
echo "Step 7: Verifying indexes..."
INDEX_COUNT=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT COUNT(*) FROM pg_indexes WHERE schemaname = 'attune';")
INDEX_COUNT=$(echo $INDEX_COUNT | xargs)
if [ "$INDEX_COUNT" -gt "100" ]; then
print_success "Found $INDEX_COUNT indexes (expected >100)"
else
print_error "Expected >100 indexes, found $INDEX_COUNT"
fi
# Step 8: Verify foreign key constraints (threshold check, non-fatal)
echo ""
echo "Step 8: Verifying foreign key constraints..."
FK_COUNT=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT COUNT(*) FROM information_schema.table_constraints WHERE constraint_schema = 'attune' AND constraint_type = 'FOREIGN KEY';")
FK_COUNT=$(echo $FK_COUNT | xargs)
if [ "$FK_COUNT" -gt "20" ]; then
print_success "Found $FK_COUNT foreign key constraints"
else
print_error "Expected >20 foreign keys, found $FK_COUNT"
fi
# Step 9: Verify triggers (threshold check; low count is only informational)
echo ""
echo "Step 9: Verifying triggers..."
TRIGGER_COUNT=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT COUNT(*) FROM information_schema.triggers WHERE trigger_schema = 'attune';")
TRIGGER_COUNT=$(echo $TRIGGER_COUNT | xargs)
if [ "$TRIGGER_COUNT" -gt "15" ]; then
print_success "Found $TRIGGER_COUNT triggers (expected >15)"
else
print_info "Found $TRIGGER_COUNT triggers"
fi
# Step 10: Verify functions (at least 3 expected, non-fatal)
echo ""
echo "Step 10: Verifying functions..."
FUNCTION_COUNT=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT COUNT(*) FROM pg_proc WHERE pronamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'attune');")
FUNCTION_COUNT=$(echo $FUNCTION_COUNT | xargs)
if [ "$FUNCTION_COUNT" -ge "3" ]; then
print_success "Found $FUNCTION_COUNT functions"
else
print_error "Expected at least 3 functions, found $FUNCTION_COUNT"
fi
# Step 11: Test basic inserts
echo ""
echo "Step 11: Testing basic data operations..."
# Insert a pack
# FIX: run psql directly as the 'if' condition. The previous pattern
# ('psql ...' followed by 'if [ $? -eq 0 ]') could never reach its else
# branch under 'set -e' — a failing psql aborted the script before $? was
# tested — so the print_error/exit 1 paths were dead code.
if psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -c "INSERT INTO attune.pack (ref, label, version) VALUES ('test', 'Test Pack', '1.0.0');" > /dev/null 2>&1; then
  print_success "Can insert pack"
else
  print_error "Failed to insert pack"
  exit 1
fi
# Insert an identity
if psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -c "INSERT INTO attune.identity (login, display_name) VALUES ('testuser', 'Test User');" > /dev/null 2>&1; then
  print_success "Can insert identity"
else
  print_error "Failed to insert identity"
  exit 1
fi
# Verify timestamps are auto-populated
# The row inserted above must have non-NULL created/updated columns,
# proving the timestamp triggers fired.
CREATED_COUNT=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -d "$TEST_DB" -t -c "SELECT COUNT(*) FROM attune.pack WHERE created IS NOT NULL AND updated IS NOT NULL;")
CREATED_COUNT=$(echo "$CREATED_COUNT" | xargs)
if [ "$CREATED_COUNT" -eq "1" ]; then
  print_success "Timestamps auto-populated correctly"
else
  print_error "Timestamp triggers not working"
fi
# Summary
# Reaching this point means no fatal check exited earlier; print the
# collected counts and leave the test database in place for inspection.
echo ""
echo "=========================================="
echo "Verification Summary"
echo "=========================================="
echo "Database: $TEST_DB"
echo "Tables: $TABLE_COUNT"
echo "Enums: $ENUM_COUNT"
echo "Indexes: $INDEX_COUNT"
echo "Foreign Keys: $FK_COUNT"
echo "Triggers: $TRIGGER_COUNT"
echo "Functions: $FUNCTION_COUNT"
echo ""
print_success "All verification checks passed!"
echo ""
print_info "Test database '$TEST_DB' is ready for testing"
print_info "To clean up: dropdb $TEST_DB"
echo ""