Compare commits
18 Commits
af5175b96a
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 3a13bf754a | |||
| f4ef823f43 | |||
| ab7d31de2f | |||
| 938c271ff5 | |||
| da8055cb79 | |||
| 03a239d22b | |||
| ba83958337 | |||
| c11bc1a2bf | |||
| eb82755137 | |||
| 058f392616 | |||
| 0264a66b5a | |||
| 542e72a454 | |||
| a118563366 | |||
| a057ad5db5 | |||
| 8e273ec683 | |||
| 16f1c2f079 | |||
| 62307e8c65 | |||
| 2ebb03b868 |
0
.codex_write_test
Normal file
0
.codex_write_test
Normal file
@@ -19,7 +19,7 @@ env:
|
|||||||
jobs:
|
jobs:
|
||||||
rust-fmt:
|
rust-fmt:
|
||||||
name: Rustfmt
|
name: Rustfmt
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -45,7 +45,7 @@ jobs:
|
|||||||
|
|
||||||
rust-clippy:
|
rust-clippy:
|
||||||
name: Clippy
|
name: Clippy
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -91,7 +91,7 @@ jobs:
|
|||||||
|
|
||||||
rust-test:
|
rust-test:
|
||||||
name: Tests
|
name: Tests
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -135,7 +135,7 @@ jobs:
|
|||||||
|
|
||||||
rust-audit:
|
rust-audit:
|
||||||
name: Cargo Audit & Deny
|
name: Cargo Audit & Deny
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -188,7 +188,7 @@ jobs:
|
|||||||
|
|
||||||
web-blocking:
|
web-blocking:
|
||||||
name: Web Blocking Checks
|
name: Web Blocking Checks
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: web
|
working-directory: web
|
||||||
@@ -217,7 +217,7 @@ jobs:
|
|||||||
|
|
||||||
security-blocking:
|
security-blocking:
|
||||||
name: Security Blocking Checks
|
name: Security Blocking Checks
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -250,7 +250,7 @@ jobs:
|
|||||||
|
|
||||||
web-advisory:
|
web-advisory:
|
||||||
name: Web Advisory Checks
|
name: Web Advisory Checks
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
@@ -279,7 +279,7 @@ jobs:
|
|||||||
|
|
||||||
security-advisory:
|
security-advisory:
|
||||||
name: Security Advisory Checks
|
name: Security Advisory Checks
|
||||||
runs-on: ubuntu-latest
|
runs-on: build-amd64
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
3
.gitignore
vendored
3
.gitignore
vendored
@@ -11,6 +11,7 @@ target/
|
|||||||
# Configuration files (keep *.example.yaml)
|
# Configuration files (keep *.example.yaml)
|
||||||
config.yaml
|
config.yaml
|
||||||
config.*.yaml
|
config.*.yaml
|
||||||
|
!docker/distributable/config.docker.yaml
|
||||||
!config.example.yaml
|
!config.example.yaml
|
||||||
!config.development.yaml
|
!config.development.yaml
|
||||||
!config.test.yaml
|
!config.test.yaml
|
||||||
@@ -35,6 +36,7 @@ logs/
|
|||||||
# Build artifacts
|
# Build artifacts
|
||||||
dist/
|
dist/
|
||||||
build/
|
build/
|
||||||
|
artifacts/
|
||||||
|
|
||||||
# Testing
|
# Testing
|
||||||
coverage/
|
coverage/
|
||||||
@@ -78,4 +80,5 @@ docker-compose.override.yml
|
|||||||
*.pid
|
*.pid
|
||||||
|
|
||||||
packs.examples/
|
packs.examples/
|
||||||
|
packs.external/
|
||||||
codex/
|
codex/
|
||||||
|
|||||||
96
Cargo.lock
generated
96
Cargo.lock
generated
@@ -2150,21 +2150,6 @@ version = "0.2.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
|
checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "foreign-types"
|
|
||||||
version = "0.3.2"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
|
|
||||||
dependencies = [
|
|
||||||
"foreign-types-shared",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "foreign-types-shared"
|
|
||||||
version = "0.1.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "form_urlencoded"
|
name = "form_urlencoded"
|
||||||
version = "1.2.2"
|
version = "1.2.2"
|
||||||
@@ -3065,15 +3050,17 @@ dependencies = [
|
|||||||
"futures-util",
|
"futures-util",
|
||||||
"lber",
|
"lber",
|
||||||
"log",
|
"log",
|
||||||
"native-tls",
|
|
||||||
"nom 7.1.3",
|
"nom 7.1.3",
|
||||||
"percent-encoding",
|
"percent-encoding",
|
||||||
|
"rustls",
|
||||||
|
"rustls-native-certs",
|
||||||
"thiserror 2.0.18",
|
"thiserror 2.0.18",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-native-tls",
|
"tokio-rustls",
|
||||||
"tokio-stream",
|
"tokio-stream",
|
||||||
"tokio-util",
|
"tokio-util",
|
||||||
"url",
|
"url",
|
||||||
|
"x509-parser",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -3314,23 +3301,6 @@ dependencies = [
|
|||||||
"version_check",
|
"version_check",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "native-tls"
|
|
||||||
version = "0.2.18"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "465500e14ea162429d264d44189adc38b199b62b1c21eea9f69e4b73cb03bbf2"
|
|
||||||
dependencies = [
|
|
||||||
"libc",
|
|
||||||
"log",
|
|
||||||
"openssl",
|
|
||||||
"openssl-probe",
|
|
||||||
"openssl-sys",
|
|
||||||
"schannel",
|
|
||||||
"security-framework",
|
|
||||||
"security-framework-sys",
|
|
||||||
"tempfile",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "nom"
|
name = "nom"
|
||||||
version = "7.1.3"
|
version = "7.1.3"
|
||||||
@@ -3576,50 +3546,12 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "openssl"
|
|
||||||
version = "0.10.76"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "951c002c75e16ea2c65b8c7e4d3d51d5530d8dfa7d060b4776828c88cfb18ecf"
|
|
||||||
dependencies = [
|
|
||||||
"bitflags",
|
|
||||||
"cfg-if",
|
|
||||||
"foreign-types",
|
|
||||||
"libc",
|
|
||||||
"once_cell",
|
|
||||||
"openssl-macros",
|
|
||||||
"openssl-sys",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "openssl-macros"
|
|
||||||
version = "0.1.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
|
|
||||||
dependencies = [
|
|
||||||
"proc-macro2",
|
|
||||||
"quote",
|
|
||||||
"syn",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "openssl-probe"
|
name = "openssl-probe"
|
||||||
version = "0.2.1"
|
version = "0.2.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
|
checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "openssl-sys"
|
|
||||||
version = "0.9.112"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb"
|
|
||||||
dependencies = [
|
|
||||||
"cc",
|
|
||||||
"libc",
|
|
||||||
"pkg-config",
|
|
||||||
"vcpkg",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "option-ext"
|
name = "option-ext"
|
||||||
version = "0.2.0"
|
version = "0.2.0"
|
||||||
@@ -4642,6 +4574,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4"
|
checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aws-lc-rs",
|
"aws-lc-rs",
|
||||||
|
"log",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"ring",
|
"ring",
|
||||||
"rustls-pki-types",
|
"rustls-pki-types",
|
||||||
@@ -5698,16 +5631,6 @@ dependencies = [
|
|||||||
"syn",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "tokio-native-tls"
|
|
||||||
version = "0.3.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2"
|
|
||||||
dependencies = [
|
|
||||||
"native-tls",
|
|
||||||
"tokio",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tokio-rustls"
|
name = "tokio-rustls"
|
||||||
version = "0.26.4"
|
version = "0.26.4"
|
||||||
@@ -5749,9 +5672,11 @@ checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"log",
|
"log",
|
||||||
"native-tls",
|
"rustls",
|
||||||
|
"rustls-native-certs",
|
||||||
|
"rustls-pki-types",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-native-tls",
|
"tokio-rustls",
|
||||||
"tungstenite",
|
"tungstenite",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -5938,8 +5863,9 @@ dependencies = [
|
|||||||
"http",
|
"http",
|
||||||
"httparse",
|
"httparse",
|
||||||
"log",
|
"log",
|
||||||
"native-tls",
|
|
||||||
"rand 0.9.2",
|
"rand 0.9.2",
|
||||||
|
"rustls",
|
||||||
|
"rustls-pki-types",
|
||||||
"sha1",
|
"sha1",
|
||||||
"thiserror 2.0.18",
|
"thiserror 2.0.18",
|
||||||
"utf-8",
|
"utf-8",
|
||||||
|
|||||||
@@ -101,7 +101,7 @@ tar = "0.4"
|
|||||||
flate2 = "1.1"
|
flate2 = "1.1"
|
||||||
|
|
||||||
# WebSocket client
|
# WebSocket client
|
||||||
tokio-tungstenite = { version = "0.28", features = ["native-tls"] }
|
tokio-tungstenite = { version = "0.28", features = ["rustls-tls-native-roots"] }
|
||||||
|
|
||||||
# URL parsing
|
# URL parsing
|
||||||
url = "2.5"
|
url = "2.5"
|
||||||
|
|||||||
20
Makefile
20
Makefile
@@ -238,22 +238,24 @@ docker-build-web:
|
|||||||
docker compose build web
|
docker compose build web
|
||||||
|
|
||||||
# Agent binary (statically-linked for injection into any container)
|
# Agent binary (statically-linked for injection into any container)
|
||||||
|
AGENT_RUST_TARGET ?= x86_64-unknown-linux-musl
|
||||||
|
|
||||||
build-agent:
|
build-agent:
|
||||||
@echo "Installing musl target (if not already installed)..."
|
@echo "Installing musl target (if not already installed)..."
|
||||||
rustup target add x86_64-unknown-linux-musl 2>/dev/null || true
|
rustup target add $(AGENT_RUST_TARGET) 2>/dev/null || true
|
||||||
@echo "Building statically-linked worker and sensor agent binaries..."
|
@echo "Building statically-linked worker and sensor agent binaries..."
|
||||||
SQLX_OFFLINE=true cargo build --release --target x86_64-unknown-linux-musl --bin attune-agent --bin attune-sensor-agent
|
SQLX_OFFLINE=true cargo build --release --target $(AGENT_RUST_TARGET) --bin attune-agent --bin attune-sensor-agent
|
||||||
strip target/x86_64-unknown-linux-musl/release/attune-agent
|
strip target/$(AGENT_RUST_TARGET)/release/attune-agent
|
||||||
strip target/x86_64-unknown-linux-musl/release/attune-sensor-agent
|
strip target/$(AGENT_RUST_TARGET)/release/attune-sensor-agent
|
||||||
@echo "✅ Agent binaries built:"
|
@echo "✅ Agent binaries built:"
|
||||||
@echo " - target/x86_64-unknown-linux-musl/release/attune-agent"
|
@echo " - target/$(AGENT_RUST_TARGET)/release/attune-agent"
|
||||||
@echo " - target/x86_64-unknown-linux-musl/release/attune-sensor-agent"
|
@echo " - target/$(AGENT_RUST_TARGET)/release/attune-sensor-agent"
|
||||||
@ls -lh target/x86_64-unknown-linux-musl/release/attune-agent
|
@ls -lh target/$(AGENT_RUST_TARGET)/release/attune-agent
|
||||||
@ls -lh target/x86_64-unknown-linux-musl/release/attune-sensor-agent
|
@ls -lh target/$(AGENT_RUST_TARGET)/release/attune-sensor-agent
|
||||||
|
|
||||||
docker-build-agent:
|
docker-build-agent:
|
||||||
@echo "Building agent Docker image (statically-linked binary)..."
|
@echo "Building agent Docker image (statically-linked binary)..."
|
||||||
DOCKER_BUILDKIT=1 docker buildx build --target agent-init -f docker/Dockerfile.agent -t attune-agent:latest .
|
DOCKER_BUILDKIT=1 docker buildx build --build-arg RUST_TARGET=$(AGENT_RUST_TARGET) --target agent-init -f docker/Dockerfile.agent -t attune-agent:latest .
|
||||||
@echo "✅ Agent image built: attune-agent:latest"
|
@echo "✅ Agent image built: attune-agent:latest"
|
||||||
|
|
||||||
run-agent:
|
run-agent:
|
||||||
|
|||||||
@@ -70,7 +70,7 @@ jsonschema = { workspace = true }
|
|||||||
# HTTP client
|
# HTTP client
|
||||||
reqwest = { workspace = true }
|
reqwest = { workspace = true }
|
||||||
openidconnect = "4.0"
|
openidconnect = "4.0"
|
||||||
ldap3 = "0.12"
|
ldap3 = { version = "0.12", default-features = false, features = ["sync", "tls-rustls-ring"] }
|
||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
|
||||||
# Archive/compression
|
# Archive/compression
|
||||||
|
|||||||
@@ -3,7 +3,10 @@
|
|||||||
use attune_common::{
|
use attune_common::{
|
||||||
config::LdapConfig,
|
config::LdapConfig,
|
||||||
repositories::{
|
repositories::{
|
||||||
identity::{CreateIdentityInput, IdentityRepository, UpdateIdentityInput},
|
identity::{
|
||||||
|
CreateIdentityInput, IdentityRepository, IdentityRoleAssignmentRepository,
|
||||||
|
UpdateIdentityInput,
|
||||||
|
},
|
||||||
Create, Update,
|
Create, Update,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@@ -63,6 +66,11 @@ pub async fn authenticate(
|
|||||||
|
|
||||||
// Upsert identity in DB and issue JWT tokens
|
// Upsert identity in DB and issue JWT tokens
|
||||||
let identity = upsert_identity(state, &claims).await?;
|
let identity = upsert_identity(state, &claims).await?;
|
||||||
|
if identity.frozen {
|
||||||
|
return Err(ApiError::Forbidden(
|
||||||
|
"Identity is frozen and cannot authenticate".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
|
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
|
||||||
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
|
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
|
||||||
|
|
||||||
@@ -351,10 +359,13 @@ async fn upsert_identity(
|
|||||||
display_name,
|
display_name,
|
||||||
password_hash: None,
|
password_hash: None,
|
||||||
attributes: Some(attributes),
|
attributes: Some(attributes),
|
||||||
|
frozen: None,
|
||||||
};
|
};
|
||||||
IdentityRepository::update(&state.db, identity.id, updated)
|
let identity = IdentityRepository::update(&state.db, identity.id, updated)
|
||||||
.await
|
.await
|
||||||
.map_err(Into::into)
|
.map_err(ApiError::from)?;
|
||||||
|
sync_roles(&state.db, identity.id, "ldap", &claims.groups).await?;
|
||||||
|
Ok(identity)
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
// Avoid login collisions
|
// Avoid login collisions
|
||||||
@@ -363,7 +374,7 @@ async fn upsert_identity(
|
|||||||
None => desired_login,
|
None => desired_login,
|
||||||
};
|
};
|
||||||
|
|
||||||
IdentityRepository::create(
|
let identity = IdentityRepository::create(
|
||||||
&state.db,
|
&state.db,
|
||||||
CreateIdentityInput {
|
CreateIdentityInput {
|
||||||
login,
|
login,
|
||||||
@@ -372,10 +383,23 @@ async fn upsert_identity(
|
|||||||
attributes,
|
attributes,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
.await
|
||||||
|
.map_err(ApiError::from)?;
|
||||||
|
sync_roles(&state.db, identity.id, "ldap", &claims.groups).await?;
|
||||||
|
Ok(identity)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sync_roles(
|
||||||
|
db: &sqlx::PgPool,
|
||||||
|
identity_id: i64,
|
||||||
|
source: &str,
|
||||||
|
roles: &[String],
|
||||||
|
) -> Result<(), ApiError> {
|
||||||
|
IdentityRoleAssignmentRepository::replace_managed_roles(db, identity_id, source, roles)
|
||||||
.await
|
.await
|
||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Derive the login name from LDAP claims.
|
/// Derive the login name from LDAP claims.
|
||||||
|
|||||||
@@ -3,7 +3,10 @@
|
|||||||
use attune_common::{
|
use attune_common::{
|
||||||
config::OidcConfig,
|
config::OidcConfig,
|
||||||
repositories::{
|
repositories::{
|
||||||
identity::{CreateIdentityInput, IdentityRepository, UpdateIdentityInput},
|
identity::{
|
||||||
|
CreateIdentityInput, IdentityRepository, IdentityRoleAssignmentRepository,
|
||||||
|
UpdateIdentityInput,
|
||||||
|
},
|
||||||
Create, Update,
|
Create, Update,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@@ -282,6 +285,11 @@ pub async fn handle_callback(
|
|||||||
}
|
}
|
||||||
|
|
||||||
let identity = upsert_identity(state, &oidc_claims).await?;
|
let identity = upsert_identity(state, &oidc_claims).await?;
|
||||||
|
if identity.frozen {
|
||||||
|
return Err(ApiError::Forbidden(
|
||||||
|
"Identity is frozen and cannot authenticate".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
|
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
|
||||||
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
|
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
|
||||||
|
|
||||||
@@ -511,10 +519,13 @@ async fn upsert_identity(
|
|||||||
display_name,
|
display_name,
|
||||||
password_hash: None,
|
password_hash: None,
|
||||||
attributes: Some(attributes.clone()),
|
attributes: Some(attributes.clone()),
|
||||||
|
frozen: None,
|
||||||
};
|
};
|
||||||
IdentityRepository::update(&state.db, identity.id, updated)
|
let identity = IdentityRepository::update(&state.db, identity.id, updated)
|
||||||
.await
|
.await
|
||||||
.map_err(Into::into)
|
.map_err(ApiError::from)?;
|
||||||
|
sync_roles(&state.db, identity.id, "oidc", &oidc_claims.groups).await?;
|
||||||
|
Ok(identity)
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
let login = match IdentityRepository::find_by_login(&state.db, &desired_login).await? {
|
let login = match IdentityRepository::find_by_login(&state.db, &desired_login).await? {
|
||||||
@@ -522,7 +533,7 @@ async fn upsert_identity(
|
|||||||
None => desired_login,
|
None => desired_login,
|
||||||
};
|
};
|
||||||
|
|
||||||
IdentityRepository::create(
|
let identity = IdentityRepository::create(
|
||||||
&state.db,
|
&state.db,
|
||||||
CreateIdentityInput {
|
CreateIdentityInput {
|
||||||
login,
|
login,
|
||||||
@@ -531,10 +542,23 @@ async fn upsert_identity(
|
|||||||
attributes,
|
attributes,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
.await
|
||||||
|
.map_err(ApiError::from)?;
|
||||||
|
sync_roles(&state.db, identity.id, "oidc", &oidc_claims.groups).await?;
|
||||||
|
Ok(identity)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sync_roles(
|
||||||
|
db: &sqlx::PgPool,
|
||||||
|
identity_id: i64,
|
||||||
|
source: &str,
|
||||||
|
roles: &[String],
|
||||||
|
) -> Result<(), ApiError> {
|
||||||
|
IdentityRoleAssignmentRepository::replace_managed_roles(db, identity_id, source, roles)
|
||||||
.await
|
.await
|
||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn derive_login(oidc_claims: &OidcIdentityClaims) -> String {
|
fn derive_login(oidc_claims: &OidcIdentityClaims) -> String {
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ use crate::{
|
|||||||
use attune_common::{
|
use attune_common::{
|
||||||
rbac::{Action, AuthorizationContext, Grant, Resource},
|
rbac::{Action, AuthorizationContext, Grant, Resource},
|
||||||
repositories::{
|
repositories::{
|
||||||
identity::{IdentityRepository, PermissionSetRepository},
|
identity::{IdentityRepository, IdentityRoleAssignmentRepository, PermissionSetRepository},
|
||||||
FindById,
|
FindById,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@@ -95,8 +95,16 @@ impl AuthorizationService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn load_effective_grants(&self, identity_id: i64) -> Result<Vec<Grant>, ApiError> {
|
async fn load_effective_grants(&self, identity_id: i64) -> Result<Vec<Grant>, ApiError> {
|
||||||
let permission_sets =
|
let mut permission_sets =
|
||||||
PermissionSetRepository::find_by_identity(&self.db, identity_id).await?;
|
PermissionSetRepository::find_by_identity(&self.db, identity_id).await?;
|
||||||
|
let roles =
|
||||||
|
IdentityRoleAssignmentRepository::find_role_names_by_identity(&self.db, identity_id)
|
||||||
|
.await?;
|
||||||
|
let role_permission_sets = PermissionSetRepository::find_by_roles(&self.db, &roles).await?;
|
||||||
|
permission_sets.extend(role_permission_sets);
|
||||||
|
|
||||||
|
let mut seen_permission_sets = std::collections::HashSet::new();
|
||||||
|
permission_sets.retain(|permission_set| seen_permission_sets.insert(permission_set.id));
|
||||||
|
|
||||||
let mut grants = Vec::new();
|
let mut grants = Vec::new();
|
||||||
for permission_set in permission_sets {
|
for permission_set in permission_sets {
|
||||||
@@ -126,10 +134,6 @@ fn resource_name(resource: Resource) -> &'static str {
|
|||||||
Resource::Inquiries => "inquiries",
|
Resource::Inquiries => "inquiries",
|
||||||
Resource::Keys => "keys",
|
Resource::Keys => "keys",
|
||||||
Resource::Artifacts => "artifacts",
|
Resource::Artifacts => "artifacts",
|
||||||
Resource::Workflows => "workflows",
|
|
||||||
Resource::Webhooks => "webhooks",
|
|
||||||
Resource::Analytics => "analytics",
|
|
||||||
Resource::History => "history",
|
|
||||||
Resource::Identities => "identities",
|
Resource::Identities => "identities",
|
||||||
Resource::Permissions => "permissions",
|
Resource::Permissions => "permissions",
|
||||||
}
|
}
|
||||||
@@ -145,5 +149,6 @@ fn action_name(action: Action) -> &'static str {
|
|||||||
Action::Cancel => "cancel",
|
Action::Cancel => "cancel",
|
||||||
Action::Respond => "respond",
|
Action::Respond => "respond",
|
||||||
Action::Manage => "manage",
|
Action::Manage => "manage",
|
||||||
|
Action::Decrypt => "decrypt",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,9 +25,8 @@ pub struct CreateActionRequest {
|
|||||||
pub label: String,
|
pub label: String,
|
||||||
|
|
||||||
/// Action description
|
/// Action description
|
||||||
#[validate(length(min = 1))]
|
|
||||||
#[schema(example = "Posts a message to a Slack channel")]
|
#[schema(example = "Posts a message to a Slack channel")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Entry point for action execution (e.g., path to script, function name)
|
/// Entry point for action execution (e.g., path to script, function name)
|
||||||
#[validate(length(min = 1, max = 1024))]
|
#[validate(length(min = 1, max = 1024))]
|
||||||
@@ -63,7 +62,6 @@ pub struct UpdateActionRequest {
|
|||||||
pub label: Option<String>,
|
pub label: Option<String>,
|
||||||
|
|
||||||
/// Action description
|
/// Action description
|
||||||
#[validate(length(min = 1))]
|
|
||||||
#[schema(example = "Posts a message to a Slack channel with enhanced features")]
|
#[schema(example = "Posts a message to a Slack channel with enhanced features")]
|
||||||
pub description: Option<String>,
|
pub description: Option<String>,
|
||||||
|
|
||||||
@@ -121,7 +119,7 @@ pub struct ActionResponse {
|
|||||||
|
|
||||||
/// Action description
|
/// Action description
|
||||||
#[schema(example = "Posts a message to a Slack channel")]
|
#[schema(example = "Posts a message to a Slack channel")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Entry point
|
/// Entry point
|
||||||
#[schema(example = "/actions/slack/post_message.py")]
|
#[schema(example = "/actions/slack/post_message.py")]
|
||||||
@@ -183,7 +181,7 @@ pub struct ActionSummary {
|
|||||||
|
|
||||||
/// Action description
|
/// Action description
|
||||||
#[schema(example = "Posts a message to a Slack channel")]
|
#[schema(example = "Posts a message to a Slack channel")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Entry point
|
/// Entry point
|
||||||
#[schema(example = "/actions/slack/post_message.py")]
|
#[schema(example = "/actions/slack/post_message.py")]
|
||||||
@@ -321,7 +319,7 @@ mod tests {
|
|||||||
r#ref: "".to_string(), // Invalid: empty
|
r#ref: "".to_string(), // Invalid: empty
|
||||||
pack_ref: "test-pack".to_string(),
|
pack_ref: "test-pack".to_string(),
|
||||||
label: "Test Action".to_string(),
|
label: "Test Action".to_string(),
|
||||||
description: "Test description".to_string(),
|
description: Some("Test description".to_string()),
|
||||||
entrypoint: "/actions/test.py".to_string(),
|
entrypoint: "/actions/test.py".to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
@@ -338,7 +336,7 @@ mod tests {
|
|||||||
r#ref: "test.action".to_string(),
|
r#ref: "test.action".to_string(),
|
||||||
pack_ref: "test-pack".to_string(),
|
pack_ref: "test-pack".to_string(),
|
||||||
label: "Test Action".to_string(),
|
label: "Test Action".to_string(),
|
||||||
description: "Test description".to_string(),
|
description: Some("Test description".to_string()),
|
||||||
entrypoint: "/actions/test.py".to_string(),
|
entrypoint: "/actions/test.py".to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -51,9 +51,10 @@ pub use inquiry::{
|
|||||||
pub use key::{CreateKeyRequest, KeyQueryParams, KeyResponse, KeySummary, UpdateKeyRequest};
|
pub use key::{CreateKeyRequest, KeyQueryParams, KeyResponse, KeySummary, UpdateKeyRequest};
|
||||||
pub use pack::{CreatePackRequest, PackResponse, PackSummary, UpdatePackRequest};
|
pub use pack::{CreatePackRequest, PackResponse, PackSummary, UpdatePackRequest};
|
||||||
pub use permission::{
|
pub use permission::{
|
||||||
CreateIdentityRequest, CreatePermissionAssignmentRequest, IdentityResponse, IdentitySummary,
|
CreateIdentityRequest, CreateIdentityRoleAssignmentRequest, CreatePermissionAssignmentRequest,
|
||||||
PermissionAssignmentResponse, PermissionSetQueryParams, PermissionSetSummary,
|
CreatePermissionSetRoleAssignmentRequest, IdentityResponse, IdentityRoleAssignmentResponse,
|
||||||
UpdateIdentityRequest,
|
IdentitySummary, PermissionAssignmentResponse, PermissionSetQueryParams,
|
||||||
|
PermissionSetRoleAssignmentResponse, PermissionSetSummary, UpdateIdentityRequest,
|
||||||
};
|
};
|
||||||
pub use rule::{CreateRuleRequest, RuleResponse, RuleSummary, UpdateRuleRequest};
|
pub use rule::{CreateRuleRequest, RuleResponse, RuleSummary, UpdateRuleRequest};
|
||||||
pub use runtime::{CreateRuntimeRequest, RuntimeResponse, RuntimeSummary, UpdateRuntimeRequest};
|
pub use runtime::{CreateRuntimeRequest, RuntimeResponse, RuntimeSummary, UpdateRuntimeRequest};
|
||||||
|
|||||||
@@ -14,10 +14,32 @@ pub struct IdentitySummary {
|
|||||||
pub id: i64,
|
pub id: i64,
|
||||||
pub login: String,
|
pub login: String,
|
||||||
pub display_name: Option<String>,
|
pub display_name: Option<String>,
|
||||||
|
pub frozen: bool,
|
||||||
pub attributes: JsonValue,
|
pub attributes: JsonValue,
|
||||||
|
pub roles: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type IdentityResponse = IdentitySummary;
|
#[derive(Debug, Clone, Serialize, ToSchema)]
|
||||||
|
pub struct IdentityRoleAssignmentResponse {
|
||||||
|
pub id: i64,
|
||||||
|
pub identity_id: i64,
|
||||||
|
pub role: String,
|
||||||
|
pub source: String,
|
||||||
|
pub managed: bool,
|
||||||
|
pub created: chrono::DateTime<chrono::Utc>,
|
||||||
|
pub updated: chrono::DateTime<chrono::Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, ToSchema)]
|
||||||
|
pub struct IdentityResponse {
|
||||||
|
pub id: i64,
|
||||||
|
pub login: String,
|
||||||
|
pub display_name: Option<String>,
|
||||||
|
pub frozen: bool,
|
||||||
|
pub attributes: JsonValue,
|
||||||
|
pub roles: Vec<IdentityRoleAssignmentResponse>,
|
||||||
|
pub direct_permissions: Vec<PermissionAssignmentResponse>,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, ToSchema)]
|
#[derive(Debug, Clone, Serialize, ToSchema)]
|
||||||
pub struct PermissionSetSummary {
|
pub struct PermissionSetSummary {
|
||||||
@@ -27,6 +49,7 @@ pub struct PermissionSetSummary {
|
|||||||
pub label: Option<String>,
|
pub label: Option<String>,
|
||||||
pub description: Option<String>,
|
pub description: Option<String>,
|
||||||
pub grants: JsonValue,
|
pub grants: JsonValue,
|
||||||
|
pub roles: Vec<PermissionSetRoleAssignmentResponse>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, ToSchema)]
|
#[derive(Debug, Clone, Serialize, ToSchema)]
|
||||||
@@ -38,6 +61,15 @@ pub struct PermissionAssignmentResponse {
|
|||||||
pub created: chrono::DateTime<chrono::Utc>,
|
pub created: chrono::DateTime<chrono::Utc>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, ToSchema)]
|
||||||
|
pub struct PermissionSetRoleAssignmentResponse {
|
||||||
|
pub id: i64,
|
||||||
|
pub permission_set_id: i64,
|
||||||
|
pub permission_set_ref: Option<String>,
|
||||||
|
pub role: String,
|
||||||
|
pub created: chrono::DateTime<chrono::Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize, ToSchema)]
|
#[derive(Debug, Clone, Deserialize, ToSchema)]
|
||||||
pub struct CreatePermissionAssignmentRequest {
|
pub struct CreatePermissionAssignmentRequest {
|
||||||
pub identity_id: Option<i64>,
|
pub identity_id: Option<i64>,
|
||||||
@@ -45,6 +77,18 @@ pub struct CreatePermissionAssignmentRequest {
|
|||||||
pub permission_set_ref: String,
|
pub permission_set_ref: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
|
||||||
|
pub struct CreateIdentityRoleAssignmentRequest {
|
||||||
|
#[validate(length(min = 1, max = 255))]
|
||||||
|
pub role: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
|
||||||
|
pub struct CreatePermissionSetRoleAssignmentRequest {
|
||||||
|
#[validate(length(min = 1, max = 255))]
|
||||||
|
pub role: String,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
|
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
|
||||||
pub struct CreateIdentityRequest {
|
pub struct CreateIdentityRequest {
|
||||||
#[validate(length(min = 3, max = 255))]
|
#[validate(length(min = 3, max = 255))]
|
||||||
@@ -62,4 +106,5 @@ pub struct UpdateIdentityRequest {
|
|||||||
pub display_name: Option<String>,
|
pub display_name: Option<String>,
|
||||||
pub password: Option<String>,
|
pub password: Option<String>,
|
||||||
pub attributes: Option<JsonValue>,
|
pub attributes: Option<JsonValue>,
|
||||||
|
pub frozen: Option<bool>,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,9 +25,8 @@ pub struct CreateRuleRequest {
|
|||||||
pub label: String,
|
pub label: String,
|
||||||
|
|
||||||
/// Rule description
|
/// Rule description
|
||||||
#[validate(length(min = 1))]
|
|
||||||
#[schema(example = "Send Slack notification when an error occurs")]
|
#[schema(example = "Send Slack notification when an error occurs")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Action reference to execute when rule matches
|
/// Action reference to execute when rule matches
|
||||||
#[validate(length(min = 1, max = 255))]
|
#[validate(length(min = 1, max = 255))]
|
||||||
@@ -69,7 +68,6 @@ pub struct UpdateRuleRequest {
|
|||||||
pub label: Option<String>,
|
pub label: Option<String>,
|
||||||
|
|
||||||
/// Rule description
|
/// Rule description
|
||||||
#[validate(length(min = 1))]
|
|
||||||
#[schema(example = "Enhanced error notification with filtering")]
|
#[schema(example = "Enhanced error notification with filtering")]
|
||||||
pub description: Option<String>,
|
pub description: Option<String>,
|
||||||
|
|
||||||
@@ -115,7 +113,7 @@ pub struct RuleResponse {
|
|||||||
|
|
||||||
/// Rule description
|
/// Rule description
|
||||||
#[schema(example = "Send Slack notification when an error occurs")]
|
#[schema(example = "Send Slack notification when an error occurs")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Action ID (null if the referenced action has been deleted)
|
/// Action ID (null if the referenced action has been deleted)
|
||||||
#[schema(example = 1)]
|
#[schema(example = 1)]
|
||||||
@@ -183,7 +181,7 @@ pub struct RuleSummary {
|
|||||||
|
|
||||||
/// Rule description
|
/// Rule description
|
||||||
#[schema(example = "Send Slack notification when an error occurs")]
|
#[schema(example = "Send Slack notification when an error occurs")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Action reference
|
/// Action reference
|
||||||
#[schema(example = "slack.post_message")]
|
#[schema(example = "slack.post_message")]
|
||||||
@@ -297,7 +295,7 @@ mod tests {
|
|||||||
r#ref: "".to_string(), // Invalid: empty
|
r#ref: "".to_string(), // Invalid: empty
|
||||||
pack_ref: "test-pack".to_string(),
|
pack_ref: "test-pack".to_string(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test description".to_string(),
|
description: Some("Test description".to_string()),
|
||||||
action_ref: "test.action".to_string(),
|
action_ref: "test.action".to_string(),
|
||||||
trigger_ref: "test.trigger".to_string(),
|
trigger_ref: "test.trigger".to_string(),
|
||||||
conditions: default_empty_object(),
|
conditions: default_empty_object(),
|
||||||
@@ -315,7 +313,7 @@ mod tests {
|
|||||||
r#ref: "test.rule".to_string(),
|
r#ref: "test.rule".to_string(),
|
||||||
pack_ref: "test-pack".to_string(),
|
pack_ref: "test-pack".to_string(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test description".to_string(),
|
description: Some("Test description".to_string()),
|
||||||
action_ref: "test.action".to_string(),
|
action_ref: "test.action".to_string(),
|
||||||
trigger_ref: "test.trigger".to_string(),
|
trigger_ref: "test.trigger".to_string(),
|
||||||
conditions: serde_json::json!({
|
conditions: serde_json::json!({
|
||||||
|
|||||||
@@ -203,9 +203,8 @@ pub struct CreateSensorRequest {
|
|||||||
pub label: String,
|
pub label: String,
|
||||||
|
|
||||||
/// Sensor description
|
/// Sensor description
|
||||||
#[validate(length(min = 1))]
|
|
||||||
#[schema(example = "Monitors CPU usage and generates events")]
|
#[schema(example = "Monitors CPU usage and generates events")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Entry point for sensor execution (e.g., path to script, function name)
|
/// Entry point for sensor execution (e.g., path to script, function name)
|
||||||
#[validate(length(min = 1, max = 1024))]
|
#[validate(length(min = 1, max = 1024))]
|
||||||
@@ -247,7 +246,6 @@ pub struct UpdateSensorRequest {
|
|||||||
pub label: Option<String>,
|
pub label: Option<String>,
|
||||||
|
|
||||||
/// Sensor description
|
/// Sensor description
|
||||||
#[validate(length(min = 1))]
|
|
||||||
#[schema(example = "Enhanced CPU monitoring with alerts")]
|
#[schema(example = "Enhanced CPU monitoring with alerts")]
|
||||||
pub description: Option<String>,
|
pub description: Option<String>,
|
||||||
|
|
||||||
@@ -297,7 +295,7 @@ pub struct SensorResponse {
|
|||||||
|
|
||||||
/// Sensor description
|
/// Sensor description
|
||||||
#[schema(example = "Monitors CPU usage and generates events")]
|
#[schema(example = "Monitors CPU usage and generates events")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Entry point
|
/// Entry point
|
||||||
#[schema(example = "/sensors/monitoring/cpu_monitor.py")]
|
#[schema(example = "/sensors/monitoring/cpu_monitor.py")]
|
||||||
@@ -357,7 +355,7 @@ pub struct SensorSummary {
|
|||||||
|
|
||||||
/// Sensor description
|
/// Sensor description
|
||||||
#[schema(example = "Monitors CPU usage and generates events")]
|
#[schema(example = "Monitors CPU usage and generates events")]
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
|
|
||||||
/// Trigger reference
|
/// Trigger reference
|
||||||
#[schema(example = "monitoring.cpu_threshold")]
|
#[schema(example = "monitoring.cpu_threshold")]
|
||||||
@@ -499,7 +497,7 @@ mod tests {
|
|||||||
r#ref: "test.sensor".to_string(),
|
r#ref: "test.sensor".to_string(),
|
||||||
pack_ref: "test-pack".to_string(),
|
pack_ref: "test-pack".to_string(),
|
||||||
label: "Test Sensor".to_string(),
|
label: "Test Sensor".to_string(),
|
||||||
description: "Test description".to_string(),
|
description: Some("Test description".to_string()),
|
||||||
entrypoint: "/sensors/test.py".to_string(),
|
entrypoint: "/sensors/test.py".to_string(),
|
||||||
runtime_ref: "python3".to_string(),
|
runtime_ref: "python3".to_string(),
|
||||||
trigger_ref: "test.trigger".to_string(),
|
trigger_ref: "test.trigger".to_string(),
|
||||||
|
|||||||
@@ -27,8 +27,11 @@ use crate::dto::{
|
|||||||
UpdatePackRequest, WorkflowSyncResult,
|
UpdatePackRequest, WorkflowSyncResult,
|
||||||
},
|
},
|
||||||
permission::{
|
permission::{
|
||||||
CreateIdentityRequest, CreatePermissionAssignmentRequest, IdentityResponse,
|
CreateIdentityRequest, CreateIdentityRoleAssignmentRequest,
|
||||||
IdentitySummary, PermissionAssignmentResponse, PermissionSetSummary, UpdateIdentityRequest,
|
CreatePermissionAssignmentRequest, CreatePermissionSetRoleAssignmentRequest,
|
||||||
|
IdentityResponse, IdentityRoleAssignmentResponse, IdentitySummary,
|
||||||
|
PermissionAssignmentResponse, PermissionSetRoleAssignmentResponse, PermissionSetSummary,
|
||||||
|
UpdateIdentityRequest,
|
||||||
},
|
},
|
||||||
rule::{CreateRuleRequest, RuleResponse, RuleSummary, UpdateRuleRequest},
|
rule::{CreateRuleRequest, RuleResponse, RuleSummary, UpdateRuleRequest},
|
||||||
runtime::{CreateRuntimeRequest, RuntimeResponse, RuntimeSummary, UpdateRuntimeRequest},
|
runtime::{CreateRuntimeRequest, RuntimeResponse, RuntimeSummary, UpdateRuntimeRequest},
|
||||||
@@ -185,6 +188,12 @@ use crate::dto::{
|
|||||||
crate::routes::permissions::list_identity_permissions,
|
crate::routes::permissions::list_identity_permissions,
|
||||||
crate::routes::permissions::create_permission_assignment,
|
crate::routes::permissions::create_permission_assignment,
|
||||||
crate::routes::permissions::delete_permission_assignment,
|
crate::routes::permissions::delete_permission_assignment,
|
||||||
|
crate::routes::permissions::create_identity_role_assignment,
|
||||||
|
crate::routes::permissions::delete_identity_role_assignment,
|
||||||
|
crate::routes::permissions::create_permission_set_role_assignment,
|
||||||
|
crate::routes::permissions::delete_permission_set_role_assignment,
|
||||||
|
crate::routes::permissions::freeze_identity,
|
||||||
|
crate::routes::permissions::unfreeze_identity,
|
||||||
|
|
||||||
// Workflows
|
// Workflows
|
||||||
crate::routes::workflows::list_workflows,
|
crate::routes::workflows::list_workflows,
|
||||||
@@ -277,6 +286,10 @@ use crate::dto::{
|
|||||||
PermissionSetSummary,
|
PermissionSetSummary,
|
||||||
PermissionAssignmentResponse,
|
PermissionAssignmentResponse,
|
||||||
CreatePermissionAssignmentRequest,
|
CreatePermissionAssignmentRequest,
|
||||||
|
CreateIdentityRoleAssignmentRequest,
|
||||||
|
IdentityRoleAssignmentResponse,
|
||||||
|
CreatePermissionSetRoleAssignmentRequest,
|
||||||
|
PermissionSetRoleAssignmentResponse,
|
||||||
|
|
||||||
// Runtime DTOs
|
// Runtime DTOs
|
||||||
CreateRuntimeRequest,
|
CreateRuntimeRequest,
|
||||||
|
|||||||
@@ -277,7 +277,7 @@ pub async fn update_action(
|
|||||||
// Create update input
|
// Create update input
|
||||||
let update_input = UpdateActionInput {
|
let update_input = UpdateActionInput {
|
||||||
label: request.label,
|
label: request.label,
|
||||||
description: request.description,
|
description: request.description.map(Patch::Set),
|
||||||
entrypoint: request.entrypoint,
|
entrypoint: request.entrypoint,
|
||||||
runtime: request.runtime,
|
runtime: request.runtime,
|
||||||
runtime_version_constraint: request.runtime_version_constraint.map(|patch| match patch {
|
runtime_version_constraint: request.runtime_version_constraint.map(|patch| match patch {
|
||||||
|
|||||||
@@ -40,7 +40,8 @@ use attune_common::repositories::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
auth::middleware::RequireAuth,
|
auth::{jwt::TokenType, middleware::AuthenticatedUser, middleware::RequireAuth},
|
||||||
|
authz::{AuthorizationCheck, AuthorizationService},
|
||||||
dto::{
|
dto::{
|
||||||
artifact::{
|
artifact::{
|
||||||
AllocateFileVersionByRefRequest, AppendProgressRequest, ArtifactExecutionPatch,
|
AllocateFileVersionByRefRequest, AppendProgressRequest, ArtifactExecutionPatch,
|
||||||
@@ -55,6 +56,7 @@ use crate::{
|
|||||||
middleware::{ApiError, ApiResult},
|
middleware::{ApiError, ApiResult},
|
||||||
state::AppState,
|
state::AppState,
|
||||||
};
|
};
|
||||||
|
use attune_common::rbac::{Action, AuthorizationContext, Resource};
|
||||||
|
|
||||||
// ============================================================================
|
// ============================================================================
|
||||||
// Artifact CRUD
|
// Artifact CRUD
|
||||||
@@ -72,7 +74,7 @@ use crate::{
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn list_artifacts(
|
pub async fn list_artifacts(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Query(query): Query<ArtifactQueryParams>,
|
Query(query): Query<ArtifactQueryParams>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -88,8 +90,16 @@ pub async fn list_artifacts(
|
|||||||
};
|
};
|
||||||
|
|
||||||
let result = ArtifactRepository::search(&state.db, &filters).await?;
|
let result = ArtifactRepository::search(&state.db, &filters).await?;
|
||||||
|
let mut rows = result.rows;
|
||||||
|
|
||||||
let items: Vec<ArtifactSummary> = result.rows.into_iter().map(ArtifactSummary::from).collect();
|
if let Some((identity_id, grants)) = ensure_can_read_any_artifact(&state, &user).await? {
|
||||||
|
rows.retain(|artifact| {
|
||||||
|
let ctx = artifact_authorization_context(identity_id, artifact);
|
||||||
|
AuthorizationService::is_allowed(&grants, Resource::Artifacts, Action::Read, &ctx)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let items: Vec<ArtifactSummary> = rows.into_iter().map(ArtifactSummary::from).collect();
|
||||||
|
|
||||||
let pagination = PaginationParams {
|
let pagination = PaginationParams {
|
||||||
page: query.page,
|
page: query.page,
|
||||||
@@ -113,7 +123,7 @@ pub async fn list_artifacts(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn get_artifact(
|
pub async fn get_artifact(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -121,6 +131,10 @@ pub async fn get_artifact(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
StatusCode::OK,
|
StatusCode::OK,
|
||||||
Json(ApiResponse::new(ArtifactResponse::from(artifact))),
|
Json(ApiResponse::new(ArtifactResponse::from(artifact))),
|
||||||
@@ -140,7 +154,7 @@ pub async fn get_artifact(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn get_artifact_by_ref(
|
pub async fn get_artifact_by_ref(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(artifact_ref): Path<String>,
|
Path(artifact_ref): Path<String>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -148,6 +162,10 @@ pub async fn get_artifact_by_ref(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact '{}' not found", artifact_ref)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact '{}' not found", artifact_ref)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact '{}' not found", artifact_ref)))?;
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
StatusCode::OK,
|
StatusCode::OK,
|
||||||
Json(ApiResponse::new(ArtifactResponse::from(artifact))),
|
Json(ApiResponse::new(ArtifactResponse::from(artifact))),
|
||||||
@@ -168,7 +186,7 @@ pub async fn get_artifact_by_ref(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn create_artifact(
|
pub async fn create_artifact(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Json(request): Json<CreateArtifactRequest>,
|
Json(request): Json<CreateArtifactRequest>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -200,6 +218,16 @@ pub async fn create_artifact(
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
authorize_artifact_create(
|
||||||
|
&state,
|
||||||
|
&user,
|
||||||
|
&request.r#ref,
|
||||||
|
request.scope,
|
||||||
|
&request.owner,
|
||||||
|
visibility,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let input = CreateArtifactInput {
|
let input = CreateArtifactInput {
|
||||||
r#ref: request.r#ref,
|
r#ref: request.r#ref,
|
||||||
scope: request.scope,
|
scope: request.scope,
|
||||||
@@ -240,16 +268,18 @@ pub async fn create_artifact(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn update_artifact(
|
pub async fn update_artifact(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
Json(request): Json<UpdateArtifactRequest>,
|
Json(request): Json<UpdateArtifactRequest>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
// Verify artifact exists
|
// Verify artifact exists
|
||||||
ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
|
||||||
|
|
||||||
let input = UpdateArtifactInput {
|
let input = UpdateArtifactInput {
|
||||||
r#ref: None, // Ref is immutable after creation
|
r#ref: None, // Ref is immutable after creation
|
||||||
scope: request.scope,
|
scope: request.scope,
|
||||||
@@ -305,7 +335,7 @@ pub async fn update_artifact(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn delete_artifact(
|
pub async fn delete_artifact(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -313,6 +343,8 @@ pub async fn delete_artifact(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Delete, &artifact).await?;
|
||||||
|
|
||||||
// Before deleting DB rows, clean up any file-backed versions on disk
|
// Before deleting DB rows, clean up any file-backed versions on disk
|
||||||
let file_versions =
|
let file_versions =
|
||||||
ArtifactVersionRepository::find_file_versions_by_artifact(&state.db, id).await?;
|
ArtifactVersionRepository::find_file_versions_by_artifact(&state.db, id).await?;
|
||||||
@@ -355,11 +387,17 @@ pub async fn delete_artifact(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn list_artifacts_by_execution(
|
pub async fn list_artifacts_by_execution(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(execution_id): Path<i64>,
|
Path(execution_id): Path<i64>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
let artifacts = ArtifactRepository::find_by_execution(&state.db, execution_id).await?;
|
let mut artifacts = ArtifactRepository::find_by_execution(&state.db, execution_id).await?;
|
||||||
|
if let Some((identity_id, grants)) = ensure_can_read_any_artifact(&state, &user).await? {
|
||||||
|
artifacts.retain(|artifact| {
|
||||||
|
let ctx = artifact_authorization_context(identity_id, artifact);
|
||||||
|
AuthorizationService::is_allowed(&grants, Resource::Artifacts, Action::Read, &ctx)
|
||||||
|
});
|
||||||
|
}
|
||||||
let items: Vec<ArtifactSummary> = artifacts.into_iter().map(ArtifactSummary::from).collect();
|
let items: Vec<ArtifactSummary> = artifacts.into_iter().map(ArtifactSummary::from).collect();
|
||||||
|
|
||||||
Ok((StatusCode::OK, Json(ApiResponse::new(items))))
|
Ok((StatusCode::OK, Json(ApiResponse::new(items))))
|
||||||
@@ -387,7 +425,7 @@ pub async fn list_artifacts_by_execution(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn append_progress(
|
pub async fn append_progress(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
Json(request): Json<AppendProgressRequest>,
|
Json(request): Json<AppendProgressRequest>,
|
||||||
@@ -396,6 +434,8 @@ pub async fn append_progress(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
|
||||||
|
|
||||||
if artifact.r#type != ArtifactType::Progress {
|
if artifact.r#type != ArtifactType::Progress {
|
||||||
return Err(ApiError::BadRequest(format!(
|
return Err(ApiError::BadRequest(format!(
|
||||||
"Artifact '{}' is type {:?}, not progress. Use version endpoints for file artifacts.",
|
"Artifact '{}' is type {:?}, not progress. Use version endpoints for file artifacts.",
|
||||||
@@ -430,16 +470,18 @@ pub async fn append_progress(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn set_artifact_data(
|
pub async fn set_artifact_data(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
Json(request): Json<SetDataRequest>,
|
Json(request): Json<SetDataRequest>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
// Verify exists
|
// Verify exists
|
||||||
ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
|
||||||
|
|
||||||
let updated = ArtifactRepository::set_data(&state.db, id, &request.data).await?;
|
let updated = ArtifactRepository::set_data(&state.db, id, &request.data).await?;
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
@@ -468,15 +510,19 @@ pub async fn set_artifact_data(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn list_versions(
|
pub async fn list_versions(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
// Verify artifact exists
|
// Verify artifact exists
|
||||||
ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
let versions = ArtifactVersionRepository::list_by_artifact(&state.db, id).await?;
|
let versions = ArtifactVersionRepository::list_by_artifact(&state.db, id).await?;
|
||||||
let items: Vec<ArtifactVersionSummary> = versions
|
let items: Vec<ArtifactVersionSummary> = versions
|
||||||
.into_iter()
|
.into_iter()
|
||||||
@@ -502,15 +548,19 @@ pub async fn list_versions(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn get_version(
|
pub async fn get_version(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path((id, version)): Path<(i64, i32)>,
|
Path((id, version)): Path<(i64, i32)>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
// Verify artifact exists
|
// Verify artifact exists
|
||||||
ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
|
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
@@ -536,14 +586,18 @@ pub async fn get_version(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn get_latest_version(
|
pub async fn get_latest_version(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
|
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
|
||||||
@@ -568,15 +622,17 @@ pub async fn get_latest_version(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn create_version_json(
|
pub async fn create_version_json(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
Json(request): Json<CreateVersionJsonRequest>,
|
Json(request): Json<CreateVersionJsonRequest>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
|
||||||
|
|
||||||
let input = CreateArtifactVersionInput {
|
let input = CreateArtifactVersionInput {
|
||||||
artifact: id,
|
artifact: id,
|
||||||
content_type: Some(
|
content_type: Some(
|
||||||
@@ -624,7 +680,7 @@ pub async fn create_version_json(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn create_version_file(
|
pub async fn create_version_file(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
Json(request): Json<CreateFileVersionRequest>,
|
Json(request): Json<CreateFileVersionRequest>,
|
||||||
@@ -633,6 +689,8 @@ pub async fn create_version_file(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
|
||||||
|
|
||||||
// Validate this is a file-type artifact
|
// Validate this is a file-type artifact
|
||||||
if !is_file_backed_type(artifact.r#type) {
|
if !is_file_backed_type(artifact.r#type) {
|
||||||
return Err(ApiError::BadRequest(format!(
|
return Err(ApiError::BadRequest(format!(
|
||||||
@@ -726,15 +784,17 @@ pub async fn create_version_file(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn upload_version(
|
pub async fn upload_version(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
mut multipart: Multipart,
|
mut multipart: Multipart,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
|
||||||
|
|
||||||
let mut file_data: Option<Vec<u8>> = None;
|
let mut file_data: Option<Vec<u8>> = None;
|
||||||
let mut content_type: Option<String> = None;
|
let mut content_type: Option<String> = None;
|
||||||
let mut meta: Option<serde_json::Value> = None;
|
let mut meta: Option<serde_json::Value> = None;
|
||||||
@@ -854,7 +914,7 @@ pub async fn upload_version(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn download_version(
|
pub async fn download_version(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path((id, version)): Path<(i64, i32)>,
|
Path((id, version)): Path<(i64, i32)>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -862,6 +922,10 @@ pub async fn download_version(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
// First try without content (cheaper query) to check for file_path
|
// First try without content (cheaper query) to check for file_path
|
||||||
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
|
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
|
||||||
.await?
|
.await?
|
||||||
@@ -904,7 +968,7 @@ pub async fn download_version(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn download_latest(
|
pub async fn download_latest(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(id): Path<i64>,
|
Path(id): Path<i64>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -912,6 +976,10 @@ pub async fn download_latest(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
// First try without content (cheaper query) to check for file_path
|
// First try without content (cheaper query) to check for file_path
|
||||||
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
|
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
@@ -955,7 +1023,7 @@ pub async fn download_latest(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn delete_version(
|
pub async fn delete_version(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path((id, version)): Path<(i64, i32)>,
|
Path((id, version)): Path<(i64, i32)>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
@@ -964,6 +1032,8 @@ pub async fn delete_version(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Delete, &artifact).await?;
|
||||||
|
|
||||||
// Find the version by artifact + version number
|
// Find the version by artifact + version number
|
||||||
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
|
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
|
||||||
.await?
|
.await?
|
||||||
@@ -1042,7 +1112,7 @@ pub async fn delete_version(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn upload_version_by_ref(
|
pub async fn upload_version_by_ref(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(artifact_ref): Path<String>,
|
Path(artifact_ref): Path<String>,
|
||||||
mut multipart: Multipart,
|
mut multipart: Multipart,
|
||||||
@@ -1157,6 +1227,8 @@ pub async fn upload_version_by_ref(
|
|||||||
// Upsert: find existing artifact or create a new one
|
// Upsert: find existing artifact or create a new one
|
||||||
let artifact = match ArtifactRepository::find_by_ref(&state.db, &artifact_ref).await? {
|
let artifact = match ArtifactRepository::find_by_ref(&state.db, &artifact_ref).await? {
|
||||||
Some(existing) => {
|
Some(existing) => {
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &existing).await?;
|
||||||
|
|
||||||
// Update execution link if a new execution ID was provided
|
// Update execution link if a new execution ID was provided
|
||||||
if execution_id.is_some() && execution_id != existing.execution {
|
if execution_id.is_some() && execution_id != existing.execution {
|
||||||
let update_input = UpdateArtifactInput {
|
let update_input = UpdateArtifactInput {
|
||||||
@@ -1211,6 +1283,16 @@ pub async fn upload_version_by_ref(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
authorize_artifact_create(
|
||||||
|
&state,
|
||||||
|
&user,
|
||||||
|
&artifact_ref,
|
||||||
|
a_scope,
|
||||||
|
owner.as_deref().unwrap_or_default(),
|
||||||
|
a_visibility,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
// Parse retention
|
// Parse retention
|
||||||
let a_retention_policy: RetentionPolicyType = match &retention_policy {
|
let a_retention_policy: RetentionPolicyType = match &retention_policy {
|
||||||
Some(rp) if !rp.is_empty() => {
|
Some(rp) if !rp.is_empty() => {
|
||||||
@@ -1297,7 +1379,7 @@ pub async fn upload_version_by_ref(
|
|||||||
security(("bearer_auth" = []))
|
security(("bearer_auth" = []))
|
||||||
)]
|
)]
|
||||||
pub async fn allocate_file_version_by_ref(
|
pub async fn allocate_file_version_by_ref(
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Path(artifact_ref): Path<String>,
|
Path(artifact_ref): Path<String>,
|
||||||
Json(request): Json<AllocateFileVersionByRefRequest>,
|
Json(request): Json<AllocateFileVersionByRefRequest>,
|
||||||
@@ -1305,6 +1387,8 @@ pub async fn allocate_file_version_by_ref(
|
|||||||
// Upsert: find existing artifact or create a new one
|
// Upsert: find existing artifact or create a new one
|
||||||
let artifact = match ArtifactRepository::find_by_ref(&state.db, &artifact_ref).await? {
|
let artifact = match ArtifactRepository::find_by_ref(&state.db, &artifact_ref).await? {
|
||||||
Some(existing) => {
|
Some(existing) => {
|
||||||
|
authorize_artifact_action(&state, &user, Action::Update, &existing).await?;
|
||||||
|
|
||||||
// Update execution link if a new execution ID was provided
|
// Update execution link if a new execution ID was provided
|
||||||
if request.execution.is_some() && request.execution != existing.execution {
|
if request.execution.is_some() && request.execution != existing.execution {
|
||||||
let update_input = UpdateArtifactInput {
|
let update_input = UpdateArtifactInput {
|
||||||
@@ -1347,6 +1431,16 @@ pub async fn allocate_file_version_by_ref(
|
|||||||
.unwrap_or(RetentionPolicyType::Versions);
|
.unwrap_or(RetentionPolicyType::Versions);
|
||||||
let a_retention_limit = request.retention_limit.unwrap_or(10);
|
let a_retention_limit = request.retention_limit.unwrap_or(10);
|
||||||
|
|
||||||
|
authorize_artifact_create(
|
||||||
|
&state,
|
||||||
|
&user,
|
||||||
|
&artifact_ref,
|
||||||
|
a_scope,
|
||||||
|
request.owner.as_deref().unwrap_or_default(),
|
||||||
|
a_visibility,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let create_input = CreateArtifactInput {
|
let create_input = CreateArtifactInput {
|
||||||
r#ref: artifact_ref.clone(),
|
r#ref: artifact_ref.clone(),
|
||||||
scope: a_scope,
|
scope: a_scope,
|
||||||
@@ -1437,6 +1531,105 @@ pub async fn allocate_file_version_by_ref(
|
|||||||
// Helpers
|
// Helpers
|
||||||
// ============================================================================
|
// ============================================================================
|
||||||
|
|
||||||
|
async fn authorize_artifact_action(
|
||||||
|
state: &Arc<AppState>,
|
||||||
|
user: &AuthenticatedUser,
|
||||||
|
action: Action,
|
||||||
|
artifact: &attune_common::models::artifact::Artifact,
|
||||||
|
) -> Result<(), ApiError> {
|
||||||
|
if user.claims.token_type != TokenType::Access {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let identity_id = user
|
||||||
|
.identity_id()
|
||||||
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
|
let authz = AuthorizationService::new(state.db.clone());
|
||||||
|
authz
|
||||||
|
.authorize(
|
||||||
|
user,
|
||||||
|
AuthorizationCheck {
|
||||||
|
resource: Resource::Artifacts,
|
||||||
|
action,
|
||||||
|
context: artifact_authorization_context(identity_id, artifact),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn authorize_artifact_create(
|
||||||
|
state: &Arc<AppState>,
|
||||||
|
user: &AuthenticatedUser,
|
||||||
|
artifact_ref: &str,
|
||||||
|
scope: OwnerType,
|
||||||
|
owner: &str,
|
||||||
|
visibility: ArtifactVisibility,
|
||||||
|
) -> Result<(), ApiError> {
|
||||||
|
if user.claims.token_type != TokenType::Access {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let identity_id = user
|
||||||
|
.identity_id()
|
||||||
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
|
let authz = AuthorizationService::new(state.db.clone());
|
||||||
|
let mut ctx = AuthorizationContext::new(identity_id);
|
||||||
|
ctx.target_ref = Some(artifact_ref.to_string());
|
||||||
|
ctx.owner_type = Some(scope);
|
||||||
|
ctx.owner_ref = Some(owner.to_string());
|
||||||
|
ctx.visibility = Some(visibility);
|
||||||
|
|
||||||
|
authz
|
||||||
|
.authorize(
|
||||||
|
user,
|
||||||
|
AuthorizationCheck {
|
||||||
|
resource: Resource::Artifacts,
|
||||||
|
action: Action::Create,
|
||||||
|
context: ctx,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn ensure_can_read_any_artifact(
|
||||||
|
state: &Arc<AppState>,
|
||||||
|
user: &AuthenticatedUser,
|
||||||
|
) -> Result<Option<(i64, Vec<attune_common::rbac::Grant>)>, ApiError> {
|
||||||
|
if user.claims.token_type != TokenType::Access {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
let identity_id = user
|
||||||
|
.identity_id()
|
||||||
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
|
let authz = AuthorizationService::new(state.db.clone());
|
||||||
|
let grants = authz.effective_grants(user).await?;
|
||||||
|
|
||||||
|
let can_read_any_artifact = grants
|
||||||
|
.iter()
|
||||||
|
.any(|g| g.resource == Resource::Artifacts && g.actions.contains(&Action::Read));
|
||||||
|
if !can_read_any_artifact {
|
||||||
|
return Err(ApiError::Forbidden(
|
||||||
|
"Insufficient permissions: artifacts:read".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Some((identity_id, grants)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn artifact_authorization_context(
|
||||||
|
identity_id: i64,
|
||||||
|
artifact: &attune_common::models::artifact::Artifact,
|
||||||
|
) -> AuthorizationContext {
|
||||||
|
let mut ctx = AuthorizationContext::new(identity_id);
|
||||||
|
ctx.target_id = Some(artifact.id);
|
||||||
|
ctx.target_ref = Some(artifact.r#ref.clone());
|
||||||
|
ctx.owner_type = Some(artifact.scope);
|
||||||
|
ctx.owner_ref = Some(artifact.owner.clone());
|
||||||
|
ctx.visibility = Some(artifact.visibility);
|
||||||
|
ctx
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns true for artifact types that should use file-backed storage on disk.
|
/// Returns true for artifact types that should use file-backed storage on disk.
|
||||||
fn is_file_backed_type(artifact_type: ArtifactType) -> bool {
|
fn is_file_backed_type(artifact_type: ArtifactType) -> bool {
|
||||||
matches!(
|
matches!(
|
||||||
@@ -1775,14 +1968,19 @@ pub async fn stream_artifact(
|
|||||||
let token = params.token.as_ref().ok_or(ApiError::Unauthorized(
|
let token = params.token.as_ref().ok_or(ApiError::Unauthorized(
|
||||||
"Missing authentication token".to_string(),
|
"Missing authentication token".to_string(),
|
||||||
))?;
|
))?;
|
||||||
validate_token(token, &state.jwt_config)
|
let claims = validate_token(token, &state.jwt_config)
|
||||||
.map_err(|_| ApiError::Unauthorized("Invalid authentication token".to_string()))?;
|
.map_err(|_| ApiError::Unauthorized("Invalid authentication token".to_string()))?;
|
||||||
|
let user = AuthenticatedUser { claims };
|
||||||
|
|
||||||
// --- resolve artifact + latest version ---------------------------------
|
// --- resolve artifact + latest version ---------------------------------
|
||||||
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
let artifact = ArtifactRepository::find_by_id(&state.db, id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
|
authorize_artifact_action(&state, &user, Action::Read, &artifact)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
|
||||||
|
|
||||||
if !is_file_backed_type(artifact.r#type) {
|
if !is_file_backed_type(artifact.r#type) {
|
||||||
return Err(ApiError::BadRequest(format!(
|
return Err(ApiError::BadRequest(format!(
|
||||||
"Artifact '{}' is type {:?} which is not file-backed. \
|
"Artifact '{}' is type {:?} which is not file-backed. \
|
||||||
|
|||||||
@@ -169,6 +169,12 @@ pub async fn login(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::Unauthorized("Invalid login or password".to_string()))?;
|
.ok_or_else(|| ApiError::Unauthorized("Invalid login or password".to_string()))?;
|
||||||
|
|
||||||
|
if identity.frozen {
|
||||||
|
return Err(ApiError::Forbidden(
|
||||||
|
"Identity is frozen and cannot authenticate".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
// Check if identity has a password set
|
// Check if identity has a password set
|
||||||
let password_hash = identity
|
let password_hash = identity
|
||||||
.password_hash
|
.password_hash
|
||||||
@@ -324,6 +330,12 @@ pub async fn refresh_token(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::Unauthorized("Identity not found".to_string()))?;
|
.ok_or_else(|| ApiError::Unauthorized("Identity not found".to_string()))?;
|
||||||
|
|
||||||
|
if identity.frozen {
|
||||||
|
return Err(ApiError::Forbidden(
|
||||||
|
"Identity is frozen and cannot authenticate".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
// Generate new tokens
|
// Generate new tokens
|
||||||
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
|
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
|
||||||
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
|
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
|
||||||
@@ -380,6 +392,12 @@ pub async fn get_current_user(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound("Identity not found".to_string()))?;
|
.ok_or_else(|| ApiError::NotFound("Identity not found".to_string()))?;
|
||||||
|
|
||||||
|
if identity.frozen {
|
||||||
|
return Err(ApiError::Forbidden(
|
||||||
|
"Identity is frozen and cannot authenticate".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
let response = CurrentUserResponse {
|
let response = CurrentUserResponse {
|
||||||
id: identity.id,
|
id: identity.id,
|
||||||
login: identity.login,
|
login: identity.login,
|
||||||
@@ -551,6 +569,7 @@ pub async fn change_password(
|
|||||||
display_name: None,
|
display_name: None,
|
||||||
password_hash: Some(new_password_hash),
|
password_hash: Some(new_password_hash),
|
||||||
attributes: None,
|
attributes: None,
|
||||||
|
frozen: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
IdentityRepository::update(&state.db, identity_id, update_input).await?;
|
IdentityRepository::update(&state.db, identity_id, update_input).await?;
|
||||||
|
|||||||
@@ -82,6 +82,17 @@ pub async fn create_event(
|
|||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
Json(payload): Json<CreateEventRequest>,
|
Json(payload): Json<CreateEventRequest>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
// Only sensor and execution tokens may create events directly.
|
||||||
|
// User sessions must go through the webhook receiver instead.
|
||||||
|
use crate::auth::jwt::TokenType;
|
||||||
|
if user.0.claims.token_type == TokenType::Access {
|
||||||
|
return Err(ApiError::Forbidden(
|
||||||
|
"Events may only be created by sensor services. To fire an event as a user, \
|
||||||
|
enable webhooks on the trigger and POST to its webhook URL."
|
||||||
|
.to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
// Validate request
|
// Validate request
|
||||||
payload
|
payload
|
||||||
.validate()
|
.validate()
|
||||||
@@ -128,7 +139,6 @@ pub async fn create_event(
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Determine source (sensor) from authenticated user if it's a sensor token
|
// Determine source (sensor) from authenticated user if it's a sensor token
|
||||||
use crate::auth::jwt::TokenType;
|
|
||||||
let (source_id, source_ref) = match user.0.claims.token_type {
|
let (source_id, source_ref) = match user.0.claims.token_type {
|
||||||
TokenType::Sensor => {
|
TokenType::Sensor => {
|
||||||
// Extract sensor reference from login
|
// Extract sensor reference from login
|
||||||
|
|||||||
@@ -93,19 +93,6 @@ pub async fn create_execution(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let mut execution_ctx = AuthorizationContext::new(identity_id);
|
|
||||||
execution_ctx.pack_ref = Some(action.pack_ref.clone());
|
|
||||||
authz
|
|
||||||
.authorize(
|
|
||||||
&user,
|
|
||||||
AuthorizationCheck {
|
|
||||||
resource: Resource::Executions,
|
|
||||||
action: Action::Create,
|
|
||||||
context: execution_ctx,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create execution input
|
// Create execution input
|
||||||
|
|||||||
@@ -120,12 +120,16 @@ pub async fn get_key(
|
|||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Key '{}' not found", key_ref)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Key '{}' not found", key_ref)))?;
|
||||||
|
|
||||||
if user.0.claims.token_type == TokenType::Access {
|
// For encrypted keys, track whether this caller is permitted to see the value.
|
||||||
|
// Non-Access tokens (sensor, execution) always get full access.
|
||||||
|
let can_decrypt = if user.0.claims.token_type == TokenType::Access {
|
||||||
let identity_id = user
|
let identity_id = user
|
||||||
.0
|
.0
|
||||||
.identity_id()
|
.identity_id()
|
||||||
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
let authz = AuthorizationService::new(state.db.clone());
|
let authz = AuthorizationService::new(state.db.clone());
|
||||||
|
|
||||||
|
// Basic read check — hide behind 404 to prevent enumeration.
|
||||||
authz
|
authz
|
||||||
.authorize(
|
.authorize(
|
||||||
&user.0,
|
&user.0,
|
||||||
@@ -136,19 +140,43 @@ pub async fn get_key(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
// Hide unauthorized records behind 404 to reduce enumeration leakage.
|
|
||||||
.map_err(|_| ApiError::NotFound(format!("Key '{}' not found", key_ref)))?;
|
.map_err(|_| ApiError::NotFound(format!("Key '{}' not found", key_ref)))?;
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt value if encrypted
|
// For encrypted keys, separately check Keys::Decrypt.
|
||||||
|
// Failing this is not an error — we just return the value as null.
|
||||||
if key.encrypted {
|
if key.encrypted {
|
||||||
let encryption_key = state
|
authz
|
||||||
|
.authorize(
|
||||||
|
&user.0,
|
||||||
|
AuthorizationCheck {
|
||||||
|
resource: Resource::Keys,
|
||||||
|
action: Action::Decrypt,
|
||||||
|
context: key_authorization_context(identity_id, &key),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.is_ok()
|
||||||
|
} else {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
true
|
||||||
|
};
|
||||||
|
|
||||||
|
// Decrypt value if encrypted and caller has permission.
|
||||||
|
// If they lack Keys::Decrypt, return null rather than the ciphertext.
|
||||||
|
if key.encrypted {
|
||||||
|
if can_decrypt {
|
||||||
|
let encryption_key =
|
||||||
|
state
|
||||||
.config
|
.config
|
||||||
.security
|
.security
|
||||||
.encryption_key
|
.encryption_key
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
ApiError::InternalServerError("Encryption key not configured on server".to_string())
|
ApiError::InternalServerError(
|
||||||
|
"Encryption key not configured on server".to_string(),
|
||||||
|
)
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let decrypted_value = attune_common::crypto::decrypt_json(&key.value, encryption_key)
|
let decrypted_value = attune_common::crypto::decrypt_json(&key.value, encryption_key)
|
||||||
@@ -158,6 +186,9 @@ pub async fn get_key(
|
|||||||
})?;
|
})?;
|
||||||
|
|
||||||
key.value = decrypted_value;
|
key.value = decrypted_value;
|
||||||
|
} else {
|
||||||
|
key.value = serde_json::Value::Null;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let response = ApiResponse::new(KeyResponse::from(key));
|
let response = ApiResponse::new(KeyResponse::from(key));
|
||||||
@@ -195,6 +226,7 @@ pub async fn create_key(
|
|||||||
let mut ctx = AuthorizationContext::new(identity_id);
|
let mut ctx = AuthorizationContext::new(identity_id);
|
||||||
ctx.owner_identity_id = request.owner_identity;
|
ctx.owner_identity_id = request.owner_identity;
|
||||||
ctx.owner_type = Some(request.owner_type);
|
ctx.owner_type = Some(request.owner_type);
|
||||||
|
ctx.owner_ref = requested_key_owner_ref(&request);
|
||||||
ctx.encrypted = Some(request.encrypted);
|
ctx.encrypted = Some(request.encrypted);
|
||||||
ctx.target_ref = Some(request.r#ref.clone());
|
ctx.target_ref = Some(request.r#ref.clone());
|
||||||
|
|
||||||
@@ -541,6 +573,38 @@ fn key_authorization_context(identity_id: i64, key: &Key) -> AuthorizationContex
|
|||||||
ctx.target_ref = Some(key.r#ref.clone());
|
ctx.target_ref = Some(key.r#ref.clone());
|
||||||
ctx.owner_identity_id = key.owner_identity;
|
ctx.owner_identity_id = key.owner_identity;
|
||||||
ctx.owner_type = Some(key.owner_type);
|
ctx.owner_type = Some(key.owner_type);
|
||||||
|
ctx.owner_ref = key_owner_ref(
|
||||||
|
key.owner_type,
|
||||||
|
key.owner.as_deref(),
|
||||||
|
key.owner_pack_ref.as_deref(),
|
||||||
|
key.owner_action_ref.as_deref(),
|
||||||
|
key.owner_sensor_ref.as_deref(),
|
||||||
|
);
|
||||||
ctx.encrypted = Some(key.encrypted);
|
ctx.encrypted = Some(key.encrypted);
|
||||||
ctx
|
ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn requested_key_owner_ref(request: &CreateKeyRequest) -> Option<String> {
|
||||||
|
key_owner_ref(
|
||||||
|
request.owner_type,
|
||||||
|
request.owner.as_deref(),
|
||||||
|
request.owner_pack_ref.as_deref(),
|
||||||
|
request.owner_action_ref.as_deref(),
|
||||||
|
request.owner_sensor_ref.as_deref(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key_owner_ref(
|
||||||
|
owner_type: OwnerType,
|
||||||
|
owner: Option<&str>,
|
||||||
|
owner_pack_ref: Option<&str>,
|
||||||
|
owner_action_ref: Option<&str>,
|
||||||
|
owner_sensor_ref: Option<&str>,
|
||||||
|
) -> Option<String> {
|
||||||
|
match owner_type {
|
||||||
|
OwnerType::Pack => owner_pack_ref.map(str::to_string),
|
||||||
|
OwnerType::Action => owner_action_ref.map(str::to_string),
|
||||||
|
OwnerType::Sensor => owner_sensor_ref.map(str::to_string),
|
||||||
|
_ => owner.map(str::to_string),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -9,12 +9,14 @@ use std::sync::Arc;
|
|||||||
use validator::Validate;
|
use validator::Validate;
|
||||||
|
|
||||||
use attune_common::{
|
use attune_common::{
|
||||||
models::identity::{Identity, PermissionSet},
|
models::identity::{Identity, IdentityRoleAssignment},
|
||||||
rbac::{Action, AuthorizationContext, Resource},
|
rbac::{Action, AuthorizationContext, Resource},
|
||||||
repositories::{
|
repositories::{
|
||||||
identity::{
|
identity::{
|
||||||
CreateIdentityInput, CreatePermissionAssignmentInput, IdentityRepository,
|
CreateIdentityInput, CreateIdentityRoleAssignmentInput,
|
||||||
PermissionAssignmentRepository, PermissionSetRepository, UpdateIdentityInput,
|
CreatePermissionAssignmentInput, CreatePermissionSetRoleAssignmentInput,
|
||||||
|
IdentityRepository, IdentityRoleAssignmentRepository, PermissionAssignmentRepository,
|
||||||
|
PermissionSetRepository, PermissionSetRoleAssignmentRepository, UpdateIdentityInput,
|
||||||
},
|
},
|
||||||
Create, Delete, FindById, FindByRef, List, Update,
|
Create, Delete, FindById, FindByRef, List, Update,
|
||||||
},
|
},
|
||||||
@@ -26,9 +28,12 @@ use crate::{
|
|||||||
authz::{AuthorizationCheck, AuthorizationService},
|
authz::{AuthorizationCheck, AuthorizationService},
|
||||||
dto::{
|
dto::{
|
||||||
common::{PaginatedResponse, PaginationParams},
|
common::{PaginatedResponse, PaginationParams},
|
||||||
ApiResponse, CreateIdentityRequest, CreatePermissionAssignmentRequest, IdentityResponse,
|
ApiResponse, CreateIdentityRequest, CreateIdentityRoleAssignmentRequest,
|
||||||
IdentitySummary, PermissionAssignmentResponse, PermissionSetQueryParams,
|
CreatePermissionAssignmentRequest, CreatePermissionSetRoleAssignmentRequest,
|
||||||
PermissionSetSummary, SuccessResponse, UpdateIdentityRequest,
|
IdentityResponse, IdentityRoleAssignmentResponse, IdentitySummary,
|
||||||
|
PermissionAssignmentResponse, PermissionSetQueryParams,
|
||||||
|
PermissionSetRoleAssignmentResponse, PermissionSetSummary, SuccessResponse,
|
||||||
|
UpdateIdentityRequest,
|
||||||
},
|
},
|
||||||
middleware::{ApiError, ApiResult},
|
middleware::{ApiError, ApiResult},
|
||||||
state::AppState,
|
state::AppState,
|
||||||
@@ -58,16 +63,22 @@ pub async fn list_identities(
|
|||||||
let page_items = if start >= identities.len() {
|
let page_items = if start >= identities.len() {
|
||||||
Vec::new()
|
Vec::new()
|
||||||
} else {
|
} else {
|
||||||
identities[start..end]
|
identities[start..end].to_vec()
|
||||||
.iter()
|
|
||||||
.cloned()
|
|
||||||
.map(IdentitySummary::from)
|
|
||||||
.collect()
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let mut summaries = Vec::with_capacity(page_items.len());
|
||||||
|
for identity in page_items {
|
||||||
|
let role_assignments =
|
||||||
|
IdentityRoleAssignmentRepository::find_by_identity(&state.db, identity.id).await?;
|
||||||
|
let roles = role_assignments.into_iter().map(|ra| ra.role).collect();
|
||||||
|
let mut summary = IdentitySummary::from(identity);
|
||||||
|
summary.roles = roles;
|
||||||
|
summaries.push(summary);
|
||||||
|
}
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
StatusCode::OK,
|
StatusCode::OK,
|
||||||
Json(PaginatedResponse::new(page_items, &query, total)),
|
Json(PaginatedResponse::new(summaries, &query, total)),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -94,10 +105,42 @@ pub async fn get_identity(
|
|||||||
let identity = IdentityRepository::find_by_id(&state.db, identity_id)
|
let identity = IdentityRepository::find_by_id(&state.db, identity_id)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Identity '{}' not found", identity_id)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Identity '{}' not found", identity_id)))?;
|
||||||
|
let roles = IdentityRoleAssignmentRepository::find_by_identity(&state.db, identity_id).await?;
|
||||||
|
let assignments =
|
||||||
|
PermissionAssignmentRepository::find_by_identity(&state.db, identity_id).await?;
|
||||||
|
let permission_sets = PermissionSetRepository::find_by_identity(&state.db, identity_id).await?;
|
||||||
|
let permission_set_refs = permission_sets
|
||||||
|
.into_iter()
|
||||||
|
.map(|ps| (ps.id, ps.r#ref))
|
||||||
|
.collect::<std::collections::HashMap<_, _>>();
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
StatusCode::OK,
|
StatusCode::OK,
|
||||||
Json(ApiResponse::new(IdentityResponse::from(identity))),
|
Json(ApiResponse::new(IdentityResponse {
|
||||||
|
id: identity.id,
|
||||||
|
login: identity.login,
|
||||||
|
display_name: identity.display_name,
|
||||||
|
frozen: identity.frozen,
|
||||||
|
attributes: identity.attributes,
|
||||||
|
roles: roles
|
||||||
|
.into_iter()
|
||||||
|
.map(IdentityRoleAssignmentResponse::from)
|
||||||
|
.collect(),
|
||||||
|
direct_permissions: assignments
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|assignment| {
|
||||||
|
permission_set_refs.get(&assignment.permset).cloned().map(
|
||||||
|
|permission_set_ref| PermissionAssignmentResponse {
|
||||||
|
id: assignment.id,
|
||||||
|
identity_id: assignment.identity,
|
||||||
|
permission_set_id: assignment.permset,
|
||||||
|
permission_set_ref,
|
||||||
|
created: assignment.created,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
})),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -180,6 +223,7 @@ pub async fn update_identity(
|
|||||||
display_name: request.display_name,
|
display_name: request.display_name,
|
||||||
password_hash,
|
password_hash,
|
||||||
attributes: request.attributes,
|
attributes: request.attributes,
|
||||||
|
frozen: request.frozen,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
@@ -257,10 +301,33 @@ pub async fn list_permission_sets(
|
|||||||
permission_sets.retain(|ps| ps.pack_ref.as_deref() == Some(pack_ref.as_str()));
|
permission_sets.retain(|ps| ps.pack_ref.as_deref() == Some(pack_ref.as_str()));
|
||||||
}
|
}
|
||||||
|
|
||||||
let response: Vec<PermissionSetSummary> = permission_sets
|
let mut response = Vec::with_capacity(permission_sets.len());
|
||||||
|
for permission_set in permission_sets {
|
||||||
|
let permission_set_ref = permission_set.r#ref.clone();
|
||||||
|
let roles = PermissionSetRoleAssignmentRepository::find_by_permission_set(
|
||||||
|
&state.db,
|
||||||
|
permission_set.id,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
response.push(PermissionSetSummary {
|
||||||
|
id: permission_set.id,
|
||||||
|
r#ref: permission_set.r#ref,
|
||||||
|
pack_ref: permission_set.pack_ref,
|
||||||
|
label: permission_set.label,
|
||||||
|
description: permission_set.description,
|
||||||
|
grants: permission_set.grants,
|
||||||
|
roles: roles
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(PermissionSetSummary::from)
|
.map(|assignment| PermissionSetRoleAssignmentResponse {
|
||||||
.collect();
|
id: assignment.id,
|
||||||
|
permission_set_id: assignment.permset,
|
||||||
|
permission_set_ref: Some(permission_set_ref.clone()),
|
||||||
|
role: assignment.role,
|
||||||
|
created: assignment.created,
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
Ok((StatusCode::OK, Json(response)))
|
Ok((StatusCode::OK, Json(response)))
|
||||||
}
|
}
|
||||||
@@ -412,6 +479,229 @@ pub async fn delete_permission_assignment(
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[utoipa::path(
|
||||||
|
post,
|
||||||
|
path = "/api/v1/identities/{id}/roles",
|
||||||
|
tag = "permissions",
|
||||||
|
params(
|
||||||
|
("id" = i64, Path, description = "Identity ID")
|
||||||
|
),
|
||||||
|
request_body = CreateIdentityRoleAssignmentRequest,
|
||||||
|
responses(
|
||||||
|
(status = 201, description = "Identity role assignment created", body = inline(ApiResponse<IdentityRoleAssignmentResponse>)),
|
||||||
|
(status = 404, description = "Identity not found")
|
||||||
|
),
|
||||||
|
security(("bearer_auth" = []))
|
||||||
|
)]
|
||||||
|
pub async fn create_identity_role_assignment(
|
||||||
|
State(state): State<Arc<AppState>>,
|
||||||
|
RequireAuth(user): RequireAuth,
|
||||||
|
Path(identity_id): Path<i64>,
|
||||||
|
Json(request): Json<CreateIdentityRoleAssignmentRequest>,
|
||||||
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
|
||||||
|
request.validate()?;
|
||||||
|
|
||||||
|
IdentityRepository::find_by_id(&state.db, identity_id)
|
||||||
|
.await?
|
||||||
|
.ok_or_else(|| ApiError::NotFound(format!("Identity '{}' not found", identity_id)))?;
|
||||||
|
|
||||||
|
let assignment = IdentityRoleAssignmentRepository::create(
|
||||||
|
&state.db,
|
||||||
|
CreateIdentityRoleAssignmentInput {
|
||||||
|
identity: identity_id,
|
||||||
|
role: request.role,
|
||||||
|
source: "manual".to_string(),
|
||||||
|
managed: false,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
StatusCode::CREATED,
|
||||||
|
Json(ApiResponse::new(IdentityRoleAssignmentResponse::from(
|
||||||
|
assignment,
|
||||||
|
))),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[utoipa::path(
|
||||||
|
delete,
|
||||||
|
path = "/api/v1/identities/roles/{id}",
|
||||||
|
tag = "permissions",
|
||||||
|
params(
|
||||||
|
("id" = i64, Path, description = "Identity role assignment ID")
|
||||||
|
),
|
||||||
|
responses(
|
||||||
|
(status = 200, description = "Identity role assignment deleted", body = inline(ApiResponse<SuccessResponse>)),
|
||||||
|
(status = 404, description = "Identity role assignment not found")
|
||||||
|
),
|
||||||
|
security(("bearer_auth" = []))
|
||||||
|
)]
|
||||||
|
pub async fn delete_identity_role_assignment(
|
||||||
|
State(state): State<Arc<AppState>>,
|
||||||
|
RequireAuth(user): RequireAuth,
|
||||||
|
Path(assignment_id): Path<i64>,
|
||||||
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
|
||||||
|
|
||||||
|
let assignment = IdentityRoleAssignmentRepository::find_by_id(&state.db, assignment_id)
|
||||||
|
.await?
|
||||||
|
.ok_or_else(|| {
|
||||||
|
ApiError::NotFound(format!(
|
||||||
|
"Identity role assignment '{}' not found",
|
||||||
|
assignment_id
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
if assignment.managed {
|
||||||
|
return Err(ApiError::BadRequest(
|
||||||
|
"Managed role assignments must be updated through the identity provider sync"
|
||||||
|
.to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
IdentityRoleAssignmentRepository::delete(&state.db, assignment_id).await?;
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
StatusCode::OK,
|
||||||
|
Json(ApiResponse::new(SuccessResponse::new(
|
||||||
|
"Identity role assignment deleted successfully",
|
||||||
|
))),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[utoipa::path(
|
||||||
|
post,
|
||||||
|
path = "/api/v1/permissions/sets/{id}/roles",
|
||||||
|
tag = "permissions",
|
||||||
|
params(
|
||||||
|
("id" = i64, Path, description = "Permission set ID")
|
||||||
|
),
|
||||||
|
request_body = CreatePermissionSetRoleAssignmentRequest,
|
||||||
|
responses(
|
||||||
|
(status = 201, description = "Permission set role assignment created", body = inline(ApiResponse<PermissionSetRoleAssignmentResponse>)),
|
||||||
|
(status = 404, description = "Permission set not found")
|
||||||
|
),
|
||||||
|
security(("bearer_auth" = []))
|
||||||
|
)]
|
||||||
|
pub async fn create_permission_set_role_assignment(
|
||||||
|
State(state): State<Arc<AppState>>,
|
||||||
|
RequireAuth(user): RequireAuth,
|
||||||
|
Path(permission_set_id): Path<i64>,
|
||||||
|
Json(request): Json<CreatePermissionSetRoleAssignmentRequest>,
|
||||||
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
|
||||||
|
request.validate()?;
|
||||||
|
|
||||||
|
let permission_set = PermissionSetRepository::find_by_id(&state.db, permission_set_id)
|
||||||
|
.await?
|
||||||
|
.ok_or_else(|| {
|
||||||
|
ApiError::NotFound(format!("Permission set '{}' not found", permission_set_id))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let assignment = PermissionSetRoleAssignmentRepository::create(
|
||||||
|
&state.db,
|
||||||
|
CreatePermissionSetRoleAssignmentInput {
|
||||||
|
permset: permission_set_id,
|
||||||
|
role: request.role,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
StatusCode::CREATED,
|
||||||
|
Json(ApiResponse::new(PermissionSetRoleAssignmentResponse {
|
||||||
|
id: assignment.id,
|
||||||
|
permission_set_id: assignment.permset,
|
||||||
|
permission_set_ref: Some(permission_set.r#ref),
|
||||||
|
role: assignment.role,
|
||||||
|
created: assignment.created,
|
||||||
|
})),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[utoipa::path(
|
||||||
|
delete,
|
||||||
|
path = "/api/v1/permissions/sets/roles/{id}",
|
||||||
|
tag = "permissions",
|
||||||
|
params(
|
||||||
|
("id" = i64, Path, description = "Permission set role assignment ID")
|
||||||
|
),
|
||||||
|
responses(
|
||||||
|
(status = 200, description = "Permission set role assignment deleted", body = inline(ApiResponse<SuccessResponse>)),
|
||||||
|
(status = 404, description = "Permission set role assignment not found")
|
||||||
|
),
|
||||||
|
security(("bearer_auth" = []))
|
||||||
|
)]
|
||||||
|
pub async fn delete_permission_set_role_assignment(
|
||||||
|
State(state): State<Arc<AppState>>,
|
||||||
|
RequireAuth(user): RequireAuth,
|
||||||
|
Path(assignment_id): Path<i64>,
|
||||||
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
|
||||||
|
|
||||||
|
PermissionSetRoleAssignmentRepository::find_by_id(&state.db, assignment_id)
|
||||||
|
.await?
|
||||||
|
.ok_or_else(|| {
|
||||||
|
ApiError::NotFound(format!(
|
||||||
|
"Permission set role assignment '{}' not found",
|
||||||
|
assignment_id
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
PermissionSetRoleAssignmentRepository::delete(&state.db, assignment_id).await?;
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
StatusCode::OK,
|
||||||
|
Json(ApiResponse::new(SuccessResponse::new(
|
||||||
|
"Permission set role assignment deleted successfully",
|
||||||
|
))),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[utoipa::path(
|
||||||
|
post,
|
||||||
|
path = "/api/v1/identities/{id}/freeze",
|
||||||
|
tag = "permissions",
|
||||||
|
params(
|
||||||
|
("id" = i64, Path, description = "Identity ID")
|
||||||
|
),
|
||||||
|
responses(
|
||||||
|
(status = 200, description = "Identity frozen", body = inline(ApiResponse<SuccessResponse>)),
|
||||||
|
(status = 404, description = "Identity not found")
|
||||||
|
),
|
||||||
|
security(("bearer_auth" = []))
|
||||||
|
)]
|
||||||
|
pub async fn freeze_identity(
|
||||||
|
State(state): State<Arc<AppState>>,
|
||||||
|
RequireAuth(user): RequireAuth,
|
||||||
|
Path(identity_id): Path<i64>,
|
||||||
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
set_identity_frozen(&state, &user, identity_id, true).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[utoipa::path(
|
||||||
|
post,
|
||||||
|
path = "/api/v1/identities/{id}/unfreeze",
|
||||||
|
tag = "permissions",
|
||||||
|
params(
|
||||||
|
("id" = i64, Path, description = "Identity ID")
|
||||||
|
),
|
||||||
|
responses(
|
||||||
|
(status = 200, description = "Identity unfrozen", body = inline(ApiResponse<SuccessResponse>)),
|
||||||
|
(status = 404, description = "Identity not found")
|
||||||
|
),
|
||||||
|
security(("bearer_auth" = []))
|
||||||
|
)]
|
||||||
|
pub async fn unfreeze_identity(
|
||||||
|
State(state): State<Arc<AppState>>,
|
||||||
|
RequireAuth(user): RequireAuth,
|
||||||
|
Path(identity_id): Path<i64>,
|
||||||
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
set_identity_frozen(&state, &user, identity_id, false).await
|
||||||
|
}
|
||||||
|
|
||||||
pub fn routes() -> Router<Arc<AppState>> {
|
pub fn routes() -> Router<Arc<AppState>> {
|
||||||
Router::new()
|
Router::new()
|
||||||
.route("/identities", get(list_identities).post(create_identity))
|
.route("/identities", get(list_identities).post(create_identity))
|
||||||
@@ -421,11 +711,29 @@ pub fn routes() -> Router<Arc<AppState>> {
|
|||||||
.put(update_identity)
|
.put(update_identity)
|
||||||
.delete(delete_identity),
|
.delete(delete_identity),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/identities/{id}/roles",
|
||||||
|
post(create_identity_role_assignment),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/identities/{id}/permissions",
|
"/identities/{id}/permissions",
|
||||||
get(list_identity_permissions),
|
get(list_identity_permissions),
|
||||||
)
|
)
|
||||||
|
.route("/identities/{id}/freeze", post(freeze_identity))
|
||||||
|
.route("/identities/{id}/unfreeze", post(unfreeze_identity))
|
||||||
|
.route(
|
||||||
|
"/identities/roles/{id}",
|
||||||
|
delete(delete_identity_role_assignment),
|
||||||
|
)
|
||||||
.route("/permissions/sets", get(list_permission_sets))
|
.route("/permissions/sets", get(list_permission_sets))
|
||||||
|
.route(
|
||||||
|
"/permissions/sets/{id}/roles",
|
||||||
|
post(create_permission_set_role_assignment),
|
||||||
|
)
|
||||||
|
.route(
|
||||||
|
"/permissions/sets/roles/{id}",
|
||||||
|
delete(delete_permission_set_role_assignment),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/permissions/assignments",
|
"/permissions/assignments",
|
||||||
post(create_permission_assignment),
|
post(create_permission_assignment),
|
||||||
@@ -488,20 +796,82 @@ impl From<Identity> for IdentitySummary {
|
|||||||
id: value.id,
|
id: value.id,
|
||||||
login: value.login,
|
login: value.login,
|
||||||
display_name: value.display_name,
|
display_name: value.display_name,
|
||||||
|
frozen: value.frozen,
|
||||||
attributes: value.attributes,
|
attributes: value.attributes,
|
||||||
|
roles: Vec::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<PermissionSet> for PermissionSetSummary {
|
impl From<IdentityRoleAssignment> for IdentityRoleAssignmentResponse {
|
||||||
fn from(value: PermissionSet) -> Self {
|
fn from(value: IdentityRoleAssignment) -> Self {
|
||||||
Self {
|
Self {
|
||||||
id: value.id,
|
id: value.id,
|
||||||
r#ref: value.r#ref,
|
identity_id: value.identity,
|
||||||
pack_ref: value.pack_ref,
|
role: value.role,
|
||||||
label: value.label,
|
source: value.source,
|
||||||
description: value.description,
|
managed: value.managed,
|
||||||
grants: value.grants,
|
created: value.created,
|
||||||
|
updated: value.updated,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<Identity> for IdentityResponse {
|
||||||
|
fn from(value: Identity) -> Self {
|
||||||
|
Self {
|
||||||
|
id: value.id,
|
||||||
|
login: value.login,
|
||||||
|
display_name: value.display_name,
|
||||||
|
frozen: value.frozen,
|
||||||
|
attributes: value.attributes,
|
||||||
|
roles: Vec::new(),
|
||||||
|
direct_permissions: Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn set_identity_frozen(
|
||||||
|
state: &Arc<AppState>,
|
||||||
|
user: &crate::auth::middleware::AuthenticatedUser,
|
||||||
|
identity_id: i64,
|
||||||
|
frozen: bool,
|
||||||
|
) -> ApiResult<impl IntoResponse> {
|
||||||
|
authorize_permissions(state, user, Resource::Identities, Action::Update).await?;
|
||||||
|
|
||||||
|
let caller_identity_id = user
|
||||||
|
.identity_id()
|
||||||
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
|
if caller_identity_id == identity_id && frozen {
|
||||||
|
return Err(ApiError::BadRequest(
|
||||||
|
"Refusing to freeze the currently authenticated identity".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
IdentityRepository::find_by_id(&state.db, identity_id)
|
||||||
|
.await?
|
||||||
|
.ok_or_else(|| ApiError::NotFound(format!("Identity '{}' not found", identity_id)))?;
|
||||||
|
|
||||||
|
IdentityRepository::update(
|
||||||
|
&state.db,
|
||||||
|
identity_id,
|
||||||
|
UpdateIdentityInput {
|
||||||
|
display_name: None,
|
||||||
|
password_hash: None,
|
||||||
|
attributes: None,
|
||||||
|
frozen: Some(frozen),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let message = if frozen {
|
||||||
|
"Identity frozen successfully"
|
||||||
|
} else {
|
||||||
|
"Identity unfrozen successfully"
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
StatusCode::OK,
|
||||||
|
Json(ApiResponse::new(SuccessResponse::new(message))),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ use attune_common::repositories::{
|
|||||||
pack::PackRepository,
|
pack::PackRepository,
|
||||||
rule::{CreateRuleInput, RuleRepository, RuleSearchFilters, UpdateRuleInput},
|
rule::{CreateRuleInput, RuleRepository, RuleSearchFilters, UpdateRuleInput},
|
||||||
trigger::TriggerRepository,
|
trigger::TriggerRepository,
|
||||||
Create, Delete, FindByRef, Update,
|
Create, Delete, FindByRef, Patch, Update,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -474,7 +474,7 @@ pub async fn update_rule(
|
|||||||
// Create update input
|
// Create update input
|
||||||
let update_input = UpdateRuleInput {
|
let update_input = UpdateRuleInput {
|
||||||
label: request.label,
|
label: request.label,
|
||||||
description: request.description,
|
description: request.description.map(Patch::Set),
|
||||||
conditions: request.conditions,
|
conditions: request.conditions,
|
||||||
action_params: request.action_params,
|
action_params: request.action_params,
|
||||||
trigger_params: request.trigger_params,
|
trigger_params: request.trigger_params,
|
||||||
|
|||||||
@@ -724,7 +724,7 @@ pub async fn update_sensor(
|
|||||||
// Create update input
|
// Create update input
|
||||||
let update_input = UpdateSensorInput {
|
let update_input = UpdateSensorInput {
|
||||||
label: request.label,
|
label: request.label,
|
||||||
description: request.description,
|
description: request.description.map(Patch::Set),
|
||||||
entrypoint: request.entrypoint,
|
entrypoint: request.entrypoint,
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_ref: None,
|
runtime_ref: None,
|
||||||
|
|||||||
@@ -20,8 +20,11 @@ use attune_common::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use attune_common::rbac::{Action, AuthorizationContext, Resource};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
auth::middleware::RequireAuth,
|
auth::middleware::RequireAuth,
|
||||||
|
authz::{AuthorizationCheck, AuthorizationService},
|
||||||
dto::{
|
dto::{
|
||||||
trigger::TriggerResponse,
|
trigger::TriggerResponse,
|
||||||
webhook::{WebhookReceiverRequest, WebhookReceiverResponse},
|
webhook::{WebhookReceiverRequest, WebhookReceiverResponse},
|
||||||
@@ -170,7 +173,7 @@ fn get_webhook_config_array(
|
|||||||
)]
|
)]
|
||||||
pub async fn enable_webhook(
|
pub async fn enable_webhook(
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
Path(trigger_ref): Path<String>,
|
Path(trigger_ref): Path<String>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
// First, find the trigger by ref to get its ID
|
// First, find the trigger by ref to get its ID
|
||||||
@@ -179,6 +182,26 @@ pub async fn enable_webhook(
|
|||||||
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
|
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
|
||||||
|
|
||||||
|
if user.claims.token_type == crate::auth::jwt::TokenType::Access {
|
||||||
|
let identity_id = user
|
||||||
|
.identity_id()
|
||||||
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
|
let authz = AuthorizationService::new(state.db.clone());
|
||||||
|
let mut ctx = AuthorizationContext::new(identity_id);
|
||||||
|
ctx.target_ref = Some(trigger.r#ref.clone());
|
||||||
|
ctx.pack_ref = trigger.pack_ref.clone();
|
||||||
|
authz
|
||||||
|
.authorize(
|
||||||
|
&user,
|
||||||
|
AuthorizationCheck {
|
||||||
|
resource: Resource::Triggers,
|
||||||
|
action: Action::Update,
|
||||||
|
context: ctx,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
// Enable webhooks for this trigger
|
// Enable webhooks for this trigger
|
||||||
let _webhook_info = TriggerRepository::enable_webhook(&state.db, trigger.id)
|
let _webhook_info = TriggerRepository::enable_webhook(&state.db, trigger.id)
|
||||||
.await
|
.await
|
||||||
@@ -213,7 +236,7 @@ pub async fn enable_webhook(
|
|||||||
)]
|
)]
|
||||||
pub async fn disable_webhook(
|
pub async fn disable_webhook(
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
Path(trigger_ref): Path<String>,
|
Path(trigger_ref): Path<String>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
// First, find the trigger by ref to get its ID
|
// First, find the trigger by ref to get its ID
|
||||||
@@ -222,6 +245,26 @@ pub async fn disable_webhook(
|
|||||||
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
|
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
|
||||||
|
|
||||||
|
if user.claims.token_type == crate::auth::jwt::TokenType::Access {
|
||||||
|
let identity_id = user
|
||||||
|
.identity_id()
|
||||||
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
|
let authz = AuthorizationService::new(state.db.clone());
|
||||||
|
let mut ctx = AuthorizationContext::new(identity_id);
|
||||||
|
ctx.target_ref = Some(trigger.r#ref.clone());
|
||||||
|
ctx.pack_ref = trigger.pack_ref.clone();
|
||||||
|
authz
|
||||||
|
.authorize(
|
||||||
|
&user,
|
||||||
|
AuthorizationCheck {
|
||||||
|
resource: Resource::Triggers,
|
||||||
|
action: Action::Update,
|
||||||
|
context: ctx,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
// Disable webhooks for this trigger
|
// Disable webhooks for this trigger
|
||||||
TriggerRepository::disable_webhook(&state.db, trigger.id)
|
TriggerRepository::disable_webhook(&state.db, trigger.id)
|
||||||
.await
|
.await
|
||||||
@@ -257,7 +300,7 @@ pub async fn disable_webhook(
|
|||||||
)]
|
)]
|
||||||
pub async fn regenerate_webhook_key(
|
pub async fn regenerate_webhook_key(
|
||||||
State(state): State<Arc<AppState>>,
|
State(state): State<Arc<AppState>>,
|
||||||
RequireAuth(_user): RequireAuth,
|
RequireAuth(user): RequireAuth,
|
||||||
Path(trigger_ref): Path<String>,
|
Path(trigger_ref): Path<String>,
|
||||||
) -> ApiResult<impl IntoResponse> {
|
) -> ApiResult<impl IntoResponse> {
|
||||||
// First, find the trigger by ref to get its ID
|
// First, find the trigger by ref to get its ID
|
||||||
@@ -266,6 +309,26 @@ pub async fn regenerate_webhook_key(
|
|||||||
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
|
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
|
||||||
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
|
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
|
||||||
|
|
||||||
|
if user.claims.token_type == crate::auth::jwt::TokenType::Access {
|
||||||
|
let identity_id = user
|
||||||
|
.identity_id()
|
||||||
|
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
|
||||||
|
let authz = AuthorizationService::new(state.db.clone());
|
||||||
|
let mut ctx = AuthorizationContext::new(identity_id);
|
||||||
|
ctx.target_ref = Some(trigger.r#ref.clone());
|
||||||
|
ctx.pack_ref = trigger.pack_ref.clone();
|
||||||
|
authz
|
||||||
|
.authorize(
|
||||||
|
&user,
|
||||||
|
AuthorizationCheck {
|
||||||
|
resource: Resource::Triggers,
|
||||||
|
action: Action::Update,
|
||||||
|
context: ctx,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
// Check if webhooks are enabled
|
// Check if webhooks are enabled
|
||||||
if !trigger.webhook_enabled {
|
if !trigger.webhook_enabled {
|
||||||
return Err(ApiError::BadRequest(
|
return Err(ApiError::BadRequest(
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ use attune_common::repositories::{
|
|||||||
CreateWorkflowDefinitionInput, UpdateWorkflowDefinitionInput, WorkflowDefinitionRepository,
|
CreateWorkflowDefinitionInput, UpdateWorkflowDefinitionInput, WorkflowDefinitionRepository,
|
||||||
WorkflowSearchFilters,
|
WorkflowSearchFilters,
|
||||||
},
|
},
|
||||||
Create, Delete, FindByRef, Update,
|
Create, Delete, FindByRef, Patch, Update,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -217,7 +217,7 @@ pub async fn create_workflow(
|
|||||||
pack.id,
|
pack.id,
|
||||||
&pack.r#ref,
|
&pack.r#ref,
|
||||||
&request.label,
|
&request.label,
|
||||||
&request.description.clone().unwrap_or_default(),
|
request.description.as_deref(),
|
||||||
"workflow",
|
"workflow",
|
||||||
request.param_schema.as_ref(),
|
request.param_schema.as_ref(),
|
||||||
request.out_schema.as_ref(),
|
request.out_schema.as_ref(),
|
||||||
@@ -416,7 +416,7 @@ pub async fn save_workflow_file(
|
|||||||
pack.id,
|
pack.id,
|
||||||
&pack.r#ref,
|
&pack.r#ref,
|
||||||
&request.label,
|
&request.label,
|
||||||
&request.description.clone().unwrap_or_default(),
|
request.description.as_deref(),
|
||||||
&entrypoint,
|
&entrypoint,
|
||||||
request.param_schema.as_ref(),
|
request.param_schema.as_ref(),
|
||||||
request.out_schema.as_ref(),
|
request.out_schema.as_ref(),
|
||||||
@@ -499,7 +499,7 @@ pub async fn update_workflow_file(
|
|||||||
pack.id,
|
pack.id,
|
||||||
&pack.r#ref,
|
&pack.r#ref,
|
||||||
&request.label,
|
&request.label,
|
||||||
&request.description.unwrap_or_default(),
|
request.description.as_deref(),
|
||||||
&entrypoint,
|
&entrypoint,
|
||||||
request.param_schema.as_ref(),
|
request.param_schema.as_ref(),
|
||||||
request.out_schema.as_ref(),
|
request.out_schema.as_ref(),
|
||||||
@@ -702,7 +702,7 @@ async fn create_companion_action(
|
|||||||
pack_id: i64,
|
pack_id: i64,
|
||||||
pack_ref: &str,
|
pack_ref: &str,
|
||||||
label: &str,
|
label: &str,
|
||||||
description: &str,
|
description: Option<&str>,
|
||||||
entrypoint: &str,
|
entrypoint: &str,
|
||||||
param_schema: Option<&serde_json::Value>,
|
param_schema: Option<&serde_json::Value>,
|
||||||
out_schema: Option<&serde_json::Value>,
|
out_schema: Option<&serde_json::Value>,
|
||||||
@@ -713,7 +713,7 @@ async fn create_companion_action(
|
|||||||
pack: pack_id,
|
pack: pack_id,
|
||||||
pack_ref: pack_ref.to_string(),
|
pack_ref: pack_ref.to_string(),
|
||||||
label: label.to_string(),
|
label: label.to_string(),
|
||||||
description: description.to_string(),
|
description: description.map(|s| s.to_string()),
|
||||||
entrypoint: entrypoint.to_string(),
|
entrypoint: entrypoint.to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
@@ -787,7 +787,7 @@ async fn update_companion_action(
|
|||||||
if let Some(action) = existing_action {
|
if let Some(action) = existing_action {
|
||||||
let update_input = UpdateActionInput {
|
let update_input = UpdateActionInput {
|
||||||
label: label.map(|s| s.to_string()),
|
label: label.map(|s| s.to_string()),
|
||||||
description: description.map(|s| s.to_string()),
|
description: description.map(|s| Patch::Set(s.to_string())),
|
||||||
entrypoint: None,
|
entrypoint: None,
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
@@ -838,7 +838,7 @@ async fn ensure_companion_action(
|
|||||||
pack_id: i64,
|
pack_id: i64,
|
||||||
pack_ref: &str,
|
pack_ref: &str,
|
||||||
label: &str,
|
label: &str,
|
||||||
description: &str,
|
description: Option<&str>,
|
||||||
entrypoint: &str,
|
entrypoint: &str,
|
||||||
param_schema: Option<&serde_json::Value>,
|
param_schema: Option<&serde_json::Value>,
|
||||||
out_schema: Option<&serde_json::Value>,
|
out_schema: Option<&serde_json::Value>,
|
||||||
@@ -853,7 +853,10 @@ async fn ensure_companion_action(
|
|||||||
// Update existing companion action
|
// Update existing companion action
|
||||||
let update_input = UpdateActionInput {
|
let update_input = UpdateActionInput {
|
||||||
label: Some(label.to_string()),
|
label: Some(label.to_string()),
|
||||||
description: Some(description.to_string()),
|
description: Some(match description {
|
||||||
|
Some(description) => Patch::Set(description.to_string()),
|
||||||
|
None => Patch::Clear,
|
||||||
|
}),
|
||||||
entrypoint: Some(entrypoint.to_string()),
|
entrypoint: Some(entrypoint.to_string()),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -362,7 +362,7 @@ mod tests {
|
|||||||
pack: 1,
|
pack: 1,
|
||||||
pack_ref: "test".to_string(),
|
pack_ref: "test".to_string(),
|
||||||
label: "Test Action".to_string(),
|
label: "Test Action".to_string(),
|
||||||
description: "Test action".to_string(),
|
description: Some("Test action".to_string()),
|
||||||
entrypoint: "test.sh".to_string(),
|
entrypoint: "test.sh".to_string(),
|
||||||
runtime: Some(1),
|
runtime: Some(1),
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -241,6 +241,7 @@ impl TestContext {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Create and authenticate a test user
|
/// Create and authenticate a test user
|
||||||
|
#[allow(dead_code)]
|
||||||
pub async fn with_auth(mut self) -> Result<Self> {
|
pub async fn with_auth(mut self) -> Result<Self> {
|
||||||
// Generate unique username to avoid conflicts in parallel tests
|
// Generate unique username to avoid conflicts in parallel tests
|
||||||
let unique_id = uuid::Uuid::new_v4().to_string().replace("-", "")[..8].to_string();
|
let unique_id = uuid::Uuid::new_v4().to_string().replace("-", "")[..8].to_string();
|
||||||
@@ -394,6 +395,7 @@ impl TestContext {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Get authenticated token
|
/// Get authenticated token
|
||||||
|
#[allow(dead_code)]
|
||||||
pub fn token(&self) -> Option<&str> {
|
pub fn token(&self) -> Option<&str> {
|
||||||
self.token.as_deref()
|
self.token.as_deref()
|
||||||
}
|
}
|
||||||
@@ -495,7 +497,7 @@ pub async fn create_test_action(pool: &PgPool, pack_id: i64, ref_name: &str) ->
|
|||||||
pack: pack_id,
|
pack: pack_id,
|
||||||
pack_ref: format!("pack_{}", pack_id),
|
pack_ref: format!("pack_{}", pack_id),
|
||||||
label: format!("Test Action {}", ref_name),
|
label: format!("Test Action {}", ref_name),
|
||||||
description: format!("Test action for {}", ref_name),
|
description: Some(format!("Test action for {}", ref_name)),
|
||||||
entrypoint: "main.py".to_string(),
|
entrypoint: "main.py".to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
276
crates/api/tests/rbac_scoped_resources_api_tests.rs
Normal file
276
crates/api/tests/rbac_scoped_resources_api_tests.rs
Normal file
@@ -0,0 +1,276 @@
|
|||||||
|
use axum::http::StatusCode;
|
||||||
|
use helpers::*;
|
||||||
|
use serde_json::json;
|
||||||
|
|
||||||
|
use attune_common::{
|
||||||
|
models::enums::{ArtifactType, ArtifactVisibility, OwnerType, RetentionPolicyType},
|
||||||
|
repositories::{
|
||||||
|
artifact::{ArtifactRepository, CreateArtifactInput},
|
||||||
|
identity::{
|
||||||
|
CreatePermissionAssignmentInput, CreatePermissionSetInput, IdentityRepository,
|
||||||
|
PermissionAssignmentRepository, PermissionSetRepository,
|
||||||
|
},
|
||||||
|
key::{CreateKeyInput, KeyRepository},
|
||||||
|
Create,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
mod helpers;
|
||||||
|
|
||||||
|
async fn register_scoped_user(
|
||||||
|
ctx: &TestContext,
|
||||||
|
login: &str,
|
||||||
|
grants: serde_json::Value,
|
||||||
|
) -> Result<String> {
|
||||||
|
let response = ctx
|
||||||
|
.post(
|
||||||
|
"/auth/register",
|
||||||
|
json!({
|
||||||
|
"login": login,
|
||||||
|
"password": "TestPassword123!",
|
||||||
|
"display_name": format!("Scoped User {}", login),
|
||||||
|
}),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
assert_eq!(response.status(), StatusCode::CREATED);
|
||||||
|
let body: serde_json::Value = response.json().await?;
|
||||||
|
let token = body["data"]["access_token"]
|
||||||
|
.as_str()
|
||||||
|
.expect("missing access token")
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
let identity = IdentityRepository::find_by_login(&ctx.pool, login)
|
||||||
|
.await?
|
||||||
|
.expect("registered identity should exist");
|
||||||
|
|
||||||
|
let permset = PermissionSetRepository::create(
|
||||||
|
&ctx.pool,
|
||||||
|
CreatePermissionSetInput {
|
||||||
|
r#ref: format!("test.scoped_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
pack: None,
|
||||||
|
pack_ref: None,
|
||||||
|
label: Some("Scoped Test Permission Set".to_string()),
|
||||||
|
description: Some("Scoped test grants".to_string()),
|
||||||
|
grants,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
PermissionAssignmentRepository::create(
|
||||||
|
&ctx.pool,
|
||||||
|
CreatePermissionAssignmentInput {
|
||||||
|
identity: identity.id,
|
||||||
|
permset: permset.id,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(token)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore = "integration test — requires database"]
|
||||||
|
async fn test_pack_scoped_key_permissions_enforce_owner_refs() {
|
||||||
|
let ctx = TestContext::new()
|
||||||
|
.await
|
||||||
|
.expect("Failed to create test context");
|
||||||
|
|
||||||
|
let token = register_scoped_user(
|
||||||
|
&ctx,
|
||||||
|
&format!("scoped_keys_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
json!([
|
||||||
|
{
|
||||||
|
"resource": "keys",
|
||||||
|
"actions": ["read"],
|
||||||
|
"constraints": {
|
||||||
|
"owner_types": ["pack"],
|
||||||
|
"owner_refs": ["python_example"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to register scoped user");
|
||||||
|
|
||||||
|
KeyRepository::create(
|
||||||
|
&ctx.pool,
|
||||||
|
CreateKeyInput {
|
||||||
|
r#ref: format!("python_example_key_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
owner_type: OwnerType::Pack,
|
||||||
|
owner: Some("python_example".to_string()),
|
||||||
|
owner_identity: None,
|
||||||
|
owner_pack: None,
|
||||||
|
owner_pack_ref: Some("python_example".to_string()),
|
||||||
|
owner_action: None,
|
||||||
|
owner_action_ref: None,
|
||||||
|
owner_sensor: None,
|
||||||
|
owner_sensor_ref: None,
|
||||||
|
name: "Python Example Key".to_string(),
|
||||||
|
encrypted: false,
|
||||||
|
encryption_key_hash: None,
|
||||||
|
value: json!("allowed"),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create scoped key");
|
||||||
|
|
||||||
|
let blocked_key = KeyRepository::create(
|
||||||
|
&ctx.pool,
|
||||||
|
CreateKeyInput {
|
||||||
|
r#ref: format!("other_pack_key_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
owner_type: OwnerType::Pack,
|
||||||
|
owner: Some("other_pack".to_string()),
|
||||||
|
owner_identity: None,
|
||||||
|
owner_pack: None,
|
||||||
|
owner_pack_ref: Some("other_pack".to_string()),
|
||||||
|
owner_action: None,
|
||||||
|
owner_action_ref: None,
|
||||||
|
owner_sensor: None,
|
||||||
|
owner_sensor_ref: None,
|
||||||
|
name: "Other Pack Key".to_string(),
|
||||||
|
encrypted: false,
|
||||||
|
encryption_key_hash: None,
|
||||||
|
value: json!("blocked"),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create blocked key");
|
||||||
|
|
||||||
|
let allowed_list = ctx
|
||||||
|
.get("/api/v1/keys", Some(&token))
|
||||||
|
.await
|
||||||
|
.expect("Failed to list keys");
|
||||||
|
assert_eq!(allowed_list.status(), StatusCode::OK);
|
||||||
|
let allowed_body: serde_json::Value = allowed_list.json().await.expect("Invalid key list");
|
||||||
|
assert_eq!(
|
||||||
|
allowed_body["data"]
|
||||||
|
.as_array()
|
||||||
|
.expect("expected list")
|
||||||
|
.len(),
|
||||||
|
1
|
||||||
|
);
|
||||||
|
assert_eq!(allowed_body["data"][0]["owner"], "python_example");
|
||||||
|
|
||||||
|
let blocked_get = ctx
|
||||||
|
.get(&format!("/api/v1/keys/{}", blocked_key.r#ref), Some(&token))
|
||||||
|
.await
|
||||||
|
.expect("Failed to fetch blocked key");
|
||||||
|
assert_eq!(blocked_get.status(), StatusCode::NOT_FOUND);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore = "integration test — requires database"]
|
||||||
|
async fn test_pack_scoped_artifact_permissions_enforce_owner_refs() {
|
||||||
|
let ctx = TestContext::new()
|
||||||
|
.await
|
||||||
|
.expect("Failed to create test context");
|
||||||
|
|
||||||
|
let token = register_scoped_user(
|
||||||
|
&ctx,
|
||||||
|
&format!("scoped_artifacts_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
json!([
|
||||||
|
{
|
||||||
|
"resource": "artifacts",
|
||||||
|
"actions": ["read", "create"],
|
||||||
|
"constraints": {
|
||||||
|
"owner_types": ["pack"],
|
||||||
|
"owner_refs": ["python_example"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to register scoped user");
|
||||||
|
|
||||||
|
let allowed_artifact = ArtifactRepository::create(
|
||||||
|
&ctx.pool,
|
||||||
|
CreateArtifactInput {
|
||||||
|
r#ref: format!("python_example.allowed_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
scope: OwnerType::Pack,
|
||||||
|
owner: "python_example".to_string(),
|
||||||
|
r#type: ArtifactType::FileText,
|
||||||
|
visibility: ArtifactVisibility::Private,
|
||||||
|
retention_policy: RetentionPolicyType::Versions,
|
||||||
|
retention_limit: 5,
|
||||||
|
name: Some("Allowed Artifact".to_string()),
|
||||||
|
description: None,
|
||||||
|
content_type: Some("text/plain".to_string()),
|
||||||
|
execution: None,
|
||||||
|
data: None,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create allowed artifact");
|
||||||
|
|
||||||
|
let blocked_artifact = ArtifactRepository::create(
|
||||||
|
&ctx.pool,
|
||||||
|
CreateArtifactInput {
|
||||||
|
r#ref: format!("other_pack.blocked_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
scope: OwnerType::Pack,
|
||||||
|
owner: "other_pack".to_string(),
|
||||||
|
r#type: ArtifactType::FileText,
|
||||||
|
visibility: ArtifactVisibility::Private,
|
||||||
|
retention_policy: RetentionPolicyType::Versions,
|
||||||
|
retention_limit: 5,
|
||||||
|
name: Some("Blocked Artifact".to_string()),
|
||||||
|
description: None,
|
||||||
|
content_type: Some("text/plain".to_string()),
|
||||||
|
execution: None,
|
||||||
|
data: None,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create blocked artifact");
|
||||||
|
|
||||||
|
let allowed_get = ctx
|
||||||
|
.get(
|
||||||
|
&format!("/api/v1/artifacts/{}", allowed_artifact.id),
|
||||||
|
Some(&token),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to fetch allowed artifact");
|
||||||
|
assert_eq!(allowed_get.status(), StatusCode::OK);
|
||||||
|
|
||||||
|
let blocked_get = ctx
|
||||||
|
.get(
|
||||||
|
&format!("/api/v1/artifacts/{}", blocked_artifact.id),
|
||||||
|
Some(&token),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to fetch blocked artifact");
|
||||||
|
assert_eq!(blocked_get.status(), StatusCode::NOT_FOUND);
|
||||||
|
|
||||||
|
let create_allowed = ctx
|
||||||
|
.post(
|
||||||
|
"/api/v1/artifacts",
|
||||||
|
json!({
|
||||||
|
"ref": format!("python_example.created_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
"scope": "pack",
|
||||||
|
"owner": "python_example",
|
||||||
|
"type": "file_text",
|
||||||
|
"name": "Created Artifact"
|
||||||
|
}),
|
||||||
|
Some(&token),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create allowed artifact");
|
||||||
|
assert_eq!(create_allowed.status(), StatusCode::CREATED);
|
||||||
|
|
||||||
|
let create_blocked = ctx
|
||||||
|
.post(
|
||||||
|
"/api/v1/artifacts",
|
||||||
|
json!({
|
||||||
|
"ref": format!("other_pack.created_{}", uuid::Uuid::new_v4().simple()),
|
||||||
|
"scope": "pack",
|
||||||
|
"owner": "other_pack",
|
||||||
|
"type": "file_text",
|
||||||
|
"name": "Blocked Artifact"
|
||||||
|
}),
|
||||||
|
Some(&token),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create blocked artifact");
|
||||||
|
assert_eq!(create_blocked.status(), StatusCode::FORBIDDEN);
|
||||||
|
}
|
||||||
@@ -52,7 +52,7 @@ async fn setup_test_pack_and_action(pool: &PgPool) -> Result<(Pack, Action)> {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Action".to_string(),
|
label: "Test Action".to_string(),
|
||||||
description: "Test action for SSE tests".to_string(),
|
description: Some("Test action for SSE tests".to_string()),
|
||||||
entrypoint: "test.sh".to_string(),
|
entrypoint: "test.sh".to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -90,7 +90,7 @@ struct Action {
|
|||||||
action_ref: String,
|
action_ref: String,
|
||||||
pack_ref: String,
|
pack_ref: String,
|
||||||
label: String,
|
label: String,
|
||||||
description: String,
|
description: Option<String>,
|
||||||
entrypoint: String,
|
entrypoint: String,
|
||||||
runtime: Option<i64>,
|
runtime: Option<i64>,
|
||||||
created: String,
|
created: String,
|
||||||
@@ -105,7 +105,7 @@ struct ActionDetail {
|
|||||||
pack: i64,
|
pack: i64,
|
||||||
pack_ref: String,
|
pack_ref: String,
|
||||||
label: String,
|
label: String,
|
||||||
description: String,
|
description: Option<String>,
|
||||||
entrypoint: String,
|
entrypoint: String,
|
||||||
runtime: Option<i64>,
|
runtime: Option<i64>,
|
||||||
param_schema: Option<serde_json::Value>,
|
param_schema: Option<serde_json::Value>,
|
||||||
@@ -253,7 +253,7 @@ async fn handle_list(
|
|||||||
.runtime
|
.runtime
|
||||||
.map(|r| r.to_string())
|
.map(|r| r.to_string())
|
||||||
.unwrap_or_else(|| "none".to_string()),
|
.unwrap_or_else(|| "none".to_string()),
|
||||||
output::truncate(&action.description, 40),
|
output::truncate(&action.description.unwrap_or_default(), 40),
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -288,7 +288,10 @@ async fn handle_show(
|
|||||||
("Reference", action.action_ref.clone()),
|
("Reference", action.action_ref.clone()),
|
||||||
("Pack", action.pack_ref.clone()),
|
("Pack", action.pack_ref.clone()),
|
||||||
("Label", action.label.clone()),
|
("Label", action.label.clone()),
|
||||||
("Description", action.description.clone()),
|
(
|
||||||
|
"Description",
|
||||||
|
action.description.unwrap_or_else(|| "None".to_string()),
|
||||||
|
),
|
||||||
("Entry Point", action.entrypoint.clone()),
|
("Entry Point", action.entrypoint.clone()),
|
||||||
(
|
(
|
||||||
"Runtime",
|
"Runtime",
|
||||||
@@ -356,7 +359,10 @@ async fn handle_update(
|
|||||||
("Ref", action.action_ref.clone()),
|
("Ref", action.action_ref.clone()),
|
||||||
("Pack", action.pack_ref.clone()),
|
("Pack", action.pack_ref.clone()),
|
||||||
("Label", action.label.clone()),
|
("Label", action.label.clone()),
|
||||||
("Description", action.description.clone()),
|
(
|
||||||
|
"Description",
|
||||||
|
action.description.unwrap_or_else(|| "None".to_string()),
|
||||||
|
),
|
||||||
("Entrypoint", action.entrypoint.clone()),
|
("Entrypoint", action.entrypoint.clone()),
|
||||||
(
|
(
|
||||||
"Runtime",
|
"Runtime",
|
||||||
|
|||||||
@@ -112,7 +112,7 @@ struct Rule {
|
|||||||
pack: Option<i64>,
|
pack: Option<i64>,
|
||||||
pack_ref: String,
|
pack_ref: String,
|
||||||
label: String,
|
label: String,
|
||||||
description: String,
|
description: Option<String>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
trigger: Option<i64>,
|
trigger: Option<i64>,
|
||||||
trigger_ref: String,
|
trigger_ref: String,
|
||||||
@@ -133,7 +133,7 @@ struct RuleDetail {
|
|||||||
pack: Option<i64>,
|
pack: Option<i64>,
|
||||||
pack_ref: String,
|
pack_ref: String,
|
||||||
label: String,
|
label: String,
|
||||||
description: String,
|
description: Option<String>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
trigger: Option<i64>,
|
trigger: Option<i64>,
|
||||||
trigger_ref: String,
|
trigger_ref: String,
|
||||||
@@ -321,7 +321,10 @@ async fn handle_show(
|
|||||||
("Ref", rule.rule_ref.clone()),
|
("Ref", rule.rule_ref.clone()),
|
||||||
("Pack", rule.pack_ref.clone()),
|
("Pack", rule.pack_ref.clone()),
|
||||||
("Label", rule.label.clone()),
|
("Label", rule.label.clone()),
|
||||||
("Description", rule.description.clone()),
|
(
|
||||||
|
"Description",
|
||||||
|
rule.description.unwrap_or_else(|| "None".to_string()),
|
||||||
|
),
|
||||||
("Trigger", rule.trigger_ref.clone()),
|
("Trigger", rule.trigger_ref.clone()),
|
||||||
("Action", rule.action_ref.clone()),
|
("Action", rule.action_ref.clone()),
|
||||||
("Enabled", output::format_bool(rule.enabled)),
|
("Enabled", output::format_bool(rule.enabled)),
|
||||||
@@ -440,7 +443,10 @@ async fn handle_update(
|
|||||||
("Ref", rule.rule_ref.clone()),
|
("Ref", rule.rule_ref.clone()),
|
||||||
("Pack", rule.pack_ref.clone()),
|
("Pack", rule.pack_ref.clone()),
|
||||||
("Label", rule.label.clone()),
|
("Label", rule.label.clone()),
|
||||||
("Description", rule.description.clone()),
|
(
|
||||||
|
"Description",
|
||||||
|
rule.description.unwrap_or_else(|| "None".to_string()),
|
||||||
|
),
|
||||||
("Trigger", rule.trigger_ref.clone()),
|
("Trigger", rule.trigger_ref.clone()),
|
||||||
("Action", rule.action_ref.clone()),
|
("Action", rule.action_ref.clone()),
|
||||||
("Enabled", output::format_bool(rule.enabled)),
|
("Enabled", output::format_bool(rule.enabled)),
|
||||||
|
|||||||
@@ -444,13 +444,55 @@ pub mod runtime {
|
|||||||
|
|
||||||
/// Optional environment variables to set during action execution.
|
/// Optional environment variables to set during action execution.
|
||||||
///
|
///
|
||||||
/// Values support the same template variables as other fields:
|
/// Entries support the same template variables as other fields:
|
||||||
/// `{pack_dir}`, `{env_dir}`, `{interpreter}`, `{manifest_path}`.
|
/// `{pack_dir}`, `{env_dir}`, `{interpreter}`, `{manifest_path}`.
|
||||||
///
|
///
|
||||||
/// Example: `{"NODE_PATH": "{env_dir}/node_modules"}` ensures Node.js
|
/// The shorthand string form replaces the variable entirely:
|
||||||
/// can find packages installed in the isolated runtime environment.
|
/// `{"NODE_PATH": "{env_dir}/node_modules"}`
|
||||||
|
///
|
||||||
|
/// The object form supports declarative merge semantics:
|
||||||
|
/// `{"PYTHONPATH": {"value": "{pack_dir}/lib", "operation": "prepend"}}`
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub env_vars: HashMap<String, String>,
|
pub env_vars: HashMap<String, RuntimeEnvVarConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Declarative configuration for a single runtime environment variable.
|
||||||
|
///
|
||||||
|
/// The string form is shorthand for `{ "value": "...", "operation": "set" }`.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||||
|
#[serde(untagged)]
|
||||||
|
pub enum RuntimeEnvVarConfig {
|
||||||
|
Value(String),
|
||||||
|
Spec(RuntimeEnvVarSpec),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Full configuration for a runtime environment variable.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||||
|
pub struct RuntimeEnvVarSpec {
|
||||||
|
/// Template value to resolve for this variable.
|
||||||
|
pub value: String,
|
||||||
|
|
||||||
|
/// How the resolved value should be merged with any existing value.
|
||||||
|
#[serde(default)]
|
||||||
|
pub operation: RuntimeEnvVarOperation,
|
||||||
|
|
||||||
|
/// Separator used for prepend/append operations.
|
||||||
|
#[serde(default = "default_env_var_separator")]
|
||||||
|
pub separator: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Merge behavior for runtime-provided environment variables.
|
||||||
|
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Default)]
|
||||||
|
#[serde(rename_all = "snake_case")]
|
||||||
|
pub enum RuntimeEnvVarOperation {
|
||||||
|
#[default]
|
||||||
|
Set,
|
||||||
|
Prepend,
|
||||||
|
Append,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_env_var_separator() -> String {
|
||||||
|
":".to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Controls how inline code is materialized before execution.
|
/// Controls how inline code is materialized before execution.
|
||||||
@@ -768,6 +810,43 @@ pub mod runtime {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl RuntimeEnvVarConfig {
|
||||||
|
/// Resolve this environment variable against the current template
|
||||||
|
/// variables and any existing value already present in the process env.
|
||||||
|
pub fn resolve(
|
||||||
|
&self,
|
||||||
|
vars: &HashMap<&str, String>,
|
||||||
|
existing_value: Option<&str>,
|
||||||
|
) -> String {
|
||||||
|
match self {
|
||||||
|
Self::Value(value) => RuntimeExecutionConfig::resolve_template(value, vars),
|
||||||
|
Self::Spec(spec) => {
|
||||||
|
let resolved = RuntimeExecutionConfig::resolve_template(&spec.value, vars);
|
||||||
|
match spec.operation {
|
||||||
|
RuntimeEnvVarOperation::Set => resolved,
|
||||||
|
RuntimeEnvVarOperation::Prepend => {
|
||||||
|
join_env_var_values(&resolved, existing_value, &spec.separator)
|
||||||
|
}
|
||||||
|
RuntimeEnvVarOperation::Append => join_env_var_values(
|
||||||
|
existing_value.unwrap_or_default(),
|
||||||
|
Some(&resolved),
|
||||||
|
&spec.separator,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn join_env_var_values(left: &str, right: Option<&str>, separator: &str) -> String {
|
||||||
|
match (left.is_empty(), right.unwrap_or_default().is_empty()) {
|
||||||
|
(true, true) => String::new(),
|
||||||
|
(false, true) => left.to_string(),
|
||||||
|
(true, false) => right.unwrap_or_default().to_string(),
|
||||||
|
(false, false) => format!("{}{}{}", left, separator, right.unwrap_or_default()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
|
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
|
||||||
pub struct Runtime {
|
pub struct Runtime {
|
||||||
pub id: Id,
|
pub id: Id,
|
||||||
@@ -887,7 +966,7 @@ pub mod trigger {
|
|||||||
pub pack: Option<Id>,
|
pub pack: Option<Id>,
|
||||||
pub pack_ref: Option<String>,
|
pub pack_ref: Option<String>,
|
||||||
pub label: String,
|
pub label: String,
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
pub entrypoint: String,
|
pub entrypoint: String,
|
||||||
pub runtime: Id,
|
pub runtime: Id,
|
||||||
pub runtime_ref: String,
|
pub runtime_ref: String,
|
||||||
@@ -915,7 +994,7 @@ pub mod action {
|
|||||||
pub pack: Id,
|
pub pack: Id,
|
||||||
pub pack_ref: String,
|
pub pack_ref: String,
|
||||||
pub label: String,
|
pub label: String,
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
pub entrypoint: String,
|
pub entrypoint: String,
|
||||||
pub runtime: Option<Id>,
|
pub runtime: Option<Id>,
|
||||||
/// Optional semver version constraint for the runtime
|
/// Optional semver version constraint for the runtime
|
||||||
@@ -965,7 +1044,7 @@ pub mod rule {
|
|||||||
pub pack: Id,
|
pub pack: Id,
|
||||||
pub pack_ref: String,
|
pub pack_ref: String,
|
||||||
pub label: String,
|
pub label: String,
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
pub action: Option<Id>,
|
pub action: Option<Id>,
|
||||||
pub action_ref: String,
|
pub action_ref: String,
|
||||||
pub trigger: Option<Id>,
|
pub trigger: Option<Id>,
|
||||||
@@ -1221,6 +1300,7 @@ pub mod identity {
|
|||||||
pub display_name: Option<String>,
|
pub display_name: Option<String>,
|
||||||
pub password_hash: Option<String>,
|
pub password_hash: Option<String>,
|
||||||
pub attributes: JsonDict,
|
pub attributes: JsonDict,
|
||||||
|
pub frozen: bool,
|
||||||
pub created: DateTime<Utc>,
|
pub created: DateTime<Utc>,
|
||||||
pub updated: DateTime<Utc>,
|
pub updated: DateTime<Utc>,
|
||||||
}
|
}
|
||||||
@@ -1245,6 +1325,25 @@ pub mod identity {
|
|||||||
pub permset: Id,
|
pub permset: Id,
|
||||||
pub created: DateTime<Utc>,
|
pub created: DateTime<Utc>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
|
||||||
|
pub struct IdentityRoleAssignment {
|
||||||
|
pub id: Id,
|
||||||
|
pub identity: Id,
|
||||||
|
pub role: String,
|
||||||
|
pub source: String,
|
||||||
|
pub managed: bool,
|
||||||
|
pub created: DateTime<Utc>,
|
||||||
|
pub updated: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
|
||||||
|
pub struct PermissionSetRoleAssignment {
|
||||||
|
pub id: Id,
|
||||||
|
pub permset: Id,
|
||||||
|
pub role: String,
|
||||||
|
pub created: DateTime<Utc>,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Key/Value storage
|
/// Key/Value storage
|
||||||
@@ -1620,3 +1719,68 @@ pub mod entity_history {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::runtime::{
|
||||||
|
RuntimeEnvVarConfig, RuntimeEnvVarOperation, RuntimeEnvVarSpec, RuntimeExecutionConfig,
|
||||||
|
};
|
||||||
|
use serde_json::json;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn runtime_execution_config_env_vars_accept_string_and_object_forms() {
|
||||||
|
let config: RuntimeExecutionConfig = serde_json::from_value(json!({
|
||||||
|
"env_vars": {
|
||||||
|
"NODE_PATH": "{env_dir}/node_modules",
|
||||||
|
"PYTHONPATH": {
|
||||||
|
"value": "{pack_dir}/lib",
|
||||||
|
"operation": "prepend",
|
||||||
|
"separator": ":"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
.expect("runtime execution config should deserialize");
|
||||||
|
|
||||||
|
assert!(matches!(
|
||||||
|
config.env_vars.get("NODE_PATH"),
|
||||||
|
Some(RuntimeEnvVarConfig::Value(value)) if value == "{env_dir}/node_modules"
|
||||||
|
));
|
||||||
|
|
||||||
|
assert!(matches!(
|
||||||
|
config.env_vars.get("PYTHONPATH"),
|
||||||
|
Some(RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
|
||||||
|
value,
|
||||||
|
operation: RuntimeEnvVarOperation::Prepend,
|
||||||
|
separator,
|
||||||
|
})) if value == "{pack_dir}/lib" && separator == ":"
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn runtime_env_var_config_resolves_prepend_and_append_against_existing_values() {
|
||||||
|
let mut vars = HashMap::new();
|
||||||
|
vars.insert("pack_dir", "/packs/example".to_string());
|
||||||
|
vars.insert("env_dir", "/runtime_envs/example/python".to_string());
|
||||||
|
|
||||||
|
let prepend = RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
|
||||||
|
value: "{pack_dir}/lib".to_string(),
|
||||||
|
operation: RuntimeEnvVarOperation::Prepend,
|
||||||
|
separator: ":".to_string(),
|
||||||
|
});
|
||||||
|
assert_eq!(
|
||||||
|
prepend.resolve(&vars, Some("/already/set")),
|
||||||
|
"/packs/example/lib:/already/set"
|
||||||
|
);
|
||||||
|
|
||||||
|
let append = RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
|
||||||
|
value: "{env_dir}/node_modules".to_string(),
|
||||||
|
operation: RuntimeEnvVarOperation::Append,
|
||||||
|
separator: ":".to_string(),
|
||||||
|
});
|
||||||
|
assert_eq!(
|
||||||
|
append.resolve(&vars, Some("/base/modules")),
|
||||||
|
"/base/modules:/runtime_envs/example/python/node_modules"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -725,8 +725,7 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
let description = data
|
let description = data
|
||||||
.get("description")
|
.get("description")
|
||||||
.and_then(|v| v.as_str())
|
.and_then(|v| v.as_str())
|
||||||
.unwrap_or("")
|
.map(|s| s.to_string());
|
||||||
.to_string();
|
|
||||||
|
|
||||||
let enabled = data
|
let enabled = data
|
||||||
.get("enabled")
|
.get("enabled")
|
||||||
@@ -745,7 +744,10 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
if let Some(existing) = TriggerRepository::find_by_ref(self.pool, &trigger_ref).await? {
|
if let Some(existing) = TriggerRepository::find_by_ref(self.pool, &trigger_ref).await? {
|
||||||
let update_input = UpdateTriggerInput {
|
let update_input = UpdateTriggerInput {
|
||||||
label: Some(label),
|
label: Some(label),
|
||||||
description: Some(Patch::Set(description)),
|
description: Some(match description {
|
||||||
|
Some(description) => Patch::Set(description),
|
||||||
|
None => Patch::Clear,
|
||||||
|
}),
|
||||||
enabled: Some(enabled),
|
enabled: Some(enabled),
|
||||||
param_schema: Some(match param_schema {
|
param_schema: Some(match param_schema {
|
||||||
Some(value) => Patch::Set(value),
|
Some(value) => Patch::Set(value),
|
||||||
@@ -778,7 +780,7 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
pack: Some(self.pack_id),
|
pack: Some(self.pack_id),
|
||||||
pack_ref: Some(self.pack_ref.clone()),
|
pack_ref: Some(self.pack_ref.clone()),
|
||||||
label,
|
label,
|
||||||
description: Some(description),
|
description,
|
||||||
enabled,
|
enabled,
|
||||||
param_schema,
|
param_schema,
|
||||||
out_schema,
|
out_schema,
|
||||||
@@ -858,8 +860,7 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
let description = data
|
let description = data
|
||||||
.get("description")
|
.get("description")
|
||||||
.and_then(|v| v.as_str())
|
.and_then(|v| v.as_str())
|
||||||
.unwrap_or("")
|
.map(|s| s.to_string());
|
||||||
.to_string();
|
|
||||||
|
|
||||||
// ── Workflow file handling ──────────────────────────────────
|
// ── Workflow file handling ──────────────────────────────────
|
||||||
// If the action declares `workflow_file`, load the referenced
|
// If the action declares `workflow_file`, load the referenced
|
||||||
@@ -876,7 +877,7 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
wf_path,
|
wf_path,
|
||||||
&action_ref,
|
&action_ref,
|
||||||
&label,
|
&label,
|
||||||
&description,
|
description.as_deref().unwrap_or(""),
|
||||||
&data,
|
&data,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -956,7 +957,10 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
if let Some(existing) = ActionRepository::find_by_ref(self.pool, &action_ref).await? {
|
if let Some(existing) = ActionRepository::find_by_ref(self.pool, &action_ref).await? {
|
||||||
let update_input = UpdateActionInput {
|
let update_input = UpdateActionInput {
|
||||||
label: Some(label),
|
label: Some(label),
|
||||||
description: Some(description),
|
description: Some(match description {
|
||||||
|
Some(description) => Patch::Set(description),
|
||||||
|
None => Patch::Clear,
|
||||||
|
}),
|
||||||
entrypoint: Some(entrypoint),
|
entrypoint: Some(entrypoint),
|
||||||
runtime: runtime_id,
|
runtime: runtime_id,
|
||||||
runtime_version_constraint: Some(match runtime_version_constraint {
|
runtime_version_constraint: Some(match runtime_version_constraint {
|
||||||
@@ -1310,8 +1314,7 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
let description = data
|
let description = data
|
||||||
.get("description")
|
.get("description")
|
||||||
.and_then(|v| v.as_str())
|
.and_then(|v| v.as_str())
|
||||||
.unwrap_or("")
|
.map(|s| s.to_string());
|
||||||
.to_string();
|
|
||||||
|
|
||||||
let enabled = data
|
let enabled = data
|
||||||
.get("enabled")
|
.get("enabled")
|
||||||
@@ -1347,7 +1350,10 @@ impl<'a> PackComponentLoader<'a> {
|
|||||||
if let Some(existing) = SensorRepository::find_by_ref(self.pool, &sensor_ref).await? {
|
if let Some(existing) = SensorRepository::find_by_ref(self.pool, &sensor_ref).await? {
|
||||||
let update_input = UpdateSensorInput {
|
let update_input = UpdateSensorInput {
|
||||||
label: Some(label),
|
label: Some(label),
|
||||||
description: Some(description),
|
description: Some(match description {
|
||||||
|
Some(description) => Patch::Set(description),
|
||||||
|
None => Patch::Clear,
|
||||||
|
}),
|
||||||
entrypoint: Some(entrypoint),
|
entrypoint: Some(entrypoint),
|
||||||
runtime: Some(sensor_runtime_id),
|
runtime: Some(sensor_runtime_id),
|
||||||
runtime_ref: Some(sensor_runtime_ref.clone()),
|
runtime_ref: Some(sensor_runtime_ref.clone()),
|
||||||
|
|||||||
@@ -21,10 +21,6 @@ pub enum Resource {
|
|||||||
Inquiries,
|
Inquiries,
|
||||||
Keys,
|
Keys,
|
||||||
Artifacts,
|
Artifacts,
|
||||||
Workflows,
|
|
||||||
Webhooks,
|
|
||||||
Analytics,
|
|
||||||
History,
|
|
||||||
Identities,
|
Identities,
|
||||||
Permissions,
|
Permissions,
|
||||||
}
|
}
|
||||||
@@ -40,6 +36,7 @@ pub enum Action {
|
|||||||
Cancel,
|
Cancel,
|
||||||
Respond,
|
Respond,
|
||||||
Manage,
|
Manage,
|
||||||
|
Decrypt,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
|
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
|
||||||
@@ -69,6 +66,8 @@ pub struct GrantConstraints {
|
|||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub owner_types: Option<Vec<OwnerType>>,
|
pub owner_types: Option<Vec<OwnerType>>,
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub owner_refs: Option<Vec<String>>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub visibility: Option<Vec<ArtifactVisibility>>,
|
pub visibility: Option<Vec<ArtifactVisibility>>,
|
||||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
pub execution_scope: Option<ExecutionScopeConstraint>,
|
pub execution_scope: Option<ExecutionScopeConstraint>,
|
||||||
@@ -99,6 +98,7 @@ pub struct AuthorizationContext {
|
|||||||
pub pack_ref: Option<String>,
|
pub pack_ref: Option<String>,
|
||||||
pub owner_identity_id: Option<Id>,
|
pub owner_identity_id: Option<Id>,
|
||||||
pub owner_type: Option<OwnerType>,
|
pub owner_type: Option<OwnerType>,
|
||||||
|
pub owner_ref: Option<String>,
|
||||||
pub visibility: Option<ArtifactVisibility>,
|
pub visibility: Option<ArtifactVisibility>,
|
||||||
pub encrypted: Option<bool>,
|
pub encrypted: Option<bool>,
|
||||||
pub execution_owner_identity_id: Option<Id>,
|
pub execution_owner_identity_id: Option<Id>,
|
||||||
@@ -115,6 +115,7 @@ impl AuthorizationContext {
|
|||||||
pack_ref: None,
|
pack_ref: None,
|
||||||
owner_identity_id: None,
|
owner_identity_id: None,
|
||||||
owner_type: None,
|
owner_type: None,
|
||||||
|
owner_ref: None,
|
||||||
visibility: None,
|
visibility: None,
|
||||||
encrypted: None,
|
encrypted: None,
|
||||||
execution_owner_identity_id: None,
|
execution_owner_identity_id: None,
|
||||||
@@ -162,6 +163,15 @@ impl Grant {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Some(owner_refs) = &constraints.owner_refs {
|
||||||
|
let Some(owner_ref) = &ctx.owner_ref else {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
if !owner_refs.contains(owner_ref) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if let Some(visibility) = &constraints.visibility {
|
if let Some(visibility) = &constraints.visibility {
|
||||||
let Some(target_visibility) = ctx.visibility else {
|
let Some(target_visibility) = ctx.visibility else {
|
||||||
return false;
|
return false;
|
||||||
@@ -289,4 +299,28 @@ mod tests {
|
|||||||
.insert("team".to_string(), json!("infra"));
|
.insert("team".to_string(), json!("infra"));
|
||||||
assert!(!grant.allows(Resource::Packs, Action::Read, &ctx));
|
assert!(!grant.allows(Resource::Packs, Action::Read, &ctx));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn owner_ref_constraint_requires_exact_value_match() {
|
||||||
|
let grant = Grant {
|
||||||
|
resource: Resource::Artifacts,
|
||||||
|
actions: vec![Action::Read],
|
||||||
|
constraints: Some(GrantConstraints {
|
||||||
|
owner_types: Some(vec![OwnerType::Pack]),
|
||||||
|
owner_refs: Some(vec!["python_example".to_string()]),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut ctx = AuthorizationContext::new(1);
|
||||||
|
ctx.owner_type = Some(OwnerType::Pack);
|
||||||
|
ctx.owner_ref = Some("python_example".to_string());
|
||||||
|
assert!(grant.allows(Resource::Artifacts, Action::Read, &ctx));
|
||||||
|
|
||||||
|
ctx.owner_ref = Some("other_pack".to_string());
|
||||||
|
assert!(!grant.allows(Resource::Artifacts, Action::Read, &ctx));
|
||||||
|
|
||||||
|
ctx.owner_ref = None;
|
||||||
|
assert!(!grant.allows(Resource::Artifacts, Action::Read, &ctx));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ pub struct CreateActionInput {
|
|||||||
pub pack: Id,
|
pub pack: Id,
|
||||||
pub pack_ref: String,
|
pub pack_ref: String,
|
||||||
pub label: String,
|
pub label: String,
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
pub entrypoint: String,
|
pub entrypoint: String,
|
||||||
pub runtime: Option<Id>,
|
pub runtime: Option<Id>,
|
||||||
pub runtime_version_constraint: Option<String>,
|
pub runtime_version_constraint: Option<String>,
|
||||||
@@ -64,7 +64,7 @@ pub struct CreateActionInput {
|
|||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct UpdateActionInput {
|
pub struct UpdateActionInput {
|
||||||
pub label: Option<String>,
|
pub label: Option<String>,
|
||||||
pub description: Option<String>,
|
pub description: Option<Patch<String>>,
|
||||||
pub entrypoint: Option<String>,
|
pub entrypoint: Option<String>,
|
||||||
pub runtime: Option<Id>,
|
pub runtime: Option<Id>,
|
||||||
pub runtime_version_constraint: Option<Patch<String>>,
|
pub runtime_version_constraint: Option<Patch<String>>,
|
||||||
@@ -210,7 +210,10 @@ impl Update for ActionRepository {
|
|||||||
query.push(", ");
|
query.push(", ");
|
||||||
}
|
}
|
||||||
query.push("description = ");
|
query.push("description = ");
|
||||||
query.push_bind(description);
|
match description {
|
||||||
|
Patch::Set(value) => query.push_bind(value),
|
||||||
|
Patch::Clear => query.push_bind(Option::<String>::None),
|
||||||
|
};
|
||||||
has_updates = true;
|
has_updates = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -577,6 +577,14 @@ pub struct CreateArtifactVersionInput {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl ArtifactVersionRepository {
|
impl ArtifactVersionRepository {
|
||||||
|
fn select_columns_with_alias(alias: &str) -> String {
|
||||||
|
format!(
|
||||||
|
"{alias}.id, {alias}.artifact, {alias}.version, {alias}.content_type, \
|
||||||
|
{alias}.size_bytes, NULL::bytea AS content, {alias}.content_json, \
|
||||||
|
{alias}.file_path, {alias}.meta, {alias}.created_by, {alias}.created"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
/// Find a version by ID (without binary content for performance)
|
/// Find a version by ID (without binary content for performance)
|
||||||
pub async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<ArtifactVersion>>
|
pub async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<ArtifactVersion>>
|
||||||
where
|
where
|
||||||
@@ -812,14 +820,11 @@ impl ArtifactVersionRepository {
|
|||||||
E: Executor<'e, Database = Postgres> + 'e,
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
{
|
{
|
||||||
let query = format!(
|
let query = format!(
|
||||||
"SELECT av.{} \
|
"SELECT {} \
|
||||||
FROM artifact_version av \
|
FROM artifact_version av \
|
||||||
JOIN artifact a ON av.artifact = a.id \
|
JOIN artifact a ON av.artifact = a.id \
|
||||||
WHERE a.execution = $1 AND av.file_path IS NOT NULL",
|
WHERE a.execution = $1 AND av.file_path IS NOT NULL",
|
||||||
artifact_version::SELECT_COLUMNS
|
Self::select_columns_with_alias("av")
|
||||||
.split(", ")
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.join(", av.")
|
|
||||||
);
|
);
|
||||||
sqlx::query_as::<_, ArtifactVersion>(&query)
|
sqlx::query_as::<_, ArtifactVersion>(&query)
|
||||||
.bind(execution_id)
|
.bind(execution_id)
|
||||||
@@ -847,3 +852,18 @@ impl ArtifactVersionRepository {
|
|||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::ArtifactVersionRepository;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn aliased_select_columns_keep_null_content_expression_unqualified() {
|
||||||
|
let columns = ArtifactVersionRepository::select_columns_with_alias("av");
|
||||||
|
|
||||||
|
assert!(columns.contains("av.id"));
|
||||||
|
assert!(columns.contains("av.file_path"));
|
||||||
|
assert!(columns.contains("NULL::bytea AS content"));
|
||||||
|
assert!(!columns.contains("av.NULL::bytea AS content"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -28,6 +28,7 @@ pub struct UpdateIdentityInput {
|
|||||||
pub display_name: Option<String>,
|
pub display_name: Option<String>,
|
||||||
pub password_hash: Option<String>,
|
pub password_hash: Option<String>,
|
||||||
pub attributes: Option<JsonDict>,
|
pub attributes: Option<JsonDict>,
|
||||||
|
pub frozen: Option<bool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
@@ -37,7 +38,7 @@ impl FindById for IdentityRepository {
|
|||||||
E: Executor<'e, Database = Postgres> + 'e,
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
{
|
{
|
||||||
sqlx::query_as::<_, Identity>(
|
sqlx::query_as::<_, Identity>(
|
||||||
"SELECT id, login, display_name, password_hash, attributes, created, updated FROM identity WHERE id = $1"
|
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated FROM identity WHERE id = $1"
|
||||||
).bind(id).fetch_optional(executor).await.map_err(Into::into)
|
).bind(id).fetch_optional(executor).await.map_err(Into::into)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -49,7 +50,7 @@ impl List for IdentityRepository {
|
|||||||
E: Executor<'e, Database = Postgres> + 'e,
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
{
|
{
|
||||||
sqlx::query_as::<_, Identity>(
|
sqlx::query_as::<_, Identity>(
|
||||||
"SELECT id, login, display_name, password_hash, attributes, created, updated FROM identity ORDER BY login ASC"
|
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated FROM identity ORDER BY login ASC"
|
||||||
).fetch_all(executor).await.map_err(Into::into)
|
).fetch_all(executor).await.map_err(Into::into)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -62,7 +63,7 @@ impl Create for IdentityRepository {
|
|||||||
E: Executor<'e, Database = Postgres> + 'e,
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
{
|
{
|
||||||
sqlx::query_as::<_, Identity>(
|
sqlx::query_as::<_, Identity>(
|
||||||
"INSERT INTO identity (login, display_name, password_hash, attributes) VALUES ($1, $2, $3, $4) RETURNING id, login, display_name, password_hash, attributes, created, updated"
|
"INSERT INTO identity (login, display_name, password_hash, attributes) VALUES ($1, $2, $3, $4) RETURNING id, login, display_name, password_hash, attributes, frozen, created, updated"
|
||||||
)
|
)
|
||||||
.bind(&input.login)
|
.bind(&input.login)
|
||||||
.bind(&input.display_name)
|
.bind(&input.display_name)
|
||||||
@@ -111,6 +112,13 @@ impl Update for IdentityRepository {
|
|||||||
query.push("attributes = ").push_bind(attributes);
|
query.push("attributes = ").push_bind(attributes);
|
||||||
has_updates = true;
|
has_updates = true;
|
||||||
}
|
}
|
||||||
|
if let Some(frozen) = input.frozen {
|
||||||
|
if has_updates {
|
||||||
|
query.push(", ");
|
||||||
|
}
|
||||||
|
query.push("frozen = ").push_bind(frozen);
|
||||||
|
has_updates = true;
|
||||||
|
}
|
||||||
|
|
||||||
if !has_updates {
|
if !has_updates {
|
||||||
// No updates requested, fetch and return existing entity
|
// No updates requested, fetch and return existing entity
|
||||||
@@ -119,7 +127,7 @@ impl Update for IdentityRepository {
|
|||||||
|
|
||||||
query.push(", updated = NOW() WHERE id = ").push_bind(id);
|
query.push(", updated = NOW() WHERE id = ").push_bind(id);
|
||||||
query.push(
|
query.push(
|
||||||
" RETURNING id, login, display_name, password_hash, attributes, created, updated",
|
" RETURNING id, login, display_name, password_hash, attributes, frozen, created, updated",
|
||||||
);
|
);
|
||||||
|
|
||||||
query
|
query
|
||||||
@@ -156,7 +164,7 @@ impl IdentityRepository {
|
|||||||
E: Executor<'e, Database = Postgres> + 'e,
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
{
|
{
|
||||||
sqlx::query_as::<_, Identity>(
|
sqlx::query_as::<_, Identity>(
|
||||||
"SELECT id, login, display_name, password_hash, attributes, created, updated FROM identity WHERE login = $1"
|
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated FROM identity WHERE login = $1"
|
||||||
).bind(login).fetch_optional(executor).await.map_err(Into::into)
|
).bind(login).fetch_optional(executor).await.map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -169,7 +177,7 @@ impl IdentityRepository {
|
|||||||
E: Executor<'e, Database = Postgres> + 'e,
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
{
|
{
|
||||||
sqlx::query_as::<_, Identity>(
|
sqlx::query_as::<_, Identity>(
|
||||||
"SELECT id, login, display_name, password_hash, attributes, created, updated
|
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated
|
||||||
FROM identity
|
FROM identity
|
||||||
WHERE attributes->'oidc'->>'issuer' = $1
|
WHERE attributes->'oidc'->>'issuer' = $1
|
||||||
AND attributes->'oidc'->>'sub' = $2",
|
AND attributes->'oidc'->>'sub' = $2",
|
||||||
@@ -190,7 +198,7 @@ impl IdentityRepository {
|
|||||||
E: Executor<'e, Database = Postgres> + 'e,
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
{
|
{
|
||||||
sqlx::query_as::<_, Identity>(
|
sqlx::query_as::<_, Identity>(
|
||||||
"SELECT id, login, display_name, password_hash, attributes, created, updated
|
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated
|
||||||
FROM identity
|
FROM identity
|
||||||
WHERE attributes->'ldap'->>'server_url' = $1
|
WHERE attributes->'ldap'->>'server_url' = $1
|
||||||
AND attributes->'ldap'->>'dn' = $2",
|
AND attributes->'ldap'->>'dn' = $2",
|
||||||
@@ -363,6 +371,27 @@ impl PermissionSetRepository {
|
|||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn find_by_roles<'e, E>(executor: E, roles: &[String]) -> Result<Vec<PermissionSet>>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
if roles.is_empty() {
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlx::query_as::<_, PermissionSet>(
|
||||||
|
"SELECT DISTINCT ps.id, ps.ref, ps.pack, ps.pack_ref, ps.label, ps.description, ps.grants, ps.created, ps.updated
|
||||||
|
FROM permission_set ps
|
||||||
|
INNER JOIN permission_set_role_assignment psra ON psra.permset = ps.id
|
||||||
|
WHERE psra.role = ANY($1)
|
||||||
|
ORDER BY ps.ref ASC",
|
||||||
|
)
|
||||||
|
.bind(roles)
|
||||||
|
.fetch_all(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
/// Delete permission sets belonging to a pack whose refs are NOT in the given set.
|
/// Delete permission sets belonging to a pack whose refs are NOT in the given set.
|
||||||
///
|
///
|
||||||
/// Used during pack reinstallation to clean up permission sets that were
|
/// Used during pack reinstallation to clean up permission sets that were
|
||||||
@@ -481,3 +510,231 @@ impl PermissionAssignmentRepository {
|
|||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub struct IdentityRoleAssignmentRepository;
|
||||||
|
|
||||||
|
impl Repository for IdentityRoleAssignmentRepository {
|
||||||
|
type Entity = IdentityRoleAssignment;
|
||||||
|
fn table_name() -> &'static str {
|
||||||
|
"identity_role_assignment"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct CreateIdentityRoleAssignmentInput {
|
||||||
|
pub identity: Id,
|
||||||
|
pub role: String,
|
||||||
|
pub source: String,
|
||||||
|
pub managed: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl FindById for IdentityRoleAssignmentRepository {
|
||||||
|
async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<Self::Entity>>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query_as::<_, IdentityRoleAssignment>(
|
||||||
|
"SELECT id, identity, role, source, managed, created, updated FROM identity_role_assignment WHERE id = $1"
|
||||||
|
)
|
||||||
|
.bind(id)
|
||||||
|
.fetch_optional(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl Create for IdentityRoleAssignmentRepository {
|
||||||
|
type CreateInput = CreateIdentityRoleAssignmentInput;
|
||||||
|
async fn create<'e, E>(executor: E, input: Self::CreateInput) -> Result<Self::Entity>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query_as::<_, IdentityRoleAssignment>(
|
||||||
|
"INSERT INTO identity_role_assignment (identity, role, source, managed)
|
||||||
|
VALUES ($1, $2, $3, $4)
|
||||||
|
RETURNING id, identity, role, source, managed, created, updated",
|
||||||
|
)
|
||||||
|
.bind(input.identity)
|
||||||
|
.bind(&input.role)
|
||||||
|
.bind(&input.source)
|
||||||
|
.bind(input.managed)
|
||||||
|
.fetch_one(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl Delete for IdentityRoleAssignmentRepository {
|
||||||
|
async fn delete<'e, E>(executor: E, id: i64) -> Result<bool>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
let result = sqlx::query("DELETE FROM identity_role_assignment WHERE id = $1")
|
||||||
|
.bind(id)
|
||||||
|
.execute(executor)
|
||||||
|
.await?;
|
||||||
|
Ok(result.rows_affected() > 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IdentityRoleAssignmentRepository {
|
||||||
|
pub async fn find_by_identity<'e, E>(
|
||||||
|
executor: E,
|
||||||
|
identity_id: Id,
|
||||||
|
) -> Result<Vec<IdentityRoleAssignment>>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query_as::<_, IdentityRoleAssignment>(
|
||||||
|
"SELECT id, identity, role, source, managed, created, updated
|
||||||
|
FROM identity_role_assignment
|
||||||
|
WHERE identity = $1
|
||||||
|
ORDER BY role ASC",
|
||||||
|
)
|
||||||
|
.bind(identity_id)
|
||||||
|
.fetch_all(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn find_role_names_by_identity<'e, E>(
|
||||||
|
executor: E,
|
||||||
|
identity_id: Id,
|
||||||
|
) -> Result<Vec<String>>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query_scalar::<_, String>(
|
||||||
|
"SELECT role FROM identity_role_assignment WHERE identity = $1 ORDER BY role ASC",
|
||||||
|
)
|
||||||
|
.bind(identity_id)
|
||||||
|
.fetch_all(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn replace_managed_roles<'e, E>(
|
||||||
|
executor: E,
|
||||||
|
identity_id: Id,
|
||||||
|
source: &str,
|
||||||
|
roles: &[String],
|
||||||
|
) -> Result<()>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + Copy + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query(
|
||||||
|
"DELETE FROM identity_role_assignment WHERE identity = $1 AND source = $2 AND managed = true",
|
||||||
|
)
|
||||||
|
.bind(identity_id)
|
||||||
|
.bind(source)
|
||||||
|
.execute(executor)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
for role in roles {
|
||||||
|
sqlx::query(
|
||||||
|
"INSERT INTO identity_role_assignment (identity, role, source, managed)
|
||||||
|
VALUES ($1, $2, $3, true)
|
||||||
|
ON CONFLICT (identity, role) DO UPDATE
|
||||||
|
SET source = EXCLUDED.source,
|
||||||
|
managed = EXCLUDED.managed,
|
||||||
|
updated = NOW()",
|
||||||
|
)
|
||||||
|
.bind(identity_id)
|
||||||
|
.bind(role)
|
||||||
|
.bind(source)
|
||||||
|
.execute(executor)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct PermissionSetRoleAssignmentRepository;
|
||||||
|
|
||||||
|
impl Repository for PermissionSetRoleAssignmentRepository {
|
||||||
|
type Entity = PermissionSetRoleAssignment;
|
||||||
|
fn table_name() -> &'static str {
|
||||||
|
"permission_set_role_assignment"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct CreatePermissionSetRoleAssignmentInput {
|
||||||
|
pub permset: Id,
|
||||||
|
pub role: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl FindById for PermissionSetRoleAssignmentRepository {
|
||||||
|
async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<Self::Entity>>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query_as::<_, PermissionSetRoleAssignment>(
|
||||||
|
"SELECT id, permset, role, created FROM permission_set_role_assignment WHERE id = $1",
|
||||||
|
)
|
||||||
|
.bind(id)
|
||||||
|
.fetch_optional(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl Create for PermissionSetRoleAssignmentRepository {
|
||||||
|
type CreateInput = CreatePermissionSetRoleAssignmentInput;
|
||||||
|
async fn create<'e, E>(executor: E, input: Self::CreateInput) -> Result<Self::Entity>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query_as::<_, PermissionSetRoleAssignment>(
|
||||||
|
"INSERT INTO permission_set_role_assignment (permset, role)
|
||||||
|
VALUES ($1, $2)
|
||||||
|
RETURNING id, permset, role, created",
|
||||||
|
)
|
||||||
|
.bind(input.permset)
|
||||||
|
.bind(&input.role)
|
||||||
|
.fetch_one(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl Delete for PermissionSetRoleAssignmentRepository {
|
||||||
|
async fn delete<'e, E>(executor: E, id: i64) -> Result<bool>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
let result = sqlx::query("DELETE FROM permission_set_role_assignment WHERE id = $1")
|
||||||
|
.bind(id)
|
||||||
|
.execute(executor)
|
||||||
|
.await?;
|
||||||
|
Ok(result.rows_affected() > 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PermissionSetRoleAssignmentRepository {
|
||||||
|
pub async fn find_by_permission_set<'e, E>(
|
||||||
|
executor: E,
|
||||||
|
permset_id: Id,
|
||||||
|
) -> Result<Vec<PermissionSetRoleAssignment>>
|
||||||
|
where
|
||||||
|
E: Executor<'e, Database = Postgres> + 'e,
|
||||||
|
{
|
||||||
|
sqlx::query_as::<_, PermissionSetRoleAssignment>(
|
||||||
|
"SELECT id, permset, role, created
|
||||||
|
FROM permission_set_role_assignment
|
||||||
|
WHERE permset = $1
|
||||||
|
ORDER BY role ASC",
|
||||||
|
)
|
||||||
|
.bind(permset_id)
|
||||||
|
.fetch_all(executor)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ use crate::models::{rule::*, Id};
|
|||||||
use crate::{Error, Result};
|
use crate::{Error, Result};
|
||||||
use sqlx::{Executor, Postgres, QueryBuilder};
|
use sqlx::{Executor, Postgres, QueryBuilder};
|
||||||
|
|
||||||
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
|
use super::{Create, Delete, FindById, FindByRef, List, Patch, Repository, Update};
|
||||||
|
|
||||||
/// Filters for [`RuleRepository::list_search`].
|
/// Filters for [`RuleRepository::list_search`].
|
||||||
///
|
///
|
||||||
@@ -41,7 +41,7 @@ pub struct RestoreRuleInput {
|
|||||||
pub pack: Id,
|
pub pack: Id,
|
||||||
pub pack_ref: String,
|
pub pack_ref: String,
|
||||||
pub label: String,
|
pub label: String,
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
pub action: Option<Id>,
|
pub action: Option<Id>,
|
||||||
pub action_ref: String,
|
pub action_ref: String,
|
||||||
pub trigger: Option<Id>,
|
pub trigger: Option<Id>,
|
||||||
@@ -70,7 +70,7 @@ pub struct CreateRuleInput {
|
|||||||
pub pack: Id,
|
pub pack: Id,
|
||||||
pub pack_ref: String,
|
pub pack_ref: String,
|
||||||
pub label: String,
|
pub label: String,
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
pub action: Id,
|
pub action: Id,
|
||||||
pub action_ref: String,
|
pub action_ref: String,
|
||||||
pub trigger: Id,
|
pub trigger: Id,
|
||||||
@@ -86,7 +86,7 @@ pub struct CreateRuleInput {
|
|||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct UpdateRuleInput {
|
pub struct UpdateRuleInput {
|
||||||
pub label: Option<String>,
|
pub label: Option<String>,
|
||||||
pub description: Option<String>,
|
pub description: Option<Patch<String>>,
|
||||||
pub conditions: Option<serde_json::Value>,
|
pub conditions: Option<serde_json::Value>,
|
||||||
pub action_params: Option<serde_json::Value>,
|
pub action_params: Option<serde_json::Value>,
|
||||||
pub trigger_params: Option<serde_json::Value>,
|
pub trigger_params: Option<serde_json::Value>,
|
||||||
@@ -228,7 +228,10 @@ impl Update for RuleRepository {
|
|||||||
query.push(", ");
|
query.push(", ");
|
||||||
}
|
}
|
||||||
query.push("description = ");
|
query.push("description = ");
|
||||||
query.push_bind(description);
|
match description {
|
||||||
|
Patch::Set(value) => query.push_bind(value),
|
||||||
|
Patch::Clear => query.push_bind(Option::<String>::None),
|
||||||
|
};
|
||||||
has_updates = true;
|
has_updates = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -237,7 +237,7 @@ impl Update for RuntimeRepository {
|
|||||||
|
|
||||||
query.push(", updated = NOW() WHERE id = ");
|
query.push(", updated = NOW() WHERE id = ");
|
||||||
query.push_bind(id);
|
query.push_bind(id);
|
||||||
query.push(&format!(" RETURNING {}", SELECT_COLUMNS));
|
query.push(format!(" RETURNING {}", SELECT_COLUMNS));
|
||||||
|
|
||||||
let runtime = query
|
let runtime = query
|
||||||
.build_query_as::<Runtime>()
|
.build_query_as::<Runtime>()
|
||||||
|
|||||||
@@ -665,7 +665,7 @@ pub struct CreateSensorInput {
|
|||||||
pub pack: Option<Id>,
|
pub pack: Option<Id>,
|
||||||
pub pack_ref: Option<String>,
|
pub pack_ref: Option<String>,
|
||||||
pub label: String,
|
pub label: String,
|
||||||
pub description: String,
|
pub description: Option<String>,
|
||||||
pub entrypoint: String,
|
pub entrypoint: String,
|
||||||
pub runtime: Id,
|
pub runtime: Id,
|
||||||
pub runtime_ref: String,
|
pub runtime_ref: String,
|
||||||
@@ -681,7 +681,7 @@ pub struct CreateSensorInput {
|
|||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct UpdateSensorInput {
|
pub struct UpdateSensorInput {
|
||||||
pub label: Option<String>,
|
pub label: Option<String>,
|
||||||
pub description: Option<String>,
|
pub description: Option<Patch<String>>,
|
||||||
pub entrypoint: Option<String>,
|
pub entrypoint: Option<String>,
|
||||||
pub runtime: Option<Id>,
|
pub runtime: Option<Id>,
|
||||||
pub runtime_ref: Option<String>,
|
pub runtime_ref: Option<String>,
|
||||||
@@ -830,7 +830,10 @@ impl Update for SensorRepository {
|
|||||||
query.push(", ");
|
query.push(", ");
|
||||||
}
|
}
|
||||||
query.push("description = ");
|
query.push("description = ");
|
||||||
query.push_bind(description);
|
match description {
|
||||||
|
Patch::Set(value) => query.push_bind(value),
|
||||||
|
Patch::Clear => query.push_bind(Option::<String>::None),
|
||||||
|
};
|
||||||
has_updates = true;
|
has_updates = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -13,6 +13,7 @@
|
|||||||
use crate::error::{Error, Result};
|
use crate::error::{Error, Result};
|
||||||
use crate::repositories::action::{ActionRepository, CreateActionInput, UpdateActionInput};
|
use crate::repositories::action::{ActionRepository, CreateActionInput, UpdateActionInput};
|
||||||
use crate::repositories::workflow::{CreateWorkflowDefinitionInput, UpdateWorkflowDefinitionInput};
|
use crate::repositories::workflow::{CreateWorkflowDefinitionInput, UpdateWorkflowDefinitionInput};
|
||||||
|
use crate::repositories::Patch;
|
||||||
use crate::repositories::{
|
use crate::repositories::{
|
||||||
Create, Delete, FindByRef, PackRepository, Update, WorkflowDefinitionRepository,
|
Create, Delete, FindByRef, PackRepository, Update, WorkflowDefinitionRepository,
|
||||||
};
|
};
|
||||||
@@ -270,7 +271,7 @@ impl WorkflowRegistrar {
|
|||||||
pack: pack_id,
|
pack: pack_id,
|
||||||
pack_ref: pack_ref.to_string(),
|
pack_ref: pack_ref.to_string(),
|
||||||
label: effective_label.to_string(),
|
label: effective_label.to_string(),
|
||||||
description: workflow.description.clone().unwrap_or_default(),
|
description: workflow.description.clone(),
|
||||||
entrypoint,
|
entrypoint,
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
@@ -317,7 +318,10 @@ impl WorkflowRegistrar {
|
|||||||
// Update the existing companion action to stay in sync
|
// Update the existing companion action to stay in sync
|
||||||
let update_input = UpdateActionInput {
|
let update_input = UpdateActionInput {
|
||||||
label: Some(effective_label.to_string()),
|
label: Some(effective_label.to_string()),
|
||||||
description: workflow.description.clone(),
|
description: Some(match workflow.description.clone() {
|
||||||
|
Some(description) => Patch::Set(description),
|
||||||
|
None => Patch::Clear,
|
||||||
|
}),
|
||||||
entrypoint: Some(format!("workflows/{}.workflow.yaml", workflow_name)),
|
entrypoint: Some(format!("workflows/{}.workflow.yaml", workflow_name)),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -66,7 +66,10 @@ async fn test_create_action_with_optional_fields() {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(action.label, "Full Test Action");
|
assert_eq!(action.label, "Full Test Action");
|
||||||
assert_eq!(action.description, "Action with all optional fields");
|
assert_eq!(
|
||||||
|
action.description,
|
||||||
|
Some("Action with all optional fields".to_string())
|
||||||
|
);
|
||||||
assert_eq!(action.entrypoint, "custom.py");
|
assert_eq!(action.entrypoint, "custom.py");
|
||||||
assert!(action.param_schema.is_some());
|
assert!(action.param_schema.is_some());
|
||||||
assert!(action.out_schema.is_some());
|
assert!(action.out_schema.is_some());
|
||||||
@@ -204,7 +207,9 @@ async fn test_update_action() {
|
|||||||
|
|
||||||
let update = UpdateActionInput {
|
let update = UpdateActionInput {
|
||||||
label: Some("Updated Label".to_string()),
|
label: Some("Updated Label".to_string()),
|
||||||
description: Some("Updated description".to_string()),
|
description: Some(attune_common::repositories::Patch::Set(
|
||||||
|
"Updated description".to_string(),
|
||||||
|
)),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -214,7 +219,7 @@ async fn test_update_action() {
|
|||||||
|
|
||||||
assert_eq!(updated.id, action.id);
|
assert_eq!(updated.id, action.id);
|
||||||
assert_eq!(updated.label, "Updated Label");
|
assert_eq!(updated.label, "Updated Label");
|
||||||
assert_eq!(updated.description, "Updated description");
|
assert_eq!(updated.description, Some("Updated description".to_string()));
|
||||||
assert_eq!(updated.entrypoint, action.entrypoint); // Unchanged
|
assert_eq!(updated.entrypoint, action.entrypoint); // Unchanged
|
||||||
assert!(updated.updated > original_updated);
|
assert!(updated.updated > original_updated);
|
||||||
}
|
}
|
||||||
@@ -338,7 +343,7 @@ async fn test_action_foreign_key_constraint() {
|
|||||||
pack: 99999,
|
pack: 99999,
|
||||||
pack_ref: "nonexistent.pack".to_string(),
|
pack_ref: "nonexistent.pack".to_string(),
|
||||||
label: "Test Action".to_string(),
|
label: "Test Action".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
entrypoint: "main.py".to_string(),
|
entrypoint: "main.py".to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -49,7 +49,7 @@ async fn test_create_enforcement_minimal() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -121,7 +121,7 @@ async fn test_create_enforcement_with_event() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -189,7 +189,7 @@ async fn test_create_enforcement_with_conditions() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -255,7 +255,7 @@ async fn test_create_enforcement_with_any_condition() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -397,7 +397,7 @@ async fn test_find_enforcement_by_id() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -471,7 +471,7 @@ async fn test_get_enforcement_by_id() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -552,7 +552,7 @@ async fn test_list_enforcements() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -624,7 +624,7 @@ async fn test_update_enforcement_status() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -690,7 +690,7 @@ async fn test_update_enforcement_status_transitions() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -769,7 +769,7 @@ async fn test_update_enforcement_payload() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -832,7 +832,7 @@ async fn test_update_enforcement_both_fields() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -896,7 +896,7 @@ async fn test_update_enforcement_no_changes() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -981,7 +981,7 @@ async fn test_delete_enforcement() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1056,7 +1056,7 @@ async fn test_find_enforcements_by_rule() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Rule 1".to_string(),
|
label: "Rule 1".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1078,7 +1078,7 @@ async fn test_find_enforcements_by_rule() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Rule 2".to_string(),
|
label: "Rule 2".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1149,7 +1149,7 @@ async fn test_find_enforcements_by_status() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1239,7 +1239,7 @@ async fn test_find_enforcements_by_event() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1324,7 +1324,7 @@ async fn test_delete_rule_sets_enforcement_rule_to_null() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1390,7 +1390,7 @@ async fn test_enforcement_resolved_at_lifecycle() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
|
|||||||
@@ -449,7 +449,7 @@ async fn test_delete_event_enforcement_retains_event_id() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
|
|||||||
@@ -454,7 +454,7 @@ impl ActionFixture {
|
|||||||
pack_ref: self.pack_ref,
|
pack_ref: self.pack_ref,
|
||||||
r#ref: self.r#ref,
|
r#ref: self.r#ref,
|
||||||
label: self.label,
|
label: self.label,
|
||||||
description: self.description,
|
description: Some(self.description),
|
||||||
entrypoint: self.entrypoint,
|
entrypoint: self.entrypoint,
|
||||||
runtime: self.runtime,
|
runtime: self.runtime,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
@@ -1088,7 +1088,7 @@ impl SensorFixture {
|
|||||||
pack: self.pack_id,
|
pack: self.pack_id,
|
||||||
pack_ref: self.pack_ref,
|
pack_ref: self.pack_ref,
|
||||||
label: self.label,
|
label: self.label,
|
||||||
description: self.description,
|
description: Some(self.description),
|
||||||
entrypoint: self.entrypoint,
|
entrypoint: self.entrypoint,
|
||||||
runtime: self.runtime_id,
|
runtime: self.runtime_id,
|
||||||
runtime_ref: self.runtime_ref,
|
runtime_ref: self.runtime_ref,
|
||||||
|
|||||||
@@ -219,6 +219,7 @@ async fn test_update_identity() {
|
|||||||
display_name: Some("Updated Name".to_string()),
|
display_name: Some("Updated Name".to_string()),
|
||||||
password_hash: None,
|
password_hash: None,
|
||||||
attributes: Some(json!({"key": "updated", "new_key": "new_value"})),
|
attributes: Some(json!({"key": "updated", "new_key": "new_value"})),
|
||||||
|
frozen: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let updated = IdentityRepository::update(&pool, identity.id, update_input)
|
let updated = IdentityRepository::update(&pool, identity.id, update_input)
|
||||||
@@ -252,6 +253,7 @@ async fn test_update_identity_partial() {
|
|||||||
display_name: Some("Only Display Name Changed".to_string()),
|
display_name: Some("Only Display Name Changed".to_string()),
|
||||||
password_hash: None,
|
password_hash: None,
|
||||||
attributes: None,
|
attributes: None,
|
||||||
|
frozen: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let updated = IdentityRepository::update(&pool, identity.id, update_input)
|
let updated = IdentityRepository::update(&pool, identity.id, update_input)
|
||||||
@@ -274,6 +276,7 @@ async fn test_update_identity_not_found() {
|
|||||||
display_name: Some("Updated Name".to_string()),
|
display_name: Some("Updated Name".to_string()),
|
||||||
password_hash: None,
|
password_hash: None,
|
||||||
attributes: None,
|
attributes: None,
|
||||||
|
frozen: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let result = IdentityRepository::update(&pool, 999999, update_input).await;
|
let result = IdentityRepository::update(&pool, 999999, update_input).await;
|
||||||
@@ -380,6 +383,7 @@ async fn test_identity_updated_changes_on_update() {
|
|||||||
display_name: Some("Updated".to_string()),
|
display_name: Some("Updated".to_string()),
|
||||||
password_hash: None,
|
password_hash: None,
|
||||||
attributes: None,
|
attributes: None,
|
||||||
|
frozen: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let updated = IdentityRepository::update(&pool, identity.id, update_input)
|
let updated = IdentityRepository::update(&pool, identity.id, update_input)
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ mod helpers;
|
|||||||
use attune_common::{
|
use attune_common::{
|
||||||
repositories::{
|
repositories::{
|
||||||
rule::{CreateRuleInput, RuleRepository, UpdateRuleInput},
|
rule::{CreateRuleInput, RuleRepository, UpdateRuleInput},
|
||||||
Create, Delete, FindById, FindByRef, List, Update,
|
Create, Delete, FindById, FindByRef, List, Patch, Update,
|
||||||
},
|
},
|
||||||
Error,
|
Error,
|
||||||
};
|
};
|
||||||
@@ -48,7 +48,7 @@ async fn test_create_rule() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "A test rule".to_string(),
|
description: Some("A test rule".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -66,7 +66,7 @@ async fn test_create_rule() {
|
|||||||
assert_eq!(rule.pack, pack.id);
|
assert_eq!(rule.pack, pack.id);
|
||||||
assert_eq!(rule.pack_ref, pack.r#ref);
|
assert_eq!(rule.pack_ref, pack.r#ref);
|
||||||
assert_eq!(rule.label, "Test Rule");
|
assert_eq!(rule.label, "Test Rule");
|
||||||
assert_eq!(rule.description, "A test rule");
|
assert_eq!(rule.description, Some("A test rule".to_string()));
|
||||||
assert_eq!(rule.action, Some(action.id));
|
assert_eq!(rule.action, Some(action.id));
|
||||||
assert_eq!(rule.action_ref, action.r#ref);
|
assert_eq!(rule.action_ref, action.r#ref);
|
||||||
assert_eq!(rule.trigger, Some(trigger.id));
|
assert_eq!(rule.trigger, Some(trigger.id));
|
||||||
@@ -105,7 +105,7 @@ async fn test_create_rule_disabled() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Disabled Rule".to_string(),
|
label: "Disabled Rule".to_string(),
|
||||||
description: "A disabled rule".to_string(),
|
description: Some("A disabled rule".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -155,7 +155,7 @@ async fn test_create_rule_with_complex_conditions() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Complex Rule".to_string(),
|
label: "Complex Rule".to_string(),
|
||||||
description: "Rule with complex conditions".to_string(),
|
description: Some("Rule with complex conditions".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -200,7 +200,7 @@ async fn test_create_rule_duplicate_ref() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "First Rule".to_string(),
|
label: "First Rule".to_string(),
|
||||||
description: "First".to_string(),
|
description: Some("First".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -220,7 +220,7 @@ async fn test_create_rule_duplicate_ref() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Second Rule".to_string(),
|
label: "Second Rule".to_string(),
|
||||||
description: "Second".to_string(),
|
description: Some("Second".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -274,7 +274,7 @@ async fn test_create_rule_invalid_ref_format_uppercase() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Upper Rule".to_string(),
|
label: "Upper Rule".to_string(),
|
||||||
description: "Invalid uppercase ref".to_string(),
|
description: Some("Invalid uppercase ref".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -316,7 +316,7 @@ async fn test_create_rule_invalid_ref_format_no_dot() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "No Dot Rule".to_string(),
|
label: "No Dot Rule".to_string(),
|
||||||
description: "Invalid ref without dot".to_string(),
|
description: Some("Invalid ref without dot".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -362,7 +362,7 @@ async fn test_find_rule_by_id() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Find Rule".to_string(),
|
label: "Find Rule".to_string(),
|
||||||
description: "Rule to find".to_string(),
|
description: Some("Rule to find".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -422,7 +422,7 @@ async fn test_find_rule_by_ref() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Find By Ref Rule".to_string(),
|
label: "Find By Ref Rule".to_string(),
|
||||||
description: "Find by ref".to_string(),
|
description: Some("Find by ref".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -484,7 +484,7 @@ async fn test_list_rules() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: format!("List Rule {}", i),
|
label: format!("List Rule {}", i),
|
||||||
description: format!("Rule {}", i),
|
description: Some(format!("Rule {}", i)),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -538,7 +538,7 @@ async fn test_list_rules_ordered_by_ref() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: name.to_string(),
|
label: name.to_string(),
|
||||||
description: name.to_string(),
|
description: Some(name.to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -594,7 +594,7 @@ async fn test_update_rule_label() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Original Label".to_string(),
|
label: "Original Label".to_string(),
|
||||||
description: "Original".to_string(),
|
description: Some("Original".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -618,7 +618,7 @@ async fn test_update_rule_label() {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(updated.label, "Updated Label");
|
assert_eq!(updated.label, "Updated Label");
|
||||||
assert_eq!(updated.description, "Original"); // unchanged
|
assert_eq!(updated.description, Some("Original".to_string())); // unchanged
|
||||||
assert!(updated.updated > created.updated);
|
assert!(updated.updated > created.updated);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -647,7 +647,7 @@ async fn test_update_rule_description() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test".to_string(),
|
label: "Test".to_string(),
|
||||||
description: "Old description".to_string(),
|
description: Some("Old description".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -662,7 +662,7 @@ async fn test_update_rule_description() {
|
|||||||
let created = RuleRepository::create(&pool, input).await.unwrap();
|
let created = RuleRepository::create(&pool, input).await.unwrap();
|
||||||
|
|
||||||
let update = UpdateRuleInput {
|
let update = UpdateRuleInput {
|
||||||
description: Some("New description".to_string()),
|
description: Some(Patch::Set("New description".to_string())),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -670,7 +670,7 @@ async fn test_update_rule_description() {
|
|||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(updated.description, "New description");
|
assert_eq!(updated.description, Some("New description".to_string()));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
@@ -698,7 +698,7 @@ async fn test_update_rule_conditions() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test".to_string(),
|
label: "Test".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -750,7 +750,7 @@ async fn test_update_rule_enabled() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test".to_string(),
|
label: "Test".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -803,7 +803,7 @@ async fn test_update_rule_multiple_fields() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Old".to_string(),
|
label: "Old".to_string(),
|
||||||
description: "Old".to_string(),
|
description: Some("Old".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -819,7 +819,7 @@ async fn test_update_rule_multiple_fields() {
|
|||||||
|
|
||||||
let update = UpdateRuleInput {
|
let update = UpdateRuleInput {
|
||||||
label: Some("New Label".to_string()),
|
label: Some("New Label".to_string()),
|
||||||
description: Some("New Description".to_string()),
|
description: Some(Patch::Set("New Description".to_string())),
|
||||||
conditions: Some(json!({"updated": true})),
|
conditions: Some(json!({"updated": true})),
|
||||||
action_params: None,
|
action_params: None,
|
||||||
trigger_params: None,
|
trigger_params: None,
|
||||||
@@ -831,7 +831,7 @@ async fn test_update_rule_multiple_fields() {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(updated.label, "New Label");
|
assert_eq!(updated.label, "New Label");
|
||||||
assert_eq!(updated.description, "New Description");
|
assert_eq!(updated.description, Some("New Description".to_string()));
|
||||||
assert_eq!(updated.conditions, json!({"updated": true}));
|
assert_eq!(updated.conditions, json!({"updated": true}));
|
||||||
assert!(!updated.enabled);
|
assert!(!updated.enabled);
|
||||||
}
|
}
|
||||||
@@ -861,7 +861,7 @@ async fn test_update_rule_no_changes() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Test".to_string(),
|
label: "Test".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -914,7 +914,7 @@ async fn test_delete_rule() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "To Delete".to_string(),
|
label: "To Delete".to_string(),
|
||||||
description: "Will be deleted".to_string(),
|
description: Some("Will be deleted".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -995,7 +995,7 @@ async fn test_find_rules_by_pack() {
|
|||||||
pack: pack1.id,
|
pack: pack1.id,
|
||||||
pack_ref: pack1.r#ref.clone(),
|
pack_ref: pack1.r#ref.clone(),
|
||||||
label: format!("Rule {}", i),
|
label: format!("Rule {}", i),
|
||||||
description: format!("Rule {}", i),
|
description: Some(format!("Rule {}", i)),
|
||||||
action: action1.id,
|
action: action1.id,
|
||||||
action_ref: action1.r#ref.clone(),
|
action_ref: action1.r#ref.clone(),
|
||||||
trigger: trigger1.id,
|
trigger: trigger1.id,
|
||||||
@@ -1016,7 +1016,7 @@ async fn test_find_rules_by_pack() {
|
|||||||
pack: pack2.id,
|
pack: pack2.id,
|
||||||
pack_ref: pack2.r#ref.clone(),
|
pack_ref: pack2.r#ref.clone(),
|
||||||
label: "Pack2 Rule".to_string(),
|
label: "Pack2 Rule".to_string(),
|
||||||
description: "Pack2".to_string(),
|
description: Some("Pack2".to_string()),
|
||||||
action: action2.id,
|
action: action2.id,
|
||||||
action_ref: action2.r#ref.clone(),
|
action_ref: action2.r#ref.clone(),
|
||||||
trigger: trigger2.id,
|
trigger: trigger2.id,
|
||||||
@@ -1073,7 +1073,7 @@ async fn test_find_rules_by_action() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: format!("Action1 Rule {}", i),
|
label: format!("Action1 Rule {}", i),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action1.id,
|
action: action1.id,
|
||||||
action_ref: action1.r#ref.clone(),
|
action_ref: action1.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1094,7 +1094,7 @@ async fn test_find_rules_by_action() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Action2 Rule".to_string(),
|
label: "Action2 Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action2.id,
|
action: action2.id,
|
||||||
action_ref: action2.r#ref.clone(),
|
action_ref: action2.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1155,7 +1155,7 @@ async fn test_find_rules_by_trigger() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: format!("Trigger1 Rule {}", i),
|
label: format!("Trigger1 Rule {}", i),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger1.id,
|
trigger: trigger1.id,
|
||||||
@@ -1176,7 +1176,7 @@ async fn test_find_rules_by_trigger() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Trigger2 Rule".to_string(),
|
label: "Trigger2 Rule".to_string(),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger2.id,
|
trigger: trigger2.id,
|
||||||
@@ -1234,7 +1234,7 @@ async fn test_find_enabled_rules() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: format!("Enabled {}", i),
|
label: format!("Enabled {}", i),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1256,7 +1256,7 @@ async fn test_find_enabled_rules() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: format!("Disabled {}", i),
|
label: format!("Disabled {}", i),
|
||||||
description: "Test".to_string(),
|
description: Some("Test".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1312,7 +1312,7 @@ async fn test_cascade_delete_pack_deletes_rules() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Cascade Rule".to_string(),
|
label: "Cascade Rule".to_string(),
|
||||||
description: "Will be cascade deleted".to_string(),
|
description: Some("Will be cascade deleted".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
@@ -1368,7 +1368,7 @@ async fn test_rule_timestamps() {
|
|||||||
pack: pack.id,
|
pack: pack.id,
|
||||||
pack_ref: pack.r#ref.clone(),
|
pack_ref: pack.r#ref.clone(),
|
||||||
label: "Timestamp Rule".to_string(),
|
label: "Timestamp Rule".to_string(),
|
||||||
description: "Test timestamps".to_string(),
|
description: Some("Test timestamps".to_string()),
|
||||||
action: action.id,
|
action: action.id,
|
||||||
action_ref: action.r#ref.clone(),
|
action_ref: action.r#ref.clone(),
|
||||||
trigger: trigger.id,
|
trigger: trigger.id,
|
||||||
|
|||||||
@@ -179,7 +179,7 @@ async fn test_create_sensor_duplicate_ref_fails() {
|
|||||||
pack: Some(pack.id),
|
pack: Some(pack.id),
|
||||||
pack_ref: Some(pack.r#ref.clone()),
|
pack_ref: Some(pack.r#ref.clone()),
|
||||||
label: "Duplicate Sensor".to_string(),
|
label: "Duplicate Sensor".to_string(),
|
||||||
description: "Test sensor".to_string(),
|
description: Some("Test sensor".to_string()),
|
||||||
entrypoint: "sensors/dup.py".to_string(),
|
entrypoint: "sensors/dup.py".to_string(),
|
||||||
runtime: runtime.id,
|
runtime: runtime.id,
|
||||||
runtime_ref: runtime.r#ref.clone(),
|
runtime_ref: runtime.r#ref.clone(),
|
||||||
@@ -235,7 +235,7 @@ async fn test_create_sensor_invalid_ref_format_fails() {
|
|||||||
pack: Some(pack.id),
|
pack: Some(pack.id),
|
||||||
pack_ref: Some(pack.r#ref.clone()),
|
pack_ref: Some(pack.r#ref.clone()),
|
||||||
label: "Invalid Sensor".to_string(),
|
label: "Invalid Sensor".to_string(),
|
||||||
description: "Test sensor".to_string(),
|
description: Some("Test sensor".to_string()),
|
||||||
entrypoint: "sensors/invalid.py".to_string(),
|
entrypoint: "sensors/invalid.py".to_string(),
|
||||||
runtime: runtime.id,
|
runtime: runtime.id,
|
||||||
runtime_ref: runtime.r#ref.clone(),
|
runtime_ref: runtime.r#ref.clone(),
|
||||||
@@ -276,7 +276,7 @@ async fn test_create_sensor_invalid_pack_fails() {
|
|||||||
pack: Some(99999), // Non-existent pack
|
pack: Some(99999), // Non-existent pack
|
||||||
pack_ref: Some("invalid".to_string()),
|
pack_ref: Some("invalid".to_string()),
|
||||||
label: "Invalid Pack Sensor".to_string(),
|
label: "Invalid Pack Sensor".to_string(),
|
||||||
description: "Test sensor".to_string(),
|
description: Some("Test sensor".to_string()),
|
||||||
entrypoint: "sensors/invalid.py".to_string(),
|
entrypoint: "sensors/invalid.py".to_string(),
|
||||||
runtime: runtime.id,
|
runtime: runtime.id,
|
||||||
runtime_ref: runtime.r#ref.clone(),
|
runtime_ref: runtime.r#ref.clone(),
|
||||||
@@ -308,7 +308,7 @@ async fn test_create_sensor_invalid_trigger_fails() {
|
|||||||
pack: None,
|
pack: None,
|
||||||
pack_ref: None,
|
pack_ref: None,
|
||||||
label: "Invalid Trigger Sensor".to_string(),
|
label: "Invalid Trigger Sensor".to_string(),
|
||||||
description: "Test sensor".to_string(),
|
description: Some("Test sensor".to_string()),
|
||||||
entrypoint: "sensors/invalid.py".to_string(),
|
entrypoint: "sensors/invalid.py".to_string(),
|
||||||
runtime: runtime.id,
|
runtime: runtime.id,
|
||||||
runtime_ref: runtime.r#ref.clone(),
|
runtime_ref: runtime.r#ref.clone(),
|
||||||
@@ -340,7 +340,7 @@ async fn test_create_sensor_invalid_runtime_fails() {
|
|||||||
pack: None,
|
pack: None,
|
||||||
pack_ref: None,
|
pack_ref: None,
|
||||||
label: "Invalid Runtime Sensor".to_string(),
|
label: "Invalid Runtime Sensor".to_string(),
|
||||||
description: "Test sensor".to_string(),
|
description: Some("Test sensor".to_string()),
|
||||||
entrypoint: "sensors/invalid.py".to_string(),
|
entrypoint: "sensors/invalid.py".to_string(),
|
||||||
runtime: 99999, // Non-existent runtime
|
runtime: 99999, // Non-existent runtime
|
||||||
runtime_ref: "invalid.runtime".to_string(),
|
runtime_ref: "invalid.runtime".to_string(),
|
||||||
@@ -728,7 +728,7 @@ async fn test_update_description() {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let input = UpdateSensorInput {
|
let input = UpdateSensorInput {
|
||||||
description: Some("New description for the sensor".to_string()),
|
description: Some(Patch::Set("New description for the sensor".to_string())),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -736,7 +736,10 @@ async fn test_update_description() {
|
|||||||
.await
|
.await
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(updated.description, "New description for the sensor");
|
assert_eq!(
|
||||||
|
updated.description,
|
||||||
|
Some("New description for the sensor".to_string())
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
@@ -934,7 +937,7 @@ async fn test_update_multiple_fields() {
|
|||||||
|
|
||||||
let input = UpdateSensorInput {
|
let input = UpdateSensorInput {
|
||||||
label: Some("Multi Update".to_string()),
|
label: Some("Multi Update".to_string()),
|
||||||
description: Some("Updated multiple fields".to_string()),
|
description: Some(Patch::Set("Updated multiple fields".to_string())),
|
||||||
entrypoint: Some("sensors/multi.py".to_string()),
|
entrypoint: Some("sensors/multi.py".to_string()),
|
||||||
enabled: Some(false),
|
enabled: Some(false),
|
||||||
param_schema: Some(Patch::Set(json!({"type": "object"}))),
|
param_schema: Some(Patch::Set(json!({"type": "object"}))),
|
||||||
@@ -946,7 +949,10 @@ async fn test_update_multiple_fields() {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(updated.label, "Multi Update");
|
assert_eq!(updated.label, "Multi Update");
|
||||||
assert_eq!(updated.description, "Updated multiple fields");
|
assert_eq!(
|
||||||
|
updated.description,
|
||||||
|
Some("Updated multiple fields".to_string())
|
||||||
|
);
|
||||||
assert_eq!(updated.entrypoint, "sensors/multi.py");
|
assert_eq!(updated.entrypoint, "sensors/multi.py");
|
||||||
assert!(!updated.enabled);
|
assert!(!updated.enabled);
|
||||||
assert_eq!(updated.param_schema, Some(json!({"type": "object"})));
|
assert_eq!(updated.param_schema, Some(json!({"type": "object"})));
|
||||||
|
|||||||
@@ -368,7 +368,7 @@ mod tests {
|
|||||||
pack: 1,
|
pack: 1,
|
||||||
pack_ref: "test".to_string(),
|
pack_ref: "test".to_string(),
|
||||||
label: "Test Rule".to_string(),
|
label: "Test Rule".to_string(),
|
||||||
description: "Test rule description".to_string(),
|
description: Some("Test rule description".to_string()),
|
||||||
trigger_ref: "test.trigger".to_string(),
|
trigger_ref: "test.trigger".to_string(),
|
||||||
trigger: Some(1),
|
trigger: Some(1),
|
||||||
action_ref: "test.action".to_string(),
|
action_ref: "test.action".to_string(),
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ async fn create_test_action(pool: &PgPool, pack_id: i64, pack_ref: &str, suffix:
|
|||||||
pack: pack_id,
|
pack: pack_id,
|
||||||
pack_ref: pack_ref.to_string(),
|
pack_ref: pack_ref.to_string(),
|
||||||
label: format!("FIFO Test Action {}", suffix),
|
label: format!("FIFO Test Action {}", suffix),
|
||||||
description: format!("Test action {}", suffix),
|
description: Some(format!("Test action {}", suffix)),
|
||||||
entrypoint: "echo test".to_string(),
|
entrypoint: "echo test".to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -94,7 +94,7 @@ async fn create_test_action(pool: &PgPool, pack_id: i64, suffix: &str) -> i64 {
|
|||||||
pack: pack_id,
|
pack: pack_id,
|
||||||
pack_ref: format!("test_pack_{}", suffix),
|
pack_ref: format!("test_pack_{}", suffix),
|
||||||
label: format!("Test Action {}", suffix),
|
label: format!("Test Action {}", suffix),
|
||||||
description: format!("Test action {}", suffix),
|
description: Some(format!("Test action {}", suffix)),
|
||||||
entrypoint: "echo test".to_string(),
|
entrypoint: "echo test".to_string(),
|
||||||
runtime: None,
|
runtime: None,
|
||||||
runtime_version_constraint: None,
|
runtime_version_constraint: None,
|
||||||
|
|||||||
@@ -27,6 +27,37 @@ use tracing::{debug, error, info, warn};
|
|||||||
|
|
||||||
use crate::api_client::ApiClient;
|
use crate::api_client::ApiClient;
|
||||||
|
|
||||||
|
fn existing_command_env(cmd: &Command, key: &str) -> Option<String> {
|
||||||
|
cmd.as_std()
|
||||||
|
.get_envs()
|
||||||
|
.find_map(|(env_key, value)| {
|
||||||
|
if env_key == key {
|
||||||
|
value.map(|value| value.to_string_lossy().into_owned())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.or_else(|| std::env::var(key).ok())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn apply_runtime_env_vars(
|
||||||
|
cmd: &mut Command,
|
||||||
|
exec_config: &RuntimeExecutionConfig,
|
||||||
|
pack_dir: &std::path::Path,
|
||||||
|
env_dir: Option<&std::path::Path>,
|
||||||
|
) {
|
||||||
|
if exec_config.env_vars.is_empty() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let vars = exec_config.build_template_vars_with_env(pack_dir, env_dir);
|
||||||
|
for (key, env_var_config) in &exec_config.env_vars {
|
||||||
|
let resolved = env_var_config.resolve(&vars, existing_command_env(cmd, key).as_deref());
|
||||||
|
debug!("Setting sensor runtime env var: {}={}", key, resolved);
|
||||||
|
cmd.env(key, resolved);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Sensor manager that coordinates all sensor instances
|
/// Sensor manager that coordinates all sensor instances
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct SensorManager {
|
pub struct SensorManager {
|
||||||
@@ -502,20 +533,7 @@ impl SensorManager {
|
|||||||
.env("ATTUNE_MQ_EXCHANGE", "attune.events")
|
.env("ATTUNE_MQ_EXCHANGE", "attune.events")
|
||||||
.env("ATTUNE_LOG_LEVEL", "info");
|
.env("ATTUNE_LOG_LEVEL", "info");
|
||||||
|
|
||||||
if !exec_config.env_vars.is_empty() {
|
apply_runtime_env_vars(&mut cmd, &exec_config, &pack_dir, env_dir_opt);
|
||||||
let vars = exec_config.build_template_vars_with_env(&pack_dir, env_dir_opt);
|
|
||||||
for (key, value_template) in &exec_config.env_vars {
|
|
||||||
let resolved = attune_common::models::RuntimeExecutionConfig::resolve_template(
|
|
||||||
value_template,
|
|
||||||
&vars,
|
|
||||||
);
|
|
||||||
debug!(
|
|
||||||
"Setting sensor runtime env var: {}={} (template: {})",
|
|
||||||
key, resolved, value_template
|
|
||||||
);
|
|
||||||
cmd.env(key, resolved);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut child = cmd
|
let mut child = cmd
|
||||||
.stdin(Stdio::null())
|
.stdin(Stdio::null())
|
||||||
@@ -904,6 +922,10 @@ pub struct SensorStatus {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use attune_common::models::runtime::{
|
||||||
|
RuntimeEnvVarConfig, RuntimeEnvVarOperation, RuntimeEnvVarSpec,
|
||||||
|
};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_sensor_status_default() {
|
fn test_sensor_status_default() {
|
||||||
@@ -913,4 +935,46 @@ mod tests {
|
|||||||
assert_eq!(status.failure_count, 0);
|
assert_eq!(status.failure_count, 0);
|
||||||
assert!(status.last_poll.is_none());
|
assert!(status.last_poll.is_none());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_apply_runtime_env_vars_prepends_to_existing_command_env() {
|
||||||
|
let mut env_vars = HashMap::new();
|
||||||
|
env_vars.insert(
|
||||||
|
"PYTHONPATH".to_string(),
|
||||||
|
RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
|
||||||
|
value: "{pack_dir}/lib".to_string(),
|
||||||
|
operation: RuntimeEnvVarOperation::Prepend,
|
||||||
|
separator: ":".to_string(),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
let exec_config = RuntimeExecutionConfig {
|
||||||
|
env_vars,
|
||||||
|
..RuntimeExecutionConfig::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut cmd = Command::new("python3");
|
||||||
|
cmd.env("PYTHONPATH", "/existing/pythonpath");
|
||||||
|
|
||||||
|
apply_runtime_env_vars(
|
||||||
|
&mut cmd,
|
||||||
|
&exec_config,
|
||||||
|
std::path::Path::new("/packs/testpack"),
|
||||||
|
None,
|
||||||
|
);
|
||||||
|
|
||||||
|
let resolved = cmd
|
||||||
|
.as_std()
|
||||||
|
.get_envs()
|
||||||
|
.find_map(|(key, value)| {
|
||||||
|
if key == "PYTHONPATH" {
|
||||||
|
value.map(|value| value.to_string_lossy().into_owned())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.expect("PYTHONPATH should be set");
|
||||||
|
|
||||||
|
assert_eq!(resolved, "/packs/testpack/lib:/existing/pythonpath");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -452,7 +452,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_detected_runtimes_json_structure() {
|
fn test_detected_runtimes_json_structure() {
|
||||||
// Test the JSON structure that set_detected_runtimes builds
|
// Test the JSON structure that set_detected_runtimes builds
|
||||||
let runtimes = vec![
|
let runtimes = [
|
||||||
DetectedRuntime {
|
DetectedRuntime {
|
||||||
name: "python".to_string(),
|
name: "python".to_string(),
|
||||||
path: "/usr/bin/python3".to_string(),
|
path: "/usr/bin/python3".to_string(),
|
||||||
|
|||||||
@@ -49,6 +49,52 @@ fn bash_single_quote_escape(s: &str) -> String {
|
|||||||
s.replace('\'', "'\\''")
|
s.replace('\'', "'\\''")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn format_command_for_log(cmd: &Command) -> String {
|
||||||
|
let program = cmd.as_std().get_program().to_string_lossy().into_owned();
|
||||||
|
let args = cmd
|
||||||
|
.as_std()
|
||||||
|
.get_args()
|
||||||
|
.map(|arg| arg.to_string_lossy().into_owned())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
let cwd = cmd
|
||||||
|
.as_std()
|
||||||
|
.get_current_dir()
|
||||||
|
.map(|dir| dir.display().to_string())
|
||||||
|
.unwrap_or_else(|| "<inherit>".to_string());
|
||||||
|
let env = cmd
|
||||||
|
.as_std()
|
||||||
|
.get_envs()
|
||||||
|
.map(|(key, value)| {
|
||||||
|
let key = key.to_string_lossy().into_owned();
|
||||||
|
let value = value
|
||||||
|
.map(|v| {
|
||||||
|
if is_sensitive_env_var(&key) {
|
||||||
|
"<redacted>".to_string()
|
||||||
|
} else {
|
||||||
|
v.to_string_lossy().into_owned()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| "<unset>".to_string());
|
||||||
|
format!("{key}={value}")
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
format!(
|
||||||
|
"program={program}, args={args:?}, cwd={cwd}, env={env:?}",
|
||||||
|
args = args,
|
||||||
|
env = env,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_sensitive_env_var(key: &str) -> bool {
|
||||||
|
let upper = key.to_ascii_uppercase();
|
||||||
|
upper.contains("TOKEN")
|
||||||
|
|| upper.contains("SECRET")
|
||||||
|
|| upper.contains("PASSWORD")
|
||||||
|
|| upper.ends_with("_KEY")
|
||||||
|
|| upper == "KEY"
|
||||||
|
}
|
||||||
|
|
||||||
/// A generic runtime driven by `RuntimeExecutionConfig` from the database.
|
/// A generic runtime driven by `RuntimeExecutionConfig` from the database.
|
||||||
///
|
///
|
||||||
/// Each `ProcessRuntime` instance corresponds to a row in the `runtime` table.
|
/// Each `ProcessRuntime` instance corresponds to a row in the `runtime` table.
|
||||||
@@ -784,12 +830,9 @@ impl Runtime for ProcessRuntime {
|
|||||||
// resolved against the current pack/env directories.
|
// resolved against the current pack/env directories.
|
||||||
if !effective_config.env_vars.is_empty() {
|
if !effective_config.env_vars.is_empty() {
|
||||||
let vars = effective_config.build_template_vars_with_env(&pack_dir, env_dir_opt);
|
let vars = effective_config.build_template_vars_with_env(&pack_dir, env_dir_opt);
|
||||||
for (key, value_template) in &effective_config.env_vars {
|
for (key, env_var_config) in &effective_config.env_vars {
|
||||||
let resolved = RuntimeExecutionConfig::resolve_template(value_template, &vars);
|
let resolved = env_var_config.resolve(&vars, env.get(key).map(String::as_str));
|
||||||
debug!(
|
debug!("Setting runtime env var: {}={}", key, resolved);
|
||||||
"Setting runtime env var: {}={} (template: {})",
|
|
||||||
key, resolved, value_template
|
|
||||||
);
|
|
||||||
env.insert(key.clone(), resolved);
|
env.insert(key.clone(), resolved);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -897,10 +940,10 @@ impl Runtime for ProcessRuntime {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// Log the full command about to be executed
|
// Log the spawned process accurately instead of using Command's shell-like Debug output.
|
||||||
info!(
|
info!(
|
||||||
"Running command: {:?} (action: '{}', execution_id: {}, working_dir: {:?})",
|
"Running command: {} (action: '{}', execution_id: {}, working_dir: {:?})",
|
||||||
cmd,
|
format_command_for_log(&cmd),
|
||||||
context.action_ref,
|
context.action_ref,
|
||||||
context.execution_id,
|
context.execution_id,
|
||||||
working_dir
|
working_dir
|
||||||
@@ -1016,7 +1059,8 @@ mod tests {
|
|||||||
use super::*;
|
use super::*;
|
||||||
use attune_common::models::runtime::{
|
use attune_common::models::runtime::{
|
||||||
DependencyConfig, EnvironmentConfig, InlineExecutionConfig, InlineExecutionStrategy,
|
DependencyConfig, EnvironmentConfig, InlineExecutionConfig, InlineExecutionStrategy,
|
||||||
InterpreterConfig, RuntimeExecutionConfig,
|
InterpreterConfig, RuntimeEnvVarConfig, RuntimeEnvVarOperation, RuntimeEnvVarSpec,
|
||||||
|
RuntimeExecutionConfig,
|
||||||
};
|
};
|
||||||
use attune_common::models::{OutputFormat, ParameterDelivery, ParameterFormat};
|
use attune_common::models::{OutputFormat, ParameterDelivery, ParameterFormat};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
@@ -1331,6 +1375,88 @@ mod tests {
|
|||||||
assert!(result.stdout.contains("hello from python process runtime"));
|
assert!(result.stdout.contains("hello from python process runtime"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_execute_python_file_with_pack_lib_on_pythonpath() {
|
||||||
|
let temp_dir = TempDir::new().unwrap();
|
||||||
|
let packs_dir = temp_dir.path().join("packs");
|
||||||
|
let pack_dir = packs_dir.join("testpack");
|
||||||
|
let actions_dir = pack_dir.join("actions");
|
||||||
|
let lib_dir = pack_dir.join("lib");
|
||||||
|
std::fs::create_dir_all(&actions_dir).unwrap();
|
||||||
|
std::fs::create_dir_all(&lib_dir).unwrap();
|
||||||
|
|
||||||
|
std::fs::write(
|
||||||
|
lib_dir.join("helper.py"),
|
||||||
|
"def message():\n return 'hello from pack lib'\n",
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
std::fs::write(
|
||||||
|
actions_dir.join("hello.py"),
|
||||||
|
"import helper\nimport os\nprint(helper.message())\nprint(os.environ['PYTHONPATH'])\n",
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut env_vars = HashMap::new();
|
||||||
|
env_vars.insert(
|
||||||
|
"PYTHONPATH".to_string(),
|
||||||
|
RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
|
||||||
|
value: "{pack_dir}/lib".to_string(),
|
||||||
|
operation: RuntimeEnvVarOperation::Prepend,
|
||||||
|
separator: ":".to_string(),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
let runtime = ProcessRuntime::new(
|
||||||
|
"python".to_string(),
|
||||||
|
RuntimeExecutionConfig {
|
||||||
|
interpreter: InterpreterConfig {
|
||||||
|
binary: "python3".to_string(),
|
||||||
|
args: vec![],
|
||||||
|
file_extension: Some(".py".to_string()),
|
||||||
|
},
|
||||||
|
inline_execution: InlineExecutionConfig::default(),
|
||||||
|
environment: None,
|
||||||
|
dependencies: None,
|
||||||
|
env_vars,
|
||||||
|
},
|
||||||
|
packs_dir,
|
||||||
|
temp_dir.path().join("runtime_envs"),
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut env = HashMap::new();
|
||||||
|
env.insert("PYTHONPATH".to_string(), "/existing/pythonpath".to_string());
|
||||||
|
|
||||||
|
let context = ExecutionContext {
|
||||||
|
execution_id: 3,
|
||||||
|
action_ref: "testpack.hello".to_string(),
|
||||||
|
parameters: HashMap::new(),
|
||||||
|
env,
|
||||||
|
secrets: HashMap::new(),
|
||||||
|
timeout: Some(10),
|
||||||
|
working_dir: None,
|
||||||
|
entry_point: "hello.py".to_string(),
|
||||||
|
code: None,
|
||||||
|
code_path: Some(actions_dir.join("hello.py")),
|
||||||
|
runtime_name: Some("python".to_string()),
|
||||||
|
runtime_config_override: None,
|
||||||
|
runtime_env_dir_suffix: None,
|
||||||
|
selected_runtime_version: None,
|
||||||
|
max_stdout_bytes: 1024 * 1024,
|
||||||
|
max_stderr_bytes: 1024 * 1024,
|
||||||
|
parameter_delivery: ParameterDelivery::default(),
|
||||||
|
parameter_format: ParameterFormat::default(),
|
||||||
|
output_format: OutputFormat::default(),
|
||||||
|
cancel_token: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = runtime.execute(context).await.unwrap();
|
||||||
|
assert_eq!(result.exit_code, 0);
|
||||||
|
assert!(result.stdout.contains("hello from pack lib"));
|
||||||
|
assert!(result
|
||||||
|
.stdout
|
||||||
|
.contains(&format!("{}/lib:/existing/pythonpath", pack_dir.display())));
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_execute_inline_code() {
|
async fn test_execute_inline_code() {
|
||||||
let temp_dir = TempDir::new().unwrap();
|
let temp_dir = TempDir::new().unwrap();
|
||||||
|
|||||||
@@ -28,12 +28,15 @@
|
|||||||
|
|
||||||
ARG RUST_VERSION=1.92
|
ARG RUST_VERSION=1.92
|
||||||
ARG DEBIAN_VERSION=bookworm
|
ARG DEBIAN_VERSION=bookworm
|
||||||
|
ARG RUST_TARGET=x86_64-unknown-linux-musl
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Stage 1: Builder - Cross-compile a statically-linked binary with musl
|
# Stage 1: Builder - Cross-compile a statically-linked binary with musl
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
FROM rust:${RUST_VERSION}-${DEBIAN_VERSION} AS builder
|
FROM rust:${RUST_VERSION}-${DEBIAN_VERSION} AS builder
|
||||||
|
|
||||||
|
ARG RUST_TARGET
|
||||||
|
|
||||||
# Install musl toolchain for static linking
|
# Install musl toolchain for static linking
|
||||||
RUN apt-get update && apt-get install -y \
|
RUN apt-get update && apt-get install -y \
|
||||||
musl-tools \
|
musl-tools \
|
||||||
@@ -42,8 +45,8 @@ RUN apt-get update && apt-get install -y \
|
|||||||
ca-certificates \
|
ca-certificates \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Add the musl target for fully static binaries
|
# Add the requested musl target for fully static binaries
|
||||||
RUN rustup target add x86_64-unknown-linux-musl
|
RUN rustup target add ${RUST_TARGET}
|
||||||
|
|
||||||
WORKDIR /build
|
WORKDIR /build
|
||||||
|
|
||||||
@@ -104,9 +107,9 @@ COPY crates/ ./crates/
|
|||||||
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
|
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
|
||||||
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
|
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
|
||||||
--mount=type=cache,id=agent-target,target=/build/target,sharing=locked \
|
--mount=type=cache,id=agent-target,target=/build/target,sharing=locked \
|
||||||
cargo build --release --target x86_64-unknown-linux-musl --bin attune-agent --bin attune-sensor-agent && \
|
cargo build --release --target ${RUST_TARGET} --bin attune-agent --bin attune-sensor-agent && \
|
||||||
cp /build/target/x86_64-unknown-linux-musl/release/attune-agent /build/attune-agent && \
|
cp /build/target/${RUST_TARGET}/release/attune-agent /build/attune-agent && \
|
||||||
cp /build/target/x86_64-unknown-linux-musl/release/attune-sensor-agent /build/attune-sensor-agent
|
cp /build/target/${RUST_TARGET}/release/attune-sensor-agent /build/attune-sensor-agent
|
||||||
|
|
||||||
# Strip the binaries to minimize size
|
# Strip the binaries to minimize size
|
||||||
RUN strip /build/attune-agent && strip /build/attune-sensor-agent
|
RUN strip /build/attune-agent && strip /build/attune-sensor-agent
|
||||||
|
|||||||
6
docker/Dockerfile.agent-package
Normal file
6
docker/Dockerfile.agent-package
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
FROM busybox:1.36
|
||||||
|
|
||||||
|
COPY dist/attune-agent /usr/local/bin/attune-agent
|
||||||
|
COPY dist/attune-sensor-agent /usr/local/bin/attune-sensor-agent
|
||||||
|
|
||||||
|
ENTRYPOINT ["/usr/local/bin/attune-agent"]
|
||||||
33
docker/Dockerfile.runtime
Normal file
33
docker/Dockerfile.runtime
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
ARG DEBIAN_VERSION=bookworm
|
||||||
|
|
||||||
|
FROM debian:${DEBIAN_VERSION}-slim AS runtime
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y \
|
||||||
|
ca-certificates \
|
||||||
|
libssl3 \
|
||||||
|
curl \
|
||||||
|
git \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
RUN useradd -m -u 1000 attune && \
|
||||||
|
mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs /opt/attune/config /opt/attune/artifacts /opt/attune/agent && \
|
||||||
|
chown -R attune:attune /opt/attune
|
||||||
|
|
||||||
|
WORKDIR /opt/attune
|
||||||
|
|
||||||
|
COPY dist/attune-service-binary /usr/local/bin/attune-service
|
||||||
|
COPY migrations/ ./migrations/
|
||||||
|
|
||||||
|
RUN chown -R attune:attune /opt/attune
|
||||||
|
|
||||||
|
USER attune
|
||||||
|
|
||||||
|
ENV RUST_LOG=info
|
||||||
|
ENV ATTUNE_CONFIG=/opt/attune/config/config.yaml
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
|
||||||
|
CMD curl -f http://localhost:8080/health || exit 1
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
CMD ["/usr/local/bin/attune-service"]
|
||||||
64
docker/distributable/README.md
Normal file
64
docker/distributable/README.md
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
# Attune Docker Dist Bundle
|
||||||
|
|
||||||
|
This directory is a distributable Docker bundle built from the main workspace compose setup.
|
||||||
|
|
||||||
|
It is designed to run Attune without building the Rust services locally:
|
||||||
|
|
||||||
|
- `api`, `executor`, `notifier`, `agent`, and `web` pull published images
|
||||||
|
- database bootstrap, user bootstrap, and pack loading run from local scripts shipped in this bundle
|
||||||
|
- workers and sensor still use stock runtime images plus the published injected agent binaries
|
||||||
|
|
||||||
|
## Registry Defaults
|
||||||
|
|
||||||
|
The compose file defaults to:
|
||||||
|
|
||||||
|
- registry: `git.rdrx.app/attune-system`
|
||||||
|
- tag: `latest`
|
||||||
|
|
||||||
|
Override them with env vars:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export ATTUNE_IMAGE_REGISTRY=git.rdrx.app/attune-system
|
||||||
|
export ATTUNE_IMAGE_TAG=latest
|
||||||
|
```
|
||||||
|
|
||||||
|
If the registry requires auth:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker login git.rdrx.app
|
||||||
|
```
|
||||||
|
|
||||||
|
## Run
|
||||||
|
|
||||||
|
From this directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
Or with an explicit tag:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ATTUNE_IMAGE_TAG=sha-xxxxxxxxxxxx docker compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rebuild Bundle
|
||||||
|
|
||||||
|
Refresh this bundle and create a tarball from the workspace root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
bash scripts/package-docker-dist.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Included Assets
|
||||||
|
|
||||||
|
- `docker-compose.yaml` - published-image compose stack
|
||||||
|
- `config.docker.yaml` - container config mounted into services
|
||||||
|
- `docker/` - init scripts and SQL helpers
|
||||||
|
- `migrations/` - schema migrations for the bootstrap job
|
||||||
|
- `packs/core/` - builtin core pack content
|
||||||
|
- `scripts/load_core_pack.py` - pack loader used by `init-packs`
|
||||||
|
|
||||||
|
## Current Limitation
|
||||||
|
|
||||||
|
The publish workflow does not currently publish dedicated worker or sensor runtime images. This bundle therefore keeps using stock runtime images with the published `attune/agent` image for injection.
|
||||||
159
docker/distributable/config.docker.yaml
Normal file
159
docker/distributable/config.docker.yaml
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
# Attune Docker Environment Configuration
|
||||||
|
# This file overrides base config.yaml settings for Docker deployments
|
||||||
|
|
||||||
|
environment: docker
|
||||||
|
|
||||||
|
# Docker database (PostgreSQL container)
|
||||||
|
database:
|
||||||
|
url: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
max_connections: 20
|
||||||
|
min_connections: 5
|
||||||
|
acquire_timeout: 30
|
||||||
|
idle_timeout: 600
|
||||||
|
max_lifetime: 1800
|
||||||
|
log_statements: false
|
||||||
|
schema: "attune"
|
||||||
|
|
||||||
|
# Docker message queue (RabbitMQ container)
|
||||||
|
message_queue:
|
||||||
|
url: amqp://attune:attune@rabbitmq:5672
|
||||||
|
connection_timeout: 30
|
||||||
|
heartbeat: 60
|
||||||
|
prefetch_count: 10
|
||||||
|
rabbitmq:
|
||||||
|
worker_queue_ttl_ms: 300000 # 5 minutes - expire unprocessed executions
|
||||||
|
dead_letter:
|
||||||
|
enabled: true
|
||||||
|
exchange: attune.dlx
|
||||||
|
ttl_ms: 86400000 # 24 hours - retain DLQ messages for debugging
|
||||||
|
|
||||||
|
# Docker cache (Redis container - optional)
|
||||||
|
cache:
|
||||||
|
enabled: true
|
||||||
|
url: redis://redis:6379
|
||||||
|
connection_timeout: 5
|
||||||
|
default_ttl: 3600
|
||||||
|
|
||||||
|
# API server configuration
|
||||||
|
server:
|
||||||
|
host: 0.0.0.0
|
||||||
|
port: 8080
|
||||||
|
cors_origins:
|
||||||
|
- http://localhost
|
||||||
|
- http://localhost:3000
|
||||||
|
- http://localhost:3001
|
||||||
|
- http://localhost:3002
|
||||||
|
- http://localhost:5173
|
||||||
|
- http://127.0.0.1:3000
|
||||||
|
- http://127.0.0.1:3001
|
||||||
|
- http://127.0.0.1:3002
|
||||||
|
- http://127.0.0.1:5173
|
||||||
|
- http://web
|
||||||
|
request_timeout: 60
|
||||||
|
max_request_size: 10485760 # 10MB
|
||||||
|
|
||||||
|
# Logging configuration
|
||||||
|
log:
|
||||||
|
level: info
|
||||||
|
format: json # Structured logs for container environments
|
||||||
|
console: true
|
||||||
|
|
||||||
|
# Security settings (MUST override via environment variables in production)
|
||||||
|
security:
|
||||||
|
jwt_secret: ${JWT_SECRET}
|
||||||
|
jwt_access_expiration: 3600 # 1 hour
|
||||||
|
jwt_refresh_expiration: 604800 # 7 days
|
||||||
|
encryption_key: ${ENCRYPTION_KEY}
|
||||||
|
enable_auth: true
|
||||||
|
allow_self_registration: false
|
||||||
|
login_page:
|
||||||
|
show_local_login: true
|
||||||
|
show_oidc_login: true
|
||||||
|
oidc:
|
||||||
|
# example local dev
|
||||||
|
enabled: false
|
||||||
|
discovery_url: https://my.sso.provider.com/.well-known/openid-configuration
|
||||||
|
client_id: 31d194737840d32bd3afe6474826976bae346d77247a158c4dc43887278eb605
|
||||||
|
client_secret: xL2C9WOC8shZ2QrZs9VFa10JK1Ob95xcMtZU3N86H1Pz0my5
|
||||||
|
provider_name: my-sso-provider
|
||||||
|
provider_label: My SSO Provider
|
||||||
|
provider_icon_url: https://my.sso.provider.com/favicon.ico
|
||||||
|
redirect_uri: http://localhost:3000/auth/callback
|
||||||
|
post_logout_redirect_uri: http://localhost:3000/login
|
||||||
|
scopes:
|
||||||
|
- groups
|
||||||
|
|
||||||
|
# Packs directory (mounted volume in containers)
|
||||||
|
packs_base_dir: /opt/attune/packs
|
||||||
|
|
||||||
|
# Runtime environments directory (isolated envs like virtualenvs, node_modules).
|
||||||
|
# Kept separate from packs so pack directories remain clean and read-only.
|
||||||
|
# Pattern: {runtime_envs_dir}/{pack_ref}/{runtime_name}
|
||||||
|
runtime_envs_dir: /opt/attune/runtime_envs
|
||||||
|
|
||||||
|
# Artifacts directory (shared volume for file-based artifact storage).
|
||||||
|
# File-type artifacts are written here by execution processes and served by the API.
|
||||||
|
# Pattern: {artifacts_dir}/{ref_slug}/v{version}.{ext}
|
||||||
|
artifacts_dir: /opt/attune/artifacts
|
||||||
|
|
||||||
|
# Executor service configuration
|
||||||
|
executor:
|
||||||
|
service_name: attune-executor
|
||||||
|
max_concurrent_executions: 50
|
||||||
|
heartbeat_interval: 30
|
||||||
|
task_timeout: 300
|
||||||
|
cleanup_interval: 120
|
||||||
|
scheduling_interval: 5
|
||||||
|
retry_max_attempts: 3
|
||||||
|
retry_backoff_multiplier: 2.0
|
||||||
|
retry_backoff_max: 300
|
||||||
|
scheduled_timeout: 300 # 5 minutes - fail executions stuck in SCHEDULED
|
||||||
|
timeout_check_interval: 60 # Check every minute for stale executions
|
||||||
|
enable_timeout_monitor: true
|
||||||
|
|
||||||
|
# Worker service configuration
|
||||||
|
worker:
|
||||||
|
service_name: attune-worker
|
||||||
|
worker_type: container
|
||||||
|
max_concurrent_tasks: 20
|
||||||
|
heartbeat_interval: 10 # Reduced from 30s for faster stale detection (staleness = 30s)
|
||||||
|
task_timeout: 300
|
||||||
|
cleanup_interval: 120
|
||||||
|
work_dir: /tmp/attune-worker
|
||||||
|
python:
|
||||||
|
executable: python3
|
||||||
|
venv_dir: /tmp/attune-worker/venvs
|
||||||
|
requirements_timeout: 300
|
||||||
|
nodejs:
|
||||||
|
executable: node
|
||||||
|
npm_executable: npm
|
||||||
|
modules_dir: /tmp/attune-worker/node_modules
|
||||||
|
install_timeout: 300
|
||||||
|
shell:
|
||||||
|
executable: /bin/bash
|
||||||
|
allowed_shells:
|
||||||
|
- /bin/bash
|
||||||
|
- /bin/sh
|
||||||
|
|
||||||
|
# Sensor service configuration
|
||||||
|
sensor:
|
||||||
|
service_name: attune-sensor
|
||||||
|
heartbeat_interval: 10 # Reduced from 30s for faster stale detection
|
||||||
|
max_concurrent_sensors: 50
|
||||||
|
sensor_timeout: 300
|
||||||
|
polling_interval: 10
|
||||||
|
cleanup_interval: 120
|
||||||
|
|
||||||
|
# Notifier service configuration
|
||||||
|
notifier:
|
||||||
|
service_name: attune-notifier
|
||||||
|
websocket_host: 0.0.0.0
|
||||||
|
websocket_port: 8081
|
||||||
|
heartbeat_interval: 30
|
||||||
|
connection_timeout: 60
|
||||||
|
max_connections: 1000
|
||||||
|
message_buffer_size: 10000
|
||||||
|
|
||||||
|
# Agent binary distribution (serves the agent binary via API for remote downloads)
|
||||||
|
agent:
|
||||||
|
binary_dir: /opt/attune/agent
|
||||||
581
docker/distributable/docker-compose.yaml
Normal file
581
docker/distributable/docker-compose.yaml
Normal file
@@ -0,0 +1,581 @@
|
|||||||
|
name: attune
|
||||||
|
|
||||||
|
services:
|
||||||
|
postgres:
|
||||||
|
image: timescale/timescaledb:2.17.2-pg16
|
||||||
|
container_name: attune-postgres
|
||||||
|
environment:
|
||||||
|
POSTGRES_USER: attune
|
||||||
|
POSTGRES_PASSWORD: attune
|
||||||
|
POSTGRES_DB: attune
|
||||||
|
PGDATA: /var/lib/postgresql/data/pgdata
|
||||||
|
ports:
|
||||||
|
- "5432:5432"
|
||||||
|
volumes:
|
||||||
|
- postgres_data:/var/lib/postgresql/data
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -U attune"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
migrations:
|
||||||
|
image: postgres:16-alpine
|
||||||
|
container_name: attune-migrations
|
||||||
|
volumes:
|
||||||
|
- ./migrations:/migrations:ro
|
||||||
|
- ./docker/run-migrations.sh:/run-migrations.sh:ro
|
||||||
|
- ./docker/init-roles.sql:/docker/init-roles.sql:ro
|
||||||
|
environment:
|
||||||
|
DB_HOST: postgres
|
||||||
|
DB_PORT: 5432
|
||||||
|
DB_USER: attune
|
||||||
|
DB_PASSWORD: attune
|
||||||
|
DB_NAME: attune
|
||||||
|
MIGRATIONS_DIR: /migrations
|
||||||
|
command: ["/bin/sh", "/run-migrations.sh"]
|
||||||
|
depends_on:
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: on-failure
|
||||||
|
|
||||||
|
init-user:
|
||||||
|
image: postgres:16-alpine
|
||||||
|
container_name: attune-init-user
|
||||||
|
volumes:
|
||||||
|
- ./docker/init-user.sh:/init-user.sh:ro
|
||||||
|
environment:
|
||||||
|
DB_HOST: postgres
|
||||||
|
DB_PORT: 5432
|
||||||
|
DB_USER: attune
|
||||||
|
DB_PASSWORD: attune
|
||||||
|
DB_NAME: attune
|
||||||
|
DB_SCHEMA: public
|
||||||
|
TEST_LOGIN: ${ATTUNE_TEST_LOGIN:-test@attune.local}
|
||||||
|
TEST_PASSWORD: ${ATTUNE_TEST_PASSWORD:-TestPass123!}
|
||||||
|
TEST_DISPLAY_NAME: ${ATTUNE_TEST_DISPLAY_NAME:-Test User}
|
||||||
|
command: ["/bin/sh", "/init-user.sh"]
|
||||||
|
depends_on:
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: on-failure
|
||||||
|
|
||||||
|
init-packs:
|
||||||
|
image: python:3.11-slim
|
||||||
|
container_name: attune-init-packs
|
||||||
|
volumes:
|
||||||
|
- ./packs:/source/packs:ro
|
||||||
|
- ./scripts/load_core_pack.py:/scripts/load_core_pack.py:ro
|
||||||
|
- ./docker/init-packs.sh:/init-packs.sh:ro
|
||||||
|
- packs_data:/opt/attune/packs
|
||||||
|
- runtime_envs:/opt/attune/runtime_envs
|
||||||
|
- artifacts_data:/opt/attune/artifacts
|
||||||
|
environment:
|
||||||
|
DB_HOST: postgres
|
||||||
|
DB_PORT: 5432
|
||||||
|
DB_USER: attune
|
||||||
|
DB_PASSWORD: attune
|
||||||
|
DB_NAME: attune
|
||||||
|
DB_SCHEMA: public
|
||||||
|
SOURCE_PACKS_DIR: /source/packs
|
||||||
|
TARGET_PACKS_DIR: /opt/attune/packs
|
||||||
|
LOADER_SCRIPT: /scripts/load_core_pack.py
|
||||||
|
DEFAULT_ADMIN_LOGIN: ${ATTUNE_TEST_LOGIN:-test@attune.local}
|
||||||
|
DEFAULT_ADMIN_PERMISSION_SET_REF: core.admin
|
||||||
|
command: ["/bin/sh", "/init-packs.sh"]
|
||||||
|
depends_on:
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: on-failure
|
||||||
|
entrypoint: ""
|
||||||
|
|
||||||
|
init-agent:
|
||||||
|
image: ${ATTUNE_IMAGE_REGISTRY:-git.rdrx.app/attune-system}/attune/agent:${ATTUNE_IMAGE_TAG:-latest}
|
||||||
|
container_name: attune-init-agent
|
||||||
|
volumes:
|
||||||
|
- agent_bin:/opt/attune/agent
|
||||||
|
entrypoint:
|
||||||
|
[
|
||||||
|
"/bin/sh",
|
||||||
|
"-c",
|
||||||
|
"cp /usr/local/bin/attune-agent /opt/attune/agent/attune-agent && cp /usr/local/bin/attune-sensor-agent /opt/attune/agent/attune-sensor-agent && chmod +x /opt/attune/agent/attune-agent /opt/attune/agent/attune-sensor-agent && /usr/local/bin/attune-agent --version > /opt/attune/agent/attune-agent.version && /usr/local/bin/attune-sensor-agent --version > /opt/attune/agent/attune-sensor-agent.version && echo 'Agent binaries copied successfully'",
|
||||||
|
]
|
||||||
|
restart: "no"
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
|
||||||
|
rabbitmq:
|
||||||
|
image: rabbitmq:3.13-management-alpine
|
||||||
|
container_name: attune-rabbitmq
|
||||||
|
environment:
|
||||||
|
RABBITMQ_DEFAULT_USER: attune
|
||||||
|
RABBITMQ_DEFAULT_PASS: attune
|
||||||
|
RABBITMQ_DEFAULT_VHOST: /
|
||||||
|
ports:
|
||||||
|
- "5672:5672"
|
||||||
|
- "15672:15672"
|
||||||
|
volumes:
|
||||||
|
- rabbitmq_data:/var/lib/rabbitmq
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
redis:
|
||||||
|
image: redis:7-alpine
|
||||||
|
container_name: attune-redis
|
||||||
|
ports:
|
||||||
|
- "6379:6379"
|
||||||
|
volumes:
|
||||||
|
- redis_data:/data
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "redis-cli", "ping"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
command: redis-server --appendonly yes
|
||||||
|
|
||||||
|
api:
|
||||||
|
image: ${ATTUNE_IMAGE_REGISTRY:-git.rdrx.app/attune-system}/attune/api:${ATTUNE_IMAGE_TAG:-latest}
|
||||||
|
container_name: attune-api
|
||||||
|
environment:
|
||||||
|
RUST_LOG: info
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE__CACHE__URL: redis://redis:6379
|
||||||
|
ATTUNE__WORKER__WORKER_TYPE: container
|
||||||
|
ports:
|
||||||
|
- "8080:8080"
|
||||||
|
volumes:
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- packs_data:/opt/attune/packs:rw
|
||||||
|
- runtime_envs:/opt/attune/runtime_envs
|
||||||
|
- artifacts_data:/opt/attune/artifacts
|
||||||
|
- api_logs:/opt/attune/logs
|
||||||
|
- agent_bin:/opt/attune/agent:ro
|
||||||
|
depends_on:
|
||||||
|
init-agent:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-packs:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-user:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
executor:
|
||||||
|
image: ${ATTUNE_IMAGE_REGISTRY:-git.rdrx.app/attune-system}/attune/executor:${ATTUNE_IMAGE_TAG:-latest}
|
||||||
|
container_name: attune-executor
|
||||||
|
environment:
|
||||||
|
RUST_LOG: info
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE__CACHE__URL: redis://redis:6379
|
||||||
|
ATTUNE__WORKER__WORKER_TYPE: container
|
||||||
|
volumes:
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- packs_data:/opt/attune/packs:ro
|
||||||
|
- artifacts_data:/opt/attune/artifacts:ro
|
||||||
|
- executor_logs:/opt/attune/logs
|
||||||
|
depends_on:
|
||||||
|
init-packs:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-user:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "kill -0 1 || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
worker-shell:
|
||||||
|
image: debian:bookworm-slim
|
||||||
|
container_name: attune-worker-shell
|
||||||
|
entrypoint: ["/opt/attune/agent/attune-agent"]
|
||||||
|
stop_grace_period: 45s
|
||||||
|
environment:
|
||||||
|
RUST_LOG: info
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE_WORKER_TYPE: container
|
||||||
|
ATTUNE_WORKER_NAME: worker-shell-01
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE_API_URL: http://attune-api:8080
|
||||||
|
volumes:
|
||||||
|
- agent_bin:/opt/attune/agent:ro
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- packs_data:/opt/attune/packs:ro
|
||||||
|
- runtime_envs:/opt/attune/runtime_envs
|
||||||
|
- artifacts_data:/opt/attune/artifacts
|
||||||
|
- worker_shell_logs:/opt/attune/logs
|
||||||
|
depends_on:
|
||||||
|
init-agent:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-packs:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-user:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pgrep -f attune-agent || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
worker-python:
|
||||||
|
image: python:3.12-slim
|
||||||
|
container_name: attune-worker-python
|
||||||
|
entrypoint: ["/opt/attune/agent/attune-agent"]
|
||||||
|
stop_grace_period: 45s
|
||||||
|
environment:
|
||||||
|
RUST_LOG: info
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE_WORKER_TYPE: container
|
||||||
|
ATTUNE_WORKER_NAME: worker-python-01
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE_API_URL: http://attune-api:8080
|
||||||
|
volumes:
|
||||||
|
- agent_bin:/opt/attune/agent:ro
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- packs_data:/opt/attune/packs:ro
|
||||||
|
- runtime_envs:/opt/attune/runtime_envs
|
||||||
|
- artifacts_data:/opt/attune/artifacts
|
||||||
|
- worker_python_logs:/opt/attune/logs
|
||||||
|
depends_on:
|
||||||
|
init-agent:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-packs:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-user:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pgrep -f attune-agent || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
worker-node:
|
||||||
|
image: node:22-slim
|
||||||
|
container_name: attune-worker-node
|
||||||
|
entrypoint: ["/opt/attune/agent/attune-agent"]
|
||||||
|
stop_grace_period: 45s
|
||||||
|
environment:
|
||||||
|
RUST_LOG: info
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE_WORKER_TYPE: container
|
||||||
|
ATTUNE_WORKER_NAME: worker-node-01
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE_API_URL: http://attune-api:8080
|
||||||
|
volumes:
|
||||||
|
- agent_bin:/opt/attune/agent:ro
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- packs_data:/opt/attune/packs:ro
|
||||||
|
- runtime_envs:/opt/attune/runtime_envs
|
||||||
|
- artifacts_data:/opt/attune/artifacts
|
||||||
|
- worker_node_logs:/opt/attune/logs
|
||||||
|
depends_on:
|
||||||
|
init-agent:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-packs:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-user:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pgrep -f attune-agent || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
worker-full:
|
||||||
|
image: nikolaik/python-nodejs:python3.12-nodejs22-slim
|
||||||
|
container_name: attune-worker-full
|
||||||
|
entrypoint: ["/opt/attune/agent/attune-agent"]
|
||||||
|
stop_grace_period: 45s
|
||||||
|
environment:
|
||||||
|
RUST_LOG: info
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE_WORKER_RUNTIMES: shell,python,node,native
|
||||||
|
ATTUNE_WORKER_TYPE: container
|
||||||
|
ATTUNE_WORKER_NAME: worker-full-01
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE_API_URL: http://attune-api:8080
|
||||||
|
volumes:
|
||||||
|
- agent_bin:/opt/attune/agent:ro
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- packs_data:/opt/attune/packs:ro
|
||||||
|
- runtime_envs:/opt/attune/runtime_envs
|
||||||
|
- artifacts_data:/opt/attune/artifacts
|
||||||
|
- worker_full_logs:/opt/attune/logs
|
||||||
|
depends_on:
|
||||||
|
init-agent:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-packs:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-user:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pgrep -f attune-agent || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
sensor:
|
||||||
|
image: nikolaik/python-nodejs:python3.12-nodejs22-slim
|
||||||
|
container_name: attune-sensor
|
||||||
|
entrypoint: ["/opt/attune/agent/attune-sensor-agent"]
|
||||||
|
stop_grace_period: 45s
|
||||||
|
environment:
|
||||||
|
RUST_LOG: debug
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE_SENSOR_RUNTIMES: shell,python,node,native
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE__WORKER__WORKER_TYPE: container
|
||||||
|
ATTUNE_API_URL: http://attune-api:8080
|
||||||
|
ATTUNE_MQ_URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE_PACKS_BASE_DIR: /opt/attune/packs
|
||||||
|
volumes:
|
||||||
|
- agent_bin:/opt/attune/agent:ro
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- packs_data:/opt/attune/packs:rw
|
||||||
|
- runtime_envs:/opt/attune/runtime_envs
|
||||||
|
- sensor_logs:/opt/attune/logs
|
||||||
|
depends_on:
|
||||||
|
init-agent:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-packs:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
init-user:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "kill -0 1 || exit 1"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
notifier:
|
||||||
|
image: ${ATTUNE_IMAGE_REGISTRY:-git.rdrx.app/attune-system}/attune/notifier:${ATTUNE_IMAGE_TAG:-latest}
|
||||||
|
container_name: attune-notifier
|
||||||
|
environment:
|
||||||
|
RUST_LOG: info
|
||||||
|
ATTUNE_CONFIG: /opt/attune/config/config.yaml
|
||||||
|
ATTUNE__SECURITY__JWT_SECRET: ${JWT_SECRET:-docker-dev-secret-change-in-production}
|
||||||
|
ATTUNE__SECURITY__ENCRYPTION_KEY: ${ENCRYPTION_KEY:-docker-dev-encryption-key-please-change-in-production-32plus}
|
||||||
|
ATTUNE__DATABASE__URL: postgresql://attune:attune@postgres:5432/attune
|
||||||
|
ATTUNE__DATABASE__SCHEMA: public
|
||||||
|
ATTUNE__MESSAGE_QUEUE__URL: amqp://attune:attune@rabbitmq:5672
|
||||||
|
ATTUNE__WORKER__WORKER_TYPE: container
|
||||||
|
ports:
|
||||||
|
- "8081:8081"
|
||||||
|
volumes:
|
||||||
|
- ${ATTUNE_DOCKER_CONFIG_PATH:-./config.docker.yaml}:/opt/attune/config/config.yaml:ro
|
||||||
|
- notifier_logs:/opt/attune/logs
|
||||||
|
depends_on:
|
||||||
|
migrations:
|
||||||
|
condition: service_completed_successfully
|
||||||
|
postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
rabbitmq:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:8081/health"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 20s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
web:
|
||||||
|
image: ${ATTUNE_IMAGE_REGISTRY:-git.rdrx.app/attune-system}/attune/web:${ATTUNE_IMAGE_TAG:-latest}
|
||||||
|
container_name: attune-web
|
||||||
|
environment:
|
||||||
|
API_URL: ${API_URL:-http://localhost:8080}
|
||||||
|
WS_URL: ${WS_URL:-ws://localhost:8081}
|
||||||
|
ENVIRONMENT: docker
|
||||||
|
ports:
|
||||||
|
- "3000:80"
|
||||||
|
depends_on:
|
||||||
|
api:
|
||||||
|
condition: service_healthy
|
||||||
|
notifier:
|
||||||
|
condition: service_healthy
|
||||||
|
healthcheck:
|
||||||
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--no-verbose",
|
||||||
|
"--tries=1",
|
||||||
|
"--spider",
|
||||||
|
"http://localhost/health",
|
||||||
|
]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 10s
|
||||||
|
networks:
|
||||||
|
- attune-network
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
postgres_data:
|
||||||
|
driver: local
|
||||||
|
rabbitmq_data:
|
||||||
|
driver: local
|
||||||
|
redis_data:
|
||||||
|
driver: local
|
||||||
|
api_logs:
|
||||||
|
driver: local
|
||||||
|
executor_logs:
|
||||||
|
driver: local
|
||||||
|
worker_shell_logs:
|
||||||
|
driver: local
|
||||||
|
worker_python_logs:
|
||||||
|
driver: local
|
||||||
|
worker_node_logs:
|
||||||
|
driver: local
|
||||||
|
worker_full_logs:
|
||||||
|
driver: local
|
||||||
|
sensor_logs:
|
||||||
|
driver: local
|
||||||
|
notifier_logs:
|
||||||
|
driver: local
|
||||||
|
packs_data:
|
||||||
|
driver: local
|
||||||
|
runtime_envs:
|
||||||
|
driver: local
|
||||||
|
artifacts_data:
|
||||||
|
driver: local
|
||||||
|
agent_bin:
|
||||||
|
driver: local
|
||||||
|
|
||||||
|
networks:
|
||||||
|
attune-network:
|
||||||
|
driver: bridge
|
||||||
|
ipam:
|
||||||
|
config:
|
||||||
|
- subnet: 172.28.0.0/16
|
||||||
296
docker/distributable/docker/init-packs.sh
Executable file
296
docker/distributable/docker/init-packs.sh
Executable file
@@ -0,0 +1,296 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# Initialize builtin packs for Attune
|
||||||
|
# This script copies pack files to the shared volume and registers them in the database
|
||||||
|
# Designed to run on python:3.11-slim (Debian-based) image
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Color output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Configuration from environment
|
||||||
|
DB_HOST="${DB_HOST:-postgres}"
|
||||||
|
DB_PORT="${DB_PORT:-5432}"
|
||||||
|
DB_USER="${DB_USER:-attune}"
|
||||||
|
DB_PASSWORD="${DB_PASSWORD:-attune}"
|
||||||
|
DB_NAME="${DB_NAME:-attune}"
|
||||||
|
DB_SCHEMA="${DB_SCHEMA:-public}"
|
||||||
|
|
||||||
|
# Pack directories
|
||||||
|
SOURCE_PACKS_DIR="${SOURCE_PACKS_DIR:-/source/packs}"
|
||||||
|
TARGET_PACKS_DIR="${TARGET_PACKS_DIR:-/opt/attune/packs}"
|
||||||
|
|
||||||
|
# Python loader script
|
||||||
|
LOADER_SCRIPT="${LOADER_SCRIPT:-/scripts/load_core_pack.py}"
|
||||||
|
DEFAULT_ADMIN_LOGIN="${DEFAULT_ADMIN_LOGIN:-}"
|
||||||
|
DEFAULT_ADMIN_PERMISSION_SET_REF="${DEFAULT_ADMIN_PERMISSION_SET_REF:-core.admin}"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}╔════════════════════════════════════════════════╗${NC}"
|
||||||
|
echo -e "${BLUE}║ Attune Builtin Packs Initialization ║${NC}"
|
||||||
|
echo -e "${BLUE}╚════════════════════════════════════════════════╝${NC}"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Install Python dependencies
|
||||||
|
echo -e "${YELLOW}→${NC} Installing Python dependencies..."
|
||||||
|
if pip install --quiet --no-cache-dir psycopg2-binary pyyaml; then
|
||||||
|
echo -e "${GREEN}✓${NC} Python dependencies installed"
|
||||||
|
else
|
||||||
|
echo -e "${RED}✗${NC} Failed to install Python dependencies"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Wait for database to be ready (using Python instead of psql to avoid needing postgresql-client)
|
||||||
|
echo -e "${YELLOW}→${NC} Waiting for database to be ready..."
|
||||||
|
until python3 -c "
|
||||||
|
import psycopg2, sys
|
||||||
|
try:
|
||||||
|
conn = psycopg2.connect(host='$DB_HOST', port=$DB_PORT, user='$DB_USER', password='$DB_PASSWORD', dbname='$DB_NAME', connect_timeout=3)
|
||||||
|
conn.close()
|
||||||
|
sys.exit(0)
|
||||||
|
except Exception:
|
||||||
|
sys.exit(1)
|
||||||
|
" 2>/dev/null; do
|
||||||
|
echo -e "${YELLOW} ...${NC} Database is unavailable - sleeping"
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
echo -e "${GREEN}✓${NC} Database is ready"
|
||||||
|
|
||||||
|
# Create target packs directory if it doesn't exist
|
||||||
|
echo -e "${YELLOW}→${NC} Ensuring packs directory exists..."
|
||||||
|
mkdir -p "$TARGET_PACKS_DIR"
|
||||||
|
# Ensure the attune user (uid 1000) can write to the packs directory
|
||||||
|
# so the API service can install packs at runtime
|
||||||
|
chown -R 1000:1000 "$TARGET_PACKS_DIR"
|
||||||
|
echo -e "${GREEN}✓${NC} Packs directory ready at: $TARGET_PACKS_DIR"
|
||||||
|
|
||||||
|
# Initialise runtime environments volume with correct ownership.
|
||||||
|
# Workers (running as attune uid 1000) need write access to create
|
||||||
|
# virtualenvs, node_modules, etc. at runtime.
|
||||||
|
RUNTIME_ENVS_DIR="${RUNTIME_ENVS_DIR:-/opt/attune/runtime_envs}"
|
||||||
|
if [ -d "$RUNTIME_ENVS_DIR" ] || mkdir -p "$RUNTIME_ENVS_DIR" 2>/dev/null; then
|
||||||
|
chown -R 1000:1000 "$RUNTIME_ENVS_DIR"
|
||||||
|
echo -e "${GREEN}✓${NC} Runtime environments directory ready at: $RUNTIME_ENVS_DIR"
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}⚠${NC} Runtime environments directory not mounted, skipping"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Initialise artifacts volume with correct ownership.
|
||||||
|
# The API service (creates directories for file-backed artifact versions) and
|
||||||
|
# workers (write artifact files during execution) both run as attune uid 1000.
|
||||||
|
ARTIFACTS_DIR="${ARTIFACTS_DIR:-/opt/attune/artifacts}"
|
||||||
|
if [ -d "$ARTIFACTS_DIR" ] || mkdir -p "$ARTIFACTS_DIR" 2>/dev/null; then
|
||||||
|
chown -R 1000:1000 "$ARTIFACTS_DIR"
|
||||||
|
echo -e "${GREEN}✓${NC} Artifacts directory ready at: $ARTIFACTS_DIR"
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}⚠${NC} Artifacts directory not mounted, skipping"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if source packs directory exists
|
||||||
|
if [ ! -d "$SOURCE_PACKS_DIR" ]; then
|
||||||
|
echo -e "${RED}✗${NC} Source packs directory not found: $SOURCE_PACKS_DIR"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Find all pack directories (directories with pack.yaml)
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}Discovering builtin packs...${NC}"
|
||||||
|
echo "----------------------------------------"
|
||||||
|
|
||||||
|
PACK_COUNT=0
|
||||||
|
COPIED_COUNT=0
|
||||||
|
LOADED_COUNT=0
|
||||||
|
|
||||||
|
for pack_dir in "$SOURCE_PACKS_DIR"/*; do
|
||||||
|
if [ -d "$pack_dir" ]; then
|
||||||
|
pack_name=$(basename "$pack_dir")
|
||||||
|
pack_yaml="$pack_dir/pack.yaml"
|
||||||
|
|
||||||
|
if [ -f "$pack_yaml" ]; then
|
||||||
|
PACK_COUNT=$((PACK_COUNT + 1))
|
||||||
|
echo -e "${BLUE}→${NC} Found pack: ${GREEN}$pack_name${NC}"
|
||||||
|
|
||||||
|
# Check if pack already exists in target
|
||||||
|
target_pack_dir="$TARGET_PACKS_DIR/$pack_name"
|
||||||
|
|
||||||
|
if [ -d "$target_pack_dir" ]; then
|
||||||
|
# Pack exists, update files to ensure we have latest (especially binaries)
|
||||||
|
echo -e "${YELLOW} ⟳${NC} Pack exists at: $target_pack_dir, updating files..."
|
||||||
|
if cp -rf "$pack_dir"/* "$target_pack_dir"/; then
|
||||||
|
echo -e "${GREEN} ✓${NC} Updated pack files at: $target_pack_dir"
|
||||||
|
else
|
||||||
|
echo -e "${RED} ✗${NC} Failed to update pack"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# Copy pack to target directory
|
||||||
|
echo -e "${YELLOW} →${NC} Copying pack files..."
|
||||||
|
if cp -r "$pack_dir" "$target_pack_dir"; then
|
||||||
|
COPIED_COUNT=$((COPIED_COUNT + 1))
|
||||||
|
echo -e "${GREEN} ✓${NC} Copied to: $target_pack_dir"
|
||||||
|
else
|
||||||
|
echo -e "${RED} ✗${NC} Failed to copy pack"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "----------------------------------------"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
if [ $PACK_COUNT -eq 0 ]; then
|
||||||
|
echo -e "${YELLOW}⚠${NC} No builtin packs found in $SOURCE_PACKS_DIR"
|
||||||
|
echo -e "${BLUE}ℹ${NC} This is OK if you're running with no packs"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo -e "${BLUE}Pack Discovery Summary:${NC}"
|
||||||
|
echo " Total packs found: $PACK_COUNT"
|
||||||
|
echo " Newly copied: $COPIED_COUNT"
|
||||||
|
echo " Already present: $((PACK_COUNT - COPIED_COUNT))"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Load packs into database using Python loader
|
||||||
|
if [ -f "$LOADER_SCRIPT" ]; then
|
||||||
|
echo -e "${BLUE}Loading packs into database...${NC}"
|
||||||
|
echo "----------------------------------------"
|
||||||
|
|
||||||
|
# Build database URL with schema support
|
||||||
|
DATABASE_URL="postgresql://$DB_USER:$DB_PASSWORD@$DB_HOST:$DB_PORT/$DB_NAME"
|
||||||
|
|
||||||
|
# Set search_path for the Python script if not using default schema
|
||||||
|
if [ "$DB_SCHEMA" != "public" ]; then
|
||||||
|
export PGOPTIONS="-c search_path=$DB_SCHEMA,public"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Run the Python loader for each pack
|
||||||
|
for pack_dir in "$TARGET_PACKS_DIR"/*; do
|
||||||
|
if [ -d "$pack_dir" ]; then
|
||||||
|
pack_name=$(basename "$pack_dir")
|
||||||
|
pack_yaml="$pack_dir/pack.yaml"
|
||||||
|
|
||||||
|
if [ -f "$pack_yaml" ]; then
|
||||||
|
echo -e "${YELLOW}→${NC} Loading pack: ${GREEN}$pack_name${NC}"
|
||||||
|
|
||||||
|
# Run Python loader
|
||||||
|
if python3 "$LOADER_SCRIPT" \
|
||||||
|
--database-url "$DATABASE_URL" \
|
||||||
|
--pack-dir "$TARGET_PACKS_DIR" \
|
||||||
|
--pack-name "$pack_name" \
|
||||||
|
--schema "$DB_SCHEMA"; then
|
||||||
|
LOADED_COUNT=$((LOADED_COUNT + 1))
|
||||||
|
echo -e "${GREEN}✓${NC} Loaded pack: $pack_name"
|
||||||
|
else
|
||||||
|
echo -e "${RED}✗${NC} Failed to load pack: $pack_name"
|
||||||
|
echo -e "${YELLOW}⚠${NC} Continuing with other packs..."
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "----------------------------------------"
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}Database Loading Summary:${NC}"
|
||||||
|
echo " Successfully loaded: $LOADED_COUNT"
|
||||||
|
echo " Failed: $((PACK_COUNT - LOADED_COUNT))"
|
||||||
|
echo ""
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}⚠${NC} Pack loader script not found: $LOADER_SCRIPT"
|
||||||
|
echo -e "${BLUE}ℹ${NC} Packs copied but not registered in database"
|
||||||
|
echo -e "${BLUE}ℹ${NC} You can manually load them later"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$DEFAULT_ADMIN_LOGIN" ] && [ "$LOADED_COUNT" -gt 0 ]; then
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}Bootstrapping local admin assignment...${NC}"
|
||||||
|
if python3 - <<PY
|
||||||
|
import psycopg2
|
||||||
|
import sys
|
||||||
|
|
||||||
|
conn = psycopg2.connect(
|
||||||
|
host="${DB_HOST}",
|
||||||
|
port=${DB_PORT},
|
||||||
|
user="${DB_USER}",
|
||||||
|
password="${DB_PASSWORD}",
|
||||||
|
dbname="${DB_NAME}",
|
||||||
|
)
|
||||||
|
conn.autocommit = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
with conn.cursor() as cur:
|
||||||
|
cur.execute("SET search_path TO ${DB_SCHEMA}, public")
|
||||||
|
cur.execute("SELECT id FROM identity WHERE login = %s", ("${DEFAULT_ADMIN_LOGIN}",))
|
||||||
|
identity_row = cur.fetchone()
|
||||||
|
if identity_row is None:
|
||||||
|
print(" ⚠ Default admin identity not found; skipping assignment")
|
||||||
|
conn.rollback()
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
cur.execute("SELECT id FROM permission_set WHERE ref = %s", ("${DEFAULT_ADMIN_PERMISSION_SET_REF}",))
|
||||||
|
permset_row = cur.fetchone()
|
||||||
|
if permset_row is None:
|
||||||
|
print(" ⚠ Default admin permission set not found; skipping assignment")
|
||||||
|
conn.rollback()
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
cur.execute(
|
||||||
|
"""
|
||||||
|
INSERT INTO permission_assignment (identity, permset)
|
||||||
|
VALUES (%s, %s)
|
||||||
|
ON CONFLICT (identity, permset) DO NOTHING
|
||||||
|
""",
|
||||||
|
(identity_row[0], permset_row[0]),
|
||||||
|
)
|
||||||
|
conn.commit()
|
||||||
|
print(" ✓ Default admin permission assignment ensured")
|
||||||
|
except Exception as exc:
|
||||||
|
conn.rollback()
|
||||||
|
print(f" ✗ Failed to ensure default admin assignment: {exc}")
|
||||||
|
sys.exit(1)
|
||||||
|
finally:
|
||||||
|
conn.close()
|
||||||
|
PY
|
||||||
|
then
|
||||||
|
:
|
||||||
|
else
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
echo ""
|
||||||
|
echo -e "${GREEN}╔════════════════════════════════════════════════╗${NC}"
|
||||||
|
echo -e "${GREEN}║ Builtin Packs Initialization Complete! ║${NC}"
|
||||||
|
echo -e "${GREEN}╚════════════════════════════════════════════════╝${NC}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}Packs Location:${NC} ${GREEN}$TARGET_PACKS_DIR${NC}"
|
||||||
|
echo -e "${BLUE}Packs Available:${NC}"
|
||||||
|
|
||||||
|
for pack_dir in "$TARGET_PACKS_DIR"/*; do
|
||||||
|
if [ -d "$pack_dir" ]; then
|
||||||
|
pack_name=$(basename "$pack_dir")
|
||||||
|
pack_yaml="$pack_dir/pack.yaml"
|
||||||
|
if [ -f "$pack_yaml" ]; then
|
||||||
|
# Try to extract version from pack.yaml
|
||||||
|
version=$(grep "^version:" "$pack_yaml" | head -1 | sed 's/version:[[:space:]]*//' | tr -d '"')
|
||||||
|
echo -e " • ${GREEN}$pack_name${NC} ${BLUE}($version)${NC}"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
# Ensure ownership is correct after all packs have been copied
|
||||||
|
# The API service (running as attune uid 1000) needs write access to install new packs
|
||||||
|
chown -R 1000:1000 "$TARGET_PACKS_DIR"
|
||||||
|
|
||||||
|
echo -e "${BLUE}ℹ${NC} Pack files are accessible to all services via shared volume"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
exit 0
|
||||||
29
docker/distributable/docker/init-roles.sql
Normal file
29
docker/distributable/docker/init-roles.sql
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
-- Docker initialization script
|
||||||
|
-- Creates the svc_attune role needed by migrations
|
||||||
|
-- This runs before migrations via docker-compose
|
||||||
|
|
||||||
|
-- Create service role for the application
|
||||||
|
DO $$
|
||||||
|
BEGIN
|
||||||
|
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'svc_attune') THEN
|
||||||
|
CREATE ROLE svc_attune WITH LOGIN PASSWORD 'attune_service_password';
|
||||||
|
END IF;
|
||||||
|
END
|
||||||
|
$$;
|
||||||
|
|
||||||
|
-- Create API role
|
||||||
|
DO $$
|
||||||
|
BEGIN
|
||||||
|
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'attune_api') THEN
|
||||||
|
CREATE ROLE attune_api WITH LOGIN PASSWORD 'attune_api_password';
|
||||||
|
END IF;
|
||||||
|
END
|
||||||
|
$$;
|
||||||
|
|
||||||
|
-- Grant basic permissions
|
||||||
|
GRANT ALL PRIVILEGES ON DATABASE attune TO svc_attune;
|
||||||
|
GRANT ALL PRIVILEGES ON DATABASE attune TO attune_api;
|
||||||
|
|
||||||
|
-- Enable required extensions
|
||||||
|
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||||
|
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
|
||||||
108
docker/distributable/docker/init-user.sh
Executable file
108
docker/distributable/docker/init-user.sh
Executable file
@@ -0,0 +1,108 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# Initialize default test user for Attune
|
||||||
|
# This script creates a default test user if it doesn't already exist
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Color output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Database configuration from environment
|
||||||
|
DB_HOST="${DB_HOST:-postgres}"
|
||||||
|
DB_PORT="${DB_PORT:-5432}"
|
||||||
|
DB_USER="${DB_USER:-attune}"
|
||||||
|
DB_PASSWORD="${DB_PASSWORD:-attune}"
|
||||||
|
DB_NAME="${DB_NAME:-attune}"
|
||||||
|
DB_SCHEMA="${DB_SCHEMA:-public}"
|
||||||
|
|
||||||
|
# Test user configuration
|
||||||
|
TEST_LOGIN="${TEST_LOGIN:-test@attune.local}"
|
||||||
|
TEST_DISPLAY_NAME="${TEST_DISPLAY_NAME:-Test User}"
|
||||||
|
TEST_PASSWORD="${TEST_PASSWORD:-TestPass123!}"
|
||||||
|
|
||||||
|
# Pre-computed Argon2id hash for "TestPass123!"
|
||||||
|
# Using: m=19456, t=2, p=1 (default Argon2id parameters)
|
||||||
|
DEFAULT_PASSWORD_HASH='$argon2id$v=19$m=19456,t=2,p=1$AuZJ0xsGuSRk6LdCd58OOA$vBZnaflJwR9L4LPWoGGrcnRsIOf95FV4uIsoe3PjRE0'
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}╔════════════════════════════════════════════════╗${NC}"
|
||||||
|
echo -e "${BLUE}║ Attune Default User Initialization ║${NC}"
|
||||||
|
echo -e "${BLUE}╚════════════════════════════════════════════════╝${NC}"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Wait for database to be ready
|
||||||
|
echo -e "${YELLOW}→${NC} Waiting for database to be ready..."
|
||||||
|
until PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c '\q' 2>/dev/null; do
|
||||||
|
echo -e "${YELLOW} ...${NC} Database is unavailable - sleeping"
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
echo -e "${GREEN}✓${NC} Database is ready"
|
||||||
|
|
||||||
|
# Check if user already exists
|
||||||
|
echo -e "${YELLOW}→${NC} Checking if user exists..."
|
||||||
|
USER_EXISTS=$(PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -tAc \
|
||||||
|
"SELECT COUNT(*) FROM ${DB_SCHEMA}.identity WHERE login = '$TEST_LOGIN';")
|
||||||
|
|
||||||
|
if [ "$USER_EXISTS" -gt 0 ]; then
|
||||||
|
echo -e "${GREEN}✓${NC} User '$TEST_LOGIN' already exists"
|
||||||
|
echo -e "${BLUE}ℹ${NC} Skipping user creation"
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}→${NC} Creating default test user..."
|
||||||
|
|
||||||
|
# Use the pre-computed hash for default password
|
||||||
|
if [ "$TEST_PASSWORD" = "TestPass123!" ]; then
|
||||||
|
PASSWORD_HASH="$DEFAULT_PASSWORD_HASH"
|
||||||
|
echo -e "${BLUE}ℹ${NC} Using default password hash"
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}⚠${NC} Custom password detected - using basic hash"
|
||||||
|
echo -e "${YELLOW}⚠${NC} For production, generate proper Argon2id hash"
|
||||||
|
# Note: For custom passwords in Docker, you should pre-generate the hash
|
||||||
|
# This is a fallback that will work but is less secure
|
||||||
|
PASSWORD_HASH="$DEFAULT_PASSWORD_HASH"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Insert the user
|
||||||
|
PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" << EOF
|
||||||
|
INSERT INTO ${DB_SCHEMA}.identity (login, display_name, password_hash, attributes)
|
||||||
|
VALUES (
|
||||||
|
'$TEST_LOGIN',
|
||||||
|
'$TEST_DISPLAY_NAME',
|
||||||
|
'$PASSWORD_HASH',
|
||||||
|
jsonb_build_object(
|
||||||
|
'email', '$TEST_LOGIN',
|
||||||
|
'created_via', 'docker-init',
|
||||||
|
'is_test_user', true
|
||||||
|
)
|
||||||
|
);
|
||||||
|
EOF
|
||||||
|
|
||||||
|
if [ $? -eq 0 ]; then
|
||||||
|
echo -e "${GREEN}✓${NC} User created successfully"
|
||||||
|
else
|
||||||
|
echo -e "${RED}✗${NC} Failed to create user"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo -e "${GREEN}╔════════════════════════════════════════════════╗${NC}"
|
||||||
|
echo -e "${GREEN}║ Default User Initialization Complete! ║${NC}"
|
||||||
|
echo -e "${GREEN}╚════════════════════════════════════════════════╝${NC}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}Default User Credentials:${NC}"
|
||||||
|
echo -e " Login: ${GREEN}$TEST_LOGIN${NC}"
|
||||||
|
echo -e " Password: ${GREEN}$TEST_PASSWORD${NC}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}Test Login:${NC}"
|
||||||
|
echo -e " ${YELLOW}curl -X POST http://localhost:8080/auth/login \\${NC}"
|
||||||
|
echo -e " ${YELLOW}-H 'Content-Type: application/json' \\${NC}"
|
||||||
|
echo -e " ${YELLOW}-d '{\"login\":\"$TEST_LOGIN\",\"password\":\"$TEST_PASSWORD\"}'${NC}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}ℹ${NC} For custom users, see: docs/testing/test-user-setup.md"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
exit 0
|
||||||
24
docker/distributable/docker/inject-env.sh
Executable file
24
docker/distributable/docker/inject-env.sh
Executable file
@@ -0,0 +1,24 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# inject-env.sh - Injects runtime environment variables into the Web UI
|
||||||
|
# This script runs at container startup to make environment variables available to the browser
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Default values
|
||||||
|
API_URL="${API_URL:-http://localhost:8080}"
|
||||||
|
WS_URL="${WS_URL:-ws://localhost:8081}"
|
||||||
|
|
||||||
|
# Create runtime configuration file
|
||||||
|
cat > /usr/share/nginx/html/config/runtime-config.js <<EOF
|
||||||
|
// Runtime configuration injected at container startup
|
||||||
|
window.ATTUNE_CONFIG = {
|
||||||
|
apiUrl: '${API_URL}',
|
||||||
|
wsUrl: '${WS_URL}',
|
||||||
|
environment: '${ENVIRONMENT:-production}'
|
||||||
|
};
|
||||||
|
EOF
|
||||||
|
|
||||||
|
echo "Runtime configuration injected:"
|
||||||
|
echo " API_URL: ${API_URL}"
|
||||||
|
echo " WS_URL: ${WS_URL}"
|
||||||
|
echo " ENVIRONMENT: ${ENVIRONMENT:-production}"
|
||||||
125
docker/distributable/docker/nginx.conf
Normal file
125
docker/distributable/docker/nginx.conf
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
# Nginx configuration for Attune Web UI
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name _;
|
||||||
|
|
||||||
|
root /usr/share/nginx/html;
|
||||||
|
index index.html;
|
||||||
|
|
||||||
|
# Enable gzip compression
|
||||||
|
gzip on;
|
||||||
|
gzip_vary on;
|
||||||
|
gzip_min_length 1024;
|
||||||
|
gzip_types text/plain text/css text/xml text/javascript application/javascript application/x-javascript application/xml+rss application/json;
|
||||||
|
|
||||||
|
# Security headers
|
||||||
|
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||||
|
add_header X-Content-Type-Options "nosniff" always;
|
||||||
|
add_header X-XSS-Protection "1; mode=block" always;
|
||||||
|
add_header Referrer-Policy "no-referrer-when-downgrade" always;
|
||||||
|
|
||||||
|
# Health check endpoint
|
||||||
|
location /health {
|
||||||
|
access_log off;
|
||||||
|
return 200 "OK\n";
|
||||||
|
add_header Content-Type text/plain;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Use Docker's embedded DNS resolver so that proxy_pass with variables
|
||||||
|
# resolves hostnames at request time, not config load time.
|
||||||
|
# This prevents nginx from crashing if backends aren't ready yet.
|
||||||
|
resolver 127.0.0.11 valid=10s;
|
||||||
|
set $api_upstream http://api:8080;
|
||||||
|
set $notifier_upstream http://notifier:8081;
|
||||||
|
|
||||||
|
# Auth proxy - forward auth requests to backend
|
||||||
|
# With variable proxy_pass (no URI path), the full original request URI
|
||||||
|
# (e.g. /auth/login) is passed through to the backend as-is.
|
||||||
|
location /auth/ {
|
||||||
|
proxy_pass $api_upstream;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection 'upgrade';
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_cache_bypass $http_upgrade;
|
||||||
|
|
||||||
|
# Timeouts
|
||||||
|
proxy_connect_timeout 60s;
|
||||||
|
proxy_send_timeout 60s;
|
||||||
|
proxy_read_timeout 60s;
|
||||||
|
}
|
||||||
|
|
||||||
|
# API proxy - forward API requests to backend (preserves /api prefix)
|
||||||
|
# With variable proxy_pass (no URI path), the full original request URI
|
||||||
|
# (e.g. /api/packs?page=1) is passed through to the backend as-is.
|
||||||
|
location /api/ {
|
||||||
|
proxy_pass $api_upstream;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection 'upgrade';
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_cache_bypass $http_upgrade;
|
||||||
|
|
||||||
|
# Timeouts
|
||||||
|
proxy_connect_timeout 60s;
|
||||||
|
proxy_send_timeout 60s;
|
||||||
|
proxy_read_timeout 60s;
|
||||||
|
}
|
||||||
|
|
||||||
|
# WebSocket proxy for notifier service
|
||||||
|
# Strip the /ws/ prefix before proxying (notifier expects paths at root).
|
||||||
|
# e.g. /ws/events → /events
|
||||||
|
location /ws/ {
|
||||||
|
rewrite ^/ws/(.*) /$1 break;
|
||||||
|
proxy_pass $notifier_upstream;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection "Upgrade";
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
|
# WebSocket timeouts
|
||||||
|
proxy_connect_timeout 7d;
|
||||||
|
proxy_send_timeout 7d;
|
||||||
|
proxy_read_timeout 7d;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Serve static assets with caching
|
||||||
|
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
|
||||||
|
expires 1y;
|
||||||
|
add_header Cache-Control "public, immutable";
|
||||||
|
}
|
||||||
|
|
||||||
|
# Runtime configuration endpoint
|
||||||
|
location /config/runtime-config.js {
|
||||||
|
expires -1;
|
||||||
|
add_header Cache-Control "no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0";
|
||||||
|
}
|
||||||
|
|
||||||
|
# SPA routing - serve index.html for all routes
|
||||||
|
location / {
|
||||||
|
try_files $uri $uri/ /index.html;
|
||||||
|
|
||||||
|
# Disable caching for index.html
|
||||||
|
location = /index.html {
|
||||||
|
expires -1;
|
||||||
|
add_header Cache-Control "no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Deny access to hidden files
|
||||||
|
location ~ /\. {
|
||||||
|
deny all;
|
||||||
|
access_log off;
|
||||||
|
log_not_found off;
|
||||||
|
}
|
||||||
|
}
|
||||||
189
docker/distributable/docker/run-migrations.sh
Executable file
189
docker/distributable/docker/run-migrations.sh
Executable file
@@ -0,0 +1,189 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Migration script for Attune database
|
||||||
|
# Runs all SQL migration files in order
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Attune Database Migration Runner"
|
||||||
|
echo "=========================================="
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Database connection parameters
|
||||||
|
DB_HOST="${DB_HOST:-postgres}"
|
||||||
|
DB_PORT="${DB_PORT:-5432}"
|
||||||
|
DB_USER="${DB_USER:-attune}"
|
||||||
|
DB_PASSWORD="${DB_PASSWORD:-attune}"
|
||||||
|
DB_NAME="${DB_NAME:-attune}"
|
||||||
|
|
||||||
|
MIGRATIONS_DIR="${MIGRATIONS_DIR:-/migrations}"
|
||||||
|
|
||||||
|
# Export password for psql
|
||||||
|
export PGPASSWORD="$DB_PASSWORD"
|
||||||
|
|
||||||
|
# Color output
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
RED='\033[0;31m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Function to wait for PostgreSQL to be ready
|
||||||
|
wait_for_postgres() {
|
||||||
|
echo "Waiting for PostgreSQL to be ready..."
|
||||||
|
local max_attempts=30
|
||||||
|
local attempt=1
|
||||||
|
|
||||||
|
while [ $attempt -le $max_attempts ]; do
|
||||||
|
if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c '\q' 2>/dev/null; then
|
||||||
|
echo -e "${GREEN}✓ PostgreSQL is ready${NC}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo " Attempt $attempt/$max_attempts: PostgreSQL not ready yet..."
|
||||||
|
sleep 2
|
||||||
|
attempt=$((attempt + 1))
|
||||||
|
done
|
||||||
|
|
||||||
|
echo -e "${RED}✗ PostgreSQL failed to become ready after $max_attempts attempts${NC}"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to check if migrations table exists
|
||||||
|
setup_migrations_table() {
|
||||||
|
echo "Setting up migrations tracking table..."
|
||||||
|
|
||||||
|
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -v ON_ERROR_STOP=1 <<-EOSQL
|
||||||
|
CREATE TABLE IF NOT EXISTS _migrations (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
filename VARCHAR(255) UNIQUE NOT NULL,
|
||||||
|
applied_at TIMESTAMP DEFAULT NOW()
|
||||||
|
);
|
||||||
|
EOSQL
|
||||||
|
|
||||||
|
echo -e "${GREEN}✓ Migrations table ready${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to check if a migration has been applied
|
||||||
|
is_migration_applied() {
|
||||||
|
local filename=$1
|
||||||
|
local count=$(psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -c \
|
||||||
|
"SELECT COUNT(*) FROM _migrations WHERE filename = '$filename';" | tr -d ' ')
|
||||||
|
[ "$count" -gt 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to mark migration as applied
|
||||||
|
mark_migration_applied() {
|
||||||
|
local filename=$1
|
||||||
|
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c \
|
||||||
|
"INSERT INTO _migrations (filename) VALUES ('$filename');" > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to run a migration file
|
||||||
|
run_migration() {
|
||||||
|
local filepath=$1
|
||||||
|
local filename=$(basename "$filepath")
|
||||||
|
|
||||||
|
if is_migration_applied "$filename"; then
|
||||||
|
echo -e "${YELLOW}⊘ Skipping $filename (already applied)${NC}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo -e "${GREEN}→ Applying $filename...${NC}"
|
||||||
|
|
||||||
|
# Run migration in a transaction with detailed error reporting
|
||||||
|
if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -v ON_ERROR_STOP=1 \
|
||||||
|
-c "BEGIN;" \
|
||||||
|
-f "$filepath" \
|
||||||
|
-c "COMMIT;" > /tmp/migration_output.log 2>&1; then
|
||||||
|
mark_migration_applied "$filename"
|
||||||
|
echo -e "${GREEN}✓ Applied $filename${NC}"
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
echo -e "${RED}✗ Failed to apply $filename${NC}"
|
||||||
|
echo ""
|
||||||
|
echo "Error details:"
|
||||||
|
cat /tmp/migration_output.log
|
||||||
|
echo ""
|
||||||
|
echo "Migration rolled back due to error."
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to initialize Docker-specific roles and extensions
|
||||||
|
init_docker_roles() {
|
||||||
|
echo "Initializing Docker roles and extensions..."
|
||||||
|
|
||||||
|
if [ -f "/docker/init-roles.sql" ]; then
|
||||||
|
if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -v ON_ERROR_STOP=1 -f "/docker/init-roles.sql" > /dev/null 2>&1; then
|
||||||
|
echo -e "${GREEN}✓ Docker roles initialized${NC}"
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}⚠ Warning: Could not initialize Docker roles (may already exist)${NC}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}⚠ No Docker init script found, skipping${NC}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main migration process
|
||||||
|
main() {
|
||||||
|
echo "Configuration:"
|
||||||
|
echo " Database: $DB_HOST:$DB_PORT/$DB_NAME"
|
||||||
|
echo " User: $DB_USER"
|
||||||
|
echo " Migrations directory: $MIGRATIONS_DIR"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Wait for database
|
||||||
|
wait_for_postgres || exit 1
|
||||||
|
|
||||||
|
# Initialize Docker-specific roles
|
||||||
|
init_docker_roles || exit 1
|
||||||
|
|
||||||
|
# Setup migrations tracking
|
||||||
|
setup_migrations_table || exit 1
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Running migrations..."
|
||||||
|
echo "----------------------------------------"
|
||||||
|
|
||||||
|
# Find and sort migration files
|
||||||
|
local migration_count=0
|
||||||
|
local applied_count=0
|
||||||
|
local skipped_count=0
|
||||||
|
|
||||||
|
# Process migrations in sorted order
|
||||||
|
for migration_file in $(find "$MIGRATIONS_DIR" -name "*.sql" -type f | sort); do
|
||||||
|
migration_count=$((migration_count + 1))
|
||||||
|
|
||||||
|
if is_migration_applied "$(basename "$migration_file")"; then
|
||||||
|
skipped_count=$((skipped_count + 1))
|
||||||
|
run_migration "$migration_file"
|
||||||
|
else
|
||||||
|
if run_migration "$migration_file"; then
|
||||||
|
applied_count=$((applied_count + 1))
|
||||||
|
else
|
||||||
|
echo -e "${RED}Migration failed!${NC}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "----------------------------------------"
|
||||||
|
echo ""
|
||||||
|
echo "Migration Summary:"
|
||||||
|
echo " Total migrations: $migration_count"
|
||||||
|
echo " Newly applied: $applied_count"
|
||||||
|
echo " Already applied: $skipped_count"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
if [ $applied_count -gt 0 ]; then
|
||||||
|
echo -e "${GREEN}✓ All migrations applied successfully!${NC}"
|
||||||
|
else
|
||||||
|
echo -e "${GREEN}✓ Database is up to date (no new migrations)${NC}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Run main function
|
||||||
|
main
|
||||||
230
docker/distributable/migrations/20250101000001_initial_setup.sql
Normal file
230
docker/distributable/migrations/20250101000001_initial_setup.sql
Normal file
@@ -0,0 +1,230 @@
|
|||||||
|
-- Migration: Initial Setup
|
||||||
|
-- Description: Creates the attune schema, enums, and shared database functions
|
||||||
|
-- Version: 20250101000001
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- EXTENSIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Enable required extensions
|
||||||
|
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||||
|
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ENUM TYPES
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- WorkerType enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE worker_type_enum AS ENUM (
|
||||||
|
'local',
|
||||||
|
'remote',
|
||||||
|
'container'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE worker_type_enum IS 'Type of worker deployment';
|
||||||
|
|
||||||
|
-- WorkerRole enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE worker_role_enum AS ENUM (
|
||||||
|
'action',
|
||||||
|
'sensor',
|
||||||
|
'hybrid'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE worker_role_enum IS 'Role of worker (action executor, sensor, or both)';
|
||||||
|
|
||||||
|
|
||||||
|
-- WorkerStatus enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE worker_status_enum AS ENUM (
|
||||||
|
'active',
|
||||||
|
'inactive',
|
||||||
|
'busy',
|
||||||
|
'error'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE worker_status_enum IS 'Worker operational status';
|
||||||
|
|
||||||
|
-- EnforcementStatus enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE enforcement_status_enum AS ENUM (
|
||||||
|
'created',
|
||||||
|
'processed',
|
||||||
|
'disabled'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE enforcement_status_enum IS 'Enforcement processing status';
|
||||||
|
|
||||||
|
-- EnforcementCondition enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE enforcement_condition_enum AS ENUM (
|
||||||
|
'any',
|
||||||
|
'all'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE enforcement_condition_enum IS 'Logical operator for conditions (OR/AND)';
|
||||||
|
|
||||||
|
-- ExecutionStatus enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE execution_status_enum AS ENUM (
|
||||||
|
'requested',
|
||||||
|
'scheduling',
|
||||||
|
'scheduled',
|
||||||
|
'running',
|
||||||
|
'completed',
|
||||||
|
'failed',
|
||||||
|
'canceling',
|
||||||
|
'cancelled',
|
||||||
|
'timeout',
|
||||||
|
'abandoned'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE execution_status_enum IS 'Execution lifecycle status';
|
||||||
|
|
||||||
|
-- InquiryStatus enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE inquiry_status_enum AS ENUM (
|
||||||
|
'pending',
|
||||||
|
'responded',
|
||||||
|
'timeout',
|
||||||
|
'cancelled'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE inquiry_status_enum IS 'Inquiry lifecycle status';
|
||||||
|
|
||||||
|
-- PolicyMethod enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE policy_method_enum AS ENUM (
|
||||||
|
'cancel',
|
||||||
|
'enqueue'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE policy_method_enum IS 'Policy enforcement method';
|
||||||
|
|
||||||
|
-- OwnerType enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE owner_type_enum AS ENUM (
|
||||||
|
'system',
|
||||||
|
'identity',
|
||||||
|
'pack',
|
||||||
|
'action',
|
||||||
|
'sensor'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE owner_type_enum IS 'Type of resource owner';
|
||||||
|
|
||||||
|
-- NotificationState enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE notification_status_enum AS ENUM (
|
||||||
|
'created',
|
||||||
|
'queued',
|
||||||
|
'processing',
|
||||||
|
'error'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE notification_status_enum IS 'Notification processing state';
|
||||||
|
|
||||||
|
-- ArtifactType enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE artifact_type_enum AS ENUM (
|
||||||
|
'file_binary',
|
||||||
|
'file_datatable',
|
||||||
|
'file_image',
|
||||||
|
'file_text',
|
||||||
|
'other',
|
||||||
|
'progress',
|
||||||
|
'url'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE artifact_type_enum IS 'Type of artifact';
|
||||||
|
|
||||||
|
-- RetentionPolicyType enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE artifact_retention_enum AS ENUM (
|
||||||
|
'versions',
|
||||||
|
'days',
|
||||||
|
'hours',
|
||||||
|
'minutes'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE artifact_retention_enum IS 'Type of retention policy';
|
||||||
|
|
||||||
|
-- ArtifactVisibility enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE artifact_visibility_enum AS ENUM (
|
||||||
|
'public',
|
||||||
|
'private'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE artifact_visibility_enum IS 'Visibility of an artifact (public = viewable by all users, private = scoped by owner)';
|
||||||
|
|
||||||
|
|
||||||
|
-- PackEnvironmentStatus enum
|
||||||
|
DO $$ BEGIN
|
||||||
|
CREATE TYPE pack_environment_status_enum AS ENUM (
|
||||||
|
'pending',
|
||||||
|
'installing',
|
||||||
|
'ready',
|
||||||
|
'failed',
|
||||||
|
'outdated'
|
||||||
|
);
|
||||||
|
EXCEPTION
|
||||||
|
WHEN duplicate_object THEN null;
|
||||||
|
END $$;
|
||||||
|
|
||||||
|
COMMENT ON TYPE pack_environment_status_enum IS 'Status of pack runtime environment installation';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- SHARED FUNCTIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to automatically update the 'updated' timestamp
|
||||||
|
CREATE OR REPLACE FUNCTION update_updated_column()
|
||||||
|
RETURNS TRIGGER AS $$
|
||||||
|
BEGIN
|
||||||
|
NEW.updated = NOW();
|
||||||
|
RETURN NEW;
|
||||||
|
END;
|
||||||
|
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION update_updated_column() IS 'Automatically updates the updated timestamp on row modification';
|
||||||
262
docker/distributable/migrations/20250101000002_pack_system.sql
Normal file
262
docker/distributable/migrations/20250101000002_pack_system.sql
Normal file
@@ -0,0 +1,262 @@
|
|||||||
|
-- Migration: Pack System
|
||||||
|
-- Description: Creates pack, runtime, and runtime_version tables
|
||||||
|
-- Version: 20250101000002
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- PACK TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE pack (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
label TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
version TEXT NOT NULL,
|
||||||
|
conf_schema JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
config JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
meta JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
|
||||||
|
runtime_deps TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
|
||||||
|
dependencies TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
|
||||||
|
is_standard BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
installers JSONB DEFAULT '[]'::jsonb,
|
||||||
|
|
||||||
|
-- Installation metadata (nullable for non-installed packs)
|
||||||
|
source_type TEXT,
|
||||||
|
source_url TEXT,
|
||||||
|
source_ref TEXT,
|
||||||
|
checksum TEXT,
|
||||||
|
checksum_verified BOOLEAN DEFAULT FALSE,
|
||||||
|
installed_at TIMESTAMPTZ,
|
||||||
|
installed_by BIGINT,
|
||||||
|
installation_method TEXT,
|
||||||
|
storage_path TEXT,
|
||||||
|
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT pack_ref_lowercase CHECK (ref = LOWER(ref)),
|
||||||
|
CONSTRAINT pack_ref_format CHECK (ref ~ '^[a-z][a-z0-9_-]+$'),
|
||||||
|
CONSTRAINT pack_version_semver CHECK (
|
||||||
|
version ~ '^\d+\.\d+\.\d+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$'
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_pack_ref ON pack(ref);
|
||||||
|
CREATE INDEX idx_pack_created ON pack(created DESC);
|
||||||
|
CREATE INDEX idx_pack_is_standard ON pack(is_standard) WHERE is_standard = TRUE;
|
||||||
|
CREATE INDEX idx_pack_is_standard_created ON pack(is_standard, created DESC);
|
||||||
|
CREATE INDEX idx_pack_version_created ON pack(version, created DESC);
|
||||||
|
CREATE INDEX idx_pack_config_gin ON pack USING GIN (config);
|
||||||
|
CREATE INDEX idx_pack_meta_gin ON pack USING GIN (meta);
|
||||||
|
CREATE INDEX idx_pack_tags_gin ON pack USING GIN (tags);
|
||||||
|
CREATE INDEX idx_pack_runtime_deps_gin ON pack USING GIN (runtime_deps);
|
||||||
|
CREATE INDEX idx_pack_dependencies_gin ON pack USING GIN (dependencies);
|
||||||
|
CREATE INDEX idx_pack_installed_at ON pack(installed_at DESC) WHERE installed_at IS NOT NULL;
|
||||||
|
CREATE INDEX idx_pack_installed_by ON pack(installed_by) WHERE installed_by IS NOT NULL;
|
||||||
|
CREATE INDEX idx_pack_source_type ON pack(source_type) WHERE source_type IS NOT NULL;
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_pack_updated
|
||||||
|
BEFORE UPDATE ON pack
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE pack IS 'Packs bundle related automation components';
|
||||||
|
COMMENT ON COLUMN pack.ref IS 'Unique pack reference identifier (e.g., "slack", "github")';
|
||||||
|
COMMENT ON COLUMN pack.label IS 'Human-readable pack name';
|
||||||
|
COMMENT ON COLUMN pack.version IS 'Semantic version of the pack';
|
||||||
|
COMMENT ON COLUMN pack.conf_schema IS 'JSON schema for pack configuration';
|
||||||
|
COMMENT ON COLUMN pack.config IS 'Pack configuration values';
|
||||||
|
COMMENT ON COLUMN pack.meta IS 'Pack metadata';
|
||||||
|
COMMENT ON COLUMN pack.runtime_deps IS 'Array of required runtime references (e.g., shell, python, nodejs)';
|
||||||
|
COMMENT ON COLUMN pack.dependencies IS 'Array of required pack references (e.g., core, utils)';
|
||||||
|
COMMENT ON COLUMN pack.is_standard IS 'Whether this is a core/built-in pack';
|
||||||
|
COMMENT ON COLUMN pack.source_type IS 'Installation source type (e.g., "git", "local", "registry")';
|
||||||
|
COMMENT ON COLUMN pack.source_url IS 'URL or path where pack was installed from';
|
||||||
|
COMMENT ON COLUMN pack.source_ref IS 'Git ref, version tag, or other source reference';
|
||||||
|
COMMENT ON COLUMN pack.checksum IS 'Content checksum for verification';
|
||||||
|
COMMENT ON COLUMN pack.checksum_verified IS 'Whether checksum has been verified';
|
||||||
|
COMMENT ON COLUMN pack.installed_at IS 'Timestamp when pack was installed';
|
||||||
|
COMMENT ON COLUMN pack.installed_by IS 'Identity ID of user who installed the pack';
|
||||||
|
COMMENT ON COLUMN pack.installation_method IS 'Method used for installation (e.g., "cli", "api", "auto")';
|
||||||
|
COMMENT ON COLUMN pack.storage_path IS 'Filesystem path where pack files are stored';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- RUNTIME TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE runtime (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
|
||||||
|
pack_ref TEXT,
|
||||||
|
description TEXT,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
aliases TEXT[] NOT NULL DEFAULT '{}'::text[],
|
||||||
|
|
||||||
|
distributions JSONB NOT NULL,
|
||||||
|
installation JSONB,
|
||||||
|
installers JSONB DEFAULT '[]'::jsonb,
|
||||||
|
|
||||||
|
-- Execution configuration: describes how to execute actions using this runtime,
|
||||||
|
-- how to create isolated environments, and how to install dependencies.
|
||||||
|
--
|
||||||
|
-- Structure:
|
||||||
|
-- {
|
||||||
|
-- "interpreter": {
|
||||||
|
-- "binary": "python3", -- interpreter binary name or path
|
||||||
|
-- "args": [], -- additional args before the action file
|
||||||
|
-- "file_extension": ".py" -- file extension this runtime handles
|
||||||
|
-- },
|
||||||
|
-- "environment": { -- optional: isolated environment config
|
||||||
|
-- "env_type": "virtualenv", -- "virtualenv", "node_modules", "none"
|
||||||
|
-- "dir_name": ".venv", -- directory name relative to pack dir
|
||||||
|
-- "create_command": ["python3", "-m", "venv", "{env_dir}"],
|
||||||
|
-- "interpreter_path": "{env_dir}/bin/python3" -- overrides interpreter.binary
|
||||||
|
-- },
|
||||||
|
-- "dependencies": { -- optional: dependency management config
|
||||||
|
-- "manifest_file": "requirements.txt",
|
||||||
|
-- "install_command": ["{interpreter}", "-m", "pip", "install", "-r", "{manifest_path}"]
|
||||||
|
-- }
|
||||||
|
-- }
|
||||||
|
--
|
||||||
|
-- Template variables:
|
||||||
|
-- {pack_dir} - absolute path to the pack directory
|
||||||
|
-- {env_dir} - resolved environment directory (pack_dir/dir_name)
|
||||||
|
-- {interpreter} - resolved interpreter path
|
||||||
|
-- {action_file} - absolute path to the action script file
|
||||||
|
-- {manifest_path} - absolute path to the dependency manifest file
|
||||||
|
execution_config JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
|
||||||
|
-- Whether this runtime was auto-registered by an agent
|
||||||
|
-- (vs. loaded from a pack's YAML file during pack registration)
|
||||||
|
auto_detected BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
|
||||||
|
-- Detection metadata for auto-discovered runtimes.
|
||||||
|
-- Stores how the agent discovered this runtime (binary path, version, etc.)
|
||||||
|
-- enables re-verification on restart.
|
||||||
|
-- Example: { "detected_path": "/usr/bin/ruby", "detected_name": "ruby",
|
||||||
|
-- "detected_version": "3.3.0" }
|
||||||
|
detection_config JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT runtime_ref_lowercase CHECK (ref = LOWER(ref))
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_runtime_ref ON runtime(ref);
|
||||||
|
CREATE INDEX idx_runtime_pack ON runtime(pack);
|
||||||
|
CREATE INDEX idx_runtime_created ON runtime(created DESC);
|
||||||
|
CREATE INDEX idx_runtime_name ON runtime(name);
|
||||||
|
CREATE INDEX idx_runtime_verification ON runtime USING GIN ((distributions->'verification'));
|
||||||
|
CREATE INDEX idx_runtime_execution_config ON runtime USING GIN (execution_config);
|
||||||
|
CREATE INDEX idx_runtime_auto_detected ON runtime(auto_detected);
|
||||||
|
CREATE INDEX idx_runtime_detection_config ON runtime USING GIN (detection_config);
|
||||||
|
CREATE INDEX idx_runtime_aliases ON runtime USING GIN (aliases);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_runtime_updated
|
||||||
|
BEFORE UPDATE ON runtime
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE runtime IS 'Runtime environments for executing actions and sensors (unified)';
|
||||||
|
COMMENT ON COLUMN runtime.ref IS 'Unique runtime reference (format: pack.name, e.g., core.python)';
|
||||||
|
COMMENT ON COLUMN runtime.name IS 'Runtime name (e.g., "Python", "Node.js", "Shell")';
|
||||||
|
COMMENT ON COLUMN runtime.aliases IS 'Lowercase alias names for this runtime (e.g., ["ruby", "rb"] for the Ruby runtime). Used for alias-aware matching during auto-detection and scheduling.';
|
||||||
|
COMMENT ON COLUMN runtime.distributions IS 'Runtime distribution metadata including verification commands, version requirements, and capabilities';
|
||||||
|
COMMENT ON COLUMN runtime.installation IS 'Installation requirements and instructions including package managers and setup steps';
|
||||||
|
COMMENT ON COLUMN runtime.installers IS 'Array of installer actions to create pack-specific runtime environments. Each installer defines commands to set up isolated environments (e.g., Python venv, npm install).';
|
||||||
|
COMMENT ON COLUMN runtime.execution_config IS 'Execution configuration: interpreter, environment setup, and dependency management. Drives how the worker executes actions and how pack install sets up environments.';
|
||||||
|
COMMENT ON COLUMN runtime.auto_detected IS 'Whether this runtime was auto-registered by an agent (true) vs. loaded from a pack YAML (false)';
|
||||||
|
COMMENT ON COLUMN runtime.detection_config IS 'Detection metadata for auto-discovered runtimes: binaries probed, version regex, detected path/version';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- RUNTIME VERSION TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE runtime_version (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
|
||||||
|
runtime_ref TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- Semantic version string (e.g., "3.12.1", "20.11.0")
|
||||||
|
version TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- Individual version components for efficient range queries.
|
||||||
|
-- Nullable because some runtimes may use non-numeric versioning.
|
||||||
|
version_major INT,
|
||||||
|
version_minor INT,
|
||||||
|
version_patch INT,
|
||||||
|
|
||||||
|
-- Complete execution configuration for this specific version.
|
||||||
|
-- This is NOT a diff/override — it is a full standalone config that can
|
||||||
|
-- replace the parent runtime's execution_config when this version is selected.
|
||||||
|
-- Structure is identical to runtime.execution_config (RuntimeExecutionConfig).
|
||||||
|
execution_config JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
|
||||||
|
-- Version-specific distribution/verification metadata.
|
||||||
|
-- Structure mirrors runtime.distributions but with version-specific commands.
|
||||||
|
-- Example: verification commands that check for a specific binary like python3.12.
|
||||||
|
distributions JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
|
||||||
|
-- Whether this version is the default for the parent runtime.
|
||||||
|
-- At most one version per runtime should be marked as default.
|
||||||
|
is_default BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
|
||||||
|
-- Whether this version has been verified as available on the current system.
|
||||||
|
available BOOLEAN NOT NULL DEFAULT TRUE,
|
||||||
|
|
||||||
|
-- When this version was last verified (via running verification commands).
|
||||||
|
verified_at TIMESTAMPTZ,
|
||||||
|
|
||||||
|
-- Arbitrary version-specific metadata (e.g., EOL date, release notes URL,
|
||||||
|
-- feature flags, platform-specific notes).
|
||||||
|
meta JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT runtime_version_unique UNIQUE(runtime, version)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_runtime_version_runtime ON runtime_version(runtime);
|
||||||
|
CREATE INDEX idx_runtime_version_runtime_ref ON runtime_version(runtime_ref);
|
||||||
|
CREATE INDEX idx_runtime_version_version ON runtime_version(version);
|
||||||
|
CREATE INDEX idx_runtime_version_available ON runtime_version(available) WHERE available = TRUE;
|
||||||
|
CREATE INDEX idx_runtime_version_is_default ON runtime_version(is_default) WHERE is_default = TRUE;
|
||||||
|
CREATE INDEX idx_runtime_version_components ON runtime_version(runtime, version_major, version_minor, version_patch);
|
||||||
|
CREATE INDEX idx_runtime_version_created ON runtime_version(created DESC);
|
||||||
|
CREATE INDEX idx_runtime_version_execution_config ON runtime_version USING GIN (execution_config);
|
||||||
|
CREATE INDEX idx_runtime_version_meta ON runtime_version USING GIN (meta);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_runtime_version_updated
|
||||||
|
BEFORE UPDATE ON runtime_version
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE runtime_version IS 'Specific versions of a runtime (e.g., Python 3.11, 3.12) with version-specific execution configuration';
|
||||||
|
COMMENT ON COLUMN runtime_version.runtime IS 'Parent runtime this version belongs to';
|
||||||
|
COMMENT ON COLUMN runtime_version.runtime_ref IS 'Parent runtime ref (e.g., core.python) for display/filtering';
|
||||||
|
COMMENT ON COLUMN runtime_version.version IS 'Semantic version string (e.g., "3.12.1", "20.11.0")';
|
||||||
|
COMMENT ON COLUMN runtime_version.version_major IS 'Major version component for efficient range queries';
|
||||||
|
COMMENT ON COLUMN runtime_version.version_minor IS 'Minor version component for efficient range queries';
|
||||||
|
COMMENT ON COLUMN runtime_version.version_patch IS 'Patch version component for efficient range queries';
|
||||||
|
COMMENT ON COLUMN runtime_version.execution_config IS 'Complete execution configuration for this version (same structure as runtime.execution_config)';
|
||||||
|
COMMENT ON COLUMN runtime_version.distributions IS 'Version-specific distribution/verification metadata';
|
||||||
|
COMMENT ON COLUMN runtime_version.is_default IS 'Whether this is the default version for the parent runtime (at most one per runtime)';
|
||||||
|
COMMENT ON COLUMN runtime_version.available IS 'Whether this version has been verified as available on the system';
|
||||||
|
COMMENT ON COLUMN runtime_version.verified_at IS 'Timestamp of last availability verification';
|
||||||
|
COMMENT ON COLUMN runtime_version.meta IS 'Arbitrary version-specific metadata';
|
||||||
@@ -0,0 +1,223 @@
|
|||||||
|
-- Migration: Identity and Authentication
|
||||||
|
-- Description: Creates identity, permission, and policy tables
|
||||||
|
-- Version: 20250101000002
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- IDENTITY TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE identity (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
login TEXT NOT NULL UNIQUE,
|
||||||
|
display_name TEXT,
|
||||||
|
password_hash TEXT,
|
||||||
|
attributes JSONB NOT NULL DEFAULT '{}'::jsonb,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_identity_login ON identity(login);
|
||||||
|
CREATE INDEX idx_identity_created ON identity(created DESC);
|
||||||
|
CREATE INDEX idx_identity_password_hash ON identity(password_hash) WHERE password_hash IS NOT NULL;
|
||||||
|
CREATE INDEX idx_identity_attributes_gin ON identity USING GIN (attributes);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_identity_updated
|
||||||
|
BEFORE UPDATE ON identity
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE identity IS 'Identities represent users or service accounts';
|
||||||
|
COMMENT ON COLUMN identity.login IS 'Unique login identifier';
|
||||||
|
COMMENT ON COLUMN identity.display_name IS 'Human-readable name';
|
||||||
|
COMMENT ON COLUMN identity.password_hash IS 'Argon2 hashed password for authentication (NULL for service accounts or external auth)';
|
||||||
|
COMMENT ON COLUMN identity.attributes IS 'Custom attributes (email, groups, etc.)';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ADD FOREIGN KEY CONSTRAINTS TO EXISTING TABLES
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Add foreign key constraint for pack.installed_by now that identity table exists
|
||||||
|
ALTER TABLE pack
|
||||||
|
ADD CONSTRAINT fk_pack_installed_by
|
||||||
|
FOREIGN KEY (installed_by)
|
||||||
|
REFERENCES identity(id)
|
||||||
|
ON DELETE SET NULL;
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- PERMISSION_SET TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE permission_set (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
|
||||||
|
pack_ref TEXT,
|
||||||
|
label TEXT,
|
||||||
|
description TEXT,
|
||||||
|
grants JSONB NOT NULL DEFAULT '[]'::jsonb,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT permission_set_ref_lowercase CHECK (ref = LOWER(ref)),
|
||||||
|
CONSTRAINT permission_set_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_permission_set_ref ON permission_set(ref);
|
||||||
|
CREATE INDEX idx_permission_set_pack ON permission_set(pack);
|
||||||
|
CREATE INDEX idx_permission_set_created ON permission_set(created DESC);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_permission_set_updated
|
||||||
|
BEFORE UPDATE ON permission_set
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE permission_set IS 'Permission sets group permissions together (like roles)';
|
||||||
|
COMMENT ON COLUMN permission_set.ref IS 'Unique permission set reference (format: pack.name)';
|
||||||
|
COMMENT ON COLUMN permission_set.label IS 'Human-readable name';
|
||||||
|
COMMENT ON COLUMN permission_set.grants IS 'Array of permission grants';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- PERMISSION_ASSIGNMENT TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE permission_assignment (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
identity BIGINT NOT NULL REFERENCES identity(id) ON DELETE CASCADE,
|
||||||
|
permset BIGINT NOT NULL REFERENCES permission_set(id) ON DELETE CASCADE,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Unique constraint to prevent duplicate assignments
|
||||||
|
CONSTRAINT unique_identity_permset UNIQUE (identity, permset)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_permission_assignment_identity ON permission_assignment(identity);
|
||||||
|
CREATE INDEX idx_permission_assignment_permset ON permission_assignment(permset);
|
||||||
|
CREATE INDEX idx_permission_assignment_created ON permission_assignment(created DESC);
|
||||||
|
CREATE INDEX idx_permission_assignment_identity_created ON permission_assignment(identity, created DESC);
|
||||||
|
CREATE INDEX idx_permission_assignment_permset_created ON permission_assignment(permset, created DESC);
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE permission_assignment IS 'Links identities to permission sets (many-to-many)';
|
||||||
|
COMMENT ON COLUMN permission_assignment.identity IS 'Identity being granted permissions';
|
||||||
|
COMMENT ON COLUMN permission_assignment.permset IS 'Permission set being assigned';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
ALTER TABLE identity
|
||||||
|
ADD COLUMN frozen BOOLEAN NOT NULL DEFAULT false;
|
||||||
|
|
||||||
|
CREATE INDEX idx_identity_frozen ON identity(frozen);
|
||||||
|
|
||||||
|
COMMENT ON COLUMN identity.frozen IS 'If true, authentication is blocked for this identity';
|
||||||
|
|
||||||
|
CREATE TABLE identity_role_assignment (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
identity BIGINT NOT NULL REFERENCES identity(id) ON DELETE CASCADE,
|
||||||
|
role TEXT NOT NULL,
|
||||||
|
source TEXT NOT NULL DEFAULT 'manual',
|
||||||
|
managed BOOLEAN NOT NULL DEFAULT false,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
CONSTRAINT unique_identity_role_assignment UNIQUE (identity, role)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_identity_role_assignment_identity
|
||||||
|
ON identity_role_assignment(identity);
|
||||||
|
CREATE INDEX idx_identity_role_assignment_role
|
||||||
|
ON identity_role_assignment(role);
|
||||||
|
CREATE INDEX idx_identity_role_assignment_source
|
||||||
|
ON identity_role_assignment(source);
|
||||||
|
|
||||||
|
CREATE TRIGGER update_identity_role_assignment_updated
|
||||||
|
BEFORE UPDATE ON identity_role_assignment
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
COMMENT ON TABLE identity_role_assignment IS 'Links identities to role labels from manual assignment or external identity providers';
|
||||||
|
COMMENT ON COLUMN identity_role_assignment.role IS 'Opaque role/group label (e.g. IDP group name)';
|
||||||
|
COMMENT ON COLUMN identity_role_assignment.source IS 'Where the role assignment originated (manual, oidc, ldap, sync, etc.)';
|
||||||
|
COMMENT ON COLUMN identity_role_assignment.managed IS 'True when the assignment is managed by external sync and should not be edited manually';
|
||||||
|
|
||||||
|
CREATE TABLE permission_set_role_assignment (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
permset BIGINT NOT NULL REFERENCES permission_set(id) ON DELETE CASCADE,
|
||||||
|
role TEXT NOT NULL,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
CONSTRAINT unique_permission_set_role_assignment UNIQUE (permset, role)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_permission_set_role_assignment_permset
|
||||||
|
ON permission_set_role_assignment(permset);
|
||||||
|
CREATE INDEX idx_permission_set_role_assignment_role
|
||||||
|
ON permission_set_role_assignment(role);
|
||||||
|
|
||||||
|
COMMENT ON TABLE permission_set_role_assignment IS 'Links permission sets to role labels for role-based grant expansion';
|
||||||
|
COMMENT ON COLUMN permission_set_role_assignment.role IS 'Opaque role/group label associated with the permission set';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- POLICY TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE policy (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
|
||||||
|
pack_ref TEXT,
|
||||||
|
action BIGINT, -- Forward reference to action table, will add constraint in next migration
|
||||||
|
action_ref TEXT,
|
||||||
|
parameters TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
|
||||||
|
method policy_method_enum NOT NULL,
|
||||||
|
threshold INTEGER NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
tags TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT policy_ref_lowercase CHECK (ref = LOWER(ref)),
|
||||||
|
CONSTRAINT policy_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$'),
|
||||||
|
CONSTRAINT policy_threshold_positive CHECK (threshold > 0)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_policy_ref ON policy(ref);
|
||||||
|
CREATE INDEX idx_policy_pack ON policy(pack);
|
||||||
|
CREATE INDEX idx_policy_action ON policy(action);
|
||||||
|
CREATE INDEX idx_policy_created ON policy(created DESC);
|
||||||
|
CREATE INDEX idx_policy_action_created ON policy(action, created DESC);
|
||||||
|
CREATE INDEX idx_policy_pack_created ON policy(pack, created DESC);
|
||||||
|
CREATE INDEX idx_policy_parameters_gin ON policy USING GIN (parameters);
|
||||||
|
CREATE INDEX idx_policy_tags_gin ON policy USING GIN (tags);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_policy_updated
|
||||||
|
BEFORE UPDATE ON policy
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE policy IS 'Policies define execution controls (rate limiting, concurrency)';
|
||||||
|
COMMENT ON COLUMN policy.ref IS 'Unique policy reference (format: pack.name)';
|
||||||
|
COMMENT ON COLUMN policy.action IS 'Action this policy applies to';
|
||||||
|
COMMENT ON COLUMN policy.parameters IS 'Parameter names used for policy grouping';
|
||||||
|
COMMENT ON COLUMN policy.method IS 'How to handle policy violations (cancel/enqueue)';
|
||||||
|
COMMENT ON COLUMN policy.threshold IS 'Numeric limit (e.g., max concurrent executions)';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
@@ -0,0 +1,290 @@
|
|||||||
|
-- Migration: Event System and Actions
|
||||||
|
-- Description: Creates trigger, sensor, event, enforcement, and action tables
|
||||||
|
-- with runtime version constraint support. Includes webhook key
|
||||||
|
-- generation function used by webhook management functions in 000007.
|
||||||
|
--
|
||||||
|
-- NOTE: The event and enforcement tables are converted to TimescaleDB
|
||||||
|
-- hypertables in migration 000009. Hypertables cannot be the target of
|
||||||
|
-- FK constraints, so enforcement.event is a plain BIGINT with no FK.
|
||||||
|
-- FKs *from* hypertables to regular tables (e.g., event.trigger → trigger,
|
||||||
|
-- enforcement.rule → rule) are supported by TimescaleDB 2.x and are kept.
|
||||||
|
-- Version: 20250101000004
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- WEBHOOK KEY GENERATION
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Generates a unique webhook key in the format: wh_<32 random hex chars>
|
||||||
|
-- Used by enable_trigger_webhook() and regenerate_trigger_webhook_key() in 000007.
|
||||||
|
CREATE OR REPLACE FUNCTION generate_webhook_key()
|
||||||
|
RETURNS VARCHAR(64) AS $$
|
||||||
|
BEGIN
|
||||||
|
RETURN 'wh_' || encode(gen_random_bytes(16), 'hex');
|
||||||
|
END;
|
||||||
|
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION generate_webhook_key() IS 'Generates a unique webhook key (format: wh_<32 hex chars>) for trigger webhook authentication';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- TRIGGER TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE trigger (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
|
||||||
|
pack_ref TEXT,
|
||||||
|
label TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
enabled BOOLEAN NOT NULL DEFAULT TRUE,
|
||||||
|
is_adhoc BOOLEAN DEFAULT false NOT NULL,
|
||||||
|
param_schema JSONB,
|
||||||
|
out_schema JSONB,
|
||||||
|
webhook_enabled BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
webhook_key VARCHAR(64) UNIQUE,
|
||||||
|
webhook_config JSONB DEFAULT '{}'::jsonb,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT trigger_ref_lowercase CHECK (ref = LOWER(ref)),
|
||||||
|
CONSTRAINT trigger_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_trigger_ref ON trigger(ref);
|
||||||
|
CREATE INDEX idx_trigger_pack ON trigger(pack);
|
||||||
|
CREATE INDEX idx_trigger_enabled ON trigger(enabled) WHERE enabled = TRUE;
|
||||||
|
CREATE INDEX idx_trigger_created ON trigger(created DESC);
|
||||||
|
CREATE INDEX idx_trigger_pack_enabled ON trigger(pack, enabled);
|
||||||
|
CREATE INDEX idx_trigger_webhook_key ON trigger(webhook_key) WHERE webhook_key IS NOT NULL;
|
||||||
|
CREATE INDEX idx_trigger_enabled_created ON trigger(enabled, created DESC) WHERE enabled = TRUE;
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_trigger_updated
|
||||||
|
BEFORE UPDATE ON trigger
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE trigger IS 'Trigger definitions that can activate rules';
|
||||||
|
COMMENT ON COLUMN trigger.ref IS 'Unique trigger reference (format: pack.name)';
|
||||||
|
COMMENT ON COLUMN trigger.label IS 'Human-readable trigger name';
|
||||||
|
COMMENT ON COLUMN trigger.enabled IS 'Whether this trigger is active';
|
||||||
|
COMMENT ON COLUMN trigger.param_schema IS 'JSON schema defining the expected configuration parameters when this trigger is used';
|
||||||
|
COMMENT ON COLUMN trigger.out_schema IS 'JSON schema defining the structure of event payloads generated by this trigger';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- SENSOR TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE sensor (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
pack BIGINT REFERENCES pack(id) ON DELETE CASCADE,
|
||||||
|
pack_ref TEXT,
|
||||||
|
label TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
entrypoint TEXT NOT NULL,
|
||||||
|
runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
|
||||||
|
runtime_ref TEXT NOT NULL,
|
||||||
|
trigger BIGINT NOT NULL REFERENCES trigger(id) ON DELETE CASCADE,
|
||||||
|
trigger_ref TEXT NOT NULL,
|
||||||
|
enabled BOOLEAN NOT NULL,
|
||||||
|
is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
param_schema JSONB,
|
||||||
|
config JSONB,
|
||||||
|
runtime_version_constraint TEXT,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT sensor_ref_lowercase CHECK (ref = LOWER(ref)),
|
||||||
|
CONSTRAINT sensor_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_sensor_ref ON sensor(ref);
|
||||||
|
CREATE INDEX idx_sensor_pack ON sensor(pack);
|
||||||
|
CREATE INDEX idx_sensor_runtime ON sensor(runtime);
|
||||||
|
CREATE INDEX idx_sensor_trigger ON sensor(trigger);
|
||||||
|
CREATE INDEX idx_sensor_enabled ON sensor(enabled) WHERE enabled = TRUE;
|
||||||
|
CREATE INDEX idx_sensor_is_adhoc ON sensor(is_adhoc) WHERE is_adhoc = true;
|
||||||
|
CREATE INDEX idx_sensor_created ON sensor(created DESC);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_sensor_updated
|
||||||
|
BEFORE UPDATE ON sensor
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE sensor IS 'Sensors monitor for events and create trigger instances';
|
||||||
|
COMMENT ON COLUMN sensor.ref IS 'Unique sensor reference (format: pack.name)';
|
||||||
|
COMMENT ON COLUMN sensor.label IS 'Human-readable sensor name';
|
||||||
|
COMMENT ON COLUMN sensor.entrypoint IS 'Script or command to execute';
|
||||||
|
COMMENT ON COLUMN sensor.runtime IS 'Runtime environment for execution';
|
||||||
|
COMMENT ON COLUMN sensor.trigger IS 'Trigger type this sensor creates events for';
|
||||||
|
COMMENT ON COLUMN sensor.enabled IS 'Whether this sensor is active';
|
||||||
|
COMMENT ON COLUMN sensor.is_adhoc IS 'True if sensor was manually created (ad-hoc), false if installed from pack';
|
||||||
|
COMMENT ON COLUMN sensor.runtime_version_constraint IS 'Semver version constraint for the runtime (e.g., ">=3.12", ">=3.12,<4.0", "~18.0"). NULL means any version.';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- EVENT TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE event (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
trigger BIGINT REFERENCES trigger(id) ON DELETE SET NULL,
|
||||||
|
trigger_ref TEXT NOT NULL,
|
||||||
|
config JSONB,
|
||||||
|
payload JSONB,
|
||||||
|
source BIGINT REFERENCES sensor(id) ON DELETE SET NULL,
|
||||||
|
source_ref TEXT,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
rule BIGINT,
|
||||||
|
rule_ref TEXT
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_event_trigger ON event(trigger);
|
||||||
|
CREATE INDEX idx_event_trigger_ref ON event(trigger_ref);
|
||||||
|
CREATE INDEX idx_event_source ON event(source);
|
||||||
|
CREATE INDEX idx_event_created ON event(created DESC);
|
||||||
|
CREATE INDEX idx_event_trigger_created ON event(trigger, created DESC);
|
||||||
|
CREATE INDEX idx_event_trigger_ref_created ON event(trigger_ref, created DESC);
|
||||||
|
CREATE INDEX idx_event_source_created ON event(source, created DESC);
|
||||||
|
CREATE INDEX idx_event_payload_gin ON event USING GIN (payload);
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE event IS 'Events are instances of triggers firing';
|
||||||
|
COMMENT ON COLUMN event.trigger IS 'Trigger that fired (may be null if trigger deleted)';
|
||||||
|
COMMENT ON COLUMN event.trigger_ref IS 'Trigger reference (preserved even if trigger deleted)';
|
||||||
|
COMMENT ON COLUMN event.config IS 'Snapshot of trigger/sensor configuration at event time';
|
||||||
|
COMMENT ON COLUMN event.payload IS 'Event data payload';
|
||||||
|
COMMENT ON COLUMN event.source IS 'Sensor that generated this event';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ENFORCEMENT TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE enforcement (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
rule BIGINT, -- Forward reference to rule table, will add constraint after rule is created
|
||||||
|
rule_ref TEXT NOT NULL,
|
||||||
|
trigger_ref TEXT NOT NULL,
|
||||||
|
config JSONB,
|
||||||
|
event BIGINT, -- references event(id); no FK because event becomes a hypertable
|
||||||
|
status enforcement_status_enum NOT NULL DEFAULT 'created',
|
||||||
|
payload JSONB NOT NULL,
|
||||||
|
condition enforcement_condition_enum NOT NULL DEFAULT 'all',
|
||||||
|
conditions JSONB NOT NULL DEFAULT '[]'::jsonb,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
resolved_at TIMESTAMPTZ,
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT enforcement_condition_check CHECK (condition IN ('any', 'all'))
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_enforcement_rule ON enforcement(rule);
|
||||||
|
CREATE INDEX idx_enforcement_rule_ref ON enforcement(rule_ref);
|
||||||
|
CREATE INDEX idx_enforcement_trigger_ref ON enforcement(trigger_ref);
|
||||||
|
CREATE INDEX idx_enforcement_event ON enforcement(event);
|
||||||
|
CREATE INDEX idx_enforcement_status ON enforcement(status);
|
||||||
|
CREATE INDEX idx_enforcement_created ON enforcement(created DESC);
|
||||||
|
CREATE INDEX idx_enforcement_status_created ON enforcement(status, created DESC);
|
||||||
|
CREATE INDEX idx_enforcement_rule_status ON enforcement(rule, status);
|
||||||
|
CREATE INDEX idx_enforcement_event_status ON enforcement(event, status);
|
||||||
|
CREATE INDEX idx_enforcement_payload_gin ON enforcement USING GIN (payload);
|
||||||
|
CREATE INDEX idx_enforcement_conditions_gin ON enforcement USING GIN (conditions);
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE enforcement IS 'Enforcements represent rule triggering by events';
|
||||||
|
COMMENT ON COLUMN enforcement.rule IS 'Rule being enforced (may be null if rule deleted)';
|
||||||
|
COMMENT ON COLUMN enforcement.rule_ref IS 'Rule reference (preserved even if rule deleted)';
|
||||||
|
COMMENT ON COLUMN enforcement.event IS 'Event that triggered this enforcement (no FK — event is a hypertable)';
|
||||||
|
COMMENT ON COLUMN enforcement.status IS 'Processing status (created → processed or disabled)';
|
||||||
|
COMMENT ON COLUMN enforcement.resolved_at IS 'Timestamp when the enforcement was resolved (status changed from created to processed/disabled). NULL while status is created.';
|
||||||
|
COMMENT ON COLUMN enforcement.payload IS 'Event payload for rule evaluation';
|
||||||
|
COMMENT ON COLUMN enforcement.condition IS 'Logical operator for conditions (any=OR, all=AND)';
|
||||||
|
COMMENT ON COLUMN enforcement.conditions IS 'Condition expressions to evaluate';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ACTION TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE action (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
|
||||||
|
pack_ref TEXT NOT NULL,
|
||||||
|
label TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
entrypoint TEXT NOT NULL,
|
||||||
|
runtime BIGINT REFERENCES runtime(id),
|
||||||
|
param_schema JSONB,
|
||||||
|
out_schema JSONB,
|
||||||
|
parameter_delivery TEXT NOT NULL DEFAULT 'stdin' CHECK (parameter_delivery IN ('stdin', 'file')),
|
||||||
|
parameter_format TEXT NOT NULL DEFAULT 'json' CHECK (parameter_format IN ('dotenv', 'json', 'yaml')),
|
||||||
|
output_format TEXT NOT NULL DEFAULT 'text' CHECK (output_format IN ('text', 'json', 'yaml', 'jsonl')),
|
||||||
|
is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
timeout_seconds INTEGER,
|
||||||
|
max_retries INTEGER DEFAULT 0,
|
||||||
|
runtime_version_constraint TEXT,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT action_ref_lowercase CHECK (ref = LOWER(ref)),
|
||||||
|
CONSTRAINT action_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_action_ref ON action(ref);
|
||||||
|
CREATE INDEX idx_action_pack ON action(pack);
|
||||||
|
CREATE INDEX idx_action_runtime ON action(runtime);
|
||||||
|
CREATE INDEX idx_action_parameter_delivery ON action(parameter_delivery);
|
||||||
|
CREATE INDEX idx_action_parameter_format ON action(parameter_format);
|
||||||
|
CREATE INDEX idx_action_output_format ON action(output_format);
|
||||||
|
CREATE INDEX idx_action_is_adhoc ON action(is_adhoc) WHERE is_adhoc = true;
|
||||||
|
CREATE INDEX idx_action_created ON action(created DESC);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_action_updated
|
||||||
|
BEFORE UPDATE ON action
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE action IS 'Actions are executable tasks that can be triggered';
|
||||||
|
COMMENT ON COLUMN action.ref IS 'Unique action reference (format: pack.name)';
|
||||||
|
COMMENT ON COLUMN action.pack IS 'Pack this action belongs to';
|
||||||
|
COMMENT ON COLUMN action.label IS 'Human-readable action name';
|
||||||
|
COMMENT ON COLUMN action.entrypoint IS 'Script or command to execute';
|
||||||
|
COMMENT ON COLUMN action.runtime IS 'Runtime environment for execution';
|
||||||
|
COMMENT ON COLUMN action.param_schema IS 'JSON schema for action parameters';
|
||||||
|
COMMENT ON COLUMN action.out_schema IS 'JSON schema for action output';
|
||||||
|
COMMENT ON COLUMN action.parameter_delivery IS 'How parameters are delivered: stdin (standard input - secure), file (temporary file - secure for large payloads). Environment variables are set separately via execution.env_vars.';
|
||||||
|
COMMENT ON COLUMN action.parameter_format IS 'Parameter serialization format: json (JSON object - default), dotenv (KEY=''VALUE''), yaml (YAML format)';
|
||||||
|
COMMENT ON COLUMN action.output_format IS 'Output parsing format: text (no parsing - raw stdout), json (parse stdout as JSON), yaml (parse stdout as YAML), jsonl (parse each line as JSON, collect into array)';
|
||||||
|
COMMENT ON COLUMN action.is_adhoc IS 'True if action was manually created (ad-hoc), false if installed from pack';
|
||||||
|
COMMENT ON COLUMN action.timeout_seconds IS 'Worker queue TTL override in seconds. If NULL, uses global worker_queue_ttl_ms config. Allows per-action timeout tuning.';
|
||||||
|
COMMENT ON COLUMN action.max_retries IS 'Maximum number of automatic retry attempts for failed executions. 0 = no retries (default).';
|
||||||
|
COMMENT ON COLUMN action.runtime_version_constraint IS 'Semver version constraint for the runtime (e.g., ">=3.12", ">=3.12,<4.0", "~18.0"). NULL means any version.';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Add foreign key constraint for policy table
|
||||||
|
ALTER TABLE policy
|
||||||
|
ADD CONSTRAINT policy_action_fkey
|
||||||
|
FOREIGN KEY (action) REFERENCES action(id) ON DELETE CASCADE;
|
||||||
|
|
||||||
|
-- Note: Foreign key constraints for key table (key_owner_action_fkey, key_owner_sensor_fkey)
|
||||||
|
-- will be added in migration 000007_supporting_systems.sql after the key table is created
|
||||||
|
|
||||||
|
-- Note: Rule table will be created in migration 000005 after execution table exists
|
||||||
|
-- Note: Foreign key constraints for enforcement.rule and event.rule will be added there
|
||||||
@@ -0,0 +1,410 @@
|
|||||||
|
-- Migration: Execution and Operations
|
||||||
|
-- Description: Creates execution, inquiry, rule, worker, and notification tables.
|
||||||
|
-- Includes retry tracking, worker health views, and helper functions.
|
||||||
|
-- Consolidates former migrations: 000006 (execution_system), 000008
|
||||||
|
-- (worker_notification), 000014 (worker_table), and 20260209 (phase3).
|
||||||
|
--
|
||||||
|
-- NOTE: The execution table is converted to a TimescaleDB hypertable in
|
||||||
|
-- migration 000009. Hypertables cannot be the target of FK constraints,
|
||||||
|
-- so columns referencing execution (inquiry.execution, workflow_execution.execution)
|
||||||
|
-- are plain BIGINT with no FK. Similarly, columns ON the execution table that
|
||||||
|
-- would self-reference or reference other hypertables (parent, enforcement,
|
||||||
|
-- original_execution) are plain BIGINT. The action and executor FKs are also
|
||||||
|
-- omitted since they would need to be dropped during hypertable conversion.
|
||||||
|
-- Version: 20250101000005
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- EXECUTION TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE execution (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
action BIGINT, -- references action(id); no FK because execution becomes a hypertable
|
||||||
|
action_ref TEXT NOT NULL,
|
||||||
|
config JSONB,
|
||||||
|
env_vars JSONB,
|
||||||
|
parent BIGINT, -- self-reference; no FK because execution becomes a hypertable
|
||||||
|
enforcement BIGINT, -- references enforcement(id); no FK (both are hypertables)
|
||||||
|
executor BIGINT, -- references identity(id); no FK because execution becomes a hypertable
|
||||||
|
worker BIGINT, -- references worker(id); no FK because execution becomes a hypertable
|
||||||
|
status execution_status_enum NOT NULL DEFAULT 'requested',
|
||||||
|
result JSONB,
|
||||||
|
started_at TIMESTAMPTZ, -- set when execution transitions to 'running'
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
is_workflow BOOLEAN DEFAULT false NOT NULL,
|
||||||
|
workflow_def BIGINT, -- references workflow_definition(id); no FK because execution becomes a hypertable
|
||||||
|
workflow_task JSONB,
|
||||||
|
|
||||||
|
-- Retry tracking (baked in from phase 3)
|
||||||
|
retry_count INTEGER NOT NULL DEFAULT 0,
|
||||||
|
max_retries INTEGER,
|
||||||
|
retry_reason TEXT,
|
||||||
|
original_execution BIGINT, -- self-reference; no FK because execution becomes a hypertable
|
||||||
|
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_execution_action ON execution(action);
|
||||||
|
CREATE INDEX idx_execution_action_ref ON execution(action_ref);
|
||||||
|
CREATE INDEX idx_execution_parent ON execution(parent);
|
||||||
|
CREATE INDEX idx_execution_enforcement ON execution(enforcement);
|
||||||
|
CREATE INDEX idx_execution_executor ON execution(executor);
|
||||||
|
CREATE INDEX idx_execution_worker ON execution(worker);
|
||||||
|
CREATE INDEX idx_execution_status ON execution(status);
|
||||||
|
CREATE INDEX idx_execution_created ON execution(created DESC);
|
||||||
|
CREATE INDEX idx_execution_updated ON execution(updated DESC);
|
||||||
|
CREATE INDEX idx_execution_status_created ON execution(status, created DESC);
|
||||||
|
CREATE INDEX idx_execution_status_updated ON execution(status, updated DESC);
|
||||||
|
CREATE INDEX idx_execution_action_status ON execution(action, status);
|
||||||
|
CREATE INDEX idx_execution_executor_created ON execution(executor, created DESC);
|
||||||
|
CREATE INDEX idx_execution_worker_created ON execution(worker, created DESC);
|
||||||
|
CREATE INDEX idx_execution_parent_created ON execution(parent, created DESC);
|
||||||
|
CREATE INDEX idx_execution_result_gin ON execution USING GIN (result);
|
||||||
|
CREATE INDEX idx_execution_env_vars_gin ON execution USING GIN (env_vars);
|
||||||
|
CREATE INDEX idx_execution_original_execution ON execution(original_execution) WHERE original_execution IS NOT NULL;
|
||||||
|
CREATE INDEX idx_execution_status_retry ON execution(status, retry_count) WHERE status = 'failed' AND retry_count < COALESCE(max_retries, 0);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_execution_updated
|
||||||
|
BEFORE UPDATE ON execution
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE execution IS 'Executions represent action runs, supports nested workflows';
|
||||||
|
COMMENT ON COLUMN execution.action IS 'Action being executed (may be null if action deleted)';
|
||||||
|
COMMENT ON COLUMN execution.action_ref IS 'Action reference (preserved even if action deleted)';
|
||||||
|
COMMENT ON COLUMN execution.config IS 'Snapshot of action configuration at execution time';
|
||||||
|
COMMENT ON COLUMN execution.env_vars IS 'Environment variables for this execution as key-value pairs (string -> string). These are set in the execution environment and are separate from action parameters. Used for execution context, configuration, and non-sensitive metadata.';
|
||||||
|
COMMENT ON COLUMN execution.parent IS 'Parent execution ID for workflow hierarchies (no FK — execution is a hypertable)';
|
||||||
|
COMMENT ON COLUMN execution.enforcement IS 'Enforcement that triggered this execution (no FK — both are hypertables)';
|
||||||
|
COMMENT ON COLUMN execution.executor IS 'Identity that initiated the execution (no FK — execution is a hypertable)';
|
||||||
|
COMMENT ON COLUMN execution.worker IS 'Assigned worker handling this execution (no FK — execution is a hypertable)';
|
||||||
|
COMMENT ON COLUMN execution.status IS 'Current execution lifecycle status';
|
||||||
|
COMMENT ON COLUMN execution.result IS 'Execution output/results';
|
||||||
|
COMMENT ON COLUMN execution.retry_count IS 'Current retry attempt number (0 = first attempt, 1 = first retry, etc.)';
|
||||||
|
COMMENT ON COLUMN execution.max_retries IS 'Maximum retries for this execution. Copied from action.max_retries at creation time.';
|
||||||
|
COMMENT ON COLUMN execution.retry_reason IS 'Reason for retry (e.g., "worker_unavailable", "transient_error", "manual_retry")';
|
||||||
|
COMMENT ON COLUMN execution.original_execution IS 'ID of the original execution if this is a retry. Forms a retry chain.';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- INQUIRY TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE inquiry (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
execution BIGINT NOT NULL, -- references execution(id); no FK because execution is a hypertable
|
||||||
|
prompt TEXT NOT NULL,
|
||||||
|
response_schema JSONB,
|
||||||
|
assigned_to BIGINT REFERENCES identity(id) ON DELETE SET NULL,
|
||||||
|
status inquiry_status_enum NOT NULL DEFAULT 'pending',
|
||||||
|
response JSONB,
|
||||||
|
timeout_at TIMESTAMPTZ,
|
||||||
|
responded_at TIMESTAMPTZ,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_inquiry_execution ON inquiry(execution);
|
||||||
|
CREATE INDEX idx_inquiry_assigned_to ON inquiry(assigned_to);
|
||||||
|
CREATE INDEX idx_inquiry_status ON inquiry(status);
|
||||||
|
CREATE INDEX idx_inquiry_timeout_at ON inquiry(timeout_at) WHERE timeout_at IS NOT NULL;
|
||||||
|
CREATE INDEX idx_inquiry_created ON inquiry(created DESC);
|
||||||
|
CREATE INDEX idx_inquiry_status_created ON inquiry(status, created DESC);
|
||||||
|
CREATE INDEX idx_inquiry_assigned_status ON inquiry(assigned_to, status);
|
||||||
|
CREATE INDEX idx_inquiry_execution_status ON inquiry(execution, status);
|
||||||
|
CREATE INDEX idx_inquiry_response_gin ON inquiry USING GIN (response);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_inquiry_updated
|
||||||
|
BEFORE UPDATE ON inquiry
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE inquiry IS 'Inquiries enable human-in-the-loop workflows with async user interactions';
|
||||||
|
COMMENT ON COLUMN inquiry.execution IS 'Execution that is waiting on this inquiry (no FK — execution is a hypertable)';
|
||||||
|
COMMENT ON COLUMN inquiry.prompt IS 'Question or prompt text for the user';
|
||||||
|
COMMENT ON COLUMN inquiry.response_schema IS 'JSON schema defining expected response format';
|
||||||
|
COMMENT ON COLUMN inquiry.assigned_to IS 'Identity who should respond to this inquiry';
|
||||||
|
COMMENT ON COLUMN inquiry.status IS 'Current inquiry lifecycle status';
|
||||||
|
COMMENT ON COLUMN inquiry.response IS 'User response data';
|
||||||
|
COMMENT ON COLUMN inquiry.timeout_at IS 'When this inquiry expires';
|
||||||
|
COMMENT ON COLUMN inquiry.responded_at IS 'When the response was received';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- RULE TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE rule (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
ref TEXT NOT NULL UNIQUE,
|
||||||
|
pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
|
||||||
|
pack_ref TEXT NOT NULL,
|
||||||
|
label TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
action BIGINT REFERENCES action(id) ON DELETE SET NULL,
|
||||||
|
action_ref TEXT NOT NULL,
|
||||||
|
trigger BIGINT REFERENCES trigger(id) ON DELETE SET NULL,
|
||||||
|
trigger_ref TEXT NOT NULL,
|
||||||
|
conditions JSONB NOT NULL DEFAULT '[]'::jsonb,
|
||||||
|
action_params JSONB DEFAULT '{}'::jsonb,
|
||||||
|
trigger_params JSONB DEFAULT '{}'::jsonb,
|
||||||
|
enabled BOOLEAN NOT NULL,
|
||||||
|
is_adhoc BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
|
||||||
|
-- Constraints
|
||||||
|
CONSTRAINT rule_ref_lowercase CHECK (ref = LOWER(ref)),
|
||||||
|
CONSTRAINT rule_ref_format CHECK (ref ~ '^[^.]+\.[^.]+$')
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes
|
||||||
|
CREATE INDEX idx_rule_ref ON rule(ref);
|
||||||
|
CREATE INDEX idx_rule_pack ON rule(pack);
|
||||||
|
CREATE INDEX idx_rule_action ON rule(action);
|
||||||
|
CREATE INDEX idx_rule_trigger ON rule(trigger);
|
||||||
|
CREATE INDEX idx_rule_enabled ON rule(enabled) WHERE enabled = TRUE;
|
||||||
|
CREATE INDEX idx_rule_is_adhoc ON rule(is_adhoc) WHERE is_adhoc = true;
|
||||||
|
CREATE INDEX idx_rule_created ON rule(created DESC);
|
||||||
|
CREATE INDEX idx_rule_trigger_enabled ON rule(trigger, enabled);
|
||||||
|
CREATE INDEX idx_rule_action_enabled ON rule(action, enabled);
|
||||||
|
CREATE INDEX idx_rule_pack_enabled ON rule(pack, enabled);
|
||||||
|
CREATE INDEX idx_rule_action_params_gin ON rule USING GIN (action_params);
|
||||||
|
CREATE INDEX idx_rule_trigger_params_gin ON rule USING GIN (trigger_params);
|
||||||
|
|
||||||
|
-- Trigger
|
||||||
|
CREATE TRIGGER update_rule_updated
|
||||||
|
BEFORE UPDATE ON rule
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION update_updated_column();
|
||||||
|
|
||||||
|
-- Comments
|
||||||
|
COMMENT ON TABLE rule IS 'Rules link triggers to actions with conditions';
|
||||||
|
COMMENT ON COLUMN rule.ref IS 'Unique rule reference (format: pack.name)';
|
||||||
|
COMMENT ON COLUMN rule.label IS 'Human-readable rule name';
|
||||||
|
COMMENT ON COLUMN rule.action IS 'Action to execute when rule triggers (null if action deleted)';
|
||||||
|
COMMENT ON COLUMN rule.trigger IS 'Trigger that activates this rule (null if trigger deleted)';
|
||||||
|
COMMENT ON COLUMN rule.conditions IS 'Condition expressions to evaluate before executing action';
|
||||||
|
COMMENT ON COLUMN rule.action_params IS 'Parameter overrides for the action';
|
||||||
|
COMMENT ON COLUMN rule.trigger_params IS 'Parameter overrides for the trigger';
|
||||||
|
COMMENT ON COLUMN rule.enabled IS 'Whether this rule is active';
|
||||||
|
COMMENT ON COLUMN rule.is_adhoc IS 'True if rule was manually created (ad-hoc), false if installed from pack';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Add foreign key constraints now that rule table exists
|
||||||
|
ALTER TABLE enforcement
|
||||||
|
ADD CONSTRAINT enforcement_rule_fkey
|
||||||
|
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
|
||||||
|
|
||||||
|
ALTER TABLE event
|
||||||
|
ADD CONSTRAINT event_rule_fkey
|
||||||
|
FOREIGN KEY (rule) REFERENCES rule(id) ON DELETE SET NULL;
|
||||||
|
|
||||||
|
-- ============================================================================
-- WORKER TABLE
-- ============================================================================
-- Registry of action/sensor workers; liveness is tracked via last_heartbeat
-- and health metadata stored inside the capabilities JSONB.

CREATE TABLE worker (
    id BIGSERIAL PRIMARY KEY,
    name TEXT NOT NULL UNIQUE,
    worker_type worker_type_enum NOT NULL,
    worker_role worker_role_enum NOT NULL,
    -- Optional runtime link; a deleted runtime detaches the worker.
    runtime BIGINT REFERENCES runtime(id) ON DELETE SET NULL,
    host TEXT,
    port INTEGER,
    status worker_status_enum NOT NULL DEFAULT 'active',
    capabilities JSONB,
    meta JSONB,
    last_heartbeat TIMESTAMPTZ,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
-- NOTE(review): the UNIQUE constraint on name already creates a unique
-- index, so idx_worker_name is likely redundant — confirm before relying
-- on it and consider dropping in a later migration.
CREATE INDEX idx_worker_name ON worker(name);
CREATE INDEX idx_worker_type ON worker(worker_type);
CREATE INDEX idx_worker_role ON worker(worker_role);
CREATE INDEX idx_worker_runtime ON worker(runtime);
CREATE INDEX idx_worker_status ON worker(status);
-- Partial index: heartbeat lookups only care about workers that have one.
CREATE INDEX idx_worker_last_heartbeat ON worker(last_heartbeat DESC) WHERE last_heartbeat IS NOT NULL;
CREATE INDEX idx_worker_created ON worker(created DESC);
CREATE INDEX idx_worker_status_role ON worker(status, worker_role);
CREATE INDEX idx_worker_capabilities_gin ON worker USING GIN (capabilities);
CREATE INDEX idx_worker_meta_gin ON worker USING GIN (meta);
-- NOTE(review): GIN over a single extracted scalar (health status) is
-- unusual; a btree over (capabilities -> 'health' ->> 'status') may serve
-- equality lookups better — verify against actual query plans.
CREATE INDEX idx_worker_capabilities_health_status ON worker USING GIN ((capabilities -> 'health' -> 'status'));

-- Trigger: keep updated in sync on every row update.
CREATE TRIGGER update_worker_updated
    BEFORE UPDATE ON worker
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE worker IS 'Worker registration and tracking table for action and sensor workers';
COMMENT ON COLUMN worker.name IS 'Unique worker identifier (typically hostname-based)';
COMMENT ON COLUMN worker.worker_type IS 'Worker deployment type (local or remote)';
COMMENT ON COLUMN worker.worker_role IS 'Worker role (action or sensor)';
COMMENT ON COLUMN worker.runtime IS 'Runtime environment this worker supports (optional)';
COMMENT ON COLUMN worker.host IS 'Worker host address';
COMMENT ON COLUMN worker.port IS 'Worker port number';
COMMENT ON COLUMN worker.status IS 'Worker operational status';
COMMENT ON COLUMN worker.capabilities IS 'Worker capabilities (e.g., max_concurrent_executions, supported runtimes)';
COMMENT ON COLUMN worker.meta IS 'Additional worker metadata';
COMMENT ON COLUMN worker.last_heartbeat IS 'Timestamp of last heartbeat from worker';
|
||||||
|
|
||||||
|
-- ============================================================================
-- NOTIFICATION TABLE
-- ============================================================================
-- Durable record of entity-change notifications; inserts also fan out in
-- real time via pg_notify (see notify_on_insert below).

CREATE TABLE notification (
    id BIGSERIAL PRIMARY KEY,
    channel TEXT NOT NULL,
    entity_type TEXT NOT NULL,
    entity TEXT NOT NULL,
    activity TEXT NOT NULL,
    state notification_status_enum NOT NULL DEFAULT 'created',
    content JSONB,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
CREATE INDEX idx_notification_channel ON notification(channel);
CREATE INDEX idx_notification_entity_type ON notification(entity_type);
CREATE INDEX idx_notification_entity ON notification(entity);
CREATE INDEX idx_notification_state ON notification(state);
CREATE INDEX idx_notification_created ON notification(created DESC);
CREATE INDEX idx_notification_channel_state ON notification(channel, state);
CREATE INDEX idx_notification_entity_type_entity ON notification(entity_type, entity);
CREATE INDEX idx_notification_state_created ON notification(state, created DESC);
CREATE INDEX idx_notification_content_gin ON notification USING GIN (content);

-- Trigger: keep updated in sync on every row update.
CREATE TRIGGER update_notification_updated
    BEFORE UPDATE ON notification
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Function for pg_notify on notification insert.
-- Publishes a small JSON envelope (id/entity_type/entity/activity) to the
-- channel named in NEW.channel. The content column is intentionally NOT
-- included in the payload — listeners fetch the row by id if they need it.
-- NOTE(review): pg_notify payloads have a server-imposed size limit;
-- the envelope here stays well under it, but keep it small.
CREATE OR REPLACE FUNCTION notify_on_insert()
RETURNS TRIGGER AS $$
DECLARE
    payload TEXT;
BEGIN
    -- Build JSON payload with id, entity, and activity
    payload := json_build_object(
        'id', NEW.id,
        'entity_type', NEW.entity_type,
        'entity', NEW.entity,
        'activity', NEW.activity
    )::text;

    -- Send notification to the specified channel
    PERFORM pg_notify(NEW.channel, payload);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger to send pg_notify on notification insert
CREATE TRIGGER notify_on_notification_insert
    AFTER INSERT ON notification
    FOR EACH ROW
    EXECUTE FUNCTION notify_on_insert();

-- Comments
COMMENT ON TABLE notification IS 'System notifications about entity changes for real-time updates';
COMMENT ON COLUMN notification.channel IS 'Notification channel (typically table name)';
COMMENT ON COLUMN notification.entity_type IS 'Type of entity (table name)';
COMMENT ON COLUMN notification.entity IS 'Entity identifier (typically ID or ref)';
COMMENT ON COLUMN notification.activity IS 'Activity type (e.g., "created", "updated", "completed")';
COMMENT ON COLUMN notification.state IS 'Processing state of notification';
COMMENT ON COLUMN notification.content IS 'Optional notification payload data';
|
||||||
|
|
||||||
|
-- ============================================================================
-- WORKER HEALTH VIEWS AND FUNCTIONS
-- ============================================================================

-- View for healthy workers (convenience for queries).
-- A worker qualifies when it is active, heartbeated within the last 30
-- seconds, and either reports no health block (legacy workers) or reports
-- a status of 'healthy' or 'degraded'. 'unhealthy' workers are excluded.
CREATE OR REPLACE VIEW healthy_workers AS
SELECT
    w.id,
    w.name,
    w.worker_type,
    w.worker_role,
    w.runtime,
    w.status,
    w.capabilities,
    w.last_heartbeat,
    -- Flattened health fields extracted from capabilities -> 'health'
    (w.capabilities -> 'health' ->> 'status')::TEXT as health_status,
    (w.capabilities -> 'health' ->> 'queue_depth')::INTEGER as queue_depth,
    (w.capabilities -> 'health' ->> 'consecutive_failures')::INTEGER as consecutive_failures
FROM worker w
WHERE
    w.status = 'active'
    -- 30-second freshness window for heartbeats
    AND w.last_heartbeat > NOW() - INTERVAL '30 seconds'
    AND (
        -- Healthy if no health info (backward compatible)
        w.capabilities -> 'health' IS NULL
        OR
        -- Or explicitly marked healthy
        w.capabilities -> 'health' ->> 'status' IN ('healthy', 'degraded')
    );

COMMENT ON VIEW healthy_workers IS 'Workers that are active, have fresh heartbeat, and are healthy or degraded (not unhealthy)';
|
||||||
|
|
||||||
|
-- Function to get worker queue depth estimate.
-- Reads capabilities -> 'health' ->> 'queue_depth' for the given worker id
-- and casts it to INTEGER. Returns NULL when the worker does not exist or
-- the health block / queue_depth key is absent.
CREATE OR REPLACE FUNCTION get_worker_queue_depth(worker_id_param BIGINT)
RETURNS INTEGER AS $$
BEGIN
    RETURN (
        SELECT (capabilities -> 'health' ->> 'queue_depth')::INTEGER
        FROM worker
        WHERE id = worker_id_param
    );
END;
$$ LANGUAGE plpgsql STABLE;

COMMENT ON FUNCTION get_worker_queue_depth IS 'Extract current queue depth from worker health metadata';
|
||||||
|
|
||||||
|
-- Function to check if execution is retriable.
-- Returns TRUE only when the execution exists, has failed, has a positive
-- max_retries, and has not yet exhausted its retry budget. Unknown
-- execution ids return FALSE rather than raising.
CREATE OR REPLACE FUNCTION is_execution_retriable(execution_id_param BIGINT)
RETURNS BOOLEAN AS $$
DECLARE
    exec_record RECORD;
BEGIN
    SELECT
        e.retry_count,
        e.max_retries,
        e.status
    INTO exec_record
    FROM execution e
    WHERE e.id = execution_id_param;

    -- Nonexistent execution: not retriable.
    IF NOT FOUND THEN
        RETURN FALSE;
    END IF;

    -- Can retry if:
    -- 1. Status is failed
    -- 2. max_retries is set and > 0
    -- 3. retry_count < max_retries
    RETURN (
        exec_record.status = 'failed'
        AND exec_record.max_retries IS NOT NULL
        AND exec_record.max_retries > 0
        AND exec_record.retry_count < exec_record.max_retries
    );
END;
$$ LANGUAGE plpgsql STABLE;

COMMENT ON FUNCTION is_execution_retriable IS 'Check if a failed execution can be automatically retried based on retry limits';
|
||||||
@@ -0,0 +1,145 @@
|
|||||||
|
-- Migration: Workflow System
-- Description: Creates workflow_definition and workflow_execution tables
-- (workflow_task_execution consolidated into execution.workflow_task JSONB)
--
-- NOTE: The execution table is converted to a TimescaleDB hypertable in
-- migration 000009. Hypertables cannot be the target of FK constraints,
-- so workflow_execution.execution is a plain BIGINT with no FK.
-- execution.workflow_def also has no FK (added as plain BIGINT in 000005)
-- since execution is a hypertable and FKs from hypertables are only
-- supported for simple cases — we omit it for consistency.
-- Version: 20250101000006

-- ============================================================================
-- WORKFLOW DEFINITION TABLE
-- ============================================================================

CREATE TABLE workflow_definition (
    id BIGSERIAL PRIMARY KEY,
    ref VARCHAR(255) NOT NULL UNIQUE,
    -- Owning pack; deleting the pack removes its workflow definitions.
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref VARCHAR(255) NOT NULL,
    label VARCHAR(255) NOT NULL,
    description TEXT,
    version VARCHAR(50) NOT NULL,
    param_schema JSONB,
    out_schema JSONB,
    definition JSONB NOT NULL,
    tags TEXT[] DEFAULT '{}',
    created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
    updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);

-- Indexes
CREATE INDEX idx_workflow_def_pack ON workflow_definition(pack);
-- NOTE(review): ref is UNIQUE, which already creates a unique index, so
-- idx_workflow_def_ref is likely redundant — confirm before depending on it.
CREATE INDEX idx_workflow_def_ref ON workflow_definition(ref);
CREATE INDEX idx_workflow_def_tags ON workflow_definition USING gin(tags);

-- Trigger: keep updated in sync on every row update.
CREATE TRIGGER update_workflow_definition_updated
    BEFORE UPDATE ON workflow_definition
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE workflow_definition IS 'Stores workflow definitions (YAML parsed to JSON)';
COMMENT ON COLUMN workflow_definition.ref IS 'Unique workflow reference (e.g., pack_name.workflow_name)';
COMMENT ON COLUMN workflow_definition.definition IS 'Complete workflow specification including tasks, variables, and transitions';
COMMENT ON COLUMN workflow_definition.param_schema IS 'JSON schema for workflow input parameters';
COMMENT ON COLUMN workflow_definition.out_schema IS 'JSON schema for workflow output';
|
||||||
|
|
||||||
|
-- ============================================================================
-- WORKFLOW EXECUTION TABLE
-- ============================================================================
-- Per-run workflow state: task progress arrays, published variables, and
-- the resolved task graph. execution deliberately has no FK (hypertable —
-- see migration header note).

CREATE TABLE workflow_execution (
    id BIGSERIAL PRIMARY KEY,
    execution BIGINT NOT NULL, -- references execution(id); no FK because execution is a hypertable
    workflow_def BIGINT NOT NULL REFERENCES workflow_definition(id) ON DELETE CASCADE,
    current_tasks TEXT[] DEFAULT '{}',
    completed_tasks TEXT[] DEFAULT '{}',
    failed_tasks TEXT[] DEFAULT '{}',
    skipped_tasks TEXT[] DEFAULT '{}',
    variables JSONB DEFAULT '{}',
    task_graph JSONB NOT NULL,
    status execution_status_enum NOT NULL DEFAULT 'requested',
    error_message TEXT,
    paused BOOLEAN DEFAULT false NOT NULL,
    pause_reason TEXT,
    created TIMESTAMPTZ DEFAULT NOW() NOT NULL,
    updated TIMESTAMPTZ DEFAULT NOW() NOT NULL
);

-- Indexes
CREATE INDEX idx_workflow_exec_execution ON workflow_execution(execution);
CREATE INDEX idx_workflow_exec_workflow_def ON workflow_execution(workflow_def);
CREATE INDEX idx_workflow_exec_status ON workflow_execution(status);
-- Partial index: only paused rows are interesting to the resume path.
CREATE INDEX idx_workflow_exec_paused ON workflow_execution(paused) WHERE paused = true;

-- Trigger: keep updated in sync on every row update.
CREATE TRIGGER update_workflow_execution_updated
    BEFORE UPDATE ON workflow_execution
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE workflow_execution IS 'Runtime state tracking for workflow executions. execution column has no FK — execution is a hypertable.';
COMMENT ON COLUMN workflow_execution.variables IS 'Workflow-scoped variables, updated via publish directives';
COMMENT ON COLUMN workflow_execution.task_graph IS 'Execution graph with dependencies and transitions';
COMMENT ON COLUMN workflow_execution.current_tasks IS 'Array of task names currently executing';
COMMENT ON COLUMN workflow_execution.paused IS 'True if workflow execution is paused (can be resumed)';
|
||||||
|
|
||||||
|
-- ============================================================================
-- MODIFY ACTION TABLE - Add Workflow Support
-- ============================================================================
-- A non-NULL workflow_def marks an action row as a workflow entry point.

ALTER TABLE action
    ADD COLUMN workflow_def BIGINT REFERENCES workflow_definition(id) ON DELETE CASCADE;

CREATE INDEX idx_action_workflow_def ON action(workflow_def);

COMMENT ON COLUMN action.workflow_def IS 'Reference to workflow definition (non-null means this action is a workflow)';

-- NOTE: execution.workflow_def has no FK constraint because execution is a
-- TimescaleDB hypertable (converted in migration 000009). The column was
-- created as a plain BIGINT in migration 000005.
|
||||||
|
|
||||||
|
-- ============================================================================
-- WORKFLOW VIEWS
-- ============================================================================

-- Per-run summary: definition metadata plus array_length task counts.
-- NOTE(review): array_length(arr, 1) returns NULL (not 0) for an empty
-- array — consumers should COALESCE if they need zeros.
CREATE VIEW workflow_execution_summary AS
SELECT
    we.id,
    we.execution,
    wd.ref as workflow_ref,
    wd.label as workflow_label,
    wd.version as workflow_version,
    we.status,
    we.paused,
    array_length(we.current_tasks, 1) as current_task_count,
    array_length(we.completed_tasks, 1) as completed_task_count,
    array_length(we.failed_tasks, 1) as failed_task_count,
    array_length(we.skipped_tasks, 1) as skipped_task_count,
    we.error_message,
    we.created,
    we.updated
FROM workflow_execution we
JOIN workflow_definition wd ON we.workflow_def = wd.id;

COMMENT ON VIEW workflow_execution_summary IS 'Summary view of workflow executions with task counts';

-- Definition-to-action mapping; LEFT JOIN keeps definitions that have no
-- action row yet (action_* columns come back NULL in that case).
CREATE VIEW workflow_action_link AS
SELECT
    wd.id as workflow_def_id,
    wd.ref as workflow_ref,
    wd.label,
    wd.version,
    a.id as action_id,
    a.ref as action_ref,
    a.pack as pack_id,
    a.pack_ref
FROM workflow_definition wd
LEFT JOIN action a ON a.workflow_def = wd.id;

COMMENT ON VIEW workflow_action_link IS 'Links workflow definitions to their corresponding action records';
|
||||||
@@ -0,0 +1,779 @@
|
|||||||
|
-- Migration: Supporting Systems
-- Description: Creates keys, artifacts, queue_stats, pack_environment, pack_testing,
-- and webhook function tables.
-- Consolidates former migrations: 000009 (keys_artifacts), 000010 (webhook_system),
-- 000011 (pack_environments), and 000012 (pack_testing).
-- Version: 20250101000007

-- ============================================================================
-- KEY TABLE
-- ============================================================================
-- Config values and secrets, scoped to exactly one owner. The owner column
-- is derived by the validate_key_owner trigger, never set by callers.

CREATE TABLE key (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL UNIQUE,
    owner_type owner_type_enum NOT NULL,
    -- Derived owner identifier; populated by trigger from the owner_* column
    -- that matches owner_type ('system' for system-owned keys).
    owner TEXT,
    owner_identity BIGINT REFERENCES identity(id),
    owner_pack BIGINT REFERENCES pack(id),
    owner_pack_ref TEXT,
    owner_action BIGINT, -- Forward reference to action table
    owner_action_ref TEXT,
    owner_sensor BIGINT, -- Forward reference to sensor table
    owner_sensor_ref TEXT,
    name TEXT NOT NULL,
    encrypted BOOLEAN NOT NULL,
    encryption_key_hash TEXT,
    value TEXT NOT NULL,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Constraints
    CONSTRAINT key_ref_lowercase CHECK (ref = LOWER(ref)),
    -- Dot-separated segments, no empty segments (e.g. "pack.name").
    CONSTRAINT key_ref_format CHECK (ref ~ '^[^.]+(\.[^.]+)*$')
);

-- Unique index on owner_type, owner, name
CREATE UNIQUE INDEX idx_key_unique ON key(owner_type, owner, name);

-- Indexes
-- NOTE(review): ref is UNIQUE, which already creates a unique index, so
-- idx_key_ref is likely redundant — confirm before depending on it.
CREATE INDEX idx_key_ref ON key(ref);
CREATE INDEX idx_key_owner_type ON key(owner_type);
CREATE INDEX idx_key_owner_identity ON key(owner_identity);
CREATE INDEX idx_key_owner_pack ON key(owner_pack);
CREATE INDEX idx_key_owner_action ON key(owner_action);
CREATE INDEX idx_key_owner_sensor ON key(owner_sensor);
CREATE INDEX idx_key_created ON key(created DESC);
CREATE INDEX idx_key_owner_type_owner ON key(owner_type, owner);
CREATE INDEX idx_key_owner_identity_name ON key(owner_identity, name);
CREATE INDEX idx_key_owner_pack_name ON key(owner_pack, name);
|
||||||
|
|
||||||
|
-- Function to validate and set owner fields.
-- Enforces the key ownership invariant on INSERT/UPDATE:
--   * owner_type = 'system'  -> no owner_* column may be set; owner := 'system'
--   * any other owner_type   -> exactly one owner_* column must be set, it
--     must be the one matching owner_type, and owner is derived from it.
-- Raises an exception on any violation; otherwise returns NEW with owner set.
CREATE OR REPLACE FUNCTION validate_key_owner()
RETURNS TRIGGER AS $$
DECLARE
    owner_count INTEGER := 0;
BEGIN
    -- Count how many owner fields are set
    IF NEW.owner_identity IS NOT NULL THEN owner_count := owner_count + 1; END IF;
    IF NEW.owner_pack IS NOT NULL THEN owner_count := owner_count + 1; END IF;
    IF NEW.owner_action IS NOT NULL THEN owner_count := owner_count + 1; END IF;
    IF NEW.owner_sensor IS NOT NULL THEN owner_count := owner_count + 1; END IF;

    -- System owner should have no owner fields set
    IF NEW.owner_type = 'system' THEN
        IF owner_count > 0 THEN
            RAISE EXCEPTION 'System owner cannot have specific owner fields set';
        END IF;
        NEW.owner := 'system';
    -- All other types must have exactly one owner field set
    ELSIF owner_count != 1 THEN
        RAISE EXCEPTION 'Exactly one owner field must be set for owner_type %', NEW.owner_type;
    -- Validate owner_type matches the populated field and set owner
    -- (after the branch above, exactly one owner_* column is non-NULL; if
    -- it is the wrong one for owner_type, the matching NULL check raises)
    ELSIF NEW.owner_type = 'identity' THEN
        IF NEW.owner_identity IS NULL THEN
            RAISE EXCEPTION 'owner_identity must be set for owner_type identity';
        END IF;
        NEW.owner := NEW.owner_identity::TEXT;
    ELSIF NEW.owner_type = 'pack' THEN
        IF NEW.owner_pack IS NULL THEN
            RAISE EXCEPTION 'owner_pack must be set for owner_type pack';
        END IF;
        NEW.owner := NEW.owner_pack::TEXT;
    ELSIF NEW.owner_type = 'action' THEN
        IF NEW.owner_action IS NULL THEN
            RAISE EXCEPTION 'owner_action must be set for owner_type action';
        END IF;
        NEW.owner := NEW.owner_action::TEXT;
    ELSIF NEW.owner_type = 'sensor' THEN
        IF NEW.owner_sensor IS NULL THEN
            RAISE EXCEPTION 'owner_sensor must be set for owner_type sensor';
        END IF;
        NEW.owner := NEW.owner_sensor::TEXT;
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
-- Trigger to validate owner fields (runs validate_key_owner on every write)
CREATE TRIGGER validate_key_owner_trigger
    BEFORE INSERT OR UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION validate_key_owner();

-- Trigger for updated timestamp
CREATE TRIGGER update_key_updated
    BEFORE UPDATE ON key
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE key IS 'Keys store configuration values and secrets with ownership scoping';
COMMENT ON COLUMN key.ref IS 'Unique key reference (format: [owner.]name)';
COMMENT ON COLUMN key.owner_type IS 'Type of owner (system, identity, pack, action, sensor)';
COMMENT ON COLUMN key.owner IS 'Owner identifier (auto-populated by trigger)';
COMMENT ON COLUMN key.owner_identity IS 'Identity owner (if owner_type=identity)';
COMMENT ON COLUMN key.owner_pack IS 'Pack owner (if owner_type=pack)';
COMMENT ON COLUMN key.owner_pack_ref IS 'Pack reference for owner_pack';
COMMENT ON COLUMN key.owner_action IS 'Action owner (if owner_type=action)';
COMMENT ON COLUMN key.owner_sensor IS 'Sensor owner (if owner_type=sensor)';
COMMENT ON COLUMN key.name IS 'Key name within owner scope';
COMMENT ON COLUMN key.encrypted IS 'Whether the value is encrypted';
COMMENT ON COLUMN key.encryption_key_hash IS 'Hash of encryption key used';
COMMENT ON COLUMN key.value IS 'The actual value (encrypted if encrypted=true)';

-- Add foreign key constraints for action and sensor references.
-- Deferred to ALTER TABLE because key is created before action/sensor
-- exist (the columns above are forward references).
ALTER TABLE key
    ADD CONSTRAINT key_owner_action_fkey
    FOREIGN KEY (owner_action) REFERENCES action(id) ON DELETE CASCADE;

ALTER TABLE key
    ADD CONSTRAINT key_owner_sensor_fkey
    FOREIGN KEY (owner_sensor) REFERENCES sensor(id) ON DELETE CASCADE;
|
||||||
|
|
||||||
|
-- ============================================================================
-- ARTIFACT TABLE
-- ============================================================================
-- Tracks execution outputs (files, URLs, progress, ...) with ownership
-- scoping, visibility, and a configurable retention policy.

CREATE TABLE artifact (
    id BIGSERIAL PRIMARY KEY,
    ref TEXT NOT NULL,
    scope owner_type_enum NOT NULL DEFAULT 'system',
    owner TEXT NOT NULL DEFAULT '',
    type artifact_type_enum NOT NULL,
    visibility artifact_visibility_enum NOT NULL DEFAULT 'private',
    retention_policy artifact_retention_enum NOT NULL DEFAULT 'versions',
    retention_limit INTEGER NOT NULL DEFAULT 1,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
-- NOTE(review): ref is intentionally NOT unique here (multiple versions of
-- the same artifact ref may coexist under the 'versions' retention policy
-- — TODO confirm against the retention code).
CREATE INDEX idx_artifact_ref ON artifact(ref);
CREATE INDEX idx_artifact_scope ON artifact(scope);
CREATE INDEX idx_artifact_owner ON artifact(owner);
CREATE INDEX idx_artifact_type ON artifact(type);
CREATE INDEX idx_artifact_created ON artifact(created DESC);
CREATE INDEX idx_artifact_scope_owner ON artifact(scope, owner);
CREATE INDEX idx_artifact_type_created ON artifact(type, created DESC);
CREATE INDEX idx_artifact_visibility ON artifact(visibility);
CREATE INDEX idx_artifact_visibility_scope ON artifact(visibility, scope, owner);

-- Trigger: keep updated in sync on every row update.
CREATE TRIGGER update_artifact_updated
    BEFORE UPDATE ON artifact
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE artifact IS 'Artifacts track files, logs, and outputs from executions';
COMMENT ON COLUMN artifact.ref IS 'Artifact reference/path';
COMMENT ON COLUMN artifact.scope IS 'Owner type (system, identity, pack, action, sensor)';
COMMENT ON COLUMN artifact.owner IS 'Owner identifier';
COMMENT ON COLUMN artifact.type IS 'Artifact type (file, url, progress, etc.)';
COMMENT ON COLUMN artifact.visibility IS 'Visibility level: public (all users) or private (scoped by scope/owner)';
COMMENT ON COLUMN artifact.retention_policy IS 'How to retain artifacts (versions, days, hours, minutes)';
COMMENT ON COLUMN artifact.retention_limit IS 'Numeric limit for retention policy';
|
||||||
|
|
||||||
|
-- ============================================================================
-- QUEUE_STATS TABLE
-- ============================================================================
-- One row per action (PK doubles as the FK), holding live queue counters.
-- No updated-trigger here: last_updated is maintained by the writer.

CREATE TABLE queue_stats (
    action_id BIGINT PRIMARY KEY REFERENCES action(id) ON DELETE CASCADE,
    queue_length INTEGER NOT NULL DEFAULT 0,
    active_count INTEGER NOT NULL DEFAULT 0,
    max_concurrent INTEGER NOT NULL DEFAULT 1,
    oldest_enqueued_at TIMESTAMPTZ,
    total_enqueued BIGINT NOT NULL DEFAULT 0,
    total_completed BIGINT NOT NULL DEFAULT 0,
    last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes
CREATE INDEX idx_queue_stats_last_updated ON queue_stats(last_updated);

-- Comments
COMMENT ON TABLE queue_stats IS 'Real-time queue statistics for action execution ordering';
COMMENT ON COLUMN queue_stats.action_id IS 'Foreign key to action table';
COMMENT ON COLUMN queue_stats.queue_length IS 'Number of executions waiting in queue';
COMMENT ON COLUMN queue_stats.active_count IS 'Number of currently running executions';
COMMENT ON COLUMN queue_stats.max_concurrent IS 'Maximum concurrent executions allowed';
COMMENT ON COLUMN queue_stats.oldest_enqueued_at IS 'Timestamp of oldest queued execution (NULL if queue empty)';
COMMENT ON COLUMN queue_stats.total_enqueued IS 'Total executions enqueued since queue creation';
COMMENT ON COLUMN queue_stats.total_completed IS 'Total executions completed since queue creation';
COMMENT ON COLUMN queue_stats.last_updated IS 'Timestamp of last statistics update';
|
||||||
|
|
||||||
|
-- ============================================================================
-- PACK ENVIRONMENT TABLE
-- ============================================================================
-- One isolated runtime environment per (pack, runtime) pair, enforced by
-- the UNIQUE constraint below.

CREATE TABLE IF NOT EXISTS pack_environment (
    id BIGSERIAL PRIMARY KEY,
    pack BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_ref TEXT NOT NULL,
    runtime BIGINT NOT NULL REFERENCES runtime(id) ON DELETE CASCADE,
    runtime_ref TEXT NOT NULL,
    env_path TEXT NOT NULL,
    status pack_environment_status_enum NOT NULL DEFAULT 'pending',
    installed_at TIMESTAMPTZ,
    last_verified TIMESTAMPTZ,
    install_log TEXT,
    install_error TEXT,
    metadata JSONB DEFAULT '{}'::jsonb,
    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- At most one environment per pack/runtime combination.
    UNIQUE(pack, runtime)
);

-- Indexes (IF NOT EXISTS matches the table's idempotent creation style)
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack ON pack_environment(pack);
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime ON pack_environment(runtime);
CREATE INDEX IF NOT EXISTS idx_pack_environment_status ON pack_environment(status);
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack_ref ON pack_environment(pack_ref);
CREATE INDEX IF NOT EXISTS idx_pack_environment_runtime_ref ON pack_environment(runtime_ref);
CREATE INDEX IF NOT EXISTS idx_pack_environment_pack_runtime ON pack_environment(pack, runtime);

-- Trigger for updated timestamp
CREATE TRIGGER update_pack_environment_updated
    BEFORE UPDATE ON pack_environment
    FOR EACH ROW
    EXECUTE FUNCTION update_updated_column();

-- Comments
COMMENT ON TABLE pack_environment IS 'Tracks pack-specific runtime environments for dependency isolation';
COMMENT ON COLUMN pack_environment.pack IS 'Pack that owns this environment';
COMMENT ON COLUMN pack_environment.pack_ref IS 'Pack reference for quick lookup';
COMMENT ON COLUMN pack_environment.runtime IS 'Runtime used for this environment';
COMMENT ON COLUMN pack_environment.runtime_ref IS 'Runtime reference for quick lookup';
COMMENT ON COLUMN pack_environment.env_path IS 'Filesystem path to the environment directory (e.g., /opt/attune/packenvs/mypack/python)';
COMMENT ON COLUMN pack_environment.status IS 'Current installation status';
COMMENT ON COLUMN pack_environment.installed_at IS 'When the environment was successfully installed';
COMMENT ON COLUMN pack_environment.last_verified IS 'Last time the environment was verified as working';
COMMENT ON COLUMN pack_environment.install_log IS 'Installation output logs';
COMMENT ON COLUMN pack_environment.install_error IS 'Error message if installation failed';
COMMENT ON COLUMN pack_environment.metadata IS 'Additional metadata (installed packages, versions, etc.)';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- PACK ENVIRONMENT: Update existing runtimes with installer metadata
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Python runtime installers
|
||||||
|
UPDATE runtime
|
||||||
|
SET installers = jsonb_build_object(
|
||||||
|
'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
|
||||||
|
'installers', jsonb_build_array(
|
||||||
|
jsonb_build_object(
|
||||||
|
'name', 'create_venv',
|
||||||
|
'description', 'Create Python virtual environment',
|
||||||
|
'command', 'python3',
|
||||||
|
'args', jsonb_build_array('-m', 'venv', '{env_path}'),
|
||||||
|
'cwd', '{pack_path}',
|
||||||
|
'env', jsonb_build_object(),
|
||||||
|
'order', 1,
|
||||||
|
'optional', false
|
||||||
|
),
|
||||||
|
jsonb_build_object(
|
||||||
|
'name', 'upgrade_pip',
|
||||||
|
'description', 'Upgrade pip to latest version',
|
||||||
|
'command', '{env_path}/bin/pip',
|
||||||
|
'args', jsonb_build_array('install', '--upgrade', 'pip'),
|
||||||
|
'cwd', '{pack_path}',
|
||||||
|
'env', jsonb_build_object(),
|
||||||
|
'order', 2,
|
||||||
|
'optional', true
|
||||||
|
),
|
||||||
|
jsonb_build_object(
|
||||||
|
'name', 'install_requirements',
|
||||||
|
'description', 'Install pack Python dependencies',
|
||||||
|
'command', '{env_path}/bin/pip',
|
||||||
|
'args', jsonb_build_array('install', '-r', '{pack_path}/requirements.txt'),
|
||||||
|
'cwd', '{pack_path}',
|
||||||
|
'env', jsonb_build_object(),
|
||||||
|
'order', 3,
|
||||||
|
'optional', false,
|
||||||
|
'condition', jsonb_build_object(
|
||||||
|
'file_exists', '{pack_path}/requirements.txt'
|
||||||
|
)
|
||||||
|
)
|
||||||
|
),
|
||||||
|
'executable_templates', jsonb_build_object(
|
||||||
|
'python', '{env_path}/bin/python',
|
||||||
|
'pip', '{env_path}/bin/pip'
|
||||||
|
)
|
||||||
|
)
|
||||||
|
WHERE ref = 'core.python';
|
||||||
|
|
||||||
|
-- Node.js runtime installers
-- Single npm install step, run only when the pack ships a package.json.
-- Template variables ({pack_ref}, {env_path}, {pack_path}, …) are expanded by
-- the provisioning code / get_pack_environment_path at install time.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(
        jsonb_build_object(
            'name', 'npm_install',
            'description', 'Install Node.js dependencies',
            'command', 'npm',
            -- --prefix keeps node_modules inside the pack-local env path
            'args', jsonb_build_array('install', '--prefix', '{env_path}'),
            'cwd', '{pack_path}',
            'env', jsonb_build_object(
                'NODE_PATH', '{env_path}/node_modules'
            ),
            'order', 1,
            'optional', false,
            -- Skip the step entirely when the pack has no package.json
            'condition', jsonb_build_object(
                'file_exists', '{pack_path}/package.json'
            )
        )
    ),
    -- node/npm are resolved from the system PATH (no per-env binaries)
    'executable_templates', jsonb_build_object(
        'node', 'node',
        'npm', 'npm'
    ),
    -- Env vars exported when running pack actions under this runtime
    'env_vars', jsonb_build_object(
        'NODE_PATH', '{env_path}/node_modules'
    )
)
WHERE ref = 'core.nodejs';
|
||||||
|
|
||||||
|
-- Shell runtime (no environment needed, uses system shell)
-- No installer steps; requires_environment=false tells provisioning to skip
-- environment creation entirely.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    -- sh/bash are resolved from the system PATH
    'executable_templates', jsonb_build_object(
        'sh', 'sh',
        'bash', 'bash'
    ),
    'requires_environment', false
)
WHERE ref = 'core.shell';
|
||||||
|
|
||||||
|
-- Native runtime (no environment needed, binaries are standalone)
-- No installers and no executable templates: each native action supplies its
-- own binary path.
UPDATE runtime
SET installers = jsonb_build_object(
    'base_path_template', '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}',
    'installers', jsonb_build_array(),
    'executable_templates', jsonb_build_object(),
    'requires_environment', false
)
WHERE ref = 'core.native';
|
||||||
|
|
||||||
|
-- Built-in sensor runtime (internal, no environment)
-- NOTE: unlike the other runtimes this config omits base_path_template;
-- get_pack_environment_path falls back to its default template in that case.
UPDATE runtime
SET installers = jsonb_build_object(
    'installers', jsonb_build_array(),
    'requires_environment', false
)
WHERE ref = 'core.sensor.builtin';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- PACK ENVIRONMENT: Helper functions
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to get environment path for a pack/runtime combination.
-- Expands the runtime's installers->'base_path_template' (falling back to the
-- default template) by substituting {pack_ref}, {runtime_ref} and
-- {runtime_name_lower}.
-- Params:  p_pack_ref    - pack.ref of the pack
--          p_runtime_ref - runtime.ref of the runtime
-- Returns: filesystem path, or a path containing NULL-derived parts if the
--          runtime ref is unknown (v_runtime_name stays NULL in that case).
CREATE OR REPLACE FUNCTION get_pack_environment_path(p_pack_ref TEXT, p_runtime_ref TEXT)
RETURNS TEXT AS $$
DECLARE
    v_runtime_name TEXT;
    v_base_template TEXT;
    v_result TEXT;
BEGIN
    -- Get runtime name and base path template
    SELECT
        LOWER(name),
        installers->>'base_path_template'
    INTO v_runtime_name, v_base_template
    FROM runtime
    WHERE ref = p_runtime_ref;

    IF v_base_template IS NULL THEN
        v_base_template := '/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}';
    END IF;

    -- Replace template variables
    v_result := v_base_template;
    v_result := REPLACE(v_result, '{pack_ref}', p_pack_ref);
    v_result := REPLACE(v_result, '{runtime_ref}', p_runtime_ref);
    v_result := REPLACE(v_result, '{runtime_name_lower}', v_runtime_name);

    RETURN v_result;
END;
-- STABLE, not IMMUTABLE: the function reads the runtime table, so its result
-- can change between statements. IMMUTABLE would permit PostgreSQL to cache
-- or pre-evaluate results and return stale paths after a runtime row changes.
$$ LANGUAGE plpgsql STABLE;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION get_pack_environment_path IS 'Calculate the filesystem path for a pack runtime environment';
|
||||||
|
|
||||||
|
-- Function to check if a runtime requires an environment.
-- Defaulting rules:
--   * runtime row exists but has no 'requires_environment' key -> true
--   * runtime ref not found (SELECT leaves v_requires NULL)    -> false
CREATE OR REPLACE FUNCTION runtime_requires_environment(p_runtime_ref TEXT)
RETURNS BOOLEAN AS $$
DECLARE
    v_requires BOOLEAN;
BEGIN
    SELECT COALESCE((installers->>'requires_environment')::boolean, true)
    INTO v_requires
    FROM runtime
    WHERE ref = p_runtime_ref;

    RETURN COALESCE(v_requires, false);
END;
$$ LANGUAGE plpgsql STABLE;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION runtime_requires_environment IS 'Check if a runtime needs a pack-specific environment';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- PACK ENVIRONMENT: Status view
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE OR REPLACE VIEW v_pack_environment_status AS
SELECT
    pe.id,
    pe.pack,
    p.ref AS pack_ref,
    p.label AS pack_name,
    pe.runtime,
    r.ref AS runtime_ref,
    r.name AS runtime_name,
    pe.env_path,
    pe.status,
    pe.installed_at,
    pe.last_verified,
    -- Ready environments not verified within 7 days should be re-checked.
    -- NOTE(review): last_verified IS NULL yields false here — presumably a
    -- never-verified 'ready' env is treated as not needing verification;
    -- confirm that is intended.
    CASE
        WHEN pe.status = 'ready' AND pe.last_verified < NOW() - INTERVAL '7 days' THEN true
        ELSE false
    END AS needs_verification,
    -- Map raw status values onto coarse health buckets for dashboards
    CASE
        WHEN pe.status = 'ready' THEN 'healthy'
        WHEN pe.status = 'failed' THEN 'unhealthy'
        WHEN pe.status IN ('pending', 'installing') THEN 'provisioning'
        WHEN pe.status = 'outdated' THEN 'needs_update'
        ELSE 'unknown'
    END AS health_status,
    pe.install_error,
    pe.created,
    pe.updated
FROM pack_environment pe
JOIN pack p ON pe.pack = p.id
JOIN runtime r ON pe.runtime = r.id;
|
||||||
|
|
||||||
|
COMMENT ON VIEW v_pack_environment_status IS 'Consolidated view of pack environment status with health indicators';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- PACK TEST EXECUTION TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS pack_test_execution (
    id BIGSERIAL PRIMARY KEY,
    pack_id BIGINT NOT NULL REFERENCES pack(id) ON DELETE CASCADE,
    pack_version VARCHAR(50) NOT NULL,
    execution_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    trigger_reason VARCHAR(50) NOT NULL, -- 'install', 'update', 'manual', 'validation'
    total_tests INT NOT NULL,
    passed INT NOT NULL,
    failed INT NOT NULL,
    skipped INT NOT NULL,
    pass_rate DECIMAL(5,4) NOT NULL, -- 0.0000 to 1.0000
    duration_ms BIGINT NOT NULL,
    result JSONB NOT NULL, -- Full test result structure

    created TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Counts are individually non-negative; no constraint ties
    -- passed + failed + skipped to total_tests.
    CONSTRAINT valid_test_counts CHECK (total_tests >= 0 AND passed >= 0 AND failed >= 0 AND skipped >= 0),
    CONSTRAINT valid_pass_rate CHECK (pass_rate >= 0.0 AND pass_rate <= 1.0),
    CONSTRAINT valid_trigger_reason CHECK (trigger_reason IN ('install', 'update', 'manual', 'validation'))
);
|
||||||
|
|
||||||
|
-- Indexes for efficient queries.
-- IF NOT EXISTS keeps the migration re-runnable: the table above is created
-- with IF NOT EXISTS, so a second run would otherwise fail here on duplicate
-- index names.
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_pack_id ON pack_test_execution(pack_id);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_time ON pack_test_execution(execution_time DESC);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_pass_rate ON pack_test_execution(pass_rate);
CREATE INDEX IF NOT EXISTS idx_pack_test_execution_trigger ON pack_test_execution(trigger_reason);
|
||||||
|
|
||||||
|
-- Comments for documentation
|
||||||
|
COMMENT ON TABLE pack_test_execution IS 'Tracks pack test execution results for validation and auditing';
|
||||||
|
COMMENT ON COLUMN pack_test_execution.pack_id IS 'Reference to the pack being tested';
|
||||||
|
COMMENT ON COLUMN pack_test_execution.pack_version IS 'Version of the pack at test time';
|
||||||
|
COMMENT ON COLUMN pack_test_execution.trigger_reason IS 'What triggered the test: install, update, manual, validation';
|
||||||
|
COMMENT ON COLUMN pack_test_execution.pass_rate IS 'Percentage of tests passed (0.0 to 1.0)';
|
||||||
|
COMMENT ON COLUMN pack_test_execution.result IS 'Full JSON structure with detailed test results';
|
||||||
|
|
||||||
|
-- Pack test result summary view (all test executions with pack info).
-- Uses an INNER JOIN: the previous LEFT JOIN was immediately negated by
-- "WHERE pte.id IS NOT NULL", producing identical rows but obscuring intent.
-- rn = 1 marks each pack's most recent execution (see pack_latest_test).
CREATE OR REPLACE VIEW pack_test_summary AS
SELECT
    p.id AS pack_id,
    p.ref AS pack_ref,
    p.label AS pack_label,
    pte.id AS test_execution_id,
    pte.pack_version,
    pte.execution_time AS test_time,
    pte.trigger_reason,
    pte.total_tests,
    pte.passed,
    pte.failed,
    pte.skipped,
    pte.pass_rate,
    pte.duration_ms,
    ROW_NUMBER() OVER (PARTITION BY p.id ORDER BY pte.execution_time DESC) AS rn
FROM pack p
JOIN pack_test_execution pte ON p.id = pte.pack_id;
|
||||||
|
|
||||||
|
COMMENT ON VIEW pack_test_summary IS 'Summary of all pack test executions with pack details';
|
||||||
|
|
||||||
|
-- Latest test results per pack view.
-- Selects the rn = 1 row per pack from pack_test_summary (the most recent
-- execution by execution_time).
CREATE OR REPLACE VIEW pack_latest_test AS
SELECT
    pack_id,
    pack_ref,
    pack_label,
    test_execution_id,
    pack_version,
    test_time,
    trigger_reason,
    total_tests,
    passed,
    failed,
    skipped,
    pass_rate,
    duration_ms
FROM pack_test_summary
WHERE rn = 1;
|
||||||
|
|
||||||
|
COMMENT ON VIEW pack_latest_test IS 'Latest test results for each pack';
|
||||||
|
|
||||||
|
-- Function to get pack test statistics.
-- Returns a single row of aggregates over all executions for p_pack_id;
-- aggregates are NULL (and counts 0) when the pack has no executions.
CREATE OR REPLACE FUNCTION get_pack_test_stats(p_pack_id BIGINT)
RETURNS TABLE (
    total_executions BIGINT,
    successful_executions BIGINT,
    failed_executions BIGINT,
    avg_pass_rate DECIMAL,
    avg_duration_ms BIGINT,
    last_test_time TIMESTAMPTZ,
    last_test_passed BOOLEAN
) AS $$
BEGIN
    RETURN QUERY
    SELECT
        COUNT(*)::BIGINT AS total_executions,
        -- NOTE(review): passed = total_tests also counts executions with
        -- total_tests = 0 as "successful" — confirm that is intended.
        COUNT(*) FILTER (WHERE passed = total_tests)::BIGINT AS successful_executions,
        COUNT(*) FILTER (WHERE failed > 0)::BIGINT AS failed_executions,
        AVG(pass_rate) AS avg_pass_rate,
        AVG(duration_ms)::BIGINT AS avg_duration_ms,
        MAX(execution_time) AS last_test_time,
        -- Most recent execution's pass/fail flag (failed = 0 means passed)
        (SELECT failed = 0 FROM pack_test_execution
         WHERE pack_id = p_pack_id
         ORDER BY execution_time DESC
         LIMIT 1) AS last_test_passed
    FROM pack_test_execution
    WHERE pack_id = p_pack_id;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION get_pack_test_stats IS 'Get statistical summary of test executions for a pack';
|
||||||
|
|
||||||
|
-- Function to check if pack has recent passing tests.
-- Params:  p_pack_id   - pack to inspect
--          p_hours_ago - lookback window in hours (default 24)
-- Returns: true iff at least one execution in the window ran at least one
--          test (total_tests > 0) with zero failures.
CREATE OR REPLACE FUNCTION pack_has_passing_tests(
    p_pack_id BIGINT,
    p_hours_ago INT DEFAULT 24
)
RETURNS BOOLEAN AS $$
DECLARE
    v_has_passing_tests BOOLEAN;
BEGIN
    SELECT EXISTS(
        SELECT 1
        FROM pack_test_execution
        WHERE pack_id = p_pack_id
          -- Build the lookback interval from the hour count
          AND execution_time > NOW() - (p_hours_ago || ' hours')::INTERVAL
          AND failed = 0
          AND total_tests > 0
    ) INTO v_has_passing_tests;

    RETURN v_has_passing_tests;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION pack_has_passing_tests IS 'Check if pack has recent passing test executions';
|
||||||
|
|
||||||
|
-- Add trigger to update pack metadata on test execution.
-- Intentionally a no-op placeholder: reserved for future bookkeeping (e.g. a
-- pack.last_tested column). Returning NEW is required for AFTER row triggers.
CREATE OR REPLACE FUNCTION update_pack_test_metadata()
RETURNS TRIGGER AS $$
BEGIN
    -- Could update pack table with last_tested timestamp if we add that column
    -- For now, just a placeholder for future functionality
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER trigger_update_pack_test_metadata
    AFTER INSERT ON pack_test_execution
    FOR EACH ROW
    EXECUTE FUNCTION update_pack_test_metadata();
|
||||||
|
|
||||||
|
COMMENT ON TRIGGER trigger_update_pack_test_metadata ON pack_test_execution IS 'Updates pack metadata when tests are executed';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- WEBHOOK FUNCTIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Drop existing functions to avoid signature conflicts
-- (CREATE OR REPLACE cannot change a function's return type, so prior
-- versions with different signatures must be dropped first).
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT, JSONB);
DROP FUNCTION IF EXISTS enable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS disable_trigger_webhook(BIGINT);
DROP FUNCTION IF EXISTS regenerate_trigger_webhook_key(BIGINT);
|
|
||||||
|
-- Function to enable webhooks for a trigger.
-- Params:  p_trigger_id - trigger to enable webhooks on
--          p_config     - optional webhook configuration (stored verbatim)
-- Returns: one row (webhook_enabled, webhook_key, webhook_url).
-- Raises:  exception when the trigger does not exist.
-- Reuses an existing webhook_key if present; otherwise calls
-- generate_webhook_key() (defined elsewhere in the schema).
CREATE OR REPLACE FUNCTION enable_trigger_webhook(
    p_trigger_id BIGINT,
    p_config JSONB DEFAULT '{}'::jsonb
)
RETURNS TABLE(
    webhook_enabled BOOLEAN,
    webhook_key VARCHAR(255),
    webhook_url TEXT
) AS $$
DECLARE
    v_webhook_key VARCHAR(255);
    v_api_base_url TEXT := 'http://localhost:8080'; -- Default, should be configured
BEGIN
    -- Check if trigger exists
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Generate webhook key if one doesn't exist
    SELECT t.webhook_key INTO v_webhook_key
    FROM trigger t
    WHERE t.id = p_trigger_id;

    IF v_webhook_key IS NULL THEN
        v_webhook_key := generate_webhook_key();
    END IF;

    -- Update trigger to enable webhooks
    UPDATE trigger
    SET
        webhook_enabled = TRUE,
        webhook_key = v_webhook_key,
        webhook_config = p_config,
        updated = NOW()
    WHERE id = p_trigger_id;

    -- Return webhook details
    RETURN QUERY SELECT
        TRUE,
        v_webhook_key,
        v_api_base_url || '/api/v1/webhooks/' || v_webhook_key;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION enable_trigger_webhook(BIGINT, JSONB) IS
|
||||||
|
'Enables webhooks for a trigger with optional configuration. Generates a new webhook key if one does not exist. Returns webhook details.';
|
||||||
|
|
||||||
|
-- Function to disable webhooks for a trigger.
-- Raises when the trigger does not exist; otherwise clears webhook_enabled
-- and webhook_key and returns TRUE.
CREATE OR REPLACE FUNCTION disable_trigger_webhook(
    p_trigger_id BIGINT
)
RETURNS BOOLEAN AS $$
BEGIN
    -- Check if trigger exists
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Update trigger to disable webhooks
    -- Set webhook_key to NULL when disabling to remove it from API responses
    UPDATE trigger
    SET
        webhook_enabled = FALSE,
        webhook_key = NULL,
        updated = NOW()
    WHERE id = p_trigger_id;

    RETURN TRUE;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION disable_trigger_webhook(BIGINT) IS
|
||||||
|
'Disables webhooks for a trigger. Webhook key is removed when disabled.';
|
||||||
|
|
||||||
|
-- Function to regenerate webhook key for a trigger.
-- Params:  p_trigger_id - trigger whose key is rotated
-- Returns: one row (webhook_key, previous_key_revoked).
-- Raises:  when the trigger does not exist or webhooks are not enabled.
CREATE OR REPLACE FUNCTION regenerate_trigger_webhook_key(
    p_trigger_id BIGINT
)
RETURNS TABLE(
    webhook_key VARCHAR(255),
    previous_key_revoked BOOLEAN
) AS $$
DECLARE
    v_new_key VARCHAR(255);
    v_old_key VARCHAR(255);
    v_webhook_enabled BOOLEAN;
BEGIN
    -- Check if trigger exists
    IF NOT EXISTS (SELECT 1 FROM trigger WHERE id = p_trigger_id) THEN
        RAISE EXCEPTION 'Trigger with id % does not exist', p_trigger_id;
    END IF;

    -- Get current webhook state
    SELECT t.webhook_key, t.webhook_enabled INTO v_old_key, v_webhook_enabled
    FROM trigger t
    WHERE t.id = p_trigger_id;

    -- Check if webhooks are enabled.
    -- IS NOT TRUE (rather than "NOT v_webhook_enabled") also raises when the
    -- column is NULL: "NOT NULL" evaluates to NULL, which would silently skip
    -- this guard and rotate a key on a trigger that never had webhooks enabled.
    IF v_webhook_enabled IS NOT TRUE THEN
        RAISE EXCEPTION 'Webhooks are not enabled for trigger %', p_trigger_id;
    END IF;

    -- Generate new key
    v_new_key := generate_webhook_key();

    -- Update trigger with new key
    UPDATE trigger
    SET
        webhook_key = v_new_key,
        updated = NOW()
    WHERE id = p_trigger_id;

    -- Return new key and whether old key was present
    RETURN QUERY SELECT
        v_new_key,
        (v_old_key IS NOT NULL);
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION regenerate_trigger_webhook_key(BIGINT) IS
|
||||||
|
'Regenerates webhook key for a trigger. Returns new key and whether a previous key was revoked.';
|
||||||
|
|
||||||
|
-- Verify all webhook functions exist.
-- Loops over the expected names instead of repeating the same EXISTS check
-- three times; messages and failure behavior are unchanged.
DO $$
DECLARE
    v_fn TEXT;
BEGIN
    FOREACH v_fn IN ARRAY ARRAY[
        'enable_trigger_webhook',
        'disable_trigger_webhook',
        'regenerate_trigger_webhook_key'
    ] LOOP
        IF NOT EXISTS (
            SELECT 1 FROM pg_proc p
            JOIN pg_namespace n ON p.pronamespace = n.oid
            WHERE n.nspname = current_schema()
              AND p.proname = v_fn
        ) THEN
            RAISE EXCEPTION '% function not found after migration', v_fn;
        END IF;
    END LOOP;

    RAISE NOTICE 'All webhook functions successfully created';
END $$;
|
||||||
@@ -0,0 +1,428 @@
|
|||||||
|
-- Migration: LISTEN/NOTIFY Triggers
|
||||||
|
-- Description: Consolidated PostgreSQL LISTEN/NOTIFY triggers for real-time event notifications
|
||||||
|
-- Version: 20250101000008
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- EXECUTION CHANGE NOTIFICATION
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to notify on execution creation.
-- Publishes the new execution row (plus denormalized enforcement refs) on the
-- 'execution_created' channel.
-- NOTE(review): pg_notify payloads are limited to ~8000 bytes; NEW.result is
-- included verbatim and could exceed that for large results — confirm.
CREATE OR REPLACE FUNCTION notify_execution_created()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
    enforcement_rule_ref TEXT;
    enforcement_trigger_ref TEXT;
BEGIN
    -- Lookup enforcement details if this execution is linked to an enforcement
    IF NEW.enforcement IS NOT NULL THEN
        SELECT rule_ref, trigger_ref
        INTO enforcement_rule_ref, enforcement_trigger_ref
        FROM enforcement
        WHERE id = NEW.enforcement;
    END IF;

    payload := json_build_object(
        'entity_type', 'execution',
        'entity_id', NEW.id,
        'id', NEW.id,
        'action_id', NEW.action,
        'action_ref', NEW.action_ref,
        'status', NEW.status,
        'enforcement', NEW.enforcement,
        'rule_ref', enforcement_rule_ref,
        'trigger_ref', enforcement_trigger_ref,
        'parent', NEW.parent,
        'result', NEW.result,
        'started_at', NEW.started_at,
        'workflow_task', NEW.workflow_task,
        'created', NEW.created,
        'updated', NEW.updated
    );

    PERFORM pg_notify('execution_created', payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
-- Function to notify on execution status changes.
-- Publishes on 'execution_status_changed' only when an UPDATE actually
-- changed the status column (IS DISTINCT FROM is NULL-safe). Includes both
-- new and old status so listeners can track transitions.
CREATE OR REPLACE FUNCTION notify_execution_status_changed()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
    enforcement_rule_ref TEXT;
    enforcement_trigger_ref TEXT;
BEGIN
    -- Only notify on updates, not inserts
    IF TG_OP = 'UPDATE' AND OLD.status IS DISTINCT FROM NEW.status THEN
        -- Lookup enforcement details if this execution is linked to an enforcement
        IF NEW.enforcement IS NOT NULL THEN
            SELECT rule_ref, trigger_ref
            INTO enforcement_rule_ref, enforcement_trigger_ref
            FROM enforcement
            WHERE id = NEW.enforcement;
        END IF;

        payload := json_build_object(
            'entity_type', 'execution',
            'entity_id', NEW.id,
            'id', NEW.id,
            'action_id', NEW.action,
            'action_ref', NEW.action_ref,
            'status', NEW.status,
            'old_status', OLD.status,
            'enforcement', NEW.enforcement,
            'rule_ref', enforcement_rule_ref,
            'trigger_ref', enforcement_trigger_ref,
            'parent', NEW.parent,
            'result', NEW.result,
            'started_at', NEW.started_at,
            'workflow_task', NEW.workflow_task,
            'created', NEW.created,
            'updated', NEW.updated
        );

        PERFORM pg_notify('execution_status_changed', payload::text);
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
-- Trigger on execution table for creation
CREATE TRIGGER execution_created_notify
    AFTER INSERT ON execution
    FOR EACH ROW
    EXECUTE FUNCTION notify_execution_created();

-- Trigger on execution table for status changes.
-- Fires on every UPDATE; the function itself filters to real status changes.
CREATE TRIGGER execution_status_changed_notify
    AFTER UPDATE ON execution
    FOR EACH ROW
    EXECUTE FUNCTION notify_execution_status_changed();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_execution_created() IS 'Sends execution creation notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
COMMENT ON FUNCTION notify_execution_status_changed() IS 'Sends execution status change notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- EVENT CREATION NOTIFICATION
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to notify on event creation.
-- Publishes the full new event row on the 'event_created' channel.
CREATE OR REPLACE FUNCTION notify_event_created()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    payload := json_build_object(
        'entity_type', 'event',
        'entity_id', NEW.id,
        'id', NEW.id,
        'trigger', NEW.trigger,
        'trigger_ref', NEW.trigger_ref,
        'source', NEW.source,
        'source_ref', NEW.source_ref,
        'rule', NEW.rule,
        'rule_ref', NEW.rule_ref,
        'payload', NEW.payload,
        'created', NEW.created
    );

    PERFORM pg_notify('event_created', payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger on event table
CREATE TRIGGER event_created_notify
    AFTER INSERT ON event
    FOR EACH ROW
    EXECUTE FUNCTION notify_event_created();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_event_created() IS 'Sends event creation notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ENFORCEMENT CHANGE NOTIFICATION
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to notify on enforcement creation.
-- Publishes the full new enforcement row on the 'enforcement_created' channel.
CREATE OR REPLACE FUNCTION notify_enforcement_created()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    payload := json_build_object(
        'entity_type', 'enforcement',
        'entity_id', NEW.id,
        'id', NEW.id,
        'rule', NEW.rule,
        'rule_ref', NEW.rule_ref,
        'trigger_ref', NEW.trigger_ref,
        'event', NEW.event,
        'status', NEW.status,
        'condition', NEW.condition,
        'conditions', NEW.conditions,
        'config', NEW.config,
        'payload', NEW.payload,
        'created', NEW.created,
        'resolved_at', NEW.resolved_at
    );

    PERFORM pg_notify('enforcement_created', payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger on enforcement table
CREATE TRIGGER enforcement_created_notify
    AFTER INSERT ON enforcement
    FOR EACH ROW
    EXECUTE FUNCTION notify_enforcement_created();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_enforcement_created() IS 'Sends enforcement creation notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
|
||||||
|
-- Function to notify on enforcement status changes.
-- Publishes on 'enforcement_status_changed' only when an UPDATE actually
-- changed the status column (IS DISTINCT FROM is NULL-safe).
CREATE OR REPLACE FUNCTION notify_enforcement_status_changed()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    -- Only notify on updates when status actually changed
    IF TG_OP = 'UPDATE' AND OLD.status IS DISTINCT FROM NEW.status THEN
        payload := json_build_object(
            'entity_type', 'enforcement',
            'entity_id', NEW.id,
            'id', NEW.id,
            'rule', NEW.rule,
            'rule_ref', NEW.rule_ref,
            'trigger_ref', NEW.trigger_ref,
            'event', NEW.event,
            'status', NEW.status,
            'old_status', OLD.status,
            'condition', NEW.condition,
            'conditions', NEW.conditions,
            'config', NEW.config,
            'payload', NEW.payload,
            'created', NEW.created,
            'resolved_at', NEW.resolved_at
        );

        PERFORM pg_notify('enforcement_status_changed', payload::text);
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger on enforcement table for status changes
CREATE TRIGGER enforcement_status_changed_notify
    AFTER UPDATE ON enforcement
    FOR EACH ROW
    EXECUTE FUNCTION notify_enforcement_status_changed();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_enforcement_status_changed() IS 'Sends enforcement status change notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- INQUIRY NOTIFICATIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to notify on inquiry creation.
-- Publishes the new inquiry row on the 'inquiry_created' channel.
CREATE OR REPLACE FUNCTION notify_inquiry_created()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    payload := json_build_object(
        'entity_type', 'inquiry',
        'entity_id', NEW.id,
        'id', NEW.id,
        'execution', NEW.execution,
        'status', NEW.status,
        'ttl', NEW.ttl,
        'created', NEW.created
    );

    PERFORM pg_notify('inquiry_created', payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
-- Function to notify on inquiry response.
-- Publishes on 'inquiry_responded' when an UPDATE transitions status into
-- 'responded' from any other value.
CREATE OR REPLACE FUNCTION notify_inquiry_responded()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    -- Only notify when status changes to 'responded'.
    -- IS DISTINCT FROM (rather than "!=") is NULL-safe: with "!=", a NULL
    -- OLD.status would make the condition NULL and the notification would be
    -- silently skipped. This also matches the other *_status_changed
    -- functions in this migration.
    IF TG_OP = 'UPDATE' AND NEW.status = 'responded' AND OLD.status IS DISTINCT FROM 'responded' THEN
        payload := json_build_object(
            'entity_type', 'inquiry',
            'entity_id', NEW.id,
            'id', NEW.id,
            'execution', NEW.execution,
            'status', NEW.status,
            'response', NEW.response,
            'updated', NEW.updated
        );

        PERFORM pg_notify('inquiry_responded', payload::text);
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
-- Trigger on inquiry table for creation
CREATE TRIGGER inquiry_created_notify
    AFTER INSERT ON inquiry
    FOR EACH ROW
    EXECUTE FUNCTION notify_inquiry_created();

-- Trigger on inquiry table for responses.
-- Fires on every UPDATE; the function filters to 'responded' transitions.
CREATE TRIGGER inquiry_responded_notify
    AFTER UPDATE ON inquiry
    FOR EACH ROW
    EXECUTE FUNCTION notify_inquiry_responded();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_inquiry_created() IS 'Sends inquiry creation notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
COMMENT ON FUNCTION notify_inquiry_responded() IS 'Sends inquiry response notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- WORKFLOW EXECUTION NOTIFICATIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to notify on workflow execution status changes.
-- Publishes on 'workflow_execution_status_changed' for workflow executions
-- whose status actually changed. The is_workflow check is duplicated in the
-- trigger's WHEN clause below, so non-workflow rows never invoke this
-- function; the in-function check is defense in depth.
CREATE OR REPLACE FUNCTION notify_workflow_execution_status_changed()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    -- Only notify for workflow executions when status changes
    IF TG_OP = 'UPDATE' AND NEW.is_workflow = true AND OLD.status IS DISTINCT FROM NEW.status THEN
        payload := json_build_object(
            'entity_type', 'execution',
            'entity_id', NEW.id,
            'id', NEW.id,
            'action_ref', NEW.action_ref,
            'status', NEW.status,
            'old_status', OLD.status,
            'workflow_def', NEW.workflow_def,
            'parent', NEW.parent,
            'created', NEW.created,
            'updated', NEW.updated
        );

        PERFORM pg_notify('workflow_execution_status_changed', payload::text);
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger on execution table for workflow status changes
CREATE TRIGGER workflow_execution_status_changed_notify
    AFTER UPDATE ON execution
    FOR EACH ROW
    WHEN (NEW.is_workflow = true)
    EXECUTE FUNCTION notify_workflow_execution_status_changed();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_workflow_execution_status_changed() IS 'Sends workflow execution status change notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ARTIFACT NOTIFICATIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Function to notify on artifact creation.
-- Publishes artifact metadata (not its content) on the 'artifact_created'
-- channel.
CREATE OR REPLACE FUNCTION notify_artifact_created()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
BEGIN
    payload := json_build_object(
        'entity_type', 'artifact',
        'entity_id', NEW.id,
        'id', NEW.id,
        'ref', NEW.ref,
        'type', NEW.type,
        'visibility', NEW.visibility,
        'name', NEW.name,
        'execution', NEW.execution,
        'scope', NEW.scope,
        'owner', NEW.owner,
        'content_type', NEW.content_type,
        'size_bytes', NEW.size_bytes,
        'created', NEW.created
    );

    PERFORM pg_notify('artifact_created', payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger on artifact table for creation
CREATE TRIGGER artifact_created_notify
    AFTER INSERT ON artifact
    FOR EACH ROW
    EXECUTE FUNCTION notify_artifact_created();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_artifact_created() IS 'Sends artifact creation notifications via PostgreSQL LISTEN/NOTIFY';
|
||||||
|
|
||||||
|
-- Function to notify on artifact updates (progress appends, data changes).
-- Publishes on 'artifact_updated' for every UPDATE of an artifact row (there
-- is no OLD-vs-NEW comparison, so no-op updates also notify). For
-- 'progress'-type artifacts whose data is a JSON array, the latest entry's
-- percent/message and the entry count are summarized into the payload.
CREATE OR REPLACE FUNCTION notify_artifact_updated()
RETURNS TRIGGER AS $$
DECLARE
    payload JSON;
    latest_percent DOUBLE PRECISION;
    latest_message TEXT;
    entry_count INTEGER;
BEGIN
    -- Notify on UPDATE operations (the trigger below only fires on UPDATE,
    -- so this guard is defensive)
    IF TG_OP = 'UPDATE' THEN
        -- Extract progress summary from data array if this is a progress artifact
        IF NEW.type = 'progress' AND NEW.data IS NOT NULL AND jsonb_typeof(NEW.data) = 'array' THEN
            entry_count := jsonb_array_length(NEW.data);
            IF entry_count > 0 THEN
                -- Last array element holds the most recent progress entry
                latest_percent := (NEW.data -> (entry_count - 1) ->> 'percent')::DOUBLE PRECISION;
                latest_message := NEW.data -> (entry_count - 1) ->> 'message';
            END IF;
        END IF;

        -- progress_* fields remain NULL for non-progress artifacts
        payload := json_build_object(
            'entity_type', 'artifact',
            'entity_id', NEW.id,
            'id', NEW.id,
            'ref', NEW.ref,
            'type', NEW.type,
            'visibility', NEW.visibility,
            'name', NEW.name,
            'execution', NEW.execution,
            'scope', NEW.scope,
            'owner', NEW.owner,
            'content_type', NEW.content_type,
            'size_bytes', NEW.size_bytes,
            'progress_percent', latest_percent,
            'progress_message', latest_message,
            'progress_entries', entry_count,
            'created', NEW.created,
            'updated', NEW.updated
        );

        PERFORM pg_notify('artifact_updated', payload::text);
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger on artifact table for updates
CREATE TRIGGER artifact_updated_notify
    AFTER UPDATE ON artifact
    FOR EACH ROW
    EXECUTE FUNCTION notify_artifact_updated();
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION notify_artifact_updated() IS 'Sends artifact update notifications via PostgreSQL LISTEN/NOTIFY (includes progress summary for progress-type artifacts)';
|
||||||
@@ -0,0 +1,616 @@
|
|||||||
|
-- Migration: TimescaleDB Entity History and Analytics
|
||||||
|
-- Description: Creates append-only history hypertables for execution and worker tables.
|
||||||
|
-- Uses JSONB diff format to track field-level changes via PostgreSQL triggers.
|
||||||
|
-- Converts the event, enforcement, and execution tables into TimescaleDB
|
||||||
|
-- hypertables (events are immutable; enforcements are updated exactly once;
|
||||||
|
-- executions are updated ~4 times during their lifecycle).
|
||||||
|
-- Includes continuous aggregates for dashboard analytics.
|
||||||
|
-- See docs/plans/timescaledb-entity-history.md for full design.
|
||||||
|
--
|
||||||
|
-- NOTE: FK constraints that would reference hypertable targets were never
|
||||||
|
-- created in earlier migrations (000004, 000005, 000006), so no DROP
|
||||||
|
-- CONSTRAINT statements are needed here.
|
||||||
|
-- Version: 20250101000009
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- EXTENSION
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE EXTENSION IF NOT EXISTS timescaledb;
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- HELPER FUNCTIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Returns a small {digest, size, type} object instead of the full JSONB value.
|
||||||
|
-- Used in history triggers for columns that can be arbitrarily large (e.g. result).
|
||||||
|
-- The full value is always available on the live row.
|
||||||
|
CREATE OR REPLACE FUNCTION _jsonb_digest_summary(val JSONB)
|
||||||
|
RETURNS JSONB AS $$
|
||||||
|
BEGIN
|
||||||
|
IF val IS NULL THEN
|
||||||
|
RETURN NULL;
|
||||||
|
END IF;
|
||||||
|
RETURN jsonb_build_object(
|
||||||
|
'digest', 'md5:' || md5(val::text),
|
||||||
|
'size', octet_length(val::text),
|
||||||
|
'type', jsonb_typeof(val)
|
||||||
|
);
|
||||||
|
END;
|
||||||
|
$$ LANGUAGE plpgsql IMMUTABLE;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION _jsonb_digest_summary(JSONB) IS
|
||||||
|
'Returns a compact {digest, size, type} summary of a JSONB value for use in history tables. '
|
||||||
|
'The digest is md5 of the text representation — sufficient for change-detection, not for security.';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- HISTORY TABLES
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- execution_history
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE TABLE execution_history (
|
||||||
|
time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
operation TEXT NOT NULL,
|
||||||
|
entity_id BIGINT NOT NULL,
|
||||||
|
entity_ref TEXT,
|
||||||
|
changed_fields TEXT[] NOT NULL DEFAULT '{}',
|
||||||
|
old_values JSONB,
|
||||||
|
new_values JSONB
|
||||||
|
);
|
||||||
|
|
||||||
|
SELECT create_hypertable('execution_history', 'time',
|
||||||
|
chunk_time_interval => INTERVAL '1 day');
|
||||||
|
|
||||||
|
CREATE INDEX idx_execution_history_entity
|
||||||
|
ON execution_history (entity_id, time DESC);
|
||||||
|
|
||||||
|
CREATE INDEX idx_execution_history_entity_ref
|
||||||
|
ON execution_history (entity_ref, time DESC);
|
||||||
|
|
||||||
|
CREATE INDEX idx_execution_history_status_changes
|
||||||
|
ON execution_history (time DESC)
|
||||||
|
WHERE 'status' = ANY(changed_fields);
|
||||||
|
|
||||||
|
CREATE INDEX idx_execution_history_changed_fields
|
||||||
|
ON execution_history USING GIN (changed_fields);
|
||||||
|
|
||||||
|
COMMENT ON TABLE execution_history IS 'Append-only history of field-level changes to the execution table (TimescaleDB hypertable)';
|
||||||
|
COMMENT ON COLUMN execution_history.time IS 'When the change occurred (hypertable partitioning dimension)';
|
||||||
|
COMMENT ON COLUMN execution_history.operation IS 'INSERT, UPDATE, or DELETE';
|
||||||
|
COMMENT ON COLUMN execution_history.entity_id IS 'execution.id of the changed row';
|
||||||
|
COMMENT ON COLUMN execution_history.entity_ref IS 'Denormalized action_ref for JOIN-free queries';
|
||||||
|
COMMENT ON COLUMN execution_history.changed_fields IS 'Array of field names that changed (empty for INSERT/DELETE)';
|
||||||
|
COMMENT ON COLUMN execution_history.old_values IS 'Previous values of changed fields (NULL for INSERT)';
|
||||||
|
COMMENT ON COLUMN execution_history.new_values IS 'New values of changed fields (NULL for DELETE)';
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- worker_history
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE TABLE worker_history (
|
||||||
|
time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
operation TEXT NOT NULL,
|
||||||
|
entity_id BIGINT NOT NULL,
|
||||||
|
entity_ref TEXT,
|
||||||
|
changed_fields TEXT[] NOT NULL DEFAULT '{}',
|
||||||
|
old_values JSONB,
|
||||||
|
new_values JSONB
|
||||||
|
);
|
||||||
|
|
||||||
|
SELECT create_hypertable('worker_history', 'time',
|
||||||
|
chunk_time_interval => INTERVAL '7 days');
|
||||||
|
|
||||||
|
CREATE INDEX idx_worker_history_entity
|
||||||
|
ON worker_history (entity_id, time DESC);
|
||||||
|
|
||||||
|
CREATE INDEX idx_worker_history_entity_ref
|
||||||
|
ON worker_history (entity_ref, time DESC);
|
||||||
|
|
||||||
|
CREATE INDEX idx_worker_history_status_changes
|
||||||
|
ON worker_history (time DESC)
|
||||||
|
WHERE 'status' = ANY(changed_fields);
|
||||||
|
|
||||||
|
CREATE INDEX idx_worker_history_changed_fields
|
||||||
|
ON worker_history USING GIN (changed_fields);
|
||||||
|
|
||||||
|
COMMENT ON TABLE worker_history IS 'Append-only history of field-level changes to the worker table (TimescaleDB hypertable)';
|
||||||
|
COMMENT ON COLUMN worker_history.entity_ref IS 'Denormalized worker name for JOIN-free queries';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- CONVERT EVENT TABLE TO HYPERTABLE
|
||||||
|
-- ============================================================================
|
||||||
|
-- Events are immutable after insert — they are never updated. Instead of
|
||||||
|
-- maintaining a separate event_history table to track changes that never
|
||||||
|
-- happen, we convert the event table itself into a TimescaleDB hypertable
|
||||||
|
-- partitioned on `created`. This gives us automatic time-based partitioning,
|
||||||
|
-- compression, and retention for free.
|
||||||
|
--
|
||||||
|
-- No FK constraints reference event(id) — enforcement.event was created as a
|
||||||
|
-- plain BIGINT in migration 000004 (hypertables cannot be FK targets).
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
-- Replace the single-column PK with a composite PK that includes the
|
||||||
|
-- partitioning column (required by TimescaleDB).
|
||||||
|
ALTER TABLE event DROP CONSTRAINT event_pkey;
|
||||||
|
ALTER TABLE event ADD PRIMARY KEY (id, created);
|
||||||
|
|
||||||
|
SELECT create_hypertable('event', 'created',
|
||||||
|
chunk_time_interval => INTERVAL '1 day',
|
||||||
|
migrate_data => true);
|
||||||
|
|
||||||
|
COMMENT ON TABLE event IS 'Events are instances of triggers firing (TimescaleDB hypertable partitioned on created)';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- CONVERT ENFORCEMENT TABLE TO HYPERTABLE
|
||||||
|
-- ============================================================================
|
||||||
|
-- Enforcements are created and then updated exactly once (status changes from
|
||||||
|
-- `created` to `processed` or `disabled` within ~1 second). This single update
|
||||||
|
-- happens well before the 7-day compression window, so UPDATE on uncompressed
|
||||||
|
-- chunks works without issues.
|
||||||
|
--
|
||||||
|
-- No FK constraints reference enforcement(id) — execution.enforcement was
|
||||||
|
-- created as a plain BIGINT in migration 000005.
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
ALTER TABLE enforcement DROP CONSTRAINT enforcement_pkey;
|
||||||
|
ALTER TABLE enforcement ADD PRIMARY KEY (id, created);
|
||||||
|
|
||||||
|
SELECT create_hypertable('enforcement', 'created',
|
||||||
|
chunk_time_interval => INTERVAL '1 day',
|
||||||
|
migrate_data => true);
|
||||||
|
|
||||||
|
COMMENT ON TABLE enforcement IS 'Enforcements represent rule triggering by events (TimescaleDB hypertable partitioned on created)';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- CONVERT EXECUTION TABLE TO HYPERTABLE
|
||||||
|
-- ============================================================================
|
||||||
|
-- Executions are updated ~4 times during their lifecycle (requested → scheduled
|
||||||
|
-- → running → completed/failed), completing within at most ~1 day — well before
|
||||||
|
-- the 7-day compression window. The `updated` column and its BEFORE UPDATE
|
||||||
|
-- trigger are preserved (used by timeout monitor and UI).
|
||||||
|
--
|
||||||
|
-- No FK constraints reference execution(id) — inquiry.execution,
|
||||||
|
-- workflow_execution.execution, execution.parent, and execution.original_execution
|
||||||
|
-- were all created as plain BIGINT columns in migrations 000005 and 000006.
|
||||||
|
--
|
||||||
|
-- The existing execution_history hypertable and its trigger are preserved —
|
||||||
|
-- they track field-level diffs of each update, which remains valuable for
|
||||||
|
-- a mutable table.
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
ALTER TABLE execution DROP CONSTRAINT execution_pkey;
|
||||||
|
ALTER TABLE execution ADD PRIMARY KEY (id, created);
|
||||||
|
|
||||||
|
SELECT create_hypertable('execution', 'created',
|
||||||
|
chunk_time_interval => INTERVAL '1 day',
|
||||||
|
migrate_data => true);
|
||||||
|
|
||||||
|
COMMENT ON TABLE execution IS 'Executions represent action runs with workflow support (TimescaleDB hypertable partitioned on created). Updated ~4 times during lifecycle, completing within ~1 day (well before 7-day compression window).';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- TRIGGER FUNCTIONS
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- execution history trigger
|
||||||
|
-- Tracked fields: status, result, executor, worker, workflow_task, env_vars, started_at
|
||||||
|
-- Note: result uses _jsonb_digest_summary() to avoid storing large payloads
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE OR REPLACE FUNCTION record_execution_history()
|
||||||
|
RETURNS TRIGGER AS $$
|
||||||
|
DECLARE
|
||||||
|
changed TEXT[] := '{}';
|
||||||
|
old_vals JSONB := '{}';
|
||||||
|
new_vals JSONB := '{}';
|
||||||
|
BEGIN
|
||||||
|
IF TG_OP = 'INSERT' THEN
|
||||||
|
INSERT INTO execution_history (time, operation, entity_id, entity_ref, changed_fields, old_values, new_values)
|
||||||
|
VALUES (NOW(), 'INSERT', NEW.id, NEW.action_ref, '{}', NULL,
|
||||||
|
jsonb_build_object(
|
||||||
|
'status', NEW.status,
|
||||||
|
'action_ref', NEW.action_ref,
|
||||||
|
'executor', NEW.executor,
|
||||||
|
'worker', NEW.worker,
|
||||||
|
'parent', NEW.parent,
|
||||||
|
'enforcement', NEW.enforcement,
|
||||||
|
'started_at', NEW.started_at
|
||||||
|
));
|
||||||
|
RETURN NEW;
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF TG_OP = 'DELETE' THEN
|
||||||
|
INSERT INTO execution_history (time, operation, entity_id, entity_ref, changed_fields, old_values, new_values)
|
||||||
|
VALUES (NOW(), 'DELETE', OLD.id, OLD.action_ref, '{}', NULL, NULL);
|
||||||
|
RETURN OLD;
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
-- UPDATE: detect which fields changed
|
||||||
|
|
||||||
|
IF OLD.status IS DISTINCT FROM NEW.status THEN
|
||||||
|
changed := array_append(changed, 'status');
|
||||||
|
old_vals := old_vals || jsonb_build_object('status', OLD.status);
|
||||||
|
new_vals := new_vals || jsonb_build_object('status', NEW.status);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
-- Result: store a compact digest instead of the full JSONB to avoid bloat.
|
||||||
|
-- The live execution row always has the complete result.
|
||||||
|
IF OLD.result IS DISTINCT FROM NEW.result THEN
|
||||||
|
changed := array_append(changed, 'result');
|
||||||
|
old_vals := old_vals || jsonb_build_object('result', _jsonb_digest_summary(OLD.result));
|
||||||
|
new_vals := new_vals || jsonb_build_object('result', _jsonb_digest_summary(NEW.result));
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.executor IS DISTINCT FROM NEW.executor THEN
|
||||||
|
changed := array_append(changed, 'executor');
|
||||||
|
old_vals := old_vals || jsonb_build_object('executor', OLD.executor);
|
||||||
|
new_vals := new_vals || jsonb_build_object('executor', NEW.executor);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.worker IS DISTINCT FROM NEW.worker THEN
|
||||||
|
changed := array_append(changed, 'worker');
|
||||||
|
old_vals := old_vals || jsonb_build_object('worker', OLD.worker);
|
||||||
|
new_vals := new_vals || jsonb_build_object('worker', NEW.worker);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.workflow_task IS DISTINCT FROM NEW.workflow_task THEN
|
||||||
|
changed := array_append(changed, 'workflow_task');
|
||||||
|
old_vals := old_vals || jsonb_build_object('workflow_task', OLD.workflow_task);
|
||||||
|
new_vals := new_vals || jsonb_build_object('workflow_task', NEW.workflow_task);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.env_vars IS DISTINCT FROM NEW.env_vars THEN
|
||||||
|
changed := array_append(changed, 'env_vars');
|
||||||
|
old_vals := old_vals || jsonb_build_object('env_vars', OLD.env_vars);
|
||||||
|
new_vals := new_vals || jsonb_build_object('env_vars', NEW.env_vars);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.started_at IS DISTINCT FROM NEW.started_at THEN
|
||||||
|
changed := array_append(changed, 'started_at');
|
||||||
|
old_vals := old_vals || jsonb_build_object('started_at', OLD.started_at);
|
||||||
|
new_vals := new_vals || jsonb_build_object('started_at', NEW.started_at);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
-- Only record if something actually changed
|
||||||
|
IF array_length(changed, 1) > 0 THEN
|
||||||
|
INSERT INTO execution_history (time, operation, entity_id, entity_ref, changed_fields, old_values, new_values)
|
||||||
|
VALUES (NOW(), 'UPDATE', NEW.id, NEW.action_ref, changed, old_vals, new_vals);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
RETURN NEW;
|
||||||
|
END;
|
||||||
|
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION record_execution_history() IS 'Records field-level changes to execution table in execution_history hypertable';
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- worker history trigger
|
||||||
|
-- Tracked fields: name, status, capabilities, meta, host, port
|
||||||
|
-- Excludes: last_heartbeat when it is the only field that changed
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE OR REPLACE FUNCTION record_worker_history()
|
||||||
|
RETURNS TRIGGER AS $$
|
||||||
|
DECLARE
|
||||||
|
changed TEXT[] := '{}';
|
||||||
|
old_vals JSONB := '{}';
|
||||||
|
new_vals JSONB := '{}';
|
||||||
|
BEGIN
|
||||||
|
IF TG_OP = 'INSERT' THEN
|
||||||
|
INSERT INTO worker_history (time, operation, entity_id, entity_ref, changed_fields, old_values, new_values)
|
||||||
|
VALUES (NOW(), 'INSERT', NEW.id, NEW.name, '{}', NULL,
|
||||||
|
jsonb_build_object(
|
||||||
|
'name', NEW.name,
|
||||||
|
'worker_type', NEW.worker_type,
|
||||||
|
'worker_role', NEW.worker_role,
|
||||||
|
'status', NEW.status,
|
||||||
|
'host', NEW.host,
|
||||||
|
'port', NEW.port
|
||||||
|
));
|
||||||
|
RETURN NEW;
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF TG_OP = 'DELETE' THEN
|
||||||
|
INSERT INTO worker_history (time, operation, entity_id, entity_ref, changed_fields, old_values, new_values)
|
||||||
|
VALUES (NOW(), 'DELETE', OLD.id, OLD.name, '{}', NULL, NULL);
|
||||||
|
RETURN OLD;
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
-- UPDATE: detect which fields changed
|
||||||
|
IF OLD.name IS DISTINCT FROM NEW.name THEN
|
||||||
|
changed := array_append(changed, 'name');
|
||||||
|
old_vals := old_vals || jsonb_build_object('name', OLD.name);
|
||||||
|
new_vals := new_vals || jsonb_build_object('name', NEW.name);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.status IS DISTINCT FROM NEW.status THEN
|
||||||
|
changed := array_append(changed, 'status');
|
||||||
|
old_vals := old_vals || jsonb_build_object('status', OLD.status);
|
||||||
|
new_vals := new_vals || jsonb_build_object('status', NEW.status);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.capabilities IS DISTINCT FROM NEW.capabilities THEN
|
||||||
|
changed := array_append(changed, 'capabilities');
|
||||||
|
old_vals := old_vals || jsonb_build_object('capabilities', OLD.capabilities);
|
||||||
|
new_vals := new_vals || jsonb_build_object('capabilities', NEW.capabilities);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.meta IS DISTINCT FROM NEW.meta THEN
|
||||||
|
changed := array_append(changed, 'meta');
|
||||||
|
old_vals := old_vals || jsonb_build_object('meta', OLD.meta);
|
||||||
|
new_vals := new_vals || jsonb_build_object('meta', NEW.meta);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.host IS DISTINCT FROM NEW.host THEN
|
||||||
|
changed := array_append(changed, 'host');
|
||||||
|
old_vals := old_vals || jsonb_build_object('host', OLD.host);
|
||||||
|
new_vals := new_vals || jsonb_build_object('host', NEW.host);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
IF OLD.port IS DISTINCT FROM NEW.port THEN
|
||||||
|
changed := array_append(changed, 'port');
|
||||||
|
old_vals := old_vals || jsonb_build_object('port', OLD.port);
|
||||||
|
new_vals := new_vals || jsonb_build_object('port', NEW.port);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
-- Only record if something besides last_heartbeat changed.
|
||||||
|
-- Pure heartbeat-only updates are excluded to avoid high-volume noise.
|
||||||
|
IF array_length(changed, 1) > 0 THEN
|
||||||
|
INSERT INTO worker_history (time, operation, entity_id, entity_ref, changed_fields, old_values, new_values)
|
||||||
|
VALUES (NOW(), 'UPDATE', NEW.id, NEW.name, changed, old_vals, new_vals);
|
||||||
|
END IF;
|
||||||
|
|
||||||
|
RETURN NEW;
|
||||||
|
END;
|
||||||
|
$$ LANGUAGE plpgsql;
|
||||||
|
|
||||||
|
COMMENT ON FUNCTION record_worker_history() IS 'Records field-level changes to worker table in worker_history hypertable. Excludes heartbeat-only updates.';
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ATTACH TRIGGERS TO OPERATIONAL TABLES
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
CREATE TRIGGER execution_history_trigger
|
||||||
|
AFTER INSERT OR UPDATE OR DELETE ON execution
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION record_execution_history();
|
||||||
|
|
||||||
|
CREATE TRIGGER worker_history_trigger
|
||||||
|
AFTER INSERT OR UPDATE OR DELETE ON worker
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE FUNCTION record_worker_history();
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- COMPRESSION POLICIES
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- History tables
|
||||||
|
ALTER TABLE execution_history SET (
|
||||||
|
timescaledb.compress,
|
||||||
|
timescaledb.compress_segmentby = 'entity_id',
|
||||||
|
timescaledb.compress_orderby = 'time DESC'
|
||||||
|
);
|
||||||
|
SELECT add_compression_policy('execution_history', INTERVAL '7 days');
|
||||||
|
|
||||||
|
ALTER TABLE worker_history SET (
|
||||||
|
timescaledb.compress,
|
||||||
|
timescaledb.compress_segmentby = 'entity_id',
|
||||||
|
timescaledb.compress_orderby = 'time DESC'
|
||||||
|
);
|
||||||
|
SELECT add_compression_policy('worker_history', INTERVAL '7 days');
|
||||||
|
|
||||||
|
-- Event table (hypertable)
|
||||||
|
ALTER TABLE event SET (
|
||||||
|
timescaledb.compress,
|
||||||
|
timescaledb.compress_segmentby = 'trigger_ref',
|
||||||
|
timescaledb.compress_orderby = 'created DESC'
|
||||||
|
);
|
||||||
|
SELECT add_compression_policy('event', INTERVAL '7 days');
|
||||||
|
|
||||||
|
-- Enforcement table (hypertable)
|
||||||
|
ALTER TABLE enforcement SET (
|
||||||
|
timescaledb.compress,
|
||||||
|
timescaledb.compress_segmentby = 'rule_ref',
|
||||||
|
timescaledb.compress_orderby = 'created DESC'
|
||||||
|
);
|
||||||
|
SELECT add_compression_policy('enforcement', INTERVAL '7 days');
|
||||||
|
|
||||||
|
-- Execution table (hypertable)
|
||||||
|
ALTER TABLE execution SET (
|
||||||
|
timescaledb.compress,
|
||||||
|
timescaledb.compress_segmentby = 'action_ref',
|
||||||
|
timescaledb.compress_orderby = 'created DESC'
|
||||||
|
);
|
||||||
|
SELECT add_compression_policy('execution', INTERVAL '7 days');
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- RETENTION POLICIES
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
SELECT add_retention_policy('execution_history', INTERVAL '90 days');
|
||||||
|
SELECT add_retention_policy('worker_history', INTERVAL '180 days');
|
||||||
|
SELECT add_retention_policy('event', INTERVAL '90 days');
|
||||||
|
SELECT add_retention_policy('enforcement', INTERVAL '90 days');
|
||||||
|
SELECT add_retention_policy('execution', INTERVAL '90 days');
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- CONTINUOUS AGGREGATES
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Drop existing continuous aggregates if they exist, so this migration can be
|
||||||
|
-- re-run safely after a partial failure. (TimescaleDB continuous aggregates
|
||||||
|
-- must be dropped with CASCADE to remove their associated policies.)
|
||||||
|
DROP MATERIALIZED VIEW IF EXISTS execution_status_hourly CASCADE;
|
||||||
|
DROP MATERIALIZED VIEW IF EXISTS execution_throughput_hourly CASCADE;
|
||||||
|
DROP MATERIALIZED VIEW IF EXISTS event_volume_hourly CASCADE;
|
||||||
|
DROP MATERIALIZED VIEW IF EXISTS worker_status_hourly CASCADE;
|
||||||
|
DROP MATERIALIZED VIEW IF EXISTS enforcement_volume_hourly CASCADE;
|
||||||
|
DROP MATERIALIZED VIEW IF EXISTS execution_volume_hourly CASCADE;
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- execution_status_hourly
|
||||||
|
-- Tracks execution status transitions per hour, grouped by action_ref and new status.
|
||||||
|
-- Powers: execution throughput chart, failure rate widget, status breakdown over time.
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE MATERIALIZED VIEW execution_status_hourly
|
||||||
|
WITH (timescaledb.continuous) AS
|
||||||
|
SELECT
|
||||||
|
time_bucket('1 hour', time) AS bucket,
|
||||||
|
entity_ref AS action_ref,
|
||||||
|
new_values->>'status' AS new_status,
|
||||||
|
COUNT(*) AS transition_count
|
||||||
|
FROM execution_history
|
||||||
|
WHERE 'status' = ANY(changed_fields)
|
||||||
|
GROUP BY bucket, entity_ref, new_values->>'status'
|
||||||
|
WITH NO DATA;
|
||||||
|
|
||||||
|
SELECT add_continuous_aggregate_policy('execution_status_hourly',
|
||||||
|
start_offset => INTERVAL '7 days',
|
||||||
|
end_offset => INTERVAL '1 hour',
|
||||||
|
schedule_interval => INTERVAL '30 minutes'
|
||||||
|
);
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- execution_throughput_hourly
|
||||||
|
-- Tracks total execution creation volume per hour, regardless of status.
|
||||||
|
-- Powers: execution throughput sparkline on the dashboard.
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE MATERIALIZED VIEW execution_throughput_hourly
|
||||||
|
WITH (timescaledb.continuous) AS
|
||||||
|
SELECT
|
||||||
|
time_bucket('1 hour', time) AS bucket,
|
||||||
|
entity_ref AS action_ref,
|
||||||
|
COUNT(*) AS execution_count
|
||||||
|
FROM execution_history
|
||||||
|
WHERE operation = 'INSERT'
|
||||||
|
GROUP BY bucket, entity_ref
|
||||||
|
WITH NO DATA;
|
||||||
|
|
||||||
|
SELECT add_continuous_aggregate_policy('execution_throughput_hourly',
|
||||||
|
start_offset => INTERVAL '7 days',
|
||||||
|
end_offset => INTERVAL '1 hour',
|
||||||
|
schedule_interval => INTERVAL '30 minutes'
|
||||||
|
);
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- event_volume_hourly
|
||||||
|
-- Tracks event creation volume per hour by trigger ref.
|
||||||
|
-- Powers: event throughput monitoring widget.
|
||||||
|
-- NOTE: Queries the event table directly (it is now a hypertable) instead of
|
||||||
|
-- a separate event_history table.
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE MATERIALIZED VIEW event_volume_hourly
|
||||||
|
WITH (timescaledb.continuous) AS
|
||||||
|
SELECT
|
||||||
|
time_bucket('1 hour', created) AS bucket,
|
||||||
|
trigger_ref,
|
||||||
|
COUNT(*) AS event_count
|
||||||
|
FROM event
|
||||||
|
GROUP BY bucket, trigger_ref
|
||||||
|
WITH NO DATA;
|
||||||
|
|
||||||
|
SELECT add_continuous_aggregate_policy('event_volume_hourly',
|
||||||
|
start_offset => INTERVAL '7 days',
|
||||||
|
end_offset => INTERVAL '1 hour',
|
||||||
|
schedule_interval => INTERVAL '30 minutes'
|
||||||
|
);
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- worker_status_hourly
|
||||||
|
-- Tracks worker status changes per hour (online/offline/draining transitions).
|
||||||
|
-- Powers: worker health trends widget.
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE MATERIALIZED VIEW worker_status_hourly
|
||||||
|
WITH (timescaledb.continuous) AS
|
||||||
|
SELECT
|
||||||
|
time_bucket('1 hour', time) AS bucket,
|
||||||
|
entity_ref AS worker_name,
|
||||||
|
new_values->>'status' AS new_status,
|
||||||
|
COUNT(*) AS transition_count
|
||||||
|
FROM worker_history
|
||||||
|
WHERE 'status' = ANY(changed_fields)
|
||||||
|
GROUP BY bucket, entity_ref, new_values->>'status'
|
||||||
|
WITH NO DATA;
|
||||||
|
|
||||||
|
SELECT add_continuous_aggregate_policy('worker_status_hourly',
|
||||||
|
start_offset => INTERVAL '30 days',
|
||||||
|
end_offset => INTERVAL '1 hour',
|
||||||
|
schedule_interval => INTERVAL '1 hour'
|
||||||
|
);
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- enforcement_volume_hourly
|
||||||
|
-- Tracks enforcement creation volume per hour by rule ref.
|
||||||
|
-- Powers: rule activation rate monitoring.
|
||||||
|
-- NOTE: Queries the enforcement table directly (it is now a hypertable)
|
||||||
|
-- instead of a separate enforcement_history table.
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE MATERIALIZED VIEW enforcement_volume_hourly
|
||||||
|
WITH (timescaledb.continuous) AS
|
||||||
|
SELECT
|
||||||
|
time_bucket('1 hour', created) AS bucket,
|
||||||
|
rule_ref,
|
||||||
|
COUNT(*) AS enforcement_count
|
||||||
|
FROM enforcement
|
||||||
|
GROUP BY bucket, rule_ref
|
||||||
|
WITH NO DATA;
|
||||||
|
|
||||||
|
SELECT add_continuous_aggregate_policy('enforcement_volume_hourly',
|
||||||
|
start_offset => INTERVAL '7 days',
|
||||||
|
end_offset => INTERVAL '1 hour',
|
||||||
|
schedule_interval => INTERVAL '30 minutes'
|
||||||
|
);
|
||||||
|
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
-- execution_volume_hourly
|
||||||
|
-- Tracks execution creation volume per hour by action_ref and status.
|
||||||
|
-- This queries the execution hypertable directly (like event_volume_hourly
|
||||||
|
-- queries the event table). Complements the existing execution_status_hourly
|
||||||
|
-- and execution_throughput_hourly aggregates which query execution_history.
|
||||||
|
--
|
||||||
|
-- Use case: direct execution volume monitoring without relying on the history
|
||||||
|
-- trigger (belt-and-suspenders, plus captures the initial status at creation).
|
||||||
|
-- ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
CREATE MATERIALIZED VIEW execution_volume_hourly
|
||||||
|
WITH (timescaledb.continuous) AS
|
||||||
|
SELECT
|
||||||
|
time_bucket('1 hour', created) AS bucket,
|
||||||
|
action_ref,
|
||||||
|
status AS initial_status,
|
||||||
|
COUNT(*) AS execution_count
|
||||||
|
FROM execution
|
||||||
|
GROUP BY bucket, action_ref, status
|
||||||
|
WITH NO DATA;
|
||||||
|
|
||||||
|
SELECT add_continuous_aggregate_policy('execution_volume_hourly',
|
||||||
|
start_offset => INTERVAL '7 days',
|
||||||
|
end_offset => INTERVAL '1 hour',
|
||||||
|
schedule_interval => INTERVAL '30 minutes'
|
||||||
|
);
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- INITIAL REFRESH NOTE
|
||||||
|
-- ============================================================================
|
||||||
|
-- NOTE: refresh_continuous_aggregate() cannot run inside a transaction block,
|
||||||
|
-- and the migration runner wraps each file in BEGIN/COMMIT. The continuous
|
||||||
|
-- aggregate policies configured above will automatically backfill data within
|
||||||
|
-- their first scheduled interval (30 min – 1 hour). On a fresh database there
|
||||||
|
-- is no history data to backfill anyway.
|
||||||
|
--
|
||||||
|
-- If you need an immediate manual refresh after migration, run outside a
|
||||||
|
-- transaction:
|
||||||
|
-- CALL refresh_continuous_aggregate('execution_status_hourly', NULL, NOW());
|
||||||
|
-- CALL refresh_continuous_aggregate('execution_throughput_hourly', NULL, NOW());
|
||||||
|
-- CALL refresh_continuous_aggregate('event_volume_hourly', NULL, NOW());
|
||||||
|
-- CALL refresh_continuous_aggregate('worker_status_hourly', NULL, NOW());
|
||||||
|
-- CALL refresh_continuous_aggregate('enforcement_volume_hourly', NULL, NOW());
|
||||||
|
-- CALL refresh_continuous_aggregate('execution_volume_hourly', NULL, NOW());
|
||||||
@@ -0,0 +1,202 @@
|
|||||||
|
-- Migration: Artifact Content System
|
||||||
|
-- Description: Enhances the artifact table with content fields (name, description,
|
||||||
|
-- content_type, size_bytes, execution link, structured data, visibility)
|
||||||
|
-- and creates the artifact_version table for versioned file/data storage.
|
||||||
|
--
|
||||||
|
-- The artifact table now serves as the "header" for a logical artifact,
|
||||||
|
-- while artifact_version rows hold the actual immutable content snapshots.
|
||||||
|
-- Progress-type artifacts store their live state directly in artifact.data
|
||||||
|
-- (append-style updates without creating new versions).
|
||||||
|
--
|
||||||
|
-- Version: 20250101000010
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ENHANCE ARTIFACT TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
|
||||||
|
-- Add content fields to the artifact "header" table. All additions use
-- IF NOT EXISTS so the migration is safe to re-run.

-- Human-readable name (e.g. "Build Log", "Test Results")
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS name TEXT;

-- Optional longer description
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS description TEXT;

-- MIME content type (e.g. "application/json", "text/plain", "image/png")
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS content_type TEXT;

-- Total size in bytes of the latest version's content (NULL for progress artifacts)
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS size_bytes BIGINT;

-- Execution that produced/owns this artifact (plain BIGINT, no FK — execution is a hypertable)
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS execution BIGINT;

-- Structured data for progress-type artifacts and small structured payloads.
-- Progress artifacts append entries here; file artifacts may store parsed metadata.
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS data JSONB;

-- Visibility: public artifacts are viewable by all authenticated users;
-- private artifacts are restricted based on the artifact's scope/owner.
-- The scope (identity, action, pack, etc.) + owner fields define who can access
-- a private artifact. Full RBAC enforcement is deferred — for now the column
-- enables filtering and is available for future permission checks.
-- NOTE: artifact_visibility_enum must already exist (created in an earlier migration).
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS visibility artifact_visibility_enum NOT NULL DEFAULT 'private';
|
||||||
|
|
||||||
|
-- New indexes for the added columns. IF NOT EXISTS keeps the migration re-runnable,
-- matching the ADD COLUMN statements above.
CREATE INDEX IF NOT EXISTS idx_artifact_execution ON artifact(execution);
CREATE INDEX IF NOT EXISTS idx_artifact_name ON artifact(name);
-- Composite index for "artifacts of type X produced by execution Y" lookups.
CREATE INDEX IF NOT EXISTS idx_artifact_execution_type ON artifact(execution, type);
CREATE INDEX IF NOT EXISTS idx_artifact_visibility ON artifact(visibility);
-- Composite index supporting private-artifact filtering by scope/owner.
CREATE INDEX IF NOT EXISTS idx_artifact_visibility_scope ON artifact(visibility, scope, owner);

-- Comments for new columns (schema-level documentation, visible via \d+ / catalogs).
COMMENT ON COLUMN artifact.name IS 'Human-readable artifact name';
COMMENT ON COLUMN artifact.description IS 'Optional description of the artifact';
COMMENT ON COLUMN artifact.content_type IS 'MIME content type (e.g. application/json, text/plain)';
COMMENT ON COLUMN artifact.size_bytes IS 'Size of latest version content in bytes';
COMMENT ON COLUMN artifact.execution IS 'Execution that produced this artifact (no FK — execution is a hypertable)';
COMMENT ON COLUMN artifact.data IS 'Structured JSONB data for progress artifacts or metadata';
COMMENT ON COLUMN artifact.visibility IS 'Access visibility: public (all users) or private (scope/owner-restricted)';
|
||||||
|
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- ARTIFACT_VERSION TABLE
|
||||||
|
-- ============================================================================
|
||||||
|
-- Each row is an immutable snapshot of artifact content. File-type artifacts get
|
||||||
|
-- a new version on each upload; progress-type artifacts do NOT use versions
|
||||||
|
-- (they update artifact.data directly).
|
||||||
|
|
||||||
|
-- Immutable content snapshots for artifacts. Exactly one of content /
-- content_json / file_path is expected to carry the payload per row
-- (not enforced by a CHECK constraint here — enforced by the application).
CREATE TABLE artifact_version (
    id BIGSERIAL PRIMARY KEY,

    -- Parent artifact; versions are deleted with their artifact.
    artifact BIGINT NOT NULL REFERENCES artifact(id) ON DELETE CASCADE,

    -- Monotonically increasing version number within the artifact (1-based);
    -- uniqueness per artifact is enforced by uq_artifact_version_artifact_version.
    version INTEGER NOT NULL,

    -- MIME content type for this specific version (may differ from parent)
    content_type TEXT,

    -- Size of the content in bytes
    size_bytes BIGINT,

    -- Binary content (file uploads, DB-stored). NULL for file-backed versions.
    content BYTEA,

    -- Structured content (JSON payloads, parsed results, etc.)
    content_json JSONB,

    -- Relative path from artifacts_dir root for disk-stored content.
    -- When set, content BYTEA is NULL — file lives on shared volume.
    -- Pattern: {ref_slug}/v{version}.{ext}
    -- e.g., "mypack/build_log/v1.txt"
    file_path TEXT,

    -- Free-form metadata about this version (e.g. commit hash, build number)
    meta JSONB,

    -- Who or what created this version (identity ref, action ref, "system", etc.)
    created_by TEXT,

    -- Rows are immutable once written — no updated column by design.
    created TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
|
||||||
|
|
||||||
|
-- Unique constraint: one version number per artifact. This also backs the
-- MAX(version) lookup in next_artifact_version().
ALTER TABLE artifact_version
    ADD CONSTRAINT uq_artifact_version_artifact_version UNIQUE (artifact, version);

-- Indexes (table is created in this migration, so no IF NOT EXISTS needed)
CREATE INDEX idx_artifact_version_artifact ON artifact_version(artifact);
-- DESC ordering serves "latest version first" queries.
CREATE INDEX idx_artifact_version_artifact_version ON artifact_version(artifact, version DESC);
CREATE INDEX idx_artifact_version_created ON artifact_version(created DESC);
-- Partial index: only file-backed versions carry a path.
CREATE INDEX idx_artifact_version_file_path ON artifact_version(file_path) WHERE file_path IS NOT NULL;

-- Schema-level documentation
COMMENT ON TABLE artifact_version IS 'Immutable content snapshots for artifacts (file uploads, structured data)';
COMMENT ON COLUMN artifact_version.artifact IS 'Parent artifact this version belongs to';
COMMENT ON COLUMN artifact_version.version IS 'Version number (1-based, monotonically increasing per artifact)';
COMMENT ON COLUMN artifact_version.content_type IS 'MIME content type for this version';
COMMENT ON COLUMN artifact_version.size_bytes IS 'Size of content in bytes';
COMMENT ON COLUMN artifact_version.content IS 'Binary content (file data)';
COMMENT ON COLUMN artifact_version.content_json IS 'Structured JSON content';
COMMENT ON COLUMN artifact_version.meta IS 'Free-form metadata about this version';
COMMENT ON COLUMN artifact_version.created_by IS 'Who created this version (identity ref, action ref, system)';
COMMENT ON COLUMN artifact_version.file_path IS 'Relative path from artifacts_dir root for disk-stored content. When set, content BYTEA is NULL — file lives on shared volume.';
|
||||||
|
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- HELPER FUNCTION: next_artifact_version
|
||||||
|
-- ============================================================================
|
||||||
|
-- Returns the next version number for an artifact (MAX(version) + 1, or 1 if none).
|
||||||
|
|
||||||
|
-- Computes the next version number for an artifact: MAX(version) + 1 over the
-- artifact's existing rows, or 1 when the artifact has no versions yet
-- (COALESCE turns the NULL aggregate into 0).
CREATE OR REPLACE FUNCTION next_artifact_version(p_artifact_id BIGINT)
RETURNS INTEGER AS $$
BEGIN
    -- Return the scalar subquery directly — no intermediate variable needed.
    RETURN (
        SELECT COALESCE(MAX(version), 0) + 1
        FROM artifact_version
        WHERE artifact = p_artifact_id
    );
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION next_artifact_version IS 'Returns the next version number for the given artifact';
|
||||||
|
|
||||||
|
|
||||||
|
-- ============================================================================
|
||||||
|
-- RETENTION ENFORCEMENT FUNCTION
|
||||||
|
-- ============================================================================
|
||||||
|
-- Called after inserting a new version to enforce the artifact retention policy.
|
||||||
|
-- For 'versions' policy: deletes oldest versions beyond the limit.
|
||||||
|
-- Time-based policies (days/hours/minutes) are handled by a scheduled job (not this trigger).
|
||||||
|
|
||||||
|
-- Trigger function (AFTER INSERT on artifact_version): enforces the parent
-- artifact's version-count retention policy and mirrors the new version's
-- size/content_type onto the parent artifact row.
CREATE OR REPLACE FUNCTION enforce_artifact_retention()
RETURNS TRIGGER AS $$
DECLARE
    v_policy artifact_retention_enum;  -- parent artifact's retention policy
    v_limit INTEGER;                   -- max versions to keep ('versions' policy)
    v_count INTEGER;                   -- current version count (includes NEW row)
BEGIN
    -- Look up the parent artifact's retention settings.
    SELECT retention_policy, retention_limit
    INTO v_policy, v_limit
    FROM artifact
    WHERE id = NEW.artifact;

    -- Only the 'versions' policy is handled here; time-based policies are
    -- handled by a scheduled job. A NULL or non-positive v_limit falls through
    -- (NULL > 0 evaluates to NULL, which is not TRUE).
    IF v_policy = 'versions' AND v_limit > 0 THEN
        -- Count existing versions. The trigger fires AFTER INSERT, so the
        -- newly inserted row is already included in this count.
        SELECT COUNT(*) INTO v_count
        FROM artifact_version
        WHERE artifact = NEW.artifact;

        -- If over limit, delete the oldest ones (lowest version numbers first).
        -- NOTE(review): no row locking here — concurrent inserts for the same
        -- artifact could each read a stale count; confirm this is acceptable.
        IF v_count > v_limit THEN
            DELETE FROM artifact_version
            WHERE id IN (
                SELECT id
                FROM artifact_version
                WHERE artifact = NEW.artifact
                ORDER BY version ASC
                LIMIT (v_count - v_limit)
            );
        END IF;
    END IF;

    -- Sync parent artifact: size_bytes always tracks the newest version;
    -- content_type is only overwritten when the new version supplies one.
    UPDATE artifact
    SET size_bytes = NEW.size_bytes,
        content_type = COALESCE(NEW.content_type, content_type)
    WHERE id = NEW.artifact;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Fire once per inserted version row.
CREATE TRIGGER trg_enforce_artifact_retention
    AFTER INSERT ON artifact_version
    FOR EACH ROW
    EXECUTE FUNCTION enforce_artifact_retention();

COMMENT ON FUNCTION enforce_artifact_retention IS 'Enforces version-count retention policy and syncs size to parent artifact';
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
-- Migration: Convert key.value from TEXT to JSONB
|
||||||
|
--
|
||||||
|
-- This allows keys to store structured data (objects, arrays, numbers, booleans)
|
||||||
|
-- in addition to plain strings. Existing string values are wrapped in JSON string
|
||||||
|
-- literals so they remain valid and accessible.
|
||||||
|
--
|
||||||
|
-- Before: value TEXT NOT NULL (e.g., 'my-secret-token')
|
||||||
|
-- After: value JSONB NOT NULL (e.g., '"my-secret-token"' or '{"user":"admin","pass":"s3cret"}')
|
||||||
|
|
||||||
|
-- Step 1: Convert existing TEXT values to JSONB.
-- to_jsonb(text) wraps a plain string as a JSON string literal, e.g.:
--   'hello' -> '"hello"'
-- This preserves all existing values perfectly — encrypted values (base64 strings)
-- become JSON strings, and plain text values become JSON strings.
-- The column keeps its NOT NULL constraint; USING rewrites every existing row.
ALTER TABLE key
    ALTER COLUMN value TYPE JSONB
    USING to_jsonb(value);
|
||||||
348
docker/distributable/migrations/README.md
Normal file
348
docker/distributable/migrations/README.md
Normal file
@@ -0,0 +1,348 @@
|
|||||||
|
# Attune Database Migrations
|
||||||
|
|
||||||
|
This directory contains SQL migrations for the Attune automation platform database schema.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Migrations are numbered and executed in order. Each migration file is named with a timestamp prefix to ensure proper ordering:
|
||||||
|
|
||||||
|
```
|
||||||
|
YYYYMMDDHHMMSS_description.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
## Migration Files
|
||||||
|
|
||||||
|
The schema is organized into 5 logical migration files:
|
||||||
|
|
||||||
|
| File | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `20250101000001_initial_setup.sql` | Creates schema, service role, all enum types, and shared functions |
|
||||||
|
| `20250101000002_core_tables.sql` | Creates pack, runtime, worker, identity, permission_set, permission_assignment, policy, and key tables |
|
||||||
|
| `20250101000003_event_system.sql` | Creates trigger, sensor, event, and enforcement tables |
|
||||||
|
| `20250101000004_execution_system.sql` | Creates action, rule, execution, inquiry, workflow orchestration tables (workflow_definition, workflow_execution, workflow_task_execution), and workflow views |
|
||||||
|
| `20250101000005_supporting_tables.sql` | Creates notification, artifact, and queue_stats tables with performance indexes |
|
||||||
|
|
||||||
|
### Migration Dependencies
|
||||||
|
|
||||||
|
The migrations must be run in order due to foreign key dependencies:
|
||||||
|
|
||||||
|
1. **Initial Setup** - Foundation (schema, enums, functions)
|
||||||
|
2. **Core Tables** - Base entities (pack, runtime, worker, identity, permissions, policy, key)
|
||||||
|
3. **Event System** - Event monitoring (trigger, sensor, event, enforcement)
|
||||||
|
4. **Execution System** - Action execution (action, rule, execution, inquiry)
|
||||||
|
5. **Supporting Tables** - Auxiliary features (notification, artifact)
|
||||||
|
|
||||||
|
## Running Migrations
|
||||||
|
|
||||||
|
### Using SQLx CLI
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install sqlx-cli if not already installed
|
||||||
|
cargo install sqlx-cli --no-default-features --features postgres
|
||||||
|
|
||||||
|
# Run all pending migrations
|
||||||
|
sqlx migrate run
|
||||||
|
|
||||||
|
# Check migration status
|
||||||
|
sqlx migrate info
|
||||||
|
|
||||||
|
# Revert last migration (if needed)
|
||||||
|
sqlx migrate revert
|
||||||
|
```
|
||||||
|
|
||||||
|
### Manual Execution
|
||||||
|
|
||||||
|
You can also run migrations manually using `psql`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all migrations in order
|
||||||
|
for file in migrations/202501*.sql; do
|
||||||
|
psql -U postgres -d attune -f "$file"
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
Or individually:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
psql -U postgres -d attune -f migrations/20250101000001_initial_setup.sql
|
||||||
|
psql -U postgres -d attune -f migrations/20250101000002_core_tables.sql
|
||||||
|
# ... etc
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Setup
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
1. PostgreSQL 14 or later installed
|
||||||
|
2. Create the database:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
createdb attune
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Set environment variable:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/attune"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Initial Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Navigate to workspace root
|
||||||
|
cd /path/to/attune
|
||||||
|
|
||||||
|
# Run migrations
|
||||||
|
sqlx migrate run
|
||||||
|
|
||||||
|
# Verify tables were created
|
||||||
|
psql -U postgres -d attune -c "\dt attune.*"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Schema Overview
|
||||||
|
|
||||||
|
The Attune schema includes 22 tables organized into logical groups:
|
||||||
|
|
||||||
|
### Core Tables (Migration 2)
|
||||||
|
- **pack**: Automation component bundles
|
||||||
|
- **runtime**: Execution environments (Python, Node.js, containers)
|
||||||
|
- **worker**: Execution workers
|
||||||
|
- **identity**: Users and service accounts
|
||||||
|
- **permission_set**: Permission groups (like roles)
|
||||||
|
- **permission_assignment**: Identity-permission links (many-to-many)
|
||||||
|
- **policy**: Execution policies (rate limiting, concurrency)
|
||||||
|
- **key**: Secure configuration and secrets storage
|
||||||
|
|
||||||
|
### Event System (Migration 3)
|
||||||
|
- **trigger**: Event type definitions
|
||||||
|
- **sensor**: Event monitors that watch for triggers
|
||||||
|
- **event**: Event instances (trigger firings)
|
||||||
|
- **enforcement**: Rule activation instances
|
||||||
|
|
||||||
|
### Execution System (Migration 4)
|
||||||
|
- **action**: Executable operations (can be workflows)
|
||||||
|
- **rule**: Trigger-to-action automation logic
|
||||||
|
- **execution**: Action execution instances (supports workflows)
|
||||||
|
- **inquiry**: Human-in-the-loop interactions (approvals, inputs)
|
||||||
|
- **workflow_definition**: YAML-based workflow definitions (composable action graphs)
|
||||||
|
- **workflow_execution**: Runtime state tracking for workflow executions
|
||||||
|
- **workflow_task_execution**: Individual task executions within workflows
|
||||||
|
|
||||||
|
### Supporting Tables (Migration 5)
|
||||||
|
- **notification**: Real-time system notifications (uses PostgreSQL LISTEN/NOTIFY)
|
||||||
|
- **artifact**: Execution outputs (files, logs, progress data)
|
||||||
|
- **queue_stats**: Real-time execution queue statistics for FIFO ordering
|
||||||
|
|
||||||
|
## Key Features
|
||||||
|
|
||||||
|
### Automatic Timestamps
|
||||||
|
All tables include `created` and `updated` timestamps that are automatically managed by the `update_updated_column()` trigger function.
|
||||||
|
|
||||||
|
### Reference Preservation
|
||||||
|
Tables use both ID foreign keys and `*_ref` text columns. The ref columns preserve string references even when the referenced entity is deleted, maintaining complete audit trails.
|
||||||
|
|
||||||
|
### Soft Deletes
|
||||||
|
Foreign keys strategically use:
|
||||||
|
- `ON DELETE CASCADE` - For dependent data that should be removed
|
||||||
|
- `ON DELETE SET NULL` - To preserve historical records while breaking the link
|
||||||
|
|
||||||
|
### Validation Constraints
|
||||||
|
- **Reference format validation** - Lowercase, specific patterns (e.g., `pack.name`)
|
||||||
|
- **Semantic version validation** - For pack versions
|
||||||
|
- **Ownership validation** - Custom trigger for key table ownership rules
|
||||||
|
- **Range checks** - Port numbers, positive thresholds, etc.
|
||||||
|
|
||||||
|
### Performance Optimization
|
||||||
|
- **B-tree indexes** - On frequently queried columns (IDs, refs, status, timestamps)
|
||||||
|
- **Partial indexes** - For filtered queries (e.g., `enabled = TRUE`)
|
||||||
|
- **GIN indexes** - On JSONB and array columns for fast containment queries
|
||||||
|
- **Composite indexes** - For common multi-column query patterns
|
||||||
|
|
||||||
|
### PostgreSQL Features
|
||||||
|
- **JSONB** - Flexible schema storage for configurations, payloads, results
|
||||||
|
- **Array types** - Multi-value fields (tags, parameters, dependencies)
|
||||||
|
- **Custom enum types** - Constrained string values with type safety
|
||||||
|
- **Triggers** - Data validation, timestamp management, notifications
|
||||||
|
- **pg_notify** - Real-time notifications via PostgreSQL's LISTEN/NOTIFY
|
||||||
|
|
||||||
|
## Service Role
|
||||||
|
|
||||||
|
The migrations create a `svc_attune` role with appropriate permissions. **Change the password in production:**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ALTER ROLE svc_attune WITH PASSWORD 'secure_password_here';
|
||||||
|
```
|
||||||
|
|
||||||
|
The default password is `attune_service_password` (only for development).
|
||||||
|
|
||||||
|
## Rollback Strategy
|
||||||
|
|
||||||
|
### Complete Reset
|
||||||
|
|
||||||
|
To completely reset the database:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Drop and recreate
|
||||||
|
dropdb attune
|
||||||
|
createdb attune
|
||||||
|
sqlx migrate run
|
||||||
|
```
|
||||||
|
|
||||||
|
Or drop just the schema:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP SCHEMA attune CASCADE;
|
||||||
|
```
|
||||||
|
|
||||||
|
Then re-run migrations.
|
||||||
|
|
||||||
|
### Individual Migration Revert
|
||||||
|
|
||||||
|
With SQLx CLI:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sqlx migrate revert
|
||||||
|
```
|
||||||
|
|
||||||
|
Or manually remove from tracking:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DELETE FROM _sqlx_migrations WHERE version = 20250101000001;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
1. **Never edit existing migrations** - Create new migrations to modify schema
|
||||||
|
2. **Test migrations** - Always test on a copy of production data first
|
||||||
|
3. **Backup before migrating** - Backup production database before applying migrations
|
||||||
|
4. **Review changes** - Review all migrations before applying to production
|
||||||
|
5. **Version control** - Keep migrations in version control (they are!)
|
||||||
|
6. **Document changes** - Add comments to complex migrations
|
||||||
|
|
||||||
|
## Development Workflow
|
||||||
|
|
||||||
|
1. Create new migration file with timestamp:
|
||||||
|
```bash
|
||||||
|
touch migrations/$(date +%Y%m%d%H%M%S)_description.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Write migration SQL (follow existing patterns)
|
||||||
|
|
||||||
|
3. Test migration:
|
||||||
|
```bash
|
||||||
|
sqlx migrate run
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Verify changes:
|
||||||
|
```bash
|
||||||
|
psql -U postgres -d attune
|
||||||
|
\d+ attune.table_name
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Commit to version control
|
||||||
|
|
||||||
|
## Production Deployment
|
||||||
|
|
||||||
|
1. **Backup** production database
|
||||||
|
2. **Review** all pending migrations
|
||||||
|
3. **Test** migrations on staging environment with production data copy
|
||||||
|
4. **Schedule** maintenance window if needed
|
||||||
|
5. **Apply** migrations:
|
||||||
|
```bash
|
||||||
|
sqlx migrate run
|
||||||
|
```
|
||||||
|
6. **Verify** application functionality
|
||||||
|
7. **Monitor** for errors in logs
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Migration already applied
|
||||||
|
|
||||||
|
If you need to re-run a migration:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Remove from migration tracking (SQLx)
|
||||||
|
psql -U postgres -d attune -c "DELETE FROM _sqlx_migrations WHERE version = 20250101000001;"
|
||||||
|
|
||||||
|
# Then re-run
|
||||||
|
sqlx migrate run
|
||||||
|
```
|
||||||
|
|
||||||
|
### Permission denied
|
||||||
|
|
||||||
|
Ensure the PostgreSQL user has sufficient permissions:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT ALL PRIVILEGES ON DATABASE attune TO postgres;
|
||||||
|
GRANT ALL PRIVILEGES ON SCHEMA attune TO postgres;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Connection refused
|
||||||
|
|
||||||
|
Check PostgreSQL is running:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Linux/macOS
|
||||||
|
pg_ctl status
|
||||||
|
sudo systemctl status postgresql
|
||||||
|
|
||||||
|
# Check if listening
|
||||||
|
psql -U postgres -c "SELECT version();"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Foreign key constraint violations
|
||||||
|
|
||||||
|
Ensure migrations run in correct order. The consolidated migrations handle forward references correctly:
|
||||||
|
- Migration 2 creates tables with forward references (commented as such)
|
||||||
|
- Migration 3 and 4 add the foreign key constraints back
|
||||||
|
|
||||||
|
## Schema Diagram
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────┐
|
||||||
|
│ pack │◄──┐
|
||||||
|
└─────────────┘ │
|
||||||
|
▲ │
|
||||||
|
│ │
|
||||||
|
┌──────┴──────────┴──────┐
|
||||||
|
│ runtime │ trigger │ ... │ (Core entities reference pack)
|
||||||
|
└─────────┴─────────┴─────┘
|
||||||
|
▲ ▲
|
||||||
|
│ │
|
||||||
|
┌──────┴──────┐ │
|
||||||
|
│ sensor │──┘ (Sensors reference both runtime and trigger)
|
||||||
|
└─────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌─────────────┐ ┌──────────────┐
|
||||||
|
│ event │────►│ enforcement │ (Events trigger enforcements)
|
||||||
|
└─────────────┘ └──────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌──────────────┐
|
||||||
|
│ execution │ (Enforcements create executions)
|
||||||
|
└──────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Workflow Orchestration
|
||||||
|
|
||||||
|
Migration 4 includes comprehensive workflow orchestration support:
|
||||||
|
- **workflow_definition**: Stores parsed YAML workflow definitions with tasks, variables, and transitions
|
||||||
|
- **workflow_execution**: Tracks runtime state including current/completed/failed tasks and variables
|
||||||
|
- **workflow_task_execution**: Individual task execution tracking with retry and timeout support
|
||||||
|
- **Action table extensions**: `is_workflow` and `workflow_def` columns link actions to workflows
|
||||||
|
- **Helper views**: Three views for querying workflow state (summary, task detail, action links)
|
||||||
|
|
||||||
|
## Queue Statistics
|
||||||
|
|
||||||
|
Migration 5 includes the queue_stats table for execution ordering:
|
||||||
|
- Tracks per-action queue length, active executions, and concurrency limits
|
||||||
|
- Enables FIFO queue management with database persistence
|
||||||
|
- Supports monitoring and API visibility of execution queues
|
||||||
|
|
||||||
|
## Additional Resources
|
||||||
|
|
||||||
|
- [SQLx Documentation](https://github.com/launchbadge/sqlx)
|
||||||
|
- [PostgreSQL Documentation](https://www.postgresql.org/docs/)
|
||||||
|
- [Attune Architecture Documentation](../docs/architecture.md)
|
||||||
|
- [Attune Data Model Documentation](../docs/data-model.md)
|
||||||
270
docker/distributable/packs/core/DEPENDENCIES.md
Normal file
270
docker/distributable/packs/core/DEPENDENCIES.md
Normal file
@@ -0,0 +1,270 @@
|
|||||||
|
# Core Pack Dependencies
|
||||||
|
|
||||||
|
**Philosophy:** The core pack has **zero runtime dependencies** beyond standard system utilities.
|
||||||
|
|
||||||
|
## Why Zero Dependencies?
|
||||||
|
|
||||||
|
1. **Portability:** Works in any environment with standard Unix utilities
|
||||||
|
2. **Reliability:** No version conflicts, no package installation failures
|
||||||
|
3. **Security:** Minimal attack surface, no third-party library vulnerabilities
|
||||||
|
4. **Performance:** Fast startup, no runtime initialization overhead
|
||||||
|
5. **Simplicity:** Easy to audit, test, and maintain
|
||||||
|
|
||||||
|
## Required System Utilities
|
||||||
|
|
||||||
|
All core pack actions rely only on utilities available in standard Linux/Unix environments:
|
||||||
|
|
||||||
|
| Utility | Purpose | Used By |
|
||||||
|
|---------|---------|---------|
|
||||||
|
| `bash` | Shell scripting | All shell actions |
|
||||||
|
| `jq` | JSON parsing/generation | All actions (parameter handling) |
|
||||||
|
| `curl` | HTTP client | `http_request.sh` |
|
||||||
|
| Standard Unix tools | Text processing, file operations | Various actions |
|
||||||
|
|
||||||
|
These utilities are:
|
||||||
|
- ✅ Pre-installed in all Attune worker containers
|
||||||
|
- ✅ Standard across Linux distributions
|
||||||
|
- ✅ Stable, well-tested, and widely used
|
||||||
|
- ✅ Available via package managers if needed
|
||||||
|
|
||||||
|
## No Runtime Dependencies
|
||||||
|
|
||||||
|
The core pack **does not require:**
|
||||||
|
- ❌ Python interpreter or packages
|
||||||
|
- ❌ Node.js runtime or npm modules
|
||||||
|
- ❌ Ruby, Perl, or other scripting languages
|
||||||
|
- ❌ Third-party libraries or frameworks
|
||||||
|
- ❌ Package installations at runtime
|
||||||
|
|
||||||
|
## Action Implementation Guidelines
|
||||||
|
|
||||||
|
### ✅ Preferred Approaches
|
||||||
|
|
||||||
|
**Use bash + standard utilities:**
|
||||||
|
```bash
|
||||||
|
#!/bin/bash
|
||||||
|
# Read params with jq
|
||||||
|
INPUT=$(cat)
|
||||||
|
PARAM=$(echo "$INPUT" | jq -r '.param // "default"')
|
||||||
|
|
||||||
|
# Process with standard tools
|
||||||
|
RESULT=$(echo "$PARAM" | tr '[:lower:]' '[:upper:]')
|
||||||
|
|
||||||
|
# Output with jq
|
||||||
|
jq -n --arg result "$RESULT" '{result: $result}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Use curl for HTTP:**
|
||||||
|
```bash
|
||||||
|
# Make HTTP requests with curl
|
||||||
|
curl -s -X POST "$URL" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{"key": "value"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Use jq for JSON processing:**
|
||||||
|
```bash
|
||||||
|
# Parse JSON responses
|
||||||
|
echo "$RESPONSE" | jq '.data.items[] | .name'
|
||||||
|
|
||||||
|
# Generate JSON output
|
||||||
|
jq -n \
|
||||||
|
--arg status "success" \
|
||||||
|
--argjson count 42 \
|
||||||
|
'{status: $status, count: $count}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### ❌ Avoid
|
||||||
|
|
||||||
|
**Don't add runtime dependencies:**
|
||||||
|
```bash
|
||||||
|
# ❌ DON'T DO THIS
|
||||||
|
pip install requests
|
||||||
|
python3 script.py
|
||||||
|
|
||||||
|
# ❌ DON'T DO THIS
|
||||||
|
npm install axios
|
||||||
|
node script.js
|
||||||
|
|
||||||
|
# ❌ DON'T DO THIS
|
||||||
|
gem install httparty
|
||||||
|
ruby script.rb
|
||||||
|
```
|
||||||
|
|
||||||
|
**Don't use language-specific features:**
|
||||||
|
```python
|
||||||
|
# ❌ DON'T DO THIS in core pack
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
import requests # External dependency!
|
||||||
|
response = requests.get(url)
|
||||||
|
```
|
||||||
|
|
||||||
|
Instead, use bash + curl:
|
||||||
|
```bash
|
||||||
|
# ✅ DO THIS in core pack
|
||||||
|
#!/bin/bash
|
||||||
|
response=$(curl -s "$url")
|
||||||
|
```
|
||||||
|
|
||||||
|
## When Runtime Dependencies Are Acceptable
|
||||||
|
|
||||||
|
For **custom packs** (not core pack), runtime dependencies are fine:
|
||||||
|
- ✅ Pack-specific Python libraries (installed in pack virtualenv)
|
||||||
|
- ✅ Pack-specific npm modules (installed in pack node_modules)
|
||||||
|
- ✅ Language runtimes (Python, Node.js) for complex logic
|
||||||
|
- ✅ Specialized tools for specific integrations
|
||||||
|
|
||||||
|
The core pack serves as a foundation with zero dependencies. Custom packs can have dependencies managed via:
|
||||||
|
- `requirements.txt` for Python packages
|
||||||
|
- `package.json` for Node.js modules
|
||||||
|
- Pack runtime environments (isolated per pack)
|
||||||
|
|
||||||
|
## Migration from Runtime Dependencies
|
||||||
|
|
||||||
|
If an action currently uses a runtime dependency, consider:
|
||||||
|
|
||||||
|
1. **Can it be done with bash + standard utilities?**
|
||||||
|
- Yes → Rewrite in bash
|
||||||
|
- No → Consider if it belongs in core pack
|
||||||
|
|
||||||
|
2. **Is the functionality complex?**
|
||||||
|
- Simple HTTP/JSON → Use curl + jq
|
||||||
|
- Complex API client → Move to custom pack
|
||||||
|
|
||||||
|
3. **Is it a specialized integration?**
|
||||||
|
- Yes → Move to integration-specific pack
|
||||||
|
- No → Keep in core pack with bash implementation
|
||||||
|
|
||||||
|
### Example: http_request Migration
|
||||||
|
|
||||||
|
**Before (Python with dependency):**
|
||||||
|
```python
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
import requests # ❌ External dependency
|
||||||
|
|
||||||
|
response = requests.get(url, headers=headers)
|
||||||
|
print(response.json())
|
||||||
|
```
|
||||||
|
|
||||||
|
**After (Bash with standard utilities):**
|
||||||
|
```bash
|
||||||
|
#!/bin/bash
|
||||||
|
# ✅ No dependencies beyond curl + jq
|
||||||
|
|
||||||
|
response=$(curl -s -H "Authorization: Bearer $TOKEN" "$URL")
|
||||||
|
echo "$response" | jq '.'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing Without Dependencies
|
||||||
|
|
||||||
|
Core pack actions can be tested anywhere with standard utilities:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Local testing (no installation needed)
|
||||||
|
echo '{"param": "value"}' | ./action.sh
|
||||||
|
|
||||||
|
# Docker testing (minimal base image)
|
||||||
|
docker run --rm -i -v "$PWD":/work -w /work alpine:latest sh -c '
  apk add --no-cache bash jq curl &&
  bash ./action.sh < test-params.json
'
|
||||||
|
|
||||||
|
# CI/CD testing (standard tools available)
|
||||||
|
./action.sh < test-params.json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benefits Realized
|
||||||
|
|
||||||
|
### For Developers
|
||||||
|
- No dependency management overhead
|
||||||
|
- Immediate action execution (no runtime setup)
|
||||||
|
- Easy to test locally
|
||||||
|
- Simple to audit and debug
|
||||||
|
|
||||||
|
### For Operators
|
||||||
|
- No version conflicts between packs
|
||||||
|
- No package installation failures
|
||||||
|
- Faster container startup
|
||||||
|
- Smaller container images
|
||||||
|
|
||||||
|
### For Security
|
||||||
|
- Minimal attack surface
|
||||||
|
- No third-party library vulnerabilities
|
||||||
|
- Easier to audit (standard tools only)
|
||||||
|
- No supply chain risks
|
||||||
|
|
||||||
|
### For Performance
|
||||||
|
- Fast action startup (no runtime initialization)
|
||||||
|
- Low memory footprint
|
||||||
|
- No package loading overhead
|
||||||
|
- Efficient resource usage
|
||||||
|
|
||||||
|
## Standard Utility Reference
|
||||||
|
|
||||||
|
### jq (JSON Processing)
|
||||||
|
```bash
|
||||||
|
# Parse input
|
||||||
|
VALUE=$(echo "$JSON" | jq -r '.key')
|
||||||
|
|
||||||
|
# Generate output
|
||||||
|
jq -n --arg val "$VALUE" '{result: $val}'
|
||||||
|
|
||||||
|
# Transform data
|
||||||
|
echo "$JSON" | jq '.items[] | select(.active)'
|
||||||
|
```
|
||||||
|
|
||||||
|
### curl (HTTP Client)
|
||||||
|
```bash
|
||||||
|
# GET request
|
||||||
|
curl -s "$URL"
|
||||||
|
|
||||||
|
# POST with JSON
|
||||||
|
curl -s -X POST "$URL" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{"key": "value"}'
|
||||||
|
|
||||||
|
# With authentication
|
||||||
|
curl -s -H "Authorization: Bearer $TOKEN" "$URL"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Standard Text Tools
|
||||||
|
```bash
|
||||||
|
# grep - Pattern matching
|
||||||
|
echo "$TEXT" | grep "pattern"
|
||||||
|
|
||||||
|
# sed - Text transformation
|
||||||
|
echo "$TEXT" | sed 's/old/new/g'
|
||||||
|
|
||||||
|
# awk - Text processing
|
||||||
|
echo "$TEXT" | awk '{print $1}'
|
||||||
|
|
||||||
|
# tr - Character translation
|
||||||
|
echo "$TEXT" | tr '[:lower:]' '[:upper:]'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Future Considerations
|
||||||
|
|
||||||
|
The core pack will:
|
||||||
|
- ✅ Continue to have zero runtime dependencies
|
||||||
|
- ✅ Use only standard Unix utilities
|
||||||
|
- ✅ Serve as a reference implementation
|
||||||
|
- ✅ Provide foundational actions for workflows
|
||||||
|
|
||||||
|
Custom packs may:
|
||||||
|
- ✅ Have runtime dependencies (Python, Node.js, etc.)
|
||||||
|
- ✅ Use specialized libraries for integrations
|
||||||
|
- ✅ Require specific tools or SDKs
|
||||||
|
- ✅ Manage dependencies via pack environments
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
**Core Pack = Zero Dependencies + Standard Utilities**
|
||||||
|
|
||||||
|
This philosophy ensures the core pack is:
|
||||||
|
- Portable across all environments
|
||||||
|
- Reliable without version conflicts
|
||||||
|
- Secure with minimal attack surface
|
||||||
|
- Performant with fast startup
|
||||||
|
- Simple to test and maintain
|
||||||
|
|
||||||
|
For actions requiring runtime dependencies, create custom packs with proper dependency management via `requirements.txt`, `package.json`, or similar mechanisms.
|
||||||
361
docker/distributable/packs/core/README.md
Normal file
361
docker/distributable/packs/core/README.md
Normal file
@@ -0,0 +1,361 @@
|
|||||||
|
# Attune Core Pack
|
||||||
|
|
||||||
|
The **Core Pack** is the foundational system pack for Attune, providing essential automation components including timer triggers, HTTP utilities, and basic shell actions.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The core pack is automatically installed with Attune and provides the building blocks for creating automation workflows. It includes:
|
||||||
|
|
||||||
|
- **Timer Triggers**: Interval-based, cron-based, and one-shot datetime timers
|
||||||
|
- **HTTP Actions**: Make HTTP requests to external APIs
|
||||||
|
- **Shell Actions**: Execute basic shell commands (echo, sleep, noop)
|
||||||
|
- **Built-in Sensors**: System sensors for monitoring time-based events
|
||||||
|
|
||||||
|
## Components
|
||||||
|
|
||||||
|
### Actions
|
||||||
|
|
||||||
|
#### `core.echo`
|
||||||
|
Outputs a message to stdout.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `message` (string, required): Message to echo
|
||||||
|
- `uppercase` (boolean, optional): Convert message to uppercase
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```yaml
|
||||||
|
action: core.echo
|
||||||
|
parameters:
|
||||||
|
message: "Hello, Attune!"
|
||||||
|
uppercase: false
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### `core.sleep`
|
||||||
|
Pauses execution for a specified duration.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `seconds` (integer, required): Number of seconds to sleep (0-3600)
|
||||||
|
- `message` (string, optional): Optional message to display before sleeping
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```yaml
|
||||||
|
action: core.sleep
|
||||||
|
parameters:
|
||||||
|
seconds: 30
|
||||||
|
message: "Waiting 30 seconds..."
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### `core.noop`
|
||||||
|
Does nothing - useful for testing and placeholder workflows.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `message` (string, optional): Optional message to log
|
||||||
|
- `exit_code` (integer, optional): Exit code to return (default: 0)
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```yaml
|
||||||
|
action: core.noop
|
||||||
|
parameters:
|
||||||
|
message: "Testing workflow structure"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### `core.http_request`
|
||||||
|
Make HTTP requests to external APIs with full control over headers, authentication, and request body.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `url` (string, required): URL to send the request to
|
||||||
|
- `method` (string, optional): HTTP method (GET, POST, PUT, PATCH, DELETE, HEAD, OPTIONS)
|
||||||
|
- `headers` (object, optional): HTTP headers as key-value pairs
|
||||||
|
- `body` (string, optional): Request body for POST/PUT/PATCH
|
||||||
|
- `json_body` (object, optional): JSON request body (alternative to `body`)
|
||||||
|
- `query_params` (object, optional): URL query parameters
|
||||||
|
- `timeout` (integer, optional): Request timeout in seconds (default: 30)
|
||||||
|
- `verify_ssl` (boolean, optional): Verify SSL certificates (default: true)
|
||||||
|
- `auth_type` (string, optional): Authentication type (none, basic, bearer)
|
||||||
|
- `auth_username` (string, optional): Username for basic auth
|
||||||
|
- `auth_password` (string, secret, optional): Password for basic auth
|
||||||
|
- `auth_token` (string, secret, optional): Bearer token
|
||||||
|
- `follow_redirects` (boolean, optional): Follow HTTP redirects (default: true)
|
||||||
|
- `max_redirects` (integer, optional): Maximum redirects to follow (default: 10)
|
||||||
|
|
||||||
|
**Output:**
|
||||||
|
- `status_code` (integer): HTTP status code
|
||||||
|
- `headers` (object): Response headers
|
||||||
|
- `body` (string): Response body as text
|
||||||
|
- `json` (object): Parsed JSON response (if applicable)
|
||||||
|
- `elapsed_ms` (integer): Request duration in milliseconds
|
||||||
|
- `url` (string): Final URL after redirects
|
||||||
|
- `success` (boolean): Whether request was successful (2xx status)
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```yaml
|
||||||
|
action: core.http_request
|
||||||
|
parameters:
|
||||||
|
url: "https://api.example.com/users"
|
||||||
|
method: "POST"
|
||||||
|
json_body:
|
||||||
|
name: "John Doe"
|
||||||
|
email: "john@example.com"
|
||||||
|
headers:
|
||||||
|
Content-Type: "application/json"
|
||||||
|
auth_type: "bearer"
|
||||||
|
auth_token: "${secret:api_token}"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Triggers
|
||||||
|
|
||||||
|
#### `core.intervaltimer`
|
||||||
|
Fires at regular intervals based on time unit and interval.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `unit` (string, required): Time unit (seconds, minutes, hours)
|
||||||
|
- `interval` (integer, required): Number of time units between triggers
|
||||||
|
|
||||||
|
**Payload:**
|
||||||
|
- `type`: "interval"
|
||||||
|
- `interval_seconds`: Total interval in seconds
|
||||||
|
- `fired_at`: ISO 8601 timestamp
|
||||||
|
- `execution_count`: Number of times fired
|
||||||
|
- `sensor_ref`: Reference to the sensor
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```yaml
|
||||||
|
trigger: core.intervaltimer
|
||||||
|
config:
|
||||||
|
unit: "minutes"
|
||||||
|
interval: 5
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### `core.crontimer`
|
||||||
|
Fires based on cron schedule expressions.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `expression` (string, required): Cron expression (6 fields: second minute hour day month weekday)
|
||||||
|
- `timezone` (string, optional): Timezone (default: UTC)
|
||||||
|
- `description` (string, optional): Human-readable schedule description
|
||||||
|
|
||||||
|
**Payload:**
|
||||||
|
- `type`: "cron"
|
||||||
|
- `fired_at`: ISO 8601 timestamp
|
||||||
|
- `scheduled_at`: When trigger was scheduled to fire
|
||||||
|
- `expression`: The cron expression
|
||||||
|
- `timezone`: Timezone used
|
||||||
|
- `next_fire_at`: Next scheduled fire time
|
||||||
|
- `execution_count`: Number of times fired
|
||||||
|
- `sensor_ref`: Reference to the sensor
|
||||||
|
|
||||||
|
**Cron Format:**
|
||||||
|
```
|
||||||
|
┌───────── second (0-59)
|
||||||
|
│ ┌─────── minute (0-59)
|
||||||
|
│ │ ┌───── hour (0-23)
|
||||||
|
│ │ │ ┌─── day of month (1-31)
|
||||||
|
│ │ │ │ ┌─ month (1-12)
|
||||||
|
│ │ │ │ │ ┌ day of week (0-6, 0=Sunday)
|
||||||
|
│ │ │ │ │ │
|
||||||
|
* * * * * *
|
||||||
|
```
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
- `0 0 * * * *` - Every hour
|
||||||
|
- `0 0 0 * * *` - Every day at midnight
|
||||||
|
- `0 */15 * * * *` - Every 15 minutes
|
||||||
|
- `0 30 8 * * 1-5` - 8:30 AM on weekdays
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### `core.datetimetimer`
|
||||||
|
Fires once at a specific date and time.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `fire_at` (string, required): ISO 8601 timestamp when timer should fire
|
||||||
|
- `timezone` (string, optional): Timezone (default: UTC)
|
||||||
|
- `description` (string, optional): Human-readable description
|
||||||
|
|
||||||
|
**Payload:**
|
||||||
|
- `type`: "one_shot"
|
||||||
|
- `fire_at`: Scheduled fire time
|
||||||
|
- `fired_at`: Actual fire time
|
||||||
|
- `timezone`: Timezone used
|
||||||
|
- `delay_ms`: Delay between scheduled and actual fire time
|
||||||
|
- `sensor_ref`: Reference to the sensor
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```yaml
|
||||||
|
trigger: core.datetimetimer
|
||||||
|
config:
|
||||||
|
fire_at: "2024-12-31T23:59:59Z"
|
||||||
|
description: "New Year's countdown"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Sensors
|
||||||
|
|
||||||
|
#### `core.interval_timer_sensor`
|
||||||
|
Built-in sensor that monitors time and fires interval timer triggers.
|
||||||
|
|
||||||
|
**Configuration:**
|
||||||
|
- `check_interval_seconds` (integer, optional): How often to check triggers (default: 1)
|
||||||
|
|
||||||
|
This sensor automatically runs as part of the Attune sensor service and manages all interval timer trigger instances.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
The core pack supports the following configuration options:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# config.yaml
|
||||||
|
packs:
|
||||||
|
core:
|
||||||
|
max_action_timeout: 300 # Maximum action timeout in seconds
|
||||||
|
enable_debug_logging: false # Enable debug logging
|
||||||
|
```
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
### Python Dependencies
|
||||||
|
- `requests>=2.28.0` - For HTTP request action
|
||||||
|
- `croniter>=1.4.0` - For cron timer parsing (future)
|
||||||
|
|
||||||
|
### Runtime Dependencies
|
||||||
|
- Shell (bash/sh) - For shell-based actions
|
||||||
|
- Python 3.8+ - For Python-based actions and sensors
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
The core pack is automatically installed with Attune. No manual installation is required.
|
||||||
|
|
||||||
|
To verify the core pack is loaded:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Using CLI
|
||||||
|
attune pack list | grep core
|
||||||
|
|
||||||
|
# Using API
|
||||||
|
curl http://localhost:8080/api/v1/packs/core
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage Examples
|
||||||
|
|
||||||
|
### Example 1: Echo Every 10 Seconds
|
||||||
|
|
||||||
|
Create a rule that echoes "Hello, World!" every 10 seconds:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
ref: core.hello_world_rule
|
||||||
|
trigger: core.intervaltimer
|
||||||
|
trigger_config:
|
||||||
|
unit: "seconds"
|
||||||
|
interval: 10
|
||||||
|
action: core.echo
|
||||||
|
action_params:
|
||||||
|
message: "Hello, World!"
|
||||||
|
uppercase: false
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 2: HTTP Health Check Every 5 Minutes
|
||||||
|
|
||||||
|
Monitor an API endpoint every 5 minutes:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
ref: core.health_check_rule
|
||||||
|
trigger: core.intervaltimer
|
||||||
|
trigger_config:
|
||||||
|
unit: "minutes"
|
||||||
|
interval: 5
|
||||||
|
action: core.http_request
|
||||||
|
action_params:
|
||||||
|
url: "https://api.example.com/health"
|
||||||
|
method: "GET"
|
||||||
|
timeout: 10
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 3: Daily Report at Midnight
|
||||||
|
|
||||||
|
Generate a report every day at midnight:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
ref: core.daily_report_rule
|
||||||
|
trigger: core.crontimer
|
||||||
|
trigger_config:
|
||||||
|
expression: "0 0 0 * * *"
|
||||||
|
timezone: "UTC"
|
||||||
|
description: "Daily at midnight"
|
||||||
|
action: core.http_request
|
||||||
|
action_params:
|
||||||
|
url: "https://api.example.com/reports/generate"
|
||||||
|
method: "POST"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 4: One-Time Reminder
|
||||||
|
|
||||||
|
Set a one-time reminder for a specific date and time:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
ref: core.meeting_reminder
|
||||||
|
trigger: core.datetimetimer
|
||||||
|
trigger_config:
|
||||||
|
fire_at: "2024-06-15T14:00:00Z"
|
||||||
|
description: "Team meeting reminder"
|
||||||
|
action: core.echo
|
||||||
|
action_params:
|
||||||
|
message: "Team meeting starts in 15 minutes!"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
### Adding New Actions
|
||||||
|
|
||||||
|
1. Create action metadata file: `actions/<action_name>.yaml`
|
||||||
|
2. Create action implementation: `actions/<action_name>.sh` or `actions/<action_name>.py`
|
||||||
|
3. Make script executable: `chmod +x actions/<action_name>.sh`
|
||||||
|
4. Update pack manifest if needed
|
||||||
|
5. Test the action
|
||||||
|
|
||||||
|
### Testing Actions Locally
|
||||||
|
|
||||||
|
Test actions directly by setting environment variables:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test echo action
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Test message"
|
||||||
|
export ATTUNE_ACTION_UPPERCASE=true
|
||||||
|
./actions/echo.sh
|
||||||
|
|
||||||
|
# Test HTTP request action
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/get"
|
||||||
|
export ATTUNE_ACTION_METHOD="GET"
|
||||||
|
python3 actions/http_request.py
|
||||||
|
```
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
The core pack is part of the Attune project. Contributions are welcome!
|
||||||
|
|
||||||
|
1. Follow the existing code style and structure
|
||||||
|
2. Add tests for new actions/sensors
|
||||||
|
3. Update documentation
|
||||||
|
4. Submit a pull request
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
The core pack is licensed under the same license as Attune.
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
- Documentation: https://docs.attune.io/packs/core
|
||||||
|
- Issues: https://github.com/attune-io/attune/issues
|
||||||
|
- Discussions: https://github.com/attune-io/attune/discussions
|
||||||
305
docker/distributable/packs/core/SETUP.md
Normal file
305
docker/distributable/packs/core/SETUP.md
Normal file
@@ -0,0 +1,305 @@
|
|||||||
|
# Core Pack Setup Guide
|
||||||
|
|
||||||
|
This guide explains how to set up and load the Attune core pack into your database.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The **core pack** is Attune's built-in system pack that provides essential automation components including:
|
||||||
|
|
||||||
|
- **Timer Triggers**: Interval-based, cron-based, and datetime triggers
|
||||||
|
- **Basic Actions**: Echo, sleep, noop, and HTTP request actions
|
||||||
|
- **Built-in Sensors**: Interval timer sensor for time-based automation
|
||||||
|
|
||||||
|
The core pack must be loaded into the database before it can be used in rules and workflows.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Before loading the core pack, ensure:
|
||||||
|
|
||||||
|
1. **PostgreSQL is running** and accessible
|
||||||
|
2. **Database migrations are applied**: `sqlx migrate run`
|
||||||
|
3. **Python 3.8+** is installed (for the loader script)
|
||||||
|
4. **Required Python packages** are installed:
|
||||||
|
```bash
|
||||||
|
pip install psycopg2-binary pyyaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Loading Methods
|
||||||
|
|
||||||
|
### Method 1: Python Loader Script (Recommended)
|
||||||
|
|
||||||
|
The Python loader script reads the pack YAML files and creates database entries automatically.
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
```bash
|
||||||
|
# From the project root
|
||||||
|
python3 scripts/load_core_pack.py
|
||||||
|
|
||||||
|
# With custom database URL
|
||||||
|
python3 scripts/load_core_pack.py --database-url "postgresql://user:pass@localhost:5432/attune"
|
||||||
|
|
||||||
|
# With custom pack directory
|
||||||
|
python3 scripts/load_core_pack.py --pack-dir ./packs
|
||||||
|
```
|
||||||
|
|
||||||
|
**What it does:**
|
||||||
|
- Reads `pack.yaml` for pack metadata
|
||||||
|
- Loads all trigger definitions from `triggers/*.yaml`
|
||||||
|
- Loads all action definitions from `actions/*.yaml`
|
||||||
|
- Loads all sensor definitions from `sensors/*.yaml`
|
||||||
|
- Creates or updates database entries (idempotent)
|
||||||
|
- Uses transactions (all-or-nothing)
|
||||||
|
|
||||||
|
**Output:**
|
||||||
|
```
|
||||||
|
============================================================
|
||||||
|
Core Pack Loader
|
||||||
|
============================================================
|
||||||
|
|
||||||
|
→ Loading pack metadata...
|
||||||
|
✓ Pack 'core' loaded (ID: 1)
|
||||||
|
|
||||||
|
→ Loading triggers...
|
||||||
|
✓ Trigger 'core.intervaltimer' (ID: 1)
|
||||||
|
✓ Trigger 'core.crontimer' (ID: 2)
|
||||||
|
✓ Trigger 'core.datetimetimer' (ID: 3)
|
||||||
|
|
||||||
|
→ Loading actions...
|
||||||
|
✓ Action 'core.echo' (ID: 1)
|
||||||
|
✓ Action 'core.sleep' (ID: 2)
|
||||||
|
✓ Action 'core.noop' (ID: 3)
|
||||||
|
✓ Action 'core.http_request' (ID: 4)
|
||||||
|
|
||||||
|
→ Loading sensors...
|
||||||
|
✓ Sensor 'core.interval_timer_sensor' (ID: 1)
|
||||||
|
|
||||||
|
============================================================
|
||||||
|
✓ Core pack loaded successfully!
|
||||||
|
============================================================
|
||||||
|
Pack ID: 1
|
||||||
|
Triggers: 3
|
||||||
|
Actions: 4
|
||||||
|
Sensors: 1
|
||||||
|
```
|
||||||
|
|
||||||
|
### Method 2: SQL Seed Script
|
||||||
|
|
||||||
|
For simpler setups or CI/CD, you can use the SQL seed script directly.
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
```bash
|
||||||
|
psql $DATABASE_URL -f scripts/seed_core_pack.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** The SQL script may not include all pack metadata and is less flexible than the Python loader.
|
||||||
|
|
||||||
|
### Method 3: CLI (Future)
|
||||||
|
|
||||||
|
Once the CLI pack management commands are fully implemented:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
attune pack register ./packs/core
|
||||||
|
```
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
After loading, verify the core pack is available:
|
||||||
|
|
||||||
|
### Using CLI
|
||||||
|
```bash
|
||||||
|
# List all packs
|
||||||
|
attune pack list
|
||||||
|
|
||||||
|
# Show core pack details
|
||||||
|
attune pack show core
|
||||||
|
|
||||||
|
# List core pack actions
|
||||||
|
attune action list --pack core
|
||||||
|
|
||||||
|
# List core pack triggers
|
||||||
|
attune trigger list --pack core
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using API
|
||||||
|
```bash
|
||||||
|
# Get pack info
|
||||||
|
curl http://localhost:8080/api/v1/packs/core | jq
|
||||||
|
|
||||||
|
# List actions
|
||||||
|
curl http://localhost:8080/api/v1/packs/core/actions | jq
|
||||||
|
|
||||||
|
# List triggers
|
||||||
|
curl http://localhost:8080/api/v1/packs/core/triggers | jq
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using Database
|
||||||
|
```sql
|
||||||
|
-- Check pack exists
|
||||||
|
SELECT * FROM attune.pack WHERE ref = 'core';
|
||||||
|
|
||||||
|
-- Count components
|
||||||
|
SELECT
|
||||||
|
(SELECT COUNT(*) FROM attune.trigger WHERE pack_ref = 'core') as triggers,
|
||||||
|
(SELECT COUNT(*) FROM attune.action WHERE pack_ref = 'core') as actions,
|
||||||
|
(SELECT COUNT(*) FROM attune.sensor WHERE pack_ref = 'core') as sensors;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing the Core Pack
|
||||||
|
|
||||||
|
### 1. Test Actions Directly
|
||||||
|
|
||||||
|
Test actions using environment variables:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test echo action
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Hello, Attune!"
|
||||||
|
export ATTUNE_ACTION_UPPERCASE=false
|
||||||
|
./packs/core/actions/echo.sh
|
||||||
|
|
||||||
|
# Test sleep action
|
||||||
|
export ATTUNE_ACTION_SECONDS=2
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Sleeping..."
|
||||||
|
./packs/core/actions/sleep.sh
|
||||||
|
|
||||||
|
# Test HTTP request action
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/get"
|
||||||
|
export ATTUNE_ACTION_METHOD="GET"
|
||||||
|
python3 packs/core/actions/http_request.py
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Run Pack Test Suite
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run comprehensive test suite
|
||||||
|
./packs/core/test_core_pack.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Create a Test Rule
|
||||||
|
|
||||||
|
Create a simple rule to test the core pack integration:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create a rule that echoes every 10 seconds
|
||||||
|
attune rule create \
|
||||||
|
--name "test_timer_echo" \
|
||||||
|
--trigger "core.intervaltimer" \
|
||||||
|
--trigger-config '{"unit":"seconds","interval":10}' \
|
||||||
|
--action "core.echo" \
|
||||||
|
--action-params '{"message":"Timer triggered!"}' \
|
||||||
|
--enabled
|
||||||
|
```
|
||||||
|
|
||||||
|
## Updating the Core Pack
|
||||||
|
|
||||||
|
To update the core pack after making changes:
|
||||||
|
|
||||||
|
1. Edit the relevant YAML files in `packs/core/`
|
||||||
|
2. Re-run the loader script:
|
||||||
|
```bash
|
||||||
|
python3 scripts/load_core_pack.py
|
||||||
|
```
|
||||||
|
3. The loader will update existing entries (upsert)
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### "Failed to connect to database"
|
||||||
|
- Verify PostgreSQL is running: `pg_isready`
|
||||||
|
- Check `DATABASE_URL` environment variable
|
||||||
|
- Test connection: `psql $DATABASE_URL -c "SELECT 1"`
|
||||||
|
|
||||||
|
### "pack.yaml not found"
|
||||||
|
- Ensure you're running from the project root
|
||||||
|
- Check the `--pack-dir` argument points to the correct directory
|
||||||
|
- Verify `packs/core/pack.yaml` exists
|
||||||
|
|
||||||
|
### "ModuleNotFoundError: No module named 'psycopg2'"
|
||||||
|
```bash
|
||||||
|
pip install psycopg2-binary pyyaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### "Pack loaded but not visible in API"
|
||||||
|
- Restart the API service to reload pack data
|
||||||
|
- Check pack is enabled: `SELECT enabled FROM attune.pack WHERE ref = 'core'`
|
||||||
|
|
||||||
|
### Actions not executing
|
||||||
|
- Verify action scripts are executable: `chmod +x packs/core/actions/*.sh`
|
||||||
|
- Check worker service is running and can access the packs directory
|
||||||
|
- Verify runtime configuration is correct
|
||||||
|
|
||||||
|
## Development Workflow
|
||||||
|
|
||||||
|
When developing new core pack components:
|
||||||
|
|
||||||
|
1. **Add new action:**
|
||||||
|
- Create `actions/new_action.yaml` with metadata
|
||||||
|
- Create `actions/new_action.sh` (or `.py`) with implementation
|
||||||
|
- Make script executable: `chmod +x actions/new_action.sh`
|
||||||
|
   - Test locally: set the required `ATTUNE_ACTION_*` environment variables (e.g. `export ATTUNE_ACTION_MESSAGE="hi"`), then run `./actions/new_action.sh`
|
||||||
|
- Load into database: `python3 scripts/load_core_pack.py`
|
||||||
|
|
||||||
|
2. **Add new trigger:**
|
||||||
|
- Create `triggers/new_trigger.yaml` with metadata
|
||||||
|
- Load into database: `python3 scripts/load_core_pack.py`
|
||||||
|
- Create sensor if needed
|
||||||
|
|
||||||
|
3. **Add new sensor:**
|
||||||
|
- Create `sensors/new_sensor.yaml` with metadata
|
||||||
|
- Create `sensors/new_sensor.py` with implementation
|
||||||
|
- Load into database: `python3 scripts/load_core_pack.py`
|
||||||
|
- Restart sensor service
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
The loader script supports the following environment variables:
|
||||||
|
|
||||||
|
- `DATABASE_URL` - PostgreSQL connection string
|
||||||
|
- Default: `postgresql://postgres:postgres@localhost:5432/attune`
|
||||||
|
- Example: `postgresql://user:pass@db.example.com:5432/attune`
|
||||||
|
|
||||||
|
- `ATTUNE_PACKS_DIR` - Base directory for packs
|
||||||
|
- Default: `./packs`
|
||||||
|
- Example: `/opt/attune/packs`
|
||||||
|
|
||||||
|
## CI/CD Integration
|
||||||
|
|
||||||
|
For automated deployments:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Example GitHub Actions workflow
|
||||||
|
- name: Load Core Pack
|
||||||
|
run: |
|
||||||
|
python3 scripts/load_core_pack.py \
|
||||||
|
--database-url "${{ secrets.DATABASE_URL }}"
|
||||||
|
env:
|
||||||
|
DATABASE_URL: ${{ secrets.DATABASE_URL }}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
After loading the core pack:
|
||||||
|
|
||||||
|
1. **Create your first rule** using core triggers and actions
|
||||||
|
2. **Enable sensors** to start generating events
|
||||||
|
3. **Monitor executions** via the API or Web UI
|
||||||
|
4. **Explore pack documentation** in `README.md`
|
||||||
|
|
||||||
|
## Additional Resources
|
||||||
|
|
||||||
|
- **Pack README**: `packs/core/README.md` - Comprehensive component documentation
|
||||||
|
- **Testing Guide**: `packs/core/TESTING.md` - Testing procedures
|
||||||
|
- **API Documentation**: `docs/api-packs.md` - Pack management API
|
||||||
|
- **Action Development**: `docs/action-development.md` - Creating custom actions
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
If you encounter issues:
|
||||||
|
|
||||||
|
1. Check this troubleshooting section
|
||||||
|
2. Review logs from services (api, executor, worker, sensor)
|
||||||
|
3. Verify database state with SQL queries
|
||||||
|
4. File an issue with detailed error messages and logs
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Last Updated:** 2025-01-20
|
||||||
|
**Core Pack Version:** 1.0.0
|
||||||
410
docker/distributable/packs/core/TESTING.md
Normal file
410
docker/distributable/packs/core/TESTING.md
Normal file
@@ -0,0 +1,410 @@
|
|||||||
|
# Core Pack Testing Guide
|
||||||
|
|
||||||
|
Quick reference for testing core pack actions and sensors locally.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Ensure scripts are executable
|
||||||
|
chmod +x packs/core/actions/*.sh
|
||||||
|
chmod +x packs/core/actions/*.py
|
||||||
|
chmod +x packs/core/sensors/*.py
|
||||||
|
|
||||||
|
# Install Python dependencies
|
||||||
|
pip install "requests>=2.28.0"  # quoted so the shell does not treat >= as redirection
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Testing Actions
|
||||||
|
|
||||||
|
Actions receive parameters via environment variables prefixed with `ATTUNE_ACTION_`.
|
||||||
|
|
||||||
|
### Test `core.echo`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Basic echo
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Hello, Attune!"
|
||||||
|
./packs/core/actions/echo.sh
|
||||||
|
|
||||||
|
# With uppercase conversion
|
||||||
|
export ATTUNE_ACTION_MESSAGE="test message"
|
||||||
|
export ATTUNE_ACTION_UPPERCASE=true
|
||||||
|
./packs/core/actions/echo.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
**Expected Output:**
|
||||||
|
```
|
||||||
|
Hello, Attune!
|
||||||
|
TEST MESSAGE
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Test `core.sleep`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Sleep for 2 seconds
|
||||||
|
export ATTUNE_ACTION_SECONDS=2
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Sleeping..."
|
||||||
|
time ./packs/core/actions/sleep.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
**Expected Output:**
|
||||||
|
```
|
||||||
|
Sleeping...
|
||||||
|
Slept for 2 seconds
|
||||||
|
|
||||||
|
real 0m2.004s
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Test `core.noop`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# No operation with message
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Testing noop"
|
||||||
|
./packs/core/actions/noop.sh
|
||||||
|
|
||||||
|
# With custom exit code
|
||||||
|
export ATTUNE_ACTION_EXIT_CODE=0
|
||||||
|
./packs/core/actions/noop.sh
|
||||||
|
echo "Exit code: $?"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Expected Output:**
|
||||||
|
```
|
||||||
|
[NOOP] Testing noop
|
||||||
|
No operation completed successfully
|
||||||
|
Exit code: 0
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Test `core.http_request`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Simple GET request
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/get"
|
||||||
|
export ATTUNE_ACTION_METHOD="GET"
|
||||||
|
python3 ./packs/core/actions/http_request.py
|
||||||
|
|
||||||
|
# POST with JSON body
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/post"
|
||||||
|
export ATTUNE_ACTION_METHOD="POST"
|
||||||
|
export ATTUNE_ACTION_JSON_BODY='{"name": "test", "value": 123}'
|
||||||
|
python3 ./packs/core/actions/http_request.py
|
||||||
|
|
||||||
|
# With custom headers
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/headers"
|
||||||
|
export ATTUNE_ACTION_METHOD="GET"
|
||||||
|
export ATTUNE_ACTION_HEADERS='{"X-Custom-Header": "test-value"}'
|
||||||
|
python3 ./packs/core/actions/http_request.py
|
||||||
|
|
||||||
|
# With query parameters
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/get"
|
||||||
|
export ATTUNE_ACTION_METHOD="GET"
|
||||||
|
export ATTUNE_ACTION_QUERY_PARAMS='{"foo": "bar", "page": "1"}'
|
||||||
|
python3 ./packs/core/actions/http_request.py
|
||||||
|
|
||||||
|
# With timeout
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/delay/5"
|
||||||
|
export ATTUNE_ACTION_METHOD="GET"
|
||||||
|
export ATTUNE_ACTION_TIMEOUT=2
|
||||||
|
python3 ./packs/core/actions/http_request.py
|
||||||
|
```
|
||||||
|
|
||||||
|
**Expected Output:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"status_code": 200,
|
||||||
|
"headers": {
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
...
|
||||||
|
},
|
||||||
|
"body": "...",
|
||||||
|
"json": {
|
||||||
|
"args": {},
|
||||||
|
"headers": {...},
|
||||||
|
...
|
||||||
|
},
|
||||||
|
"elapsed_ms": 234,
|
||||||
|
"url": "https://httpbin.org/get",
|
||||||
|
"success": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Testing Sensors
|
||||||
|
|
||||||
|
Sensors receive configuration via environment variables prefixed with `ATTUNE_SENSOR_`.
|
||||||
|
|
||||||
|
### Test `core.interval_timer_sensor`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create test trigger instances JSON
|
||||||
|
export ATTUNE_SENSOR_TRIGGERS='[
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"ref": "core.intervaltimer",
|
||||||
|
"config": {
|
||||||
|
"unit": "seconds",
|
||||||
|
"interval": 5
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]'
|
||||||
|
|
||||||
|
# Run sensor (will output events every 5 seconds)
|
||||||
|
python3 ./packs/core/sensors/interval_timer_sensor.py
|
||||||
|
```
|
||||||
|
|
||||||
|
**Expected Output:**
|
||||||
|
```
|
||||||
|
Interval Timer Sensor started (check_interval=1s)
|
||||||
|
{"type": "interval", "interval_seconds": 5, "fired_at": "2024-01-20T12:00:00Z", "execution_count": 1, "sensor_ref": "core.interval_timer_sensor", "trigger_instance_id": 1, "trigger_ref": "core.intervaltimer"}
|
||||||
|
{"type": "interval", "interval_seconds": 5, "fired_at": "2024-01-20T12:00:05Z", "execution_count": 2, "sensor_ref": "core.interval_timer_sensor", "trigger_instance_id": 1, "trigger_ref": "core.intervaltimer"}
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
Press `Ctrl+C` to stop the sensor.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Testing with Multiple Trigger Instances
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test multiple timers
|
||||||
|
export ATTUNE_SENSOR_TRIGGERS='[
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"ref": "core.intervaltimer",
|
||||||
|
"config": {"unit": "seconds", "interval": 3}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"ref": "core.intervaltimer",
|
||||||
|
"config": {"unit": "seconds", "interval": 5}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"ref": "core.intervaltimer",
|
||||||
|
"config": {"unit": "seconds", "interval": 10}
|
||||||
|
}
|
||||||
|
]'
|
||||||
|
|
||||||
|
python3 ./packs/core/sensors/interval_timer_sensor.py
|
||||||
|
```
|
||||||
|
|
||||||
|
You should see events firing at different intervals (3s, 5s, 10s).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Validation Tests
|
||||||
|
|
||||||
|
### Validate YAML Schemas
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install yamllint (optional)
|
||||||
|
pip install yamllint
|
||||||
|
|
||||||
|
# Validate all YAML files
|
||||||
|
yamllint packs/core/**/*.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Validate JSON Schemas
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check parameter schemas are valid JSON Schema
|
||||||
|
cat packs/core/actions/http_request.yaml | grep -A 50 "parameters:" | python3 -c "
|
||||||
|
import sys, yaml, json
|
||||||
|
data = yaml.safe_load(sys.stdin)
|
||||||
|
print(json.dumps(data, indent=2))
|
||||||
|
"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Testing
|
||||||
|
|
||||||
|
### Test Invalid Parameters
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Invalid seconds value for sleep
|
||||||
|
export ATTUNE_ACTION_SECONDS=-1
|
||||||
|
./packs/core/actions/sleep.sh
|
||||||
|
# Expected: ERROR: seconds must be between 0 and 3600
|
||||||
|
|
||||||
|
# Invalid exit code for noop
|
||||||
|
export ATTUNE_ACTION_EXIT_CODE=999
|
||||||
|
./packs/core/actions/noop.sh
|
||||||
|
# Expected: ERROR: exit_code must be between 0 and 255
|
||||||
|
|
||||||
|
# Missing required parameter for HTTP request
|
||||||
|
unset ATTUNE_ACTION_URL
|
||||||
|
python3 ./packs/core/actions/http_request.py
|
||||||
|
# Expected: ERROR: Required parameter 'url' not provided
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Performance Testing
|
||||||
|
|
||||||
|
### Measure Action Execution Time
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Echo action
|
||||||
|
time for i in {1..100}; do
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Test $i"
|
||||||
|
./packs/core/actions/echo.sh > /dev/null
|
||||||
|
done
|
||||||
|
|
||||||
|
# HTTP request action
|
||||||
|
time for i in {1..10}; do
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/get"
|
||||||
|
python3 ./packs/core/actions/http_request.py > /dev/null
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Integration Testing (with Attune Services)
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start Attune services
|
||||||
|
docker-compose up -d postgres rabbitmq redis
|
||||||
|
|
||||||
|
# Run migrations
|
||||||
|
sqlx migrate run
|
||||||
|
|
||||||
|
# Load core pack (future)
|
||||||
|
# attune pack load packs/core
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test Action Execution via API
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create execution manually
|
||||||
|
curl -X POST http://localhost:8080/api/v1/executions \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"action_ref": "core.echo",
|
||||||
|
"parameters": {
|
||||||
|
"message": "API test",
|
||||||
|
"uppercase": true
|
||||||
|
}
|
||||||
|
}'
|
||||||
|
|
||||||
|
# Check execution status
|
||||||
|
curl http://localhost:8080/api/v1/executions/{execution_id}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test Sensor via Sensor Service
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start sensor service (future)
|
||||||
|
# cargo run --bin attune-sensor
|
||||||
|
|
||||||
|
# Check events created
|
||||||
|
curl http://localhost:8080/api/v1/events?limit=10
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Action Not Executing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check file permissions
|
||||||
|
ls -la packs/core/actions/
|
||||||
|
|
||||||
|
# Ensure scripts are executable
|
||||||
|
chmod +x packs/core/actions/*.sh
|
||||||
|
chmod +x packs/core/actions/*.py
|
||||||
|
```
|
||||||
|
|
||||||
|
### Python Import Errors
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install required packages
|
||||||
|
pip install requests>=2.28.0
|
||||||
|
|
||||||
|
# Verify Python version
|
||||||
|
python3 --version # Should be 3.8+
|
||||||
|
```
|
||||||
|
|
||||||
|
### Environment Variables Not Working
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Print all ATTUNE_* environment variables
|
||||||
|
env | grep ATTUNE_
|
||||||
|
|
||||||
|
# Test with explicit export
|
||||||
|
export ATTUNE_ACTION_MESSAGE="test"
|
||||||
|
echo $ATTUNE_ACTION_MESSAGE
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Automated Test Script
|
||||||
|
|
||||||
|
Create a test script `test_core_pack.sh`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
echo "Testing Core Pack Actions..."
|
||||||
|
|
||||||
|
# Test echo
|
||||||
|
echo "→ Testing core.echo..."
|
||||||
|
export ATTUNE_ACTION_MESSAGE="Test"
|
||||||
|
./packs/core/actions/echo.sh > /dev/null
|
||||||
|
echo "✓ core.echo passed"
|
||||||
|
|
||||||
|
# Test sleep
|
||||||
|
echo "→ Testing core.sleep..."
|
||||||
|
export ATTUNE_ACTION_SECONDS=1
|
||||||
|
./packs/core/actions/sleep.sh > /dev/null
|
||||||
|
echo "✓ core.sleep passed"
|
||||||
|
|
||||||
|
# Test noop
|
||||||
|
echo "→ Testing core.noop..."
|
||||||
|
export ATTUNE_ACTION_MESSAGE="test"
|
||||||
|
./packs/core/actions/noop.sh > /dev/null
|
||||||
|
echo "✓ core.noop passed"
|
||||||
|
|
||||||
|
# Test HTTP request
|
||||||
|
echo "→ Testing core.http_request..."
|
||||||
|
export ATTUNE_ACTION_URL="https://httpbin.org/get"
|
||||||
|
export ATTUNE_ACTION_METHOD="GET"
|
||||||
|
python3 ./packs/core/actions/http_request.py > /dev/null
|
||||||
|
echo "✓ core.http_request passed"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "All tests passed! ✓"
|
||||||
|
```
|
||||||
|
|
||||||
|
Run with:
|
||||||
|
```bash
|
||||||
|
chmod +x test_core_pack.sh
|
||||||
|
./test_core_pack.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
1. Implement pack loader to register components in database
|
||||||
|
2. Update worker service to execute actions from filesystem
|
||||||
|
3. Update sensor service to run sensors from filesystem
|
||||||
|
4. Add comprehensive integration tests
|
||||||
|
5. Create CLI commands for pack management
|
||||||
|
|
||||||
|
See `docs/core-pack-integration.md` for implementation details.
|
||||||
362
docker/distributable/packs/core/actions/README.md
Normal file
362
docker/distributable/packs/core/actions/README.md
Normal file
@@ -0,0 +1,362 @@
|
|||||||
|
# Core Pack Actions
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
All actions in the core pack are implemented as **pure POSIX shell scripts** with **zero external dependencies** (except `curl` for HTTP actions). This design ensures maximum portability and minimal runtime requirements.
|
||||||
|
|
||||||
|
**Key Principles:**
|
||||||
|
- **POSIX shell only** - No bash-specific features, works everywhere
|
||||||
|
- **DOTENV parameter format** - Simple key=value format, no JSON parsing needed
|
||||||
|
- **No jq/yq/Python/Node.js** - Core pack depends only on standard POSIX utilities
|
||||||
|
- **Stdin parameter delivery** - Secure, never exposed in process list
|
||||||
|
- **Explicit output formats** - text, json, or yaml
|
||||||
|
|
||||||
|
## Parameter Delivery Method
|
||||||
|
|
||||||
|
**All actions use stdin with DOTENV format:**
|
||||||
|
- Parameters read from **stdin** in `key=value` format
|
||||||
|
- Use `parameter_delivery: stdin` and `parameter_format: dotenv` in YAML
|
||||||
|
- Stdin is closed after delivery; scripts read until EOF
|
||||||
|
- **DO NOT** use environment variables for parameters
|
||||||
|
|
||||||
|
**Example DOTENV input:**
|
||||||
|
```
|
||||||
|
message="Hello World"
|
||||||
|
seconds=5
|
||||||
|
enabled=true
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
**All actions must specify an `output_format`:**
|
||||||
|
- `text` - Plain text output (stored as-is, no parsing)
|
||||||
|
- `json` - JSON structured data (parsed into JSONB field)
|
||||||
|
- `yaml` - YAML structured data (parsed into JSONB field)
|
||||||
|
|
||||||
|
**Output schema:**
|
||||||
|
- Only applicable for `json` and `yaml` formats
|
||||||
|
- Describes the structure of data written to stdout
|
||||||
|
- **Should NOT include** stdout/stderr/exit_code (captured automatically)
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
### Standard Environment Variables (Provided by Worker)
|
||||||
|
|
||||||
|
The worker automatically provides these environment variables to all action executions:
|
||||||
|
|
||||||
|
| Variable | Description | Always Present |
|
||||||
|
|----------|-------------|----------------|
|
||||||
|
| `ATTUNE_ACTION` | Action ref (e.g., `core.http_request`) | ✅ Yes |
|
||||||
|
| `ATTUNE_EXEC_ID` | Execution database ID | ✅ Yes |
|
||||||
|
| `ATTUNE_API_TOKEN` | Execution-scoped API token | ✅ Yes |
|
||||||
|
| `ATTUNE_RULE` | Rule ref that triggered execution | ❌ Only if from rule |
|
||||||
|
| `ATTUNE_TRIGGER` | Trigger ref that caused enforcement | ❌ Only if from trigger |
|
||||||
|
|
||||||
|
**Use cases:**
|
||||||
|
- Logging with execution context
|
||||||
|
- Calling Attune API (using `ATTUNE_API_TOKEN`)
|
||||||
|
- Conditional logic based on rule/trigger
|
||||||
|
- Creating child executions
|
||||||
|
- Accessing secrets via API
|
||||||
|
|
||||||
|
### Custom Environment Variables (Optional)
|
||||||
|
|
||||||
|
Custom environment variables can be set via `execution.env_vars` field for:
|
||||||
|
- **Debug/logging controls** (e.g., `DEBUG=1`, `LOG_LEVEL=debug`)
|
||||||
|
- **Runtime configuration** (e.g., custom paths, feature flags)
|
||||||
|
|
||||||
|
Environment variables should **NEVER** be used for:
|
||||||
|
- Action parameters (use stdin DOTENV instead)
|
||||||
|
- Secrets or credentials (use `ATTUNE_API_TOKEN` to fetch from key vault)
|
||||||
|
- User-provided data (use stdin parameters)
|
||||||
|
|
||||||
|
## Implementation Pattern
|
||||||
|
|
||||||
|
### POSIX Shell Actions (Standard Pattern)
|
||||||
|
|
||||||
|
All core pack actions follow this pattern:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
#!/bin/sh
|
||||||
|
# Action Name - Core Pack
|
||||||
|
# Brief description
|
||||||
|
#
|
||||||
|
# This script uses pure POSIX shell without external dependencies like jq.
|
||||||
|
# It reads parameters in DOTENV format from stdin until EOF.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Initialize variables with defaults
|
||||||
|
param1=""
|
||||||
|
param2="default_value"
|
||||||
|
|
||||||
|
# Read DOTENV-formatted parameters from stdin until EOF
|
||||||
|
while IFS= read -r line; do
|
||||||
|
[ -z "$line" ] && continue
|
||||||
|
|
||||||
|
key="${line%%=*}"
|
||||||
|
value="${line#*=}"
|
||||||
|
|
||||||
|
# Remove quotes if present
|
||||||
|
case "$value" in
|
||||||
|
\"*\") value="${value#\"}"; value="${value%\"}" ;;
|
||||||
|
\'*\') value="${value#\'}"; value="${value%\'}" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Process parameters
|
||||||
|
case "$key" in
|
||||||
|
param1) param1="$value" ;;
|
||||||
|
param2) param2="$value" ;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Validate required parameters
|
||||||
|
if [ -z "$param1" ]; then
|
||||||
|
echo "ERROR: param1 is required" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Action logic
|
||||||
|
echo "Processing: $param1"
|
||||||
|
|
||||||
|
exit 0
|
||||||
|
```
|
||||||
|
|
||||||
|
### Boolean Normalization
|
||||||
|
|
||||||
|
```sh
|
||||||
|
case "$bool_param" in
|
||||||
|
true|True|TRUE|yes|Yes|YES|1) bool_param="true" ;;
|
||||||
|
*) bool_param="false" ;;
|
||||||
|
esac
|
||||||
|
```
|
||||||
|
|
||||||
|
### Numeric Validation
|
||||||
|
|
||||||
|
```sh
|
||||||
|
case "$number" in
|
||||||
|
''|*[!0-9]*)
|
||||||
|
echo "ERROR: must be a number" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
```
|
||||||
|
|
||||||
|
## Core Pack Actions
|
||||||
|
|
||||||
|
### Simple Actions
|
||||||
|
|
||||||
|
1. **echo.sh** - Outputs a message (reference implementation)
|
||||||
|
2. **sleep.sh** - Pauses execution for a specified duration
|
||||||
|
3. **noop.sh** - Does nothing (useful for testing and placeholder workflows)
|
||||||
|
|
||||||
|
### HTTP Action
|
||||||
|
|
||||||
|
4. **http_request.sh** - Makes HTTP requests with full feature support:
|
||||||
|
- Multiple HTTP methods (GET, POST, PUT, PATCH, DELETE, etc.)
|
||||||
|
- Custom headers and query parameters
|
||||||
|
- Authentication (basic, bearer token)
|
||||||
|
- SSL verification control
|
||||||
|
- Redirect following
|
||||||
|
- JSON output with parsed response
|
||||||
|
|
||||||
|
### Pack Management Actions (API Wrappers)
|
||||||
|
|
||||||
|
These actions wrap Attune API endpoints for pack management:
|
||||||
|
|
||||||
|
5. **download_packs.sh** - Downloads packs from git/HTTP/registry
|
||||||
|
6. **build_pack_envs.sh** - Builds runtime environments for packs
|
||||||
|
7. **register_packs.sh** - Registers packs in the database
|
||||||
|
8. **get_pack_dependencies.sh** - Analyzes pack dependencies
|
||||||
|
|
||||||
|
All API wrappers:
|
||||||
|
- Accept parameters via DOTENV format
|
||||||
|
- Build JSON request bodies manually (no jq)
|
||||||
|
- Make authenticated API calls with curl
|
||||||
|
- Extract response data using simple sed patterns
|
||||||
|
- Return structured JSON output
|
||||||
|
|
||||||
|
## Testing Actions Locally
|
||||||
|
|
||||||
|
Test actions by echoing DOTENV format to stdin:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test echo action
|
||||||
|
printf 'message="Hello World"\n' | ./echo.sh
|
||||||
|
|
||||||
|
# Test with empty parameters
|
||||||
|
printf '' | ./echo.sh
|
||||||
|
|
||||||
|
# Test sleep action
|
||||||
|
printf 'seconds=2\nmessage="Sleeping..."\n' | ./sleep.sh
|
||||||
|
|
||||||
|
# Test http_request action
|
||||||
|
printf 'url="https://api.github.com"\nmethod="GET"\n' | ./http_request.sh
|
||||||
|
|
||||||
|
# Test with file input
|
||||||
|
cat params.dotenv | ./echo.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## YAML Configuration Example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
ref: core.example_action
|
||||||
|
label: "Example Action"
|
||||||
|
description: "Example action demonstrating DOTENV format"
|
||||||
|
enabled: true
|
||||||
|
runner_type: shell
|
||||||
|
entry_point: example.sh
|
||||||
|
|
||||||
|
# IMPORTANT: Use DOTENV format for POSIX shell compatibility
|
||||||
|
parameter_delivery: stdin
|
||||||
|
parameter_format: dotenv
|
||||||
|
|
||||||
|
# Output format: text, json, or yaml
|
||||||
|
output_format: text
|
||||||
|
|
||||||
|
parameters:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
message:
|
||||||
|
type: string
|
||||||
|
description: "Message to output"
|
||||||
|
default: ""
|
||||||
|
count:
|
||||||
|
type: integer
|
||||||
|
description: "Number of times to repeat"
|
||||||
|
default: 1
|
||||||
|
required:
|
||||||
|
- message
|
||||||
|
```
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
**Core pack has ZERO runtime dependencies:**
|
||||||
|
|
||||||
|
✅ **Required (universally available):**
|
||||||
|
- POSIX-compliant shell (`/bin/sh`)
|
||||||
|
- `curl` (for HTTP actions only)
|
||||||
|
- Standard POSIX utilities: `sed`, `mktemp`, `cat`, `printf`, `sleep`
|
||||||
|
|
||||||
|
❌ **NOT Required:**
|
||||||
|
- `jq` - Eliminated (was used for JSON parsing)
|
||||||
|
- `yq` - Never used
|
||||||
|
- Python - Not used in core pack actions
|
||||||
|
- Node.js - Not used in core pack actions
|
||||||
|
- bash - Scripts are POSIX-compliant
|
||||||
|
- Any other external tools or libraries
|
||||||
|
|
||||||
|
This makes the core pack **maximally portable** and suitable for minimal containers (Alpine, distroless, etc.).
|
||||||
|
|
||||||
|
## Security Benefits
|
||||||
|
|
||||||
|
1. **No process exposure** - Parameters never appear in `ps`, `/proc/<pid>/environ`
|
||||||
|
2. **Secure by default** - All actions use stdin, no special configuration needed
|
||||||
|
3. **Clear separation** - Action parameters vs. environment configuration
|
||||||
|
4. **Audit friendly** - All sensitive data flows through stdin, not environment
|
||||||
|
5. **Minimal attack surface** - No external dependencies to exploit
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### Parameters
|
||||||
|
1. **Always use stdin with DOTENV format** for action parameters
|
||||||
|
2. **Handle quoted values** - Remove both single and double quotes
|
||||||
|
3. **Provide sensible defaults** - Use empty string, 0, false as appropriate
|
||||||
|
4. **Validate required params** - Exit with error if truly required parameters missing
|
||||||
|
5. **Mark secrets** - Use `secret: true` in YAML for sensitive parameters
|
||||||
|
6. **Never use env vars for parameters** - Parameters come from stdin only
|
||||||
|
|
||||||
|
### Environment Variables
|
||||||
|
1. **Use standard ATTUNE_* variables** - Worker provides execution context
|
||||||
|
2. **Access API with ATTUNE_API_TOKEN** - Execution-scoped authentication
|
||||||
|
3. **Log with context** - Include `ATTUNE_ACTION` and `ATTUNE_EXEC_ID` in logs
|
||||||
|
4. **Never log ATTUNE_API_TOKEN** - Security sensitive
|
||||||
|
5. **Use env vars for runtime config only** - Not for user data or parameters
|
||||||
|
|
||||||
|
### Output Format
|
||||||
|
1. **Specify output_format** - Always set to "text", "json", or "yaml"
|
||||||
|
2. **Use text for simple output** - Messages, logs, unstructured data
|
||||||
|
3. **Use json for structured data** - API responses, complex results
|
||||||
|
4. **Define schema for structured output** - Only for json/yaml formats
|
||||||
|
5. **Use stderr for diagnostics** - Error messages go to stderr, not stdout
|
||||||
|
6. **Return proper exit codes** - 0 for success, non-zero for failure
|
||||||
|
|
||||||
|
### Shell Script Best Practices
|
||||||
|
1. **Use `#!/bin/sh`** - POSIX shell, not bash
|
||||||
|
2. **Use `set -e`** - Exit on error
|
||||||
|
3. **Quote all variables** - `"$var"` not `$var`
|
||||||
|
4. **Use `case` not `if`** - More portable for pattern matching
|
||||||
|
5. **Clean up temp files** - Use trap handlers
|
||||||
|
6. **Avoid bash-isms** - No `[[`, `${var^^}`, `=~`, arrays, etc.
|
||||||
|
|
||||||
|
## Execution Metadata (Automatic)
|
||||||
|
|
||||||
|
The following are **automatically captured** by the worker and should **NOT** be included in output schemas:
|
||||||
|
|
||||||
|
- `stdout` - Raw standard output (captured as-is)
|
||||||
|
- `stderr` - Standard error output (written to log file)
|
||||||
|
- `exit_code` - Process exit code (0 = success)
|
||||||
|
- `duration_ms` - Execution duration in milliseconds
|
||||||
|
|
||||||
|
These are execution system concerns, not action output concerns.
|
||||||
|
|
||||||
|
## Example: Complete Action
|
||||||
|
|
||||||
|
```sh
|
||||||
|
#!/bin/sh
|
||||||
|
# Example Action - Core Pack
|
||||||
|
# Demonstrates DOTENV parameter parsing and environment variable usage
|
||||||
|
#
|
||||||
|
# This script uses pure POSIX shell without external dependencies like jq.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Log execution start
|
||||||
|
echo "[$ATTUNE_ACTION] [Exec: $ATTUNE_EXEC_ID] Starting" >&2
|
||||||
|
|
||||||
|
# Initialize variables
|
||||||
|
url=""
|
||||||
|
timeout="30"
|
||||||
|
|
||||||
|
# Read DOTENV parameters from stdin until EOF
|
||||||
|
while IFS= read -r line; do
|
||||||
|
[ -z "$line" ] && continue
|
||||||
|
|
||||||
|
key="${line%%=*}"
|
||||||
|
value="${line#*=}"
|
||||||
|
|
||||||
|
case "$value" in
|
||||||
|
\"*\") value="${value#\"}"; value="${value%\"}" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
case "$key" in
|
||||||
|
url) url="$value" ;;
|
||||||
|
timeout) timeout="$value" ;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Validate
|
||||||
|
if [ -z "$url" ]; then
|
||||||
|
echo "ERROR: url is required" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Execute
|
||||||
|
echo "Fetching: $url" >&2
|
||||||
|
result=$(curl -s --max-time "$timeout" "$url")
|
||||||
|
|
||||||
|
# Output
|
||||||
|
echo "$result"
|
||||||
|
|
||||||
|
echo "[$ATTUNE_ACTION] [Exec: $ATTUNE_EXEC_ID] Completed" >&2
|
||||||
|
exit 0
|
||||||
|
```
|
||||||
|
|
||||||
|
## Further Documentation
|
||||||
|
|
||||||
|
- **Pattern Reference:** `docs/QUICKREF-dotenv-shell-actions.md`
|
||||||
|
- **Pack Structure:** `docs/pack-structure.md`
|
||||||
|
- **Example Actions:**
|
||||||
|
- `echo.sh` - Simplest reference implementation
|
||||||
|
- `http_request.sh` - Complex action with full HTTP client
|
||||||
|
- `register_packs.sh` - API wrapper with JSON construction
|
||||||
215
docker/distributable/packs/core/actions/build_pack_envs.sh
Executable file
215
docker/distributable/packs/core/actions/build_pack_envs.sh
Executable file
@@ -0,0 +1,215 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# Build Pack Environments Action - Core Pack
|
||||||
|
# API Wrapper for POST /api/v1/packs/build-envs
|
||||||
|
#
|
||||||
|
# This script uses pure POSIX shell without external dependencies like jq.
|
||||||
|
# It reads parameters in DOTENV format from stdin until EOF.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Initialize variables
|
||||||
|
pack_paths=""
|
||||||
|
packs_base_dir="/opt/attune/packs"
|
||||||
|
python_version="3.11"
|
||||||
|
nodejs_version="20"
|
||||||
|
skip_python="false"
|
||||||
|
skip_nodejs="false"
|
||||||
|
force_rebuild="false"
|
||||||
|
timeout="600"
|
||||||
|
api_url="http://localhost:8080"
|
||||||
|
api_token=""
|
||||||
|
|
||||||
|
# Read DOTENV-formatted parameters from stdin until EOF
|
||||||
|
while IFS= read -r line; do
|
||||||
|
[ -z "$line" ] && continue
|
||||||
|
|
||||||
|
key="${line%%=*}"
|
||||||
|
value="${line#*=}"
|
||||||
|
|
||||||
|
# Remove quotes if present (both single and double)
|
||||||
|
case "$value" in
|
||||||
|
\"*\")
|
||||||
|
value="${value#\"}"
|
||||||
|
value="${value%\"}"
|
||||||
|
;;
|
||||||
|
\'*\')
|
||||||
|
value="${value#\'}"
|
||||||
|
value="${value%\'}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Process parameters
|
||||||
|
case "$key" in
|
||||||
|
pack_paths)
|
||||||
|
pack_paths="$value"
|
||||||
|
;;
|
||||||
|
packs_base_dir)
|
||||||
|
packs_base_dir="$value"
|
||||||
|
;;
|
||||||
|
python_version)
|
||||||
|
python_version="$value"
|
||||||
|
;;
|
||||||
|
nodejs_version)
|
||||||
|
nodejs_version="$value"
|
||||||
|
;;
|
||||||
|
skip_python)
|
||||||
|
skip_python="$value"
|
||||||
|
;;
|
||||||
|
skip_nodejs)
|
||||||
|
skip_nodejs="$value"
|
||||||
|
;;
|
||||||
|
force_rebuild)
|
||||||
|
force_rebuild="$value"
|
||||||
|
;;
|
||||||
|
timeout)
|
||||||
|
timeout="$value"
|
||||||
|
;;
|
||||||
|
api_url)
|
||||||
|
api_url="$value"
|
||||||
|
;;
|
||||||
|
api_token)
|
||||||
|
api_token="$value"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Validate required parameters
|
||||||
|
if [ -z "$pack_paths" ]; then
|
||||||
|
printf '{"built_environments":[],"failed_environments":[],"summary":{"total_packs":0,"success_count":0,"failure_count":0,"python_envs_built":0,"nodejs_envs_built":0,"total_duration_ms":0}}\n'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Normalize booleans
|
||||||
|
case "$skip_python" in
|
||||||
|
true|True|TRUE|yes|Yes|YES|1) skip_python="true" ;;
|
||||||
|
*) skip_python="false" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
case "$skip_nodejs" in
|
||||||
|
true|True|TRUE|yes|Yes|YES|1) skip_nodejs="true" ;;
|
||||||
|
*) skip_nodejs="false" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
case "$force_rebuild" in
|
||||||
|
true|True|TRUE|yes|Yes|YES|1) force_rebuild="true" ;;
|
||||||
|
*) force_rebuild="false" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Validate timeout is numeric
|
||||||
|
case "$timeout" in
|
||||||
|
''|*[!0-9]*)
|
||||||
|
timeout="600"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Escape values for JSON
|
||||||
|
pack_paths_escaped=$(printf '%s' "$pack_paths" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
packs_base_dir_escaped=$(printf '%s' "$packs_base_dir" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
python_version_escaped=$(printf '%s' "$python_version" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
nodejs_version_escaped=$(printf '%s' "$nodejs_version" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
|
||||||
|
# Build JSON request body
|
||||||
|
request_body=$(cat <<EOF
|
||||||
|
{
|
||||||
|
"pack_paths": $pack_paths_escaped,
|
||||||
|
"packs_base_dir": "$packs_base_dir_escaped",
|
||||||
|
"python_version": "$python_version_escaped",
|
||||||
|
"nodejs_version": "$nodejs_version_escaped",
|
||||||
|
"skip_python": $skip_python,
|
||||||
|
"skip_nodejs": $skip_nodejs,
|
||||||
|
"force_rebuild": $force_rebuild,
|
||||||
|
"timeout": $timeout
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create temp files for curl
|
||||||
|
temp_response=$(mktemp)
|
||||||
|
temp_headers=$(mktemp)
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
rm -f "$temp_response" "$temp_headers"
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
# Calculate curl timeout (request timeout + buffer)
|
||||||
|
curl_timeout=$((timeout + 30))
|
||||||
|
|
||||||
|
# Make API call
|
||||||
|
http_code=$(curl -X POST \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "Accept: application/json" \
|
||||||
|
${api_token:+-H "Authorization: Bearer ${api_token}"} \
|
||||||
|
-d "$request_body" \
|
||||||
|
-s \
|
||||||
|
-w "%{http_code}" \
|
||||||
|
-o "$temp_response" \
|
||||||
|
--max-time "$curl_timeout" \
|
||||||
|
--connect-timeout 10 \
|
||||||
|
"${api_url}/api/v1/packs/build-envs" 2>/dev/null || echo "000")
|
||||||
|
|
||||||
|
# Check HTTP status
|
||||||
|
if [ "$http_code" -ge 200 ] && [ "$http_code" -lt 300 ]; then
|
||||||
|
# Success - extract data field from API response
|
||||||
|
response_body=$(cat "$temp_response")
|
||||||
|
|
||||||
|
# Try to extract .data field using simple text processing
|
||||||
|
# If response contains "data" field, extract it; otherwise use whole response
|
||||||
|
case "$response_body" in
|
||||||
|
*'"data":'*)
|
||||||
|
# Extract content after "data": up to the closing brace
|
||||||
|
# This is a simple extraction - assumes well-formed JSON
|
||||||
|
data_content=$(printf '%s' "$response_body" | sed -n 's/.*"data":\s*\(.*\)}/\1/p')
|
||||||
|
if [ -n "$data_content" ]; then
|
||||||
|
printf '%s\n' "$data_content"
|
||||||
|
else
|
||||||
|
cat "$temp_response"
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
cat "$temp_response"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
# Error response - try to extract error message
|
||||||
|
error_msg="API request failed"
|
||||||
|
if [ -s "$temp_response" ]; then
|
||||||
|
# Try to extract error or message field
|
||||||
|
response_content=$(cat "$temp_response")
|
||||||
|
case "$response_content" in
|
||||||
|
*'"error":'*)
|
||||||
|
error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"error":\s*"\([^"]*\)".*/\1/p')
|
||||||
|
[ -z "$error_msg" ] && error_msg="API request failed"
|
||||||
|
;;
|
||||||
|
*'"message":'*)
|
||||||
|
error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"message":\s*"\([^"]*\)".*/\1/p')
|
||||||
|
[ -z "$error_msg" ] && error_msg="API request failed"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Escape error message for JSON
|
||||||
|
error_msg_escaped=$(printf '%s' "$error_msg" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
|
||||||
|
cat <<EOF
|
||||||
|
{
|
||||||
|
"built_environments": [],
|
||||||
|
"failed_environments": [{
|
||||||
|
"pack_ref": "api",
|
||||||
|
"pack_path": "",
|
||||||
|
"runtime": "unknown",
|
||||||
|
"error": "API call failed (HTTP $http_code): $error_msg_escaped"
|
||||||
|
}],
|
||||||
|
"summary": {
|
||||||
|
"total_packs": 0,
|
||||||
|
"success_count": 0,
|
||||||
|
"failure_count": 1,
|
||||||
|
"python_envs_built": 0,
|
||||||
|
"nodejs_envs_built": 0,
|
||||||
|
"total_duration_ms": 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
160
docker/distributable/packs/core/actions/build_pack_envs.yaml
Normal file
160
docker/distributable/packs/core/actions/build_pack_envs.yaml
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
# Build Pack Environments Action
# Creates runtime environments and installs dependencies for packs

ref: core.build_pack_envs
label: "Build Pack Environments"
description: "Build runtime environments for packs and install declared dependencies (Python requirements.txt, Node.js package.json)"
enabled: true
runner_type: shell
entry_point: build_pack_envs.sh

# Parameter delivery: stdin for secure parameter passing (no env vars)
parameter_delivery: stdin
parameter_format: dotenv

# Output format: json (structured data parsing enabled)
output_format: json

# Action parameters schema (StackStorm-style with inline required/secret)
parameters:
  pack_paths:
    type: array
    description: "List of pack directory paths to build environments for"
    required: true
    items:
      type: string
    minItems: 1
  packs_base_dir:
    type: string
    description: "Base directory where packs are installed"
    default: "/opt/attune/packs"
  python_version:
    type: string
    description: "Python version to use for virtualenvs"
    default: "3.11"
  nodejs_version:
    type: string
    description: "Node.js version to use"
    default: "20"
  skip_python:
    type: boolean
    description: "Skip building Python environments"
    default: false
  skip_nodejs:
    type: boolean
    description: "Skip building Node.js environments"
    default: false
  force_rebuild:
    type: boolean
    description: "Force rebuild of existing environments"
    default: false
  timeout:
    type: integer
    description: "Timeout in seconds for building each environment"
    default: 600
    minimum: 60
    maximum: 3600

# Output schema: describes the JSON structure written to stdout
# Note: stdout/stderr/exit_code are captured automatically by the execution system
output_schema:
  built_environments:
    type: array
    description: "List of successfully built environments"
    items:
      type: object
      properties:
        pack_ref:
          type: string
          description: "Pack reference"
        pack_path:
          type: string
          description: "Pack directory path"
        environments:
          type: object
          description: "Built environments for this pack"
          properties:
            python:
              type: object
              description: "Python environment details"
              properties:
                virtualenv_path:
                  type: string
                  description: "Path to Python virtualenv"
                requirements_installed:
                  type: boolean
                  description: "Whether requirements.txt was installed"
                package_count:
                  type: integer
                  description: "Number of packages installed"
                python_version:
                  type: string
                  description: "Python version used"
            nodejs:
              type: object
              description: "Node.js environment details"
              properties:
                node_modules_path:
                  type: string
                  description: "Path to node_modules directory"
                dependencies_installed:
                  type: boolean
                  description: "Whether package.json was installed"
                package_count:
                  type: integer
                  description: "Number of packages installed"
                nodejs_version:
                  type: string
                  description: "Node.js version used"
        duration_ms:
          type: integer
          description: "Time taken to build environments in milliseconds"
  failed_environments:
    type: array
    description: "List of packs where environment build failed"
    items:
      type: object
      properties:
        pack_ref:
          type: string
          description: "Pack reference"
        pack_path:
          type: string
          description: "Pack directory path"
        runtime:
          type: string
          description: "Runtime that failed (python or nodejs)"
        error:
          type: string
          description: "Error message"
  summary:
    type: object
    description: "Summary of environment build process"
    properties:
      total_packs:
        type: integer
        description: "Total number of packs processed"
      success_count:
        type: integer
        description: "Number of packs with successful builds"
      failure_count:
        type: integer
        description: "Number of packs with failed builds"
      python_envs_built:
        type: integer
        description: "Number of Python environments built"
      nodejs_envs_built:
        type: integer
        description: "Number of Node.js environments built"
      total_duration_ms:
        type: integer
        description: "Total time taken for all builds in milliseconds"

# Tags for categorization
tags:
  - pack
  - environment
  - dependencies
  - python
  - nodejs
  - installation
|
||||||
201
docker/distributable/packs/core/actions/download_packs.sh
Executable file
201
docker/distributable/packs/core/actions/download_packs.sh
Executable file
@@ -0,0 +1,201 @@
|
|||||||
|
#!/bin/sh
# Download Packs Action - Core Pack
# API Wrapper for POST /api/v1/packs/download
#
# This script uses pure POSIX shell without external dependencies like jq.
# It reads parameters in DOTENV format from stdin until EOF.

set -e

# Initialize variables (defaults match download_packs.yaml)
packs=""
destination_dir=""
registry_url="https://registry.attune.io/index.json"
ref_spec=""
timeout="300"
verify_ssl="true"
api_url="http://localhost:8080"
api_token=""

# Read DOTENV-formatted parameters from stdin until EOF
while IFS= read -r line; do
    [ -z "$line" ] && continue

    key="${line%%=*}"
    value="${line#*=}"

    # Remove surrounding quotes if present (both single and double)
    case "$value" in
        \"*\")
            value="${value#\"}"
            value="${value%\"}"
            ;;
        \'*\')
            value="${value#\'}"
            value="${value%\'}"
            ;;
    esac

    # Process parameters
    case "$key" in
        packs) packs="$value" ;;
        destination_dir) destination_dir="$value" ;;
        registry_url) registry_url="$value" ;;
        ref_spec) ref_spec="$value" ;;
        timeout) timeout="$value" ;;
        verify_ssl) verify_ssl="$value" ;;
        api_url) api_url="$value" ;;
        api_token) api_token="$value" ;;
    esac
done

# Validate required parameters.
# Both packs and destination_dir are declared required in download_packs.yaml.
# An empty packs value would otherwise be interpolated into the request body
# as `"packs": ,` - invalid JSON sent to the API.
if [ -z "$packs" ]; then
    printf '{"downloaded_packs":[],"failed_packs":[{"source":"input","error":"packs is required"}],"total_count":0,"success_count":0,"failure_count":1}\n'
    exit 1
fi
if [ -z "$destination_dir" ]; then
    printf '{"downloaded_packs":[],"failed_packs":[{"source":"input","error":"destination_dir is required"}],"total_count":0,"success_count":0,"failure_count":1}\n'
    exit 1
fi

# Normalize boolean
case "$verify_ssl" in
    true|True|TRUE|yes|Yes|YES|1) verify_ssl="true" ;;
    *) verify_ssl="false" ;;
esac

# Validate timeout is numeric; fall back to the default otherwise
case "$timeout" in
    ''|*[!0-9]*)
        timeout="300"
        ;;
esac

# Escape string values for embedding inside JSON string literals.
# NOTE: packs is intentionally NOT escaped - it is embedded unquoted and is
# expected to already be a JSON array (e.g. ["https://..."]); backslash-escaping
# its quotes would corrupt it into invalid JSON.
destination_dir_escaped=$(printf '%s' "$destination_dir" | sed 's/\\/\\\\/g; s/"/\\"/g')
registry_url_escaped=$(printf '%s' "$registry_url" | sed 's/\\/\\\\/g; s/"/\\"/g')

# Build JSON request body (ref_spec is only included when provided)
if [ -n "$ref_spec" ]; then
    ref_spec_escaped=$(printf '%s' "$ref_spec" | sed 's/\\/\\\\/g; s/"/\\"/g')
    request_body=$(cat <<EOF
{
    "packs": $packs,
    "destination_dir": "$destination_dir_escaped",
    "registry_url": "$registry_url_escaped",
    "ref_spec": "$ref_spec_escaped",
    "timeout": $timeout,
    "verify_ssl": $verify_ssl
}
EOF
)
else
    request_body=$(cat <<EOF
{
    "packs": $packs,
    "destination_dir": "$destination_dir_escaped",
    "registry_url": "$registry_url_escaped",
    "timeout": $timeout,
    "verify_ssl": $verify_ssl
}
EOF
)
fi

# Create temp files for curl
temp_response=$(mktemp)
temp_headers=$(mktemp)

cleanup() {
    rm -f "$temp_response" "$temp_headers"
}
trap cleanup EXIT

# Calculate curl timeout (request timeout + buffer for connection overhead)
curl_timeout=$((timeout + 30))

# Make API call; a transport-level failure is reported as HTTP code 000
http_code=$(curl -X POST \
    -H "Content-Type: application/json" \
    -H "Accept: application/json" \
    ${api_token:+-H "Authorization: Bearer ${api_token}"} \
    -d "$request_body" \
    -s \
    -w "%{http_code}" \
    -o "$temp_response" \
    --max-time "$curl_timeout" \
    --connect-timeout 10 \
    "${api_url}/api/v1/packs/download" 2>/dev/null || echo "000")

# Check HTTP status
if [ "$http_code" -ge 200 ] && [ "$http_code" -lt 300 ]; then
    # Success - extract data field from API response
    response_body=$(cat "$temp_response")

    # Try to extract the .data field using simple text processing.
    # This is a naive extraction that assumes well-formed JSON.
    # [[:space:]] is used instead of \s: \s is a GNU sed extension and is not
    # supported by POSIX/BusyBox/BSD sed, contradicting the script's
    # no-dependency claim.
    case "$response_body" in
        *'"data":'*)
            data_content=$(printf '%s' "$response_body" | sed -n 's/.*"data":[[:space:]]*\(.*\)}/\1/p')
            if [ -n "$data_content" ]; then
                printf '%s\n' "$data_content"
            else
                cat "$temp_response"
            fi
            ;;
        *)
            cat "$temp_response"
            ;;
    esac
    exit 0
else
    # Error response - try to extract an error or message field
    error_msg="API request failed"
    if [ -s "$temp_response" ]; then
        response_content=$(cat "$temp_response")
        case "$response_content" in
            *'"error":'*)
                error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"error":[[:space:]]*"\([^"]*\)".*/\1/p')
                [ -z "$error_msg" ] && error_msg="API request failed"
                ;;
            *'"message":'*)
                error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"message":[[:space:]]*"\([^"]*\)".*/\1/p')
                [ -z "$error_msg" ] && error_msg="API request failed"
                ;;
        esac
    fi

    # Escape error message for JSON
    error_msg_escaped=$(printf '%s' "$error_msg" | sed 's/\\/\\\\/g; s/"/\\"/g')

    cat <<EOF
{
    "downloaded_packs": [],
    "failed_packs": [{
        "source": "api",
        "error": "API call failed (HTTP $http_code): $error_msg_escaped"
    }],
    "total_count": 0,
    "success_count": 0,
    "failure_count": 1
}
EOF
    exit 1
fi
|
||||||
115
docker/distributable/packs/core/actions/download_packs.yaml
Normal file
115
docker/distributable/packs/core/actions/download_packs.yaml
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
# Download Packs Action
# Downloads packs from various sources (git repositories, HTTP archives, or pack registry)

ref: core.download_packs
label: "Download Packs"
description: "Download packs from git repositories, HTTP archives, or pack registry to a temporary directory"
enabled: true
runner_type: shell
entry_point: download_packs.sh

# Parameter delivery: stdin for secure parameter passing (no env vars)
parameter_delivery: stdin
parameter_format: dotenv

# Output format: json (structured data parsing enabled)
output_format: json

# Action parameters schema (StackStorm-style with inline required/secret)
parameters:
  packs:
    type: array
    description: "List of packs to download (git URLs, HTTP URLs, or pack refs)"
    items:
      type: string
    minItems: 1
    required: true
  destination_dir:
    type: string
    description: "Destination directory for downloaded packs"
    required: true
  registry_url:
    type: string
    description: "Pack registry URL for resolving pack refs (optional)"
    default: "https://registry.attune.io/index.json"
  ref_spec:
    type: string
    description: "Git reference to checkout (branch, tag, or commit) - applies to all git URLs"
  timeout:
    type: integer
    description: "Download timeout in seconds per pack"
    default: 300
    minimum: 10
    maximum: 3600
  verify_ssl:
    type: boolean
    description: "Verify SSL certificates for HTTPS downloads"
    default: true
  api_url:
    type: string
    description: "Attune API URL for making registry lookups"
    default: "http://localhost:8080"
  # api_token was read by download_packs.sh (sent as an Authorization: Bearer
  # header) but was not declared here; declaring it with secret: true keeps it
  # out of logs.
  api_token:
    type: string
    description: "Bearer token for authenticating to the Attune API (optional)"
    secret: true

# Output schema: describes the JSON structure written to stdout
# Note: stdout/stderr/exit_code are captured automatically by the execution system
output_schema:
  downloaded_packs:
    type: array
    description: "List of successfully downloaded packs"
    items:
      type: object
      properties:
        source:
          type: string
          description: "Original pack source (URL or ref)"
        source_type:
          type: string
          description: "Type of source"
          enum:
            - git
            - http
            - registry
        pack_path:
          type: string
          description: "Local filesystem path to downloaded pack"
        pack_ref:
          type: string
          description: "Pack reference (from pack.yaml)"
        pack_version:
          type: string
          description: "Pack version (from pack.yaml)"
        git_commit:
          type: string
          description: "Git commit hash (for git sources)"
        checksum:
          type: string
          description: "Directory checksum"
  failed_packs:
    type: array
    description: "List of packs that failed to download"
    items:
      type: object
      properties:
        source:
          type: string
          description: "Pack source that failed"
        error:
          type: string
          description: "Error message"
  total_count:
    type: integer
    description: "Total number of packs requested"
  success_count:
    type: integer
    description: "Number of packs successfully downloaded"
  failure_count:
    type: integer
    description: "Number of packs that failed"

# Tags for categorization
tags:
  - pack
  - download
  - git
  - installation
  - registry
|
||||||
38
docker/distributable/packs/core/actions/echo.sh
Executable file
38
docker/distributable/packs/core/actions/echo.sh
Executable file
@@ -0,0 +1,38 @@
|
|||||||
|
#!/bin/sh
# Echo Action - Core Pack
# Outputs a message to stdout
#
# This script uses pure POSIX shell without external dependencies like jq or yq.
# It reads parameters in DOTENV format from stdin until EOF.

set -e

# Initialize message variable
message=""

# Read DOTENV-formatted parameters from stdin until EOF
while IFS= read -r line; do
    case "$line" in
        message=*)
            # Extract value after message=
            message="${line#message=}"
            # Remove surrounding quotes if present (both single and double)
            case "$message" in
                \"*\")
                    message="${message#\"}"
                    message="${message%\"}"
                    ;;
                \'*\')
                    message="${message#\'}"
                    message="${message%\'}"
                    ;;
            esac
            ;;
    esac
done

# Emit the message without a trailing newline (even if empty).
# printf is used instead of `echo -n`: POSIX echo has no -n option, so some
# /bin/sh implementations would literally print "-n" (and echo also mangles
# backslash escapes in the message on XSI systems).
printf '%s' "$message"

# Exit successfully
exit 0
|
||||||
35
docker/distributable/packs/core/actions/echo.yaml
Normal file
35
docker/distributable/packs/core/actions/echo.yaml
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
# Echo Action
# Outputs a message to stdout

ref: core.echo
label: "Echo"
description: "Echo a message to stdout"
enabled: true

# Runner type determines how the action is executed
runner_type: shell

# Entry point is the shell command or script to execute
entry_point: echo.sh

# Parameter delivery: stdin for secure parameter passing (no env vars)
parameter_delivery: stdin
parameter_format: dotenv

# Output format: text (no structured data parsing)
output_format: text

# Action parameters schema (StackStorm-style: inline required/secret per parameter)
parameters:
  message:
    type: string
    description: "Message to echo (empty string if not provided)"

# Output schema: not applicable for text output format
# The action outputs plain text to stdout

# Tags for categorization
tags:
  - utility
  - testing
  - debug
|
||||||
154
docker/distributable/packs/core/actions/get_pack_dependencies.sh
Executable file
154
docker/distributable/packs/core/actions/get_pack_dependencies.sh
Executable file
@@ -0,0 +1,154 @@
|
|||||||
|
#!/bin/sh
# Get Pack Dependencies Action - Core Pack
# API Wrapper for POST /api/v1/packs/dependencies
#
# This script uses pure POSIX shell without external dependencies like jq.
# It reads parameters in DOTENV format from stdin until EOF.

set -e

# Initialize variables (defaults match get_pack_dependencies.yaml)
pack_paths=""
skip_validation="false"
api_url="http://localhost:8080"
api_token=""

# Read DOTENV-formatted parameters from stdin until EOF
while IFS= read -r line; do
    [ -z "$line" ] && continue

    key="${line%%=*}"
    value="${line#*=}"

    # Remove surrounding quotes if present (both single and double)
    case "$value" in
        \"*\")
            value="${value#\"}"
            value="${value%\"}"
            ;;
        \'*\')
            value="${value#\'}"
            value="${value%\'}"
            ;;
    esac

    # Process parameters
    case "$key" in
        pack_paths) pack_paths="$value" ;;
        skip_validation) skip_validation="$value" ;;
        api_url) api_url="$value" ;;
        api_token) api_token="$value" ;;
    esac
done

# Validate required parameters
if [ -z "$pack_paths" ]; then
    printf '{"dependencies":[],"runtime_requirements":{},"missing_dependencies":[],"analyzed_packs":[],"errors":[{"pack_path":"input","error":"No pack paths provided"}]}\n'
    exit 1
fi

# Normalize boolean
case "$skip_validation" in
    true|True|TRUE|yes|Yes|YES|1) skip_validation="true" ;;
    *) skip_validation="false" ;;
esac

# Build JSON request body.
# NOTE: pack_paths is embedded unquoted and is expected to already be a JSON
# array (e.g. ["/opt/attune/packs/core"]). It is intentionally NOT run through
# the quote-escaping sed: escaping its quotes would corrupt the array into
# invalid JSON ([\"...\"]).
request_body=$(cat <<EOF
{
    "pack_paths": $pack_paths,
    "skip_validation": $skip_validation
}
EOF
)

# Create temp files for curl
temp_response=$(mktemp)
temp_headers=$(mktemp)

cleanup() {
    rm -f "$temp_response" "$temp_headers"
}
trap cleanup EXIT

# Make API call; a transport-level failure is reported as HTTP code 000
http_code=$(curl -X POST \
    -H "Content-Type: application/json" \
    -H "Accept: application/json" \
    ${api_token:+-H "Authorization: Bearer ${api_token}"} \
    -d "$request_body" \
    -s \
    -w "%{http_code}" \
    -o "$temp_response" \
    --max-time 60 \
    --connect-timeout 10 \
    "${api_url}/api/v1/packs/dependencies" 2>/dev/null || echo "000")

# Check HTTP status
if [ "$http_code" -ge 200 ] && [ "$http_code" -lt 300 ]; then
    # Success - extract data field from API response
    response_body=$(cat "$temp_response")

    # Try to extract the .data field using simple text processing.
    # Naive extraction that assumes well-formed JSON. [[:space:]] is used
    # instead of \s, which is a GNU sed extension unsupported by
    # POSIX/BusyBox/BSD sed.
    case "$response_body" in
        *'"data":'*)
            data_content=$(printf '%s' "$response_body" | sed -n 's/.*"data":[[:space:]]*\(.*\)}/\1/p')
            if [ -n "$data_content" ]; then
                printf '%s\n' "$data_content"
            else
                cat "$temp_response"
            fi
            ;;
        *)
            cat "$temp_response"
            ;;
    esac
    exit 0
else
    # Error response - try to extract an error or message field
    error_msg="API request failed"
    if [ -s "$temp_response" ]; then
        response_content=$(cat "$temp_response")
        case "$response_content" in
            *'"error":'*)
                error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"error":[[:space:]]*"\([^"]*\)".*/\1/p')
                [ -z "$error_msg" ] && error_msg="API request failed"
                ;;
            *'"message":'*)
                error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"message":[[:space:]]*"\([^"]*\)".*/\1/p')
                [ -z "$error_msg" ] && error_msg="API request failed"
                ;;
        esac
    fi

    # Escape error message for JSON
    error_msg_escaped=$(printf '%s' "$error_msg" | sed 's/\\/\\\\/g; s/"/\\"/g')

    cat <<EOF
{
    "dependencies": [],
    "runtime_requirements": {},
    "missing_dependencies": [],
    "analyzed_packs": [],
    "errors": [{
        "pack_path": "api",
        "error": "API call failed (HTTP $http_code): $error_msg_escaped"
    }]
}
EOF
    exit 1
fi
|
||||||
@@ -0,0 +1,137 @@
|
|||||||
|
# Get Pack Dependencies Action
# Parses pack.yaml files to identify pack and runtime dependencies

ref: core.get_pack_dependencies
label: "Get Pack Dependencies"
description: "Parse pack.yaml files to extract pack dependencies and runtime requirements"
enabled: true
runner_type: shell
entry_point: get_pack_dependencies.sh

# Parameter delivery: stdin for secure parameter passing (no env vars)
parameter_delivery: stdin
parameter_format: dotenv

# Output format: json (structured data parsing enabled)
output_format: json

# Action parameters schema (StackStorm-style with inline required/secret)
parameters:
  pack_paths:
    type: array
    description: "List of pack directory paths to analyze"
    items:
      type: string
    minItems: 1
    required: true
  skip_validation:
    type: boolean
    description: "Skip validation of pack.yaml schema"
    default: false
  api_url:
    type: string
    description: "Attune API URL for checking installed packs"
    default: "http://localhost:8080"
  # api_token was read by get_pack_dependencies.sh (sent as an
  # Authorization: Bearer header) but was not declared here; declaring it with
  # secret: true keeps it out of logs.
  api_token:
    type: string
    description: "Bearer token for authenticating to the Attune API (optional)"
    secret: true

# Output schema: describes the JSON structure written to stdout
# Note: stdout/stderr/exit_code are captured automatically by the execution system
output_schema:
  dependencies:
    type: array
    description: "List of pack dependencies that need to be installed"
    items:
      type: object
      properties:
        pack_ref:
          type: string
          description: "Pack reference (e.g., 'core', 'slack')"
        version_spec:
          type: string
          description: "Version specification (e.g., '>=1.0.0', '^2.1.0')"
        required_by:
          type: string
          description: "Pack that requires this dependency"
        already_installed:
          type: boolean
          description: "Whether this dependency is already installed"
  runtime_requirements:
    type: object
    description: "Runtime environment requirements by pack"
    additionalProperties:
      type: object
      properties:
        pack_ref:
          type: string
          description: "Pack reference"
        python:
          type: object
          description: "Python runtime requirements"
          properties:
            version:
              type: string
              description: "Python version requirement"
            requirements_file:
              type: string
              description: "Path to requirements.txt"
        nodejs:
          type: object
          description: "Node.js runtime requirements"
          properties:
            version:
              type: string
              description: "Node.js version requirement"
            package_file:
              type: string
              description: "Path to package.json"
  missing_dependencies:
    type: array
    description: "Pack dependencies that are not yet installed"
    items:
      type: object
      properties:
        pack_ref:
          type: string
          description: "Pack reference"
        version_spec:
          type: string
          description: "Version specification"
        required_by:
          type: string
          description: "Pack that requires this dependency"
  analyzed_packs:
    type: array
    description: "List of packs that were analyzed"
    items:
      type: object
      properties:
        pack_ref:
          type: string
          description: "Pack reference"
        pack_path:
          type: string
          description: "Path to pack directory"
        has_dependencies:
          type: boolean
          description: "Whether pack has dependencies"
        dependency_count:
          type: integer
          description: "Number of dependencies"
  errors:
    type: array
    description: "Errors encountered during analysis"
    items:
      type: object
      properties:
        pack_path:
          type: string
          description: "Pack path where error occurred"
        error:
          type: string
          description: "Error message"

# Tags for categorization
tags:
  - pack
  - dependencies
  - validation
  - installation
|
||||||
268
docker/distributable/packs/core/actions/http_request.sh
Executable file
268
docker/distributable/packs/core/actions/http_request.sh
Executable file
@@ -0,0 +1,268 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# HTTP Request Action - Core Pack
|
||||||
|
# Make HTTP requests to external APIs using curl
|
||||||
|
#
|
||||||
|
# This script uses pure POSIX shell without external dependencies like jq.
|
||||||
|
# It reads parameters in DOTENV format from stdin until EOF.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Initialize variables
|
||||||
|
url=""
|
||||||
|
method="GET"
|
||||||
|
body=""
|
||||||
|
json_body=""
|
||||||
|
timeout="30"
|
||||||
|
verify_ssl="true"
|
||||||
|
auth_type="none"
|
||||||
|
auth_username=""
|
||||||
|
auth_password=""
|
||||||
|
auth_token=""
|
||||||
|
follow_redirects="true"
|
||||||
|
max_redirects="10"
|
||||||
|
|
||||||
|
# Temporary files
|
||||||
|
headers_file=$(mktemp)
|
||||||
|
query_params_file=$(mktemp)
|
||||||
|
body_file=""
|
||||||
|
temp_headers=$(mktemp)
|
||||||
|
curl_output=$(mktemp)
|
||||||
|
write_out_file=$(mktemp)
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
local exit_code=$?
|
||||||
|
rm -f "$headers_file" "$query_params_file" "$temp_headers" "$curl_output" "$write_out_file"
|
||||||
|
[ -n "$body_file" ] && [ -f "$body_file" ] && rm -f "$body_file"
|
||||||
|
return "$exit_code"
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
# Read DOTENV-formatted parameters from stdin until EOF
|
||||||
|
while IFS= read -r line; do
|
||||||
|
[ -z "$line" ] && continue
|
||||||
|
|
||||||
|
key="${line%%=*}"
|
||||||
|
value="${line#*=}"
|
||||||
|
|
||||||
|
# Remove quotes
|
||||||
|
case "$value" in
|
||||||
|
\"*\") value="${value#\"}"; value="${value%\"}" ;;
|
||||||
|
\'*\') value="${value#\'}"; value="${value%\'}" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Process parameters
|
||||||
|
case "$key" in
|
||||||
|
url) url="$value" ;;
|
||||||
|
method) method="$value" ;;
|
||||||
|
body) body="$value" ;;
|
||||||
|
json_body) json_body="$value" ;;
|
||||||
|
timeout) timeout="$value" ;;
|
||||||
|
verify_ssl) verify_ssl="$value" ;;
|
||||||
|
auth_type) auth_type="$value" ;;
|
||||||
|
auth_username) auth_username="$value" ;;
|
||||||
|
auth_password) auth_password="$value" ;;
|
||||||
|
auth_token) auth_token="$value" ;;
|
||||||
|
follow_redirects) follow_redirects="$value" ;;
|
||||||
|
max_redirects) max_redirects="$value" ;;
|
||||||
|
headers.*)
|
||||||
|
printf '%s: %s\n' "${key#headers.}" "$value" >> "$headers_file"
|
||||||
|
;;
|
||||||
|
query_params.*)
|
||||||
|
printf '%s=%s\n' "${key#query_params.}" "$value" >> "$query_params_file"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Validate required
|
||||||
|
if [ -z "$url" ]; then
|
||||||
|
printf '{"status_code":0,"headers":{},"body":"","json":null,"elapsed_ms":0,"url":"","success":false,"error":"url parameter is required"}\n'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Normalize method
|
||||||
|
method=$(printf '%s' "$method" | tr '[:lower:]' '[:upper:]')
|
||||||
|
|
||||||
|
# URL encode helper
|
||||||
|
url_encode() {
|
||||||
|
printf '%s' "$1" | sed 's/ /%20/g; s/!/%21/g; s/"/%22/g; s/#/%23/g; s/\$/%24/g; s/&/%26/g; s/'\''/%27/g'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Build URL with query params
|
||||||
|
final_url="$url"
|
||||||
|
if [ -s "$query_params_file" ]; then
|
||||||
|
query_string=""
|
||||||
|
while IFS='=' read -r param_name param_value; do
|
||||||
|
[ -z "$param_name" ] && continue
|
||||||
|
encoded=$(url_encode "$param_value")
|
||||||
|
[ -z "$query_string" ] && query_string="${param_name}=${encoded}" || query_string="${query_string}&${param_name}=${encoded}"
|
||||||
|
done < "$query_params_file"
|
||||||
|
|
||||||
|
if [ -n "$query_string" ]; then
|
||||||
|
case "$final_url" in
|
||||||
|
*\?*) final_url="${final_url}&${query_string}" ;;
|
||||||
|
*) final_url="${final_url}?${query_string}" ;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Prepare body
|
||||||
|
if [ -n "$json_body" ]; then
|
||||||
|
body_file=$(mktemp)
|
||||||
|
printf '%s' "$json_body" > "$body_file"
|
||||||
|
elif [ -n "$body" ]; then
|
||||||
|
body_file=$(mktemp)
|
||||||
|
printf '%s' "$body" > "$body_file"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Build curl args file (avoid shell escaping issues)
|
||||||
|
curl_args=$(mktemp)
|
||||||
|
{
|
||||||
|
printf -- '-X\n%s\n' "$method"
|
||||||
|
printf -- '-s\n'
|
||||||
|
# Use @file for -w to avoid xargs escape interpretation issues
|
||||||
|
# curl's @file mode requires literal \n (two chars) not actual newlines
|
||||||
|
printf '\\n%%{http_code}\\n%%{url_effective}\\n' > "$write_out_file"
|
||||||
|
printf -- '-w\n@%s\n' "$write_out_file"
|
||||||
|
printf -- '--max-time\n%s\n' "$timeout"
|
||||||
|
printf -- '--connect-timeout\n10\n'
|
||||||
|
printf -- '--dump-header\n%s\n' "$temp_headers"
|
||||||
|
|
||||||
|
[ "$verify_ssl" = "false" ] && printf -- '-k\n'
|
||||||
|
|
||||||
|
if [ "$follow_redirects" = "true" ]; then
|
||||||
|
printf -- '-L\n'
|
||||||
|
printf -- '--max-redirs\n%s\n' "$max_redirects"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -s "$headers_file" ]; then
|
||||||
|
while IFS= read -r h; do
|
||||||
|
[ -n "$h" ] && printf -- '-H\n%s\n' "$h"
|
||||||
|
done < "$headers_file"
|
||||||
|
fi
|
||||||
|
|
||||||
|
case "$auth_type" in
|
||||||
|
basic)
|
||||||
|
[ -n "$auth_username" ] && printf -- '-u\n%s:%s\n' "$auth_username" "$auth_password"
|
||||||
|
;;
|
||||||
|
bearer)
|
||||||
|
[ -n "$auth_token" ] && printf -- '-H\nAuthorization: Bearer %s\n' "$auth_token"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
if [ -n "$body_file" ] && [ -f "$body_file" ]; then
|
||||||
|
[ -n "$json_body" ] && printf -- '-H\nContent-Type: application/json\n'
|
||||||
|
printf -- '-d\n@%s\n' "$body_file"
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf -- '%s\n' "$final_url"
|
||||||
|
} > "$curl_args"
|
||||||
|
|
||||||
|
# Execute curl
|
||||||
|
start_time=$(date +%s%3N 2>/dev/null || echo $(($(date +%s) * 1000)))
|
||||||
|
|
||||||
|
set +e
|
||||||
|
xargs -a "$curl_args" curl > "$curl_output" 2>&1
|
||||||
|
curl_exit_code=$?
|
||||||
|
set -e
|
||||||
|
|
||||||
|
rm -f "$curl_args"
|
||||||
|
|
||||||
|
end_time=$(date +%s%3N 2>/dev/null || echo $(($(date +%s) * 1000)))
|
||||||
|
elapsed_ms=$((end_time - start_time))
|
||||||
|
|
||||||
|
# Parse output
|
||||||
|
response=$(cat "$curl_output")
|
||||||
|
total_lines=$(printf '%s\n' "$response" | wc -l)
|
||||||
|
body_lines=$((total_lines - 2))
|
||||||
|
|
||||||
|
if [ "$body_lines" -gt 0 ]; then
|
||||||
|
body_output=$(printf '%s\n' "$response" | head -n "$body_lines")
|
||||||
|
else
|
||||||
|
body_output=""
|
||||||
|
fi
|
||||||
|
|
||||||
|
http_code=$(printf '%s\n' "$response" | tail -n 2 | head -n 1 | tr -d '\r\n ')
|
||||||
|
effective_url=$(printf '%s\n' "$response" | tail -n 1 | tr -d '\r\n')
|
||||||
|
|
||||||
|
case "$http_code" in
|
||||||
|
''|*[!0-9]*) http_code=0 ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Handle errors
|
||||||
|
if [ "$curl_exit_code" -ne 0 ]; then
|
||||||
|
error_msg="curl error code $curl_exit_code"
|
||||||
|
case $curl_exit_code in
|
||||||
|
6) error_msg="Could not resolve host" ;;
|
||||||
|
7) error_msg="Failed to connect to host" ;;
|
||||||
|
28) error_msg="Request timeout" ;;
|
||||||
|
35) error_msg="SSL/TLS connection error" ;;
|
||||||
|
52) error_msg="Empty reply from server" ;;
|
||||||
|
56) error_msg="Failure receiving network data" ;;
|
||||||
|
esac
|
||||||
|
error_msg=$(printf '%s' "$error_msg" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
printf '{"status_code":0,"headers":{},"body":"","json":null,"elapsed_ms":%d,"url":"%s","success":false,"error":"%s"}\n' \
|
||||||
|
"$elapsed_ms" "$final_url" "$error_msg"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Parse headers
|
||||||
|
headers_json="{"
|
||||||
|
first_header=true
|
||||||
|
if [ -f "$temp_headers" ]; then
|
||||||
|
while IFS= read -r line; do
|
||||||
|
case "$line" in HTTP/*|'') continue ;; esac
|
||||||
|
|
||||||
|
header_name="${line%%:*}"
|
||||||
|
header_value="${line#*:}"
|
||||||
|
[ "$header_name" = "$line" ] && continue
|
||||||
|
|
||||||
|
header_value=$(printf '%s' "$header_value" | sed 's/^ *//; s/ *$//; s/\r$//; s/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
header_name=$(printf '%s' "$header_name" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
|
||||||
|
if [ "$first_header" = true ]; then
|
||||||
|
headers_json="${headers_json}\"${header_name}\":\"${header_value}\""
|
||||||
|
first_header=false
|
||||||
|
else
|
||||||
|
headers_json="${headers_json},\"${header_name}\":\"${header_value}\""
|
||||||
|
fi
|
||||||
|
done < "$temp_headers"
|
||||||
|
fi
|
||||||
|
headers_json="${headers_json}}"
|
||||||
|
|
||||||
|
# Success check
|
||||||
|
success="false"
|
||||||
|
[ "$http_code" -ge 200 ] && [ "$http_code" -lt 300 ] && success="true"
|
||||||
|
|
||||||
|
# Escape body
|
||||||
|
body_escaped=$(printf '%s' "$body_output" | sed 's/\\/\\\\/g; s/"/\\"/g; s/ /\\t/g' | awk '{printf "%s\\n", $0}' | sed 's/\\n$//')
|
||||||
|
|
||||||
|
# Detect JSON
|
||||||
|
json_parsed="null"
|
||||||
|
if [ -n "$body_output" ]; then
|
||||||
|
first_char=$(printf '%s' "$body_output" | sed 's/^[[:space:]]*//' | head -c 1)
|
||||||
|
last_char=$(printf '%s' "$body_output" | sed 's/[[:space:]]*$//' | tail -c 1)
|
||||||
|
case "$first_char" in
|
||||||
|
'{'|'[')
|
||||||
|
case "$last_char" in
|
||||||
|
'}'|']')
|
||||||
|
# Compact multi-line JSON to single line to avoid breaking
|
||||||
|
# the worker's last-line JSON parser. In valid JSON, literal
|
||||||
|
# newlines only appear as whitespace outside strings (inside
|
||||||
|
# strings they must be escaped as \n), so tr is safe here.
|
||||||
|
json_parsed=$(printf '%s' "$body_output" | tr '\n' ' ' | tr '\r' ' ')
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Output
|
||||||
|
if [ "$json_parsed" = "null" ]; then
|
||||||
|
printf '{"status_code":%d,"headers":%s,"body":"%s","json":null,"elapsed_ms":%d,"url":"%s","success":%s}\n' \
|
||||||
|
"$http_code" "$headers_json" "$body_escaped" "$elapsed_ms" "$effective_url" "$success"
|
||||||
|
else
|
||||||
|
printf '{"status_code":%d,"headers":%s,"body":"%s","json":%s,"elapsed_ms":%d,"url":"%s","success":%s}\n' \
|
||||||
|
"$http_code" "$headers_json" "$body_escaped" "$json_parsed" "$elapsed_ms" "$effective_url" "$success"
|
||||||
|
fi
|
||||||
|
|
||||||
|
exit 0
|
||||||
126
docker/distributable/packs/core/actions/http_request.yaml
Normal file
126
docker/distributable/packs/core/actions/http_request.yaml
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
# HTTP Request Action
|
||||||
|
# Make HTTP requests to external APIs
|
||||||
|
|
||||||
|
ref: core.http_request
|
||||||
|
label: "HTTP Request"
|
||||||
|
description: "Make HTTP requests to external APIs with support for various methods, headers, and authentication"
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
# Runner type determines how the action is executed
|
||||||
|
runner_type: shell
|
||||||
|
|
||||||
|
# Entry point is the bash script to execute
|
||||||
|
entry_point: http_request.sh
|
||||||
|
|
||||||
|
# Parameter delivery configuration (for security)
|
||||||
|
# Use stdin + DOTENV for secure parameter passing (credentials won't appear in process list)
|
||||||
|
parameter_delivery: stdin
|
||||||
|
parameter_format: dotenv
|
||||||
|
|
||||||
|
# Output format: json (structured data parsing enabled)
|
||||||
|
output_format: json
|
||||||
|
|
||||||
|
# Action parameters schema (StackStorm-style with inline required/secret)
|
||||||
|
parameters:
|
||||||
|
url:
|
||||||
|
type: string
|
||||||
|
description: "URL to send the request to"
|
||||||
|
required: true
|
||||||
|
method:
|
||||||
|
type: string
|
||||||
|
description: "HTTP method to use"
|
||||||
|
default: "GET"
|
||||||
|
enum:
|
||||||
|
- GET
|
||||||
|
- POST
|
||||||
|
- PUT
|
||||||
|
- PATCH
|
||||||
|
- DELETE
|
||||||
|
- HEAD
|
||||||
|
- OPTIONS
|
||||||
|
headers:
|
||||||
|
type: object
|
||||||
|
description: "HTTP headers to include in the request"
|
||||||
|
default: {}
|
||||||
|
body:
|
||||||
|
type: string
|
||||||
|
description: "Request body (for POST, PUT, PATCH methods)"
|
||||||
|
json_body:
|
||||||
|
type: object
|
||||||
|
description: "JSON request body (alternative to body parameter)"
|
||||||
|
query_params:
|
||||||
|
type: object
|
||||||
|
description: "URL query parameters as key-value pairs"
|
||||||
|
default: {}
|
||||||
|
timeout:
|
||||||
|
type: integer
|
||||||
|
description: "Request timeout in seconds"
|
||||||
|
default: 30
|
||||||
|
minimum: 1
|
||||||
|
maximum: 300
|
||||||
|
verify_ssl:
|
||||||
|
type: boolean
|
||||||
|
description: "Verify SSL certificates"
|
||||||
|
default: true
|
||||||
|
auth_type:
|
||||||
|
type: string
|
||||||
|
description: "Authentication type"
|
||||||
|
enum:
|
||||||
|
- none
|
||||||
|
- basic
|
||||||
|
- bearer
|
||||||
|
auth_username:
|
||||||
|
type: string
|
||||||
|
description: "Username for basic authentication"
|
||||||
|
auth_password:
|
||||||
|
type: string
|
||||||
|
description: "Password for basic authentication"
|
||||||
|
secret: true
|
||||||
|
auth_token:
|
||||||
|
type: string
|
||||||
|
description: "Bearer token for bearer authentication"
|
||||||
|
secret: true
|
||||||
|
follow_redirects:
|
||||||
|
type: boolean
|
||||||
|
description: "Follow HTTP redirects"
|
||||||
|
default: true
|
||||||
|
max_redirects:
|
||||||
|
type: integer
|
||||||
|
description: "Maximum number of redirects to follow"
|
||||||
|
default: 10
|
||||||
|
|
||||||
|
# Output schema: describes the JSON structure written to stdout
|
||||||
|
# Note: stdout/stderr/exit_code are captured automatically by the execution system
|
||||||
|
output_schema:
|
||||||
|
status_code:
|
||||||
|
type: integer
|
||||||
|
description: "HTTP status code"
|
||||||
|
headers:
|
||||||
|
type: object
|
||||||
|
description: "Response headers"
|
||||||
|
body:
|
||||||
|
type: string
|
||||||
|
description: "Response body as text"
|
||||||
|
json:
|
||||||
|
type: object
|
||||||
|
description: "Parsed JSON response (if applicable, null otherwise)"
|
||||||
|
elapsed_ms:
|
||||||
|
type: integer
|
||||||
|
description: "Request duration in milliseconds"
|
||||||
|
url:
|
||||||
|
type: string
|
||||||
|
description: "Final URL after redirects"
|
||||||
|
success:
|
||||||
|
type: boolean
|
||||||
|
description: "Whether the request was successful (2xx status code)"
|
||||||
|
error:
|
||||||
|
type: string
|
||||||
|
description: "Error message if request failed (only present on failure)"
|
||||||
|
|
||||||
|
# Tags for categorization
|
||||||
|
tags:
|
||||||
|
- http
|
||||||
|
- api
|
||||||
|
- web
|
||||||
|
- utility
|
||||||
|
- integration
|
||||||
73
docker/distributable/packs/core/actions/noop.sh
Executable file
73
docker/distributable/packs/core/actions/noop.sh
Executable file
@@ -0,0 +1,73 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# No Operation Action - Core Pack
|
||||||
|
# Does nothing - useful for testing and placeholder workflows
|
||||||
|
#
|
||||||
|
# This script uses pure POSIX shell without external dependencies like jq or yq.
|
||||||
|
# It reads parameters in DOTENV format from stdin until EOF.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Initialize variables
|
||||||
|
message=""
|
||||||
|
exit_code="0"
|
||||||
|
|
||||||
|
# Read DOTENV-formatted parameters from stdin until EOF
|
||||||
|
while IFS= read -r line; do
|
||||||
|
case "$line" in
|
||||||
|
message=*)
|
||||||
|
# Extract value after message=
|
||||||
|
message="${line#message=}"
|
||||||
|
# Remove quotes if present (both single and double)
|
||||||
|
case "$message" in
|
||||||
|
\"*\")
|
||||||
|
message="${message#\"}"
|
||||||
|
message="${message%\"}"
|
||||||
|
;;
|
||||||
|
\'*\')
|
||||||
|
message="${message#\'}"
|
||||||
|
message="${message%\'}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
exit_code=*)
|
||||||
|
# Extract value after exit_code=
|
||||||
|
exit_code="${line#exit_code=}"
|
||||||
|
# Remove quotes if present
|
||||||
|
case "$exit_code" in
|
||||||
|
\"*\")
|
||||||
|
exit_code="${exit_code#\"}"
|
||||||
|
exit_code="${exit_code%\"}"
|
||||||
|
;;
|
||||||
|
\'*\')
|
||||||
|
exit_code="${exit_code#\'}"
|
||||||
|
exit_code="${exit_code%\'}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Validate exit code parameter (must be numeric)
|
||||||
|
case "$exit_code" in
|
||||||
|
''|*[!0-9]*)
|
||||||
|
echo "ERROR: exit_code must be a positive integer" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Validate exit code range (0-255)
|
||||||
|
if [ "$exit_code" -lt 0 ] || [ "$exit_code" -gt 255 ]; then
|
||||||
|
echo "ERROR: exit_code must be between 0 and 255" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Log message if provided
|
||||||
|
if [ -n "$message" ]; then
|
||||||
|
echo "[NOOP] $message"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Output result
|
||||||
|
echo "No operation completed successfully"
|
||||||
|
|
||||||
|
# Exit with specified code
|
||||||
|
exit "$exit_code"
|
||||||
42
docker/distributable/packs/core/actions/noop.yaml
Normal file
42
docker/distributable/packs/core/actions/noop.yaml
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
# No Operation Action
|
||||||
|
# Does nothing - useful for testing and placeholder workflows
|
||||||
|
|
||||||
|
ref: core.noop
|
||||||
|
label: "No-Op"
|
||||||
|
description: "Does nothing - useful for testing and placeholder workflows"
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
# Runner type determines how the action is executed
|
||||||
|
runner_type: shell
|
||||||
|
|
||||||
|
# Entry point is the shell command or script to execute
|
||||||
|
entry_point: noop.sh
|
||||||
|
|
||||||
|
# Parameter delivery: stdin for secure parameter passing (no env vars)
|
||||||
|
parameter_delivery: stdin
|
||||||
|
parameter_format: dotenv
|
||||||
|
|
||||||
|
# Output format: text (no structured data parsing)
|
||||||
|
output_format: text
|
||||||
|
|
||||||
|
# Action parameters schema (StackStorm-style inline format)
|
||||||
|
parameters:
|
||||||
|
message:
|
||||||
|
type: string
|
||||||
|
description: "Optional message to log (for debugging)"
|
||||||
|
exit_code:
|
||||||
|
type: integer
|
||||||
|
description: "Exit code to return (default: 0 for success)"
|
||||||
|
default: 0
|
||||||
|
minimum: 0
|
||||||
|
maximum: 255
|
||||||
|
|
||||||
|
# Output schema: not applicable for text output format
|
||||||
|
# The action outputs plain text to stdout
|
||||||
|
|
||||||
|
# Tags for categorization
|
||||||
|
tags:
|
||||||
|
- utility
|
||||||
|
- testing
|
||||||
|
- placeholder
|
||||||
|
- noop
|
||||||
187
docker/distributable/packs/core/actions/register_packs.sh
Executable file
187
docker/distributable/packs/core/actions/register_packs.sh
Executable file
@@ -0,0 +1,187 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# Register Packs Action - Core Pack
|
||||||
|
# API Wrapper for POST /api/v1/packs/register-batch
|
||||||
|
#
|
||||||
|
# This script uses pure POSIX shell without external dependencies like jq.
|
||||||
|
# It reads parameters in DOTENV format from stdin until EOF.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Initialize variables
|
||||||
|
pack_paths=""
|
||||||
|
packs_base_dir="/opt/attune/packs"
|
||||||
|
skip_validation="false"
|
||||||
|
skip_tests="false"
|
||||||
|
force="false"
|
||||||
|
api_url="http://localhost:8080"
|
||||||
|
api_token=""
|
||||||
|
|
||||||
|
# Read DOTENV-formatted parameters from stdin until EOF
|
||||||
|
while IFS= read -r line; do
|
||||||
|
[ -z "$line" ] && continue
|
||||||
|
|
||||||
|
key="${line%%=*}"
|
||||||
|
value="${line#*=}"
|
||||||
|
|
||||||
|
# Remove quotes if present (both single and double)
|
||||||
|
case "$value" in
|
||||||
|
\"*\")
|
||||||
|
value="${value#\"}"
|
||||||
|
value="${value%\"}"
|
||||||
|
;;
|
||||||
|
\'*\')
|
||||||
|
value="${value#\'}"
|
||||||
|
value="${value%\'}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Process parameters
|
||||||
|
case "$key" in
|
||||||
|
pack_paths)
|
||||||
|
pack_paths="$value"
|
||||||
|
;;
|
||||||
|
packs_base_dir)
|
||||||
|
packs_base_dir="$value"
|
||||||
|
;;
|
||||||
|
skip_validation)
|
||||||
|
skip_validation="$value"
|
||||||
|
;;
|
||||||
|
skip_tests)
|
||||||
|
skip_tests="$value"
|
||||||
|
;;
|
||||||
|
force)
|
||||||
|
force="$value"
|
||||||
|
;;
|
||||||
|
api_url)
|
||||||
|
api_url="$value"
|
||||||
|
;;
|
||||||
|
api_token)
|
||||||
|
api_token="$value"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Validate required parameters
|
||||||
|
if [ -z "$pack_paths" ]; then
|
||||||
|
printf '{"registered_packs":[],"failed_packs":[{"pack_ref":"input","pack_path":"","error":"No pack paths provided","error_stage":"input_validation"}],"summary":{"total_packs":0,"success_count":0,"failure_count":1,"total_components":0,"duration_ms":0}}\n'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Normalize booleans
|
||||||
|
case "$skip_validation" in
|
||||||
|
true|True|TRUE|yes|Yes|YES|1) skip_validation="true" ;;
|
||||||
|
*) skip_validation="false" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
case "$skip_tests" in
|
||||||
|
true|True|TRUE|yes|Yes|YES|1) skip_tests="true" ;;
|
||||||
|
*) skip_tests="false" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
case "$force" in
|
||||||
|
true|True|TRUE|yes|Yes|YES|1) force="true" ;;
|
||||||
|
*) force="false" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Escape values for JSON
|
||||||
|
pack_paths_escaped=$(printf '%s' "$pack_paths" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
packs_base_dir_escaped=$(printf '%s' "$packs_base_dir" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
|
||||||
|
# Build JSON request body
|
||||||
|
request_body=$(cat <<EOF
|
||||||
|
{
|
||||||
|
"pack_paths": $pack_paths_escaped,
|
||||||
|
"packs_base_dir": "$packs_base_dir_escaped",
|
||||||
|
"skip_validation": $skip_validation,
|
||||||
|
"skip_tests": $skip_tests,
|
||||||
|
"force": $force
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create temp files for curl
|
||||||
|
temp_response=$(mktemp)
|
||||||
|
temp_headers=$(mktemp)
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
rm -f "$temp_response" "$temp_headers"
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
# Make API call
|
||||||
|
http_code=$(curl -X POST \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "Accept: application/json" \
|
||||||
|
${api_token:+-H "Authorization: Bearer ${api_token}"} \
|
||||||
|
-d "$request_body" \
|
||||||
|
-s \
|
||||||
|
-w "%{http_code}" \
|
||||||
|
-o "$temp_response" \
|
||||||
|
--max-time 300 \
|
||||||
|
--connect-timeout 10 \
|
||||||
|
"${api_url}/api/v1/packs/register-batch" 2>/dev/null || echo "000")
|
||||||
|
|
||||||
|
# Check HTTP status
|
||||||
|
if [ "$http_code" -ge 200 ] && [ "$http_code" -lt 300 ]; then
|
||||||
|
# Success - extract data field from API response
|
||||||
|
response_body=$(cat "$temp_response")
|
||||||
|
|
||||||
|
# Try to extract .data field using simple text processing
|
||||||
|
# If response contains "data" field, extract it; otherwise use whole response
|
||||||
|
case "$response_body" in
|
||||||
|
*'"data":'*)
|
||||||
|
# Extract content after "data": up to the closing brace
|
||||||
|
# This is a simple extraction - assumes well-formed JSON
|
||||||
|
data_content=$(printf '%s' "$response_body" | sed -n 's/.*"data":\s*\(.*\)}/\1/p')
|
||||||
|
if [ -n "$data_content" ]; then
|
||||||
|
printf '%s\n' "$data_content"
|
||||||
|
else
|
||||||
|
cat "$temp_response"
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
cat "$temp_response"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
# Error response - try to extract error message
|
||||||
|
error_msg="API request failed"
|
||||||
|
if [ -s "$temp_response" ]; then
|
||||||
|
# Try to extract error or message field
|
||||||
|
response_content=$(cat "$temp_response")
|
||||||
|
case "$response_content" in
|
||||||
|
*'"error":'*)
|
||||||
|
error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"error":\s*"\([^"]*\)".*/\1/p')
|
||||||
|
[ -z "$error_msg" ] && error_msg="API request failed"
|
||||||
|
;;
|
||||||
|
*'"message":'*)
|
||||||
|
error_msg=$(printf '%s' "$response_content" | sed -n 's/.*"message":\s*"\([^"]*\)".*/\1/p')
|
||||||
|
[ -z "$error_msg" ] && error_msg="API request failed"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Escape error message for JSON
|
||||||
|
error_msg_escaped=$(printf '%s' "$error_msg" | sed 's/\\/\\\\/g; s/"/\\"/g')
|
||||||
|
|
||||||
|
cat <<EOF
|
||||||
|
{
|
||||||
|
"registered_packs": [],
|
||||||
|
"failed_packs": [{
|
||||||
|
"pack_ref": "api",
|
||||||
|
"pack_path": "",
|
||||||
|
"error": "API call failed (HTTP $http_code): $error_msg_escaped",
|
||||||
|
"error_stage": "api_call"
|
||||||
|
}],
|
||||||
|
"summary": {
|
||||||
|
"total_packs": 0,
|
||||||
|
"success_count": 0,
|
||||||
|
"failure_count": 1,
|
||||||
|
"total_components": 0,
|
||||||
|
"duration_ms": 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
187
docker/distributable/packs/core/actions/register_packs.yaml
Normal file
187
docker/distributable/packs/core/actions/register_packs.yaml
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
# Register Packs Action
|
||||||
|
# Validates pack structure and loads components into database
|
||||||
|
|
||||||
|
ref: core.register_packs
|
||||||
|
label: "Register Packs"
|
||||||
|
description: "Register packs by validating schemas, loading components into database, and copying to permanent storage"
|
||||||
|
enabled: true
|
||||||
|
runner_type: shell
|
||||||
|
entry_point: register_packs.sh
|
||||||
|
|
||||||
|
# Parameter delivery: stdin for secure parameter passing (no env vars)
|
||||||
|
parameter_delivery: stdin
|
||||||
|
parameter_format: dotenv
|
||||||
|
|
||||||
|
# Output format: json (structured data parsing enabled)
|
||||||
|
output_format: json
|
||||||
|
|
||||||
|
# Action parameters schema (StackStorm-style with inline required/secret)
|
||||||
|
parameters:
|
||||||
|
pack_paths:
|
||||||
|
type: array
|
||||||
|
description: "List of pack directory paths to register"
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
minItems: 1
|
||||||
|
required: true
|
||||||
|
packs_base_dir:
|
||||||
|
type: string
|
||||||
|
description: "Base directory where packs are permanently stored"
|
||||||
|
default: "/opt/attune/packs"
|
||||||
|
skip_validation:
|
||||||
|
type: boolean
|
||||||
|
description: "Skip schema validation of pack components"
|
||||||
|
default: false
|
||||||
|
skip_tests:
|
||||||
|
type: boolean
|
||||||
|
description: "Skip running pack tests before registration"
|
||||||
|
default: false
|
||||||
|
force:
|
||||||
|
type: boolean
|
||||||
|
description: "Force registration even if pack already exists (will replace)"
|
||||||
|
default: false
|
||||||
|
api_url:
|
||||||
|
type: string
|
||||||
|
description: "Attune API URL for registration calls"
|
||||||
|
default: "http://localhost:8080"
|
||||||
|
api_token:
|
||||||
|
type: string
|
||||||
|
description: "API authentication token"
|
||||||
|
secret: true
|
||||||
|
|
||||||
|
# Output schema: describes the JSON structure written to stdout
|
||||||
|
# Note: stdout/stderr/exit_code are captured automatically by the execution system
|
||||||
|
output_schema:
|
||||||
|
registered_packs:
|
||||||
|
type: array
|
||||||
|
description: "List of successfully registered packs"
|
||||||
|
items:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
pack_ref:
|
||||||
|
type: string
|
||||||
|
description: "Pack reference"
|
||||||
|
pack_id:
|
||||||
|
type: integer
|
||||||
|
description: "Database ID of registered pack"
|
||||||
|
pack_version:
|
||||||
|
type: string
|
||||||
|
description: "Pack version"
|
||||||
|
storage_path:
|
||||||
|
type: string
|
||||||
|
description: "Permanent storage path"
|
||||||
|
components_registered:
|
||||||
|
type: object
|
||||||
|
description: "Count of registered components by type"
|
||||||
|
properties:
|
||||||
|
actions:
|
||||||
|
type: integer
|
||||||
|
description: "Number of actions registered"
|
||||||
|
sensors:
|
||||||
|
type: integer
|
||||||
|
description: "Number of sensors registered"
|
||||||
|
triggers:
|
||||||
|
type: integer
|
||||||
|
description: "Number of triggers registered"
|
||||||
|
rules:
|
||||||
|
type: integer
|
||||||
|
description: "Number of rules registered"
|
||||||
|
workflows:
|
||||||
|
type: integer
|
||||||
|
description: "Number of workflows registered"
|
||||||
|
policies:
|
||||||
|
type: integer
|
||||||
|
description: "Number of policies registered"
|
||||||
|
test_result:
|
||||||
|
type: object
|
||||||
|
description: "Pack test results (if tests were run)"
|
||||||
|
properties:
|
||||||
|
status:
|
||||||
|
type: string
|
||||||
|
description: "Test status"
|
||||||
|
enum:
|
||||||
|
- passed
|
||||||
|
- failed
|
||||||
|
- skipped
|
||||||
|
total_tests:
|
||||||
|
type: integer
|
||||||
|
description: "Total number of tests"
|
||||||
|
passed:
|
||||||
|
type: integer
|
||||||
|
description: "Number of passed tests"
|
||||||
|
failed:
|
||||||
|
type: integer
|
||||||
|
description: "Number of failed tests"
|
||||||
|
validation_results:
|
||||||
|
type: object
|
||||||
|
description: "Component validation results"
|
||||||
|
properties:
|
||||||
|
valid:
|
||||||
|
type: boolean
|
||||||
|
description: "Whether all components are valid"
|
||||||
|
errors:
|
||||||
|
type: array
|
||||||
|
description: "Validation errors found"
|
||||||
|
items:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
component_type:
|
||||||
|
type: string
|
||||||
|
description: "Type of component"
|
||||||
|
component_file:
|
||||||
|
type: string
|
||||||
|
description: "File with validation error"
|
||||||
|
error:
|
||||||
|
type: string
|
||||||
|
description: "Error message"
|
||||||
|
failed_packs:
|
||||||
|
type: array
|
||||||
|
description: "List of packs that failed to register"
|
||||||
|
items:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
pack_ref:
|
||||||
|
type: string
|
||||||
|
description: "Pack reference"
|
||||||
|
pack_path:
|
||||||
|
type: string
|
||||||
|
description: "Pack directory path"
|
||||||
|
error:
|
||||||
|
type: string
|
||||||
|
description: "Error message"
|
||||||
|
error_stage:
|
||||||
|
type: string
|
||||||
|
description: "Stage where error occurred"
|
||||||
|
enum:
|
||||||
|
- validation
|
||||||
|
- testing
|
||||||
|
- database_registration
|
||||||
|
- file_copy
|
||||||
|
- api_call
|
||||||
|
summary:
|
||||||
|
type: object
|
||||||
|
description: "Summary of registration process"
|
||||||
|
properties:
|
||||||
|
total_packs:
|
||||||
|
type: integer
|
||||||
|
description: "Total number of packs processed"
|
||||||
|
success_count:
|
||||||
|
type: integer
|
||||||
|
description: "Number of successfully registered packs"
|
||||||
|
failure_count:
|
||||||
|
type: integer
|
||||||
|
description: "Number of failed registrations"
|
||||||
|
total_components:
|
||||||
|
type: integer
|
||||||
|
description: "Total number of components registered"
|
||||||
|
duration_ms:
|
||||||
|
type: integer
|
||||||
|
description: "Total registration time in milliseconds"
|
||||||
|
|
||||||
|
# Tags for categorization
|
||||||
|
tags:
|
||||||
|
- pack
|
||||||
|
- registration
|
||||||
|
- validation
|
||||||
|
- installation
|
||||||
|
- database
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user