30 Commits

Author SHA1 Message Date
f93e9229d2 ha executor
Some checks failed
CI / Rustfmt (pull_request) Successful in 19s
CI / Cargo Audit & Deny (pull_request) Successful in 33s
CI / Security Blocking Checks (pull_request) Successful in 5s
CI / Web Blocking Checks (pull_request) Successful in 49s
CI / Web Advisory Checks (pull_request) Successful in 33s
CI / Clippy (pull_request) Has been cancelled
CI / Security Advisory Checks (pull_request) Has been cancelled
CI / Tests (pull_request) Has been cancelled
2026-04-02 17:15:59 -05:00
8e91440f23 [WIP] making executor ha 2026-04-02 11:33:26 -05:00
8278030699 fixing tests, making clippy happy
Some checks failed
CI / Rustfmt (push) Successful in 19s
CI / Cargo Audit & Deny (push) Successful in 33s
CI / Security Blocking Checks (push) Successful in 5s
CI / Web Advisory Checks (push) Successful in 28s
CI / Web Blocking Checks (push) Successful in 52s
Publish Images / Resolve Publish Metadata (push) Successful in 0s
CI / Security Advisory Checks (push) Successful in 23s
CI / Clippy (push) Successful in 2m4s
Publish Images / Publish Docker Dist Bundle (push) Successful in 4s
Publish Images / Publish web (amd64) (push) Successful in 45s
Publish Images / Publish web (arm64) (push) Successful in 3m32s
CI / Tests (push) Failing after 8m25s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m12s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m39s
Publish Images / Publish agent (amd64) (push) Successful in 26s
Publish Images / Publish executor (amd64) (push) Successful in 40s
Publish Images / Publish api (amd64) (push) Successful in 30s
Publish Images / Publish notifier (amd64) (push) Successful in 41s
Publish Images / Publish agent (arm64) (push) Successful in 52s
Publish Images / Publish api (arm64) (push) Successful in 1m56s
Publish Images / Publish executor (arm64) (push) Successful in 1m57s
Publish Images / Publish notifier (arm64) (push) Successful in 1m50s
Publish Images / Publish manifest attune/agent (push) Successful in 15s
Publish Images / Publish manifest attune/api (push) Failing after 30s
Publish Images / Publish manifest attune/executor (push) Successful in 42s
Publish Images / Publish manifest attune/web (push) Failing after 17s
Publish Images / Publish manifest attune/notifier (push) Failing after 14m44s
2026-04-02 09:17:21 -05:00
b34617ded1 npm audit fix
Some checks failed
CI / Rustfmt (push) Successful in 19s
CI / Cargo Audit & Deny (push) Successful in 33s
CI / Security Blocking Checks (push) Successful in 5s
CI / Web Blocking Checks (push) Successful in 53s
CI / Web Advisory Checks (push) Successful in 34s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Security Advisory Checks (push) Successful in 25s
CI / Clippy (push) Failing after 1m46s
Publish Images / Publish Docker Dist Bundle (push) Successful in 4s
Publish Images / Publish web (amd64) (push) Successful in 44s
Publish Images / Publish web (arm64) (push) Successful in 3m21s
CI / Tests (push) Failing after 6m7s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m13s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m39s
Publish Images / Publish agent (amd64) (push) Successful in 21s
Publish Images / Publish executor (amd64) (push) Failing after 45s
Publish Images / Publish api (amd64) (push) Failing after 45s
Publish Images / Publish notifier (amd64) (push) Failing after 43s
Publish Images / Publish agent (arm64) (push) Successful in 59s
Publish Images / Publish executor (arm64) (push) Successful in 1m52s
Publish Images / Publish api (arm64) (push) Successful in 1m58s
Publish Images / Publish notifier (arm64) (push) Successful in 1m52s
Publish Images / Publish manifest attune/agent (push) Has been skipped
Publish Images / Publish manifest attune/api (push) Has been skipped
Publish Images / Publish manifest attune/executor (push) Has been skipped
Publish Images / Publish manifest attune/notifier (push) Has been skipped
Publish Images / Publish manifest attune/web (push) Has been skipped
2026-04-02 08:06:55 -05:00
b6446cc574 queueing fixes 2026-04-02 08:06:02 -05:00
cf82de87ea removing useless root-level package.json
Some checks failed
CI / Rustfmt (push) Successful in 19s
CI / Cargo Audit & Deny (push) Successful in 33s
CI / Security Blocking Checks (push) Successful in 5s
CI / Web Blocking Checks (push) Successful in 50s
CI / Web Advisory Checks (push) Successful in 34s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Security Advisory Checks (push) Successful in 25s
CI / Clippy (push) Failing after 1m41s
Publish Images / Publish Docker Dist Bundle (push) Successful in 4s
Publish Images / Publish web (amd64) (push) Successful in 43s
Publish Images / Publish web (arm64) (push) Successful in 3m17s
CI / Tests (push) Failing after 6m0s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m17s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m40s
Publish Images / Publish agent (amd64) (push) Successful in 26s
Publish Images / Publish notifier (amd64) (push) Failing after 11s
Publish Images / Publish executor (amd64) (push) Successful in 46s
Publish Images / Publish api (amd64) (push) Successful in 46s
Publish Images / Publish agent (arm64) (push) Successful in 56s
Publish Images / Publish api (arm64) (push) Successful in 2m4s
Publish Images / Publish executor (arm64) (push) Successful in 2m3s
Publish Images / Publish notifier (arm64) (push) Successful in 1m56s
Publish Images / Publish manifest attune/agent (push) Has been skipped
Publish Images / Publish manifest attune/api (push) Has been skipped
Publish Images / Publish manifest attune/notifier (push) Has been skipped
Publish Images / Publish manifest attune/web (push) Has been skipped
Publish Images / Publish manifest attune/executor (push) Has been skipped
2026-04-01 20:40:13 -05:00
a4c303ec84 merging semgrep-scan 2026-04-01 20:38:18 -05:00
a0f59114a3 Merge branch 'semgrep-scan' 2026-04-01 20:37:39 -05:00
104dcbb1b1 [WIP] client action streaming 2026-04-01 20:23:56 -05:00
b342005e17 addressing some semgrep issues 2026-04-01 19:27:37 -05:00
4b525f4641 attempting to fix build pipeline failures
All checks were successful
CI / Rustfmt (push) Successful in 23s
CI / Cargo Audit & Deny (push) Successful in 35s
CI / Security Blocking Checks (push) Successful in 10s
CI / Web Blocking Checks (push) Successful in 50s
CI / Web Advisory Checks (push) Successful in 35s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Security Advisory Checks (push) Successful in 37s
CI / Clippy (push) Successful in 2m3s
Publish Images / Publish web (amd64) (push) Successful in 42s
Publish Images / Publish web (arm64) (push) Successful in 3m25s
CI / Tests (push) Successful in 8m51s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m32s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m22s
Publish Images / Publish agent (amd64) (push) Successful in 21s
Publish Images / Publish notifier (amd64) (push) Successful in 37s
Publish Images / Publish executor (amd64) (push) Successful in 41s
Publish Images / Publish api (amd64) (push) Successful in 41s
Publish Images / Publish agent (arm64) (push) Successful in 55s
Publish Images / Publish api (arm64) (push) Successful in 1m58s
Publish Images / Publish executor (arm64) (push) Successful in 1m53s
Publish Images / Publish notifier (arm64) (push) Successful in 1m53s
Publish Images / Publish manifest attune/agent (push) Successful in 7s
Publish Images / Publish manifest attune/api (push) Successful in 16s
Publish Images / Publish manifest attune/executor (push) Successful in 10s
Publish Images / Publish manifest attune/notifier (push) Successful in 8s
Publish Images / Publish manifest attune/web (push) Successful in 7s
Publish Images / Publish Docker Dist Bundle (push) Successful in 4s
2026-03-28 14:21:09 -05:00
David Culbreth
7ef2b59b23 working on arm64 native
Some checks failed
CI / Rustfmt (push) Successful in 24s
CI / Cargo Audit & Deny (push) Successful in 36s
CI / Security Blocking Checks (push) Successful in 9s
CI / Web Blocking Checks (push) Successful in 48s
CI / Web Advisory Checks (push) Successful in 37s
Publish Images / Resolve Publish Metadata (push) Successful in 2s
CI / Clippy (push) Failing after 1m53s
Publish Images / Publish Docker Dist Bundle (push) Failing after 8s
Publish Images / Publish web (amd64) (push) Successful in 56s
CI / Security Advisory Checks (push) Successful in 38s
Publish Images / Publish web (arm64) (push) Successful in 3m29s
CI / Tests (push) Successful in 9m21s
Publish Images / Build Rust Bundles (amd64) (push) Failing after 12m28s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m20s
Publish Images / Publish agent (amd64) (push) Has been skipped
Publish Images / Publish api (amd64) (push) Has been skipped
Publish Images / Publish agent (arm64) (push) Has been skipped
Publish Images / Publish api (arm64) (push) Has been skipped
Publish Images / Publish executor (amd64) (push) Has been skipped
Publish Images / Publish notifier (amd64) (push) Has been skipped
Publish Images / Publish executor (arm64) (push) Has been skipped
Publish Images / Publish notifier (arm64) (push) Has been skipped
Publish Images / Publish manifest attune/agent (push) Has been skipped
Publish Images / Publish manifest attune/api (push) Has been skipped
Publish Images / Publish manifest attune/notifier (push) Has been skipped
Publish Images / Publish manifest attune/executor (push) Has been skipped
Publish Images / Publish manifest attune/web (push) Has been skipped
2026-03-27 16:37:46 -05:00
3a13bf754a fixing docker compose distribution
Some checks failed
CI / Rustfmt (push) Successful in 20s
CI / Clippy (push) Successful in 2m3s
CI / Cargo Audit & Deny (push) Successful in 32s
CI / Web Blocking Checks (push) Successful in 1m21s
CI / Security Blocking Checks (push) Successful in 10s
CI / Web Advisory Checks (push) Successful in 1m3s
CI / Security Advisory Checks (push) Successful in 37s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Tests (push) Successful in 8m46s
Publish Images / Publish web (arm64) (push) Successful in 3m20s
Publish Images / Publish Docker Dist Bundle (push) Failing after 9s
Publish Images / Publish web (amd64) (push) Successful in 52s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m20s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m30s
Publish Images / Publish agent (amd64) (push) Successful in 29s
Publish Images / Publish executor (amd64) (push) Successful in 35s
Publish Images / Publish api (amd64) (push) Successful in 42s
Publish Images / Publish notifier (amd64) (push) Successful in 35s
Publish Images / Publish agent (arm64) (push) Successful in 1m3s
Publish Images / Publish api (arm64) (push) Successful in 1m55s
Publish Images / Publish executor (arm64) (push) Successful in 2m1s
Publish Images / Publish notifier (arm64) (push) Successful in 1m54s
Publish Images / Publish manifest attune/agent (push) Successful in 10s
Publish Images / Publish manifest attune/api (push) Successful in 12s
Publish Images / Publish manifest attune/executor (push) Successful in 10s
Publish Images / Publish manifest attune/notifier (push) Successful in 9s
Publish Images / Publish manifest attune/web (push) Successful in 7s
2026-03-26 15:39:07 -05:00
f4ef823f43 fixing audit finding
Some checks failed
CI / Rustfmt (push) Successful in 21s
CI / Cargo Audit & Deny (push) Successful in 32s
CI / Security Blocking Checks (push) Successful in 9s
CI / Web Blocking Checks (push) Successful in 53s
CI / Web Advisory Checks (push) Successful in 34s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Security Advisory Checks (push) Successful in 36s
CI / Clippy (push) Successful in 2m8s
Publish Images / Publish Docker Dist Bundle (push) Failing after 8s
Publish Images / Publish web (amd64) (push) Successful in 53s
Publish Images / Publish web (arm64) (push) Successful in 3m28s
CI / Tests (push) Successful in 9m20s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m21s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m23s
Publish Images / Publish agent (amd64) (push) Successful in 23s
Publish Images / Publish api (amd64) (push) Successful in 33s
Publish Images / Publish notifier (amd64) (push) Successful in 38s
Publish Images / Publish executor (amd64) (push) Successful in 54s
Publish Images / Publish agent (arm64) (push) Successful in 59s
Publish Images / Publish executor (arm64) (push) Successful in 1m55s
Publish Images / Publish api (arm64) (push) Successful in 2m2s
Publish Images / Publish notifier (arm64) (push) Successful in 2m3s
Publish Images / Publish manifest attune/agent (push) Successful in 10s
Publish Images / Publish manifest attune/executor (push) Successful in 19s
Publish Images / Publish manifest attune/api (push) Successful in 21s
Publish Images / Publish manifest attune/notifier (push) Successful in 12s
Publish Images / Publish manifest attune/web (push) Successful in 7s
2026-03-26 14:05:53 -05:00
ab7d31de2f fixing docker compose distribution 2026-03-26 14:04:57 -05:00
938c271ff5 distributable, please
Some checks failed
CI / Rustfmt (push) Successful in 22s
CI / Cargo Audit & Deny (push) Successful in 36s
CI / Security Blocking Checks (push) Successful in 6s
CI / Web Blocking Checks (push) Successful in 53s
CI / Web Advisory Checks (push) Successful in 34s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Security Advisory Checks (push) Successful in 38s
CI / Clippy (push) Successful in 2m7s
Publish Images / Publish Docker Dist Bundle (push) Failing after 19s
Publish Images / Publish web (amd64) (push) Successful in 49s
Publish Images / Publish web (arm64) (push) Successful in 3m31s
CI / Tests (push) Successful in 8m48s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m42s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m19s
Publish Images / Publish agent (amd64) (push) Successful in 26s
Publish Images / Publish api (amd64) (push) Successful in 38s
Publish Images / Publish notifier (amd64) (push) Successful in 42s
Publish Images / Publish executor (amd64) (push) Successful in 46s
Publish Images / Publish agent (arm64) (push) Successful in 56s
Publish Images / Publish api (arm64) (push) Successful in 1m52s
Publish Images / Publish executor (arm64) (push) Successful in 2m2s
Publish Images / Publish notifier (arm64) (push) Successful in 2m3s
Publish Images / Publish manifest attune/agent (push) Successful in 6s
Publish Images / Publish manifest attune/api (push) Successful in 11s
Publish Images / Publish manifest attune/executor (push) Successful in 10s
Publish Images / Publish manifest attune/notifier (push) Successful in 8s
Publish Images / Publish manifest attune/web (push) Successful in 8s
2026-03-26 12:26:23 -05:00
da8055cb79 publishable docker compose?
Some checks failed
CI / Cargo Audit & Deny (push) Successful in 31s
CI / Rustfmt (push) Successful in 18s
CI / Security Blocking Checks (push) Successful in 6s
CI / Web Blocking Checks (push) Successful in 52s
CI / Web Advisory Checks (push) Successful in 31s
Publish Images / Resolve Publish Metadata (push) Successful in 2s
CI / Security Advisory Checks (push) Successful in 38s
CI / Clippy (push) Successful in 1m58s
Publish Images / Publish Docker Dist Bundle (push) Failing after 21s
Publish Images / Publish web (amd64) (push) Successful in 50s
Publish Images / Publish web (arm64) (push) Successful in 3m26s
CI / Tests (push) Successful in 9m1s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m25s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m42s
Publish Images / Publish agent (amd64) (push) Successful in 28s
Publish Images / Publish api (amd64) (push) Successful in 45s
Publish Images / Publish executor (amd64) (push) Successful in 46s
Publish Images / Publish notifier (amd64) (push) Successful in 49s
Publish Images / Publish agent (arm64) (push) Successful in 1m0s
Publish Images / Publish api (arm64) (push) Successful in 1m51s
Publish Images / Publish executor (arm64) (push) Successful in 2m1s
Publish Images / Publish notifier (arm64) (push) Successful in 2m1s
Publish Images / Publish manifest attune/agent (push) Successful in 6s
Publish Images / Publish manifest attune/api (push) Successful in 10s
Publish Images / Publish manifest attune/executor (push) Successful in 7s
Publish Images / Publish manifest attune/notifier (push) Successful in 9s
Publish Images / Publish manifest attune/web (push) Successful in 7s
2026-03-26 08:46:18 -05:00
03a239d22b manifest publish retries and more descriptive logs.
All checks were successful
CI / Rustfmt (push) Successful in 22s
CI / Cargo Audit & Deny (push) Successful in 34s
CI / Security Blocking Checks (push) Successful in 8s
CI / Web Blocking Checks (push) Successful in 52s
CI / Web Advisory Checks (push) Successful in 38s
Publish Images / Resolve Publish Metadata (push) Successful in 2s
CI / Clippy (push) Successful in 2m1s
CI / Security Advisory Checks (push) Successful in 1m24s
Publish Images / Publish web (amd64) (push) Successful in 46s
Publish Images / Publish web (arm64) (push) Successful in 3m23s
CI / Tests (push) Successful in 8m54s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m27s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m19s
Publish Images / Publish agent (amd64) (push) Successful in 23s
Publish Images / Publish api (amd64) (push) Successful in 37s
Publish Images / Publish executor (amd64) (push) Successful in 47s
Publish Images / Publish agent (arm64) (push) Successful in 1m1s
Publish Images / Publish notifier (amd64) (push) Successful in 40s
Publish Images / Publish api (arm64) (push) Successful in 1m51s
Publish Images / Publish executor (arm64) (push) Successful in 2m1s
Publish Images / Publish notifier (arm64) (push) Successful in 1m49s
Publish Images / Publish manifest attune/agent (push) Successful in 7s
Publish Images / Publish manifest attune/executor (push) Successful in 8s
Publish Images / Publish manifest attune/notifier (push) Successful in 9s
Publish Images / Publish manifest attune/api (push) Successful in 18s
Publish Images / Publish manifest attune/web (push) Successful in 8s
2026-03-26 07:40:07 -05:00
ba83958337 trying to fix manifest push
Some checks failed
CI / Rustfmt (push) Successful in 22s
CI / Cargo Audit & Deny (push) Successful in 35s
CI / Security Blocking Checks (push) Successful in 9s
CI / Web Blocking Checks (push) Successful in 51s
CI / Web Advisory Checks (push) Successful in 37s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Clippy (push) Successful in 2m9s
CI / Security Advisory Checks (push) Successful in 1m25s
Publish Images / Publish web (amd64) (push) Successful in 52s
Publish Images / Publish web (arm64) (push) Successful in 3m27s
CI / Tests (push) Successful in 8m48s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m50s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m29s
Publish Images / Publish agent (amd64) (push) Successful in 26s
Publish Images / Publish api (amd64) (push) Successful in 37s
Publish Images / Publish executor (amd64) (push) Successful in 40s
Publish Images / Publish agent (arm64) (push) Successful in 1m2s
Publish Images / Publish notifier (amd64) (push) Successful in 38s
Publish Images / Publish executor (arm64) (push) Successful in 1m57s
Publish Images / Publish api (arm64) (push) Successful in 1m58s
Publish Images / Publish notifier (arm64) (push) Successful in 2m6s
Publish Images / Publish manifest attune/agent (push) Successful in 12s
Publish Images / Publish manifest attune/api (push) Successful in 11s
Publish Images / Publish manifest attune/notifier (push) Successful in 13s
Publish Images / Publish manifest attune/executor (push) Successful in 16s
Publish Images / Publish manifest attune/web (push) Failing after 37s
2026-03-25 17:29:27 -05:00
c11bc1a2bf trying to fix manifest push
Some checks failed
CI / Rustfmt (push) Successful in 23s
CI / Clippy (push) Successful in 2m6s
CI / Cargo Audit & Deny (push) Successful in 33s
CI / Web Blocking Checks (push) Successful in 52s
CI / Security Blocking Checks (push) Successful in 6s
CI / Web Advisory Checks (push) Successful in 36s
CI / Security Advisory Checks (push) Successful in 38s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
Publish Images / Publish web (arm64) (push) Successful in 3m26s
CI / Tests (push) Successful in 8m52s
Publish Images / Publish web (amd64) (push) Successful in 1m8s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m29s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m46s
Publish Images / Publish agent (amd64) (push) Successful in 26s
Publish Images / Publish api (amd64) (push) Successful in 40s
Publish Images / Publish executor (amd64) (push) Successful in 39s
Publish Images / Publish agent (arm64) (push) Successful in 57s
Publish Images / Publish notifier (amd64) (push) Successful in 41s
Publish Images / Publish api (arm64) (push) Successful in 2m3s
Publish Images / Publish executor (arm64) (push) Successful in 2m2s
Publish Images / Publish notifier (arm64) (push) Successful in 1m57s
Publish Images / Publish manifest attune/api (push) Failing after 10s
Publish Images / Publish manifest attune/agent (push) Successful in 12s
Publish Images / Publish manifest attune/executor (push) Successful in 11s
Publish Images / Publish manifest attune/notifier (push) Successful in 11s
Publish Images / Publish manifest attune/web (push) Failing after 8s
2026-03-25 17:10:36 -05:00
eb82755137 trying different urls? not sure why publishing is only working for the arm64 builds
Some checks failed
CI / Rustfmt (push) Successful in 22s
CI / Security Blocking Checks (push) Has been cancelled
CI / Tests (push) Has been cancelled
CI / Cargo Audit & Deny (push) Has been cancelled
CI / Web Advisory Checks (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Security Advisory Checks (push) Has been cancelled
CI / Web Blocking Checks (push) Has been cancelled
Publish Images / Resolve Publish Metadata (push) Successful in 1s
Publish Images / Publish web (amd64) (push) Successful in 45s
Publish Images / Publish web (arm64) (push) Successful in 3m19s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m24s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m43s
Publish Images / Publish agent (amd64) (push) Successful in 27s
Publish Images / Publish api (amd64) (push) Successful in 41s
Publish Images / Publish agent (arm64) (push) Successful in 1m0s
Publish Images / Publish notifier (amd64) (push) Successful in 40s
Publish Images / Publish executor (arm64) (push) Successful in 1m58s
Publish Images / Publish notifier (arm64) (push) Successful in 1m53s
Publish Images / Publish manifest attune/api (push) Has been skipped
Publish Images / Publish manifest attune/executor (push) Has been skipped
Publish Images / Publish manifest attune/notifier (push) Has been skipped
Publish Images / Publish manifest attune/web (push) Has been skipped
Publish Images / Publish executor (amd64) (push) Successful in 45s
Publish Images / Publish api (arm64) (push) Successful in 2m2s
Publish Images / Publish manifest attune/agent (push) Failing after 1s
2026-03-25 14:29:15 -05:00
058f392616 updating the publisher, again
Some checks failed
CI / Cargo Audit & Deny (push) Successful in 1m11s
CI / Rustfmt (push) Successful in 1m20s
CI / Security Blocking Checks (push) Successful in 9s
CI / Clippy (push) Successful in 2m1s
CI / Web Advisory Checks (push) Successful in 1m9s
CI / Web Blocking Checks (push) Successful in 1m26s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Security Advisory Checks (push) Successful in 39s
Publish Images / Publish web (arm64) (push) Successful in 3m50s
CI / Tests (push) Successful in 9m4s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m17s
Publish Images / Build Rust Bundles (amd64) (push) Failing after 12m21s
Publish Images / Publish api (arm64) (push) Has been skipped
Publish Images / Publish executor (arm64) (push) Has been skipped
Publish Images / Publish notifier (amd64) (push) Has been skipped
Publish Images / Publish executor (amd64) (push) Has been skipped
Publish Images / Publish notifier (arm64) (push) Has been skipped
Publish Images / Publish manifest attune/api (push) Has been skipped
Publish Images / Publish manifest attune/executor (push) Has been skipped
Publish Images / Publish manifest attune/notifier (push) Has been skipped
Publish Images / Publish manifest attune/web (push) Has been skipped
Publish Images / Publish web (amd64) (push) Failing after 47s
Publish Images / Publish agent (amd64) (push) Has been skipped
Publish Images / Publish api (amd64) (push) Has been skipped
Publish Images / Publish agent (arm64) (push) Has been skipped
Publish Images / Publish manifest attune/agent (push) Has been skipped
2026-03-25 13:10:44 -05:00
0264a66b5a renaming container artifacts and adding project linking stage
Some checks failed
CI / Rustfmt (push) Successful in 21s
CI / Clippy (push) Successful in 2m3s
CI / Cargo Audit & Deny (push) Successful in 34s
CI / Web Blocking Checks (push) Successful in 1m27s
CI / Security Blocking Checks (push) Successful in 15s
CI / Web Advisory Checks (push) Successful in 32s
CI / Security Advisory Checks (push) Successful in 1m25s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Tests (push) Successful in 8m56s
Publish Images / Publish web (arm64) (push) Failing after 3m49s
Publish Images / Publish web (amd64) (push) Failing after 1m28s
Publish Images / Build Rust Bundles (amd64) (push) Failing after 12m21s
Publish Images / Build Rust Bundles (arm64) (push) Failing after 12m28s
Publish Images / Publish agent (amd64) (push) Has been skipped
Publish Images / Publish api (amd64) (push) Has been skipped
Publish Images / Publish agent (arm64) (push) Has been skipped
Publish Images / Publish api (arm64) (push) Has been skipped
Publish Images / Publish executor (amd64) (push) Has been skipped
Publish Images / Publish executor (arm64) (push) Has been skipped
Publish Images / Publish notifier (amd64) (push) Has been skipped
Publish Images / Publish notifier (arm64) (push) Has been skipped
Publish Images / Publish manifest attune/api (push) Has been skipped
Publish Images / Publish manifest attune/executor (push) Has been skipped
Publish Images / Publish manifest attune/agent (push) Has been skipped
Publish Images / Publish manifest attune/notifier (push) Has been skipped
Publish Images / Publish manifest attune/web (push) Has been skipped
2026-03-25 12:39:47 -05:00
542e72a454 fixing glibc version check
Some checks failed
CI / Clippy (push) Successful in 2m1s
CI / Rustfmt (push) Successful in 21s
CI / Cargo Audit & Deny (push) Successful in 32s
CI / Web Blocking Checks (push) Successful in 53s
CI / Security Blocking Checks (push) Successful in 8s
CI / Web Advisory Checks (push) Successful in 37s
CI / Security Advisory Checks (push) Successful in 36s
Publish Images / Resolve Publish Metadata (push) Successful in 2s
Publish Images / Publish web (arm64) (push) Successful in 3m39s
CI / Tests (push) Successful in 8m37s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 12m21s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 12m15s
Publish Images / Publish agent (amd64) (push) Successful in 26s
Publish Images / Publish api (amd64) (push) Successful in 39s
Publish Images / Publish executor (amd64) (push) Successful in 37s
Publish Images / Publish notifier (amd64) (push) Successful in 37s
Publish Images / Publish agent (arm64) (push) Successful in 1m34s
Publish Images / Publish executor (arm64) (push) Successful in 2m12s
Publish Images / Publish api (arm64) (push) Successful in 2m22s
Publish Images / Publish manifest attune-executor (push) Has been skipped
Publish Images / Publish manifest attune-notifier (push) Has been skipped
Publish Images / Publish manifest attune-web (push) Has been skipped
Publish Images / Publish notifier (arm64) (push) Successful in 2m10s
Publish Images / Publish web (amd64) (push) Successful in 47s
Publish Images / Publish manifest attune-agent (push) Failing after 2s
Publish Images / Publish manifest attune-api (push) Failing after 1s
2026-03-25 11:17:50 -05:00
a118563366 building? hopefully?
Some checks failed
CI / Rustfmt (push) Successful in 22s
CI / Clippy (push) Successful in 2m3s
CI / Cargo Audit & Deny (push) Successful in 32s
CI / Web Blocking Checks (push) Successful in 52s
CI / Security Blocking Checks (push) Successful in 8s
CI / Web Advisory Checks (push) Successful in 36s
CI / Security Advisory Checks (push) Successful in 43s
Publish Images / Resolve Publish Metadata (push) Successful in 2s
Publish Images / Publish web (arm64) (push) Failing after 3m53s
CI / Tests (push) Successful in 8m45s
Publish Images / Build Rust Bundles (amd64) (push) Failing after 8m57s
Publish Images / Publish web (amd64) (push) Successful in 48s
Publish Images / Publish agent (amd64) (push) Has been cancelled
Publish Images / Publish api (amd64) (push) Has been cancelled
Publish Images / Publish executor (amd64) (push) Has been cancelled
Publish Images / Publish notifier (amd64) (push) Has been cancelled
Publish Images / Publish agent (arm64) (push) Has been cancelled
Publish Images / Publish api (arm64) (push) Has been cancelled
Publish Images / Publish executor (arm64) (push) Has been cancelled
Publish Images / Build Rust Bundles (arm64) (push) Has been cancelled
Publish Images / Publish notifier (arm64) (push) Has been cancelled
Publish Images / Publish manifest attune-agent (push) Has been cancelled
Publish Images / Publish manifest attune-api (push) Has been cancelled
Publish Images / Publish manifest attune-executor (push) Has been cancelled
Publish Images / Publish manifest attune-notifier (push) Has been cancelled
Publish Images / Publish manifest attune-web (push) Has been cancelled
2026-03-25 10:52:07 -05:00
a057ad5db5 adjusting publish pipeline to cross-compile because rpis are slow
Some checks failed
CI / Rustfmt (push) Successful in 21s
CI / Clippy (push) Failing after 2m3s
CI / Cargo Audit & Deny (push) Successful in 33s
CI / Web Blocking Checks (push) Successful in 51s
CI / Security Blocking Checks (push) Successful in 5s
CI / Web Advisory Checks (push) Successful in 38s
CI / Security Advisory Checks (push) Successful in 36s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
Publish Images / Publish web (arm64) (push) Successful in 3m34s
Publish Images / Build Rust Bundles (amd64) (push) Failing after 4m1s
CI / Tests (push) Successful in 8m47s
Publish Images / Publish web (amd64) (push) Failing after 46s
Publish Images / Build Rust Bundles (arm64) (push) Failing after 4m3s
Publish Images / Publish agent (arm64) (push) Has been skipped
Publish Images / Publish api (arm64) (push) Has been skipped
Publish Images / Publish agent (amd64) (push) Has been skipped
Publish Images / Publish api (amd64) (push) Has been skipped
Publish Images / Publish executor (arm64) (push) Has been skipped
Publish Images / Publish notifier (arm64) (push) Has been skipped
Publish Images / Publish executor (amd64) (push) Has been skipped
Publish Images / Publish notifier (amd64) (push) Has been skipped
Publish Images / Publish manifest attune-agent (push) Has been skipped
Publish Images / Publish manifest attune-api (push) Has been skipped
Publish Images / Publish manifest attune-executor (push) Has been skipped
Publish Images / Publish manifest attune-notifier (push) Has been skipped
Publish Images / Publish manifest attune-web (push) Has been skipped
2026-03-25 10:07:48 -05:00
8e273ec683 more adjustments to publisher 2026-03-25 08:14:06 -05:00
16f1c2f079 matching runner tags after changing runner tags
Some checks failed
CI / Rustfmt (push) Successful in 1m4s
CI / Clippy (push) Failing after 1m46s
CI / Cargo Audit & Deny (push) Successful in 34s
CI / Web Blocking Checks (push) Successful in 1m24s
CI / Security Blocking Checks (push) Successful in 8s
CI / Web Advisory Checks (push) Successful in 32s
CI / Security Advisory Checks (push) Successful in 1m26s
Publish Images / Resolve Publish Metadata (push) Successful in 1s
CI / Tests (push) Successful in 8m51s
Publish Images / Publish web (amd64) (push) Successful in 1m4s
Publish Images / Build Rust Bundles (amd64) (push) Successful in 10m59s
Publish Images / Build Rust Bundles (arm64) (push) Successful in 1h19m31s
Publish Images / Publish agent (amd64) (push) Failing after 14s
Publish Images / Publish executor (amd64) (push) Failing after 12s
Publish Images / Publish api (amd64) (push) Failing after 32s
Publish Images / Publish notifier (amd64) (push) Failing after 14s
Publish Images / Publish api (arm64) (push) Failing after 1m58s
Publish Images / Publish executor (arm64) (push) Failing after 49s
Publish Images / Publish notifier (arm64) (push) Failing after 48s
Publish Images / Publish web (arm64) (push) Successful in 3m47s
Publish Images / Publish agent (arm64) (push) Failing after 4m13s
Publish Images / Publish manifest attune-agent (push) Has been skipped
Publish Images / Publish manifest attune-api (push) Has been skipped
Publish Images / Publish manifest attune-executor (push) Has been skipped
Publish Images / Publish manifest attune-notifier (push) Has been skipped
Publish Images / Publish manifest attune-web (push) Has been skipped
2026-03-25 01:22:50 -05:00
62307e8c65 publishing with intentional architecture
Some checks failed
Publish Images / Resolve Publish Metadata (push) Successful in 18s
Publish Images / Publish web (arm64) (push) Successful in 7m16s
CI / Rustfmt (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Security Advisory Checks (push) Has been cancelled
CI / Tests (push) Has been cancelled
CI / Cargo Audit & Deny (push) Has been cancelled
CI / Web Blocking Checks (push) Has been cancelled
CI / Security Blocking Checks (push) Has been cancelled
CI / Web Advisory Checks (push) Has been cancelled
Publish Images / Publish agent (amd64) (push) Has been cancelled
Publish Images / Publish api (amd64) (push) Has been cancelled
Publish Images / Publish executor (amd64) (push) Has been cancelled
Publish Images / Publish notifier (amd64) (push) Has been cancelled
Publish Images / Publish agent (arm64) (push) Has been cancelled
Publish Images / Publish api (arm64) (push) Has been cancelled
Publish Images / Publish executor (arm64) (push) Has been cancelled
Publish Images / Publish notifier (arm64) (push) Has been cancelled
Publish Images / Publish web (amd64) (push) Has been cancelled
Publish Images / Build Rust Bundles (amd64) (push) Has started running
Publish Images / Publish manifest attune-agent (push) Has been cancelled
Publish Images / Publish manifest attune-api (push) Has been cancelled
Publish Images / Publish manifest attune-executor (push) Has been cancelled
Publish Images / Publish manifest attune-notifier (push) Has been cancelled
Publish Images / Build Rust Bundles (arm64) (push) Has been cancelled
Publish Images / Publish manifest attune-web (push) Has been cancelled
2026-03-25 01:10:10 -05:00
2ebb03b868 first pass at access control setup 2026-03-24 14:45:07 -05:00
258 changed files with 31229 additions and 3670 deletions

.codex (Normal file, 0 changes)

.codex_write_test (Normal file, 0 changes)


@@ -19,7 +19,7 @@ env:
jobs:
rust-fmt:
name: Rustfmt
-runs-on: ubuntu-latest
+runs-on: build-amd64
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -45,7 +45,7 @@ jobs:
rust-clippy:
name: Clippy
-runs-on: ubuntu-latest
+runs-on: build-amd64
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -91,7 +91,7 @@ jobs:
rust-test:
name: Tests
-runs-on: ubuntu-latest
+runs-on: build-amd64
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -135,7 +135,7 @@ jobs:
rust-audit:
name: Cargo Audit & Deny
-runs-on: ubuntu-latest
+runs-on: build-amd64
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -188,7 +188,7 @@ jobs:
web-blocking:
name: Web Blocking Checks
-runs-on: ubuntu-latest
+runs-on: build-amd64
defaults:
run:
working-directory: web
@@ -217,7 +217,7 @@ jobs:
security-blocking:
name: Security Blocking Checks
-runs-on: ubuntu-latest
+runs-on: build-amd64
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -250,7 +250,7 @@ jobs:
web-advisory:
name: Web Advisory Checks
-runs-on: ubuntu-latest
+runs-on: build-amd64
continue-on-error: true
defaults:
run:
@@ -279,7 +279,7 @@ jobs:
security-advisory:
name: Security Advisory Checks
-runs-on: ubuntu-latest
+runs-on: build-amd64
continue-on-error: true
steps:
- name: Checkout

File diff suppressed because it is too large

.gitignore (vendored, 6 changes)

@@ -11,6 +11,7 @@ target/
# Configuration files (keep *.example.yaml)
config.yaml
config.*.yaml
+!docker/distributable/config.docker.yaml
!config.example.yaml
!config.development.yaml
!config.test.yaml
@@ -35,6 +36,7 @@ logs/
# Build artifacts
dist/
build/
+artifacts/
# Testing
coverage/
@@ -78,4 +80,8 @@ docker-compose.override.yml
*.pid
packs.examples/
packs.external/
+codex/
+# Compiled pack binaries (built via Docker or build-pack-binaries.sh)
+packs/core/sensors/attune-core-timer-sensor


@@ -4,3 +4,6 @@ web/node_modules/
web/src/api/
packs.dev/
packs.external/
+tests/
+docs/
+*.md


@@ -77,7 +77,7 @@ attune/
**Services**:
- **Infrastructure**: postgres (TimescaleDB), rabbitmq, redis
-- **Init** (run-once): migrations, init-user, init-packs, init-agent
+- **Init** (run-once): migrations, init-user, init-pack-binaries, init-packs, init-agent
- **Application**: api (8080), executor, worker-{shell,python,node,full}, sensor, notifier (8081), web (3000)
**Volumes** (named):
@@ -100,7 +100,8 @@ docker compose -f docker-compose.yaml -f docker-compose.agent.yaml up -d # Star
### Docker Build Optimization
- **Active Dockerfiles**: `docker/Dockerfile.optimized`, `docker/Dockerfile.agent`, `docker/Dockerfile.web`, and `docker/Dockerfile.pack-binaries`
-- **Agent Dockerfile** (`docker/Dockerfile.agent`): Builds a statically-linked `attune-agent` binary using musl (`x86_64-unknown-linux-musl`). Three stages: `builder` (cross-compile), `agent-binary` (scratch — just the binary), `agent-init` (busybox — for volume population via `cp`). The binary has zero runtime dependencies (no glibc, no libssl). Build with `make docker-build-agent`.
+- **Agent Dockerfile** (`docker/Dockerfile.agent`): Builds statically-linked `attune-agent` and `attune-sensor-agent` binaries using musl. Uses `cargo-zigbuild` (zig as the cross-compilation backend) so that any target architecture can be built from any host — e.g., building `aarch64-unknown-linux-musl` on an x86_64 host or vice versa. The `RUST_TARGET` build arg controls the output architecture (`x86_64-unknown-linux-musl` default, or `aarch64-unknown-linux-musl` for arm64). Three stages: `builder` (cross-compile with cargo-zigbuild), `agent-binary` (scratch — just the binaries), `agent-init` (busybox — for volume population via `cp`). The binaries have zero runtime dependencies (no glibc, no libssl). Build with `make docker-build-agent` (amd64), `make docker-build-agent-arm64` (arm64), or `make docker-build-agent-all` (both). In `docker-compose.yaml`, set `AGENT_RUST_TARGET=aarch64-unknown-linux-musl` env var to build arm64 agent binaries (defaults to x86_64).
+- **Pack Binaries Dockerfile** (`docker/Dockerfile.pack-binaries`): Builds statically-linked pack binaries (sensors, etc.) using musl + cargo-zigbuild for cross-compilation. The `RUST_TARGET` build arg controls the output architecture (`x86_64-unknown-linux-musl` default, or `aarch64-unknown-linux-musl` for arm64). Three stages: `builder` (cross-compile with cargo-zigbuild), `output` (scratch — just the binaries for `docker cp` extraction), `pack-binaries-init` (busybox — for Docker Compose volume population via `cp`). Build with `make docker-build-pack-binaries` (amd64), `make docker-build-pack-binaries-arm64` (arm64), or `make docker-build-pack-binaries-all` (both). In `docker-compose.yaml`, set `PACK_BINARIES_RUST_TARGET=aarch64-unknown-linux-musl` env var to build arm64 pack binaries (defaults to x86_64). The `init-pack-binaries` Docker Compose service automatically builds and copies pack binaries into the `packs_data` volume before `init-packs` runs.
- **Strategy**: Selective crate copying - only copy crates needed for each service (not entire workspace)
- **Performance**: 90% faster incremental builds (~30 sec vs ~5 min for code changes)
- **BuildKit cache mounts**: Persist cargo registry and compilation artifacts between builds
@@ -123,7 +124,7 @@ docker compose -f docker-compose.yaml -f docker-compose.agent.yaml up -d # Star
- **Key Principle**: Packs are NOT copied into Docker images - they are mounted as volumes
- **Volume Flow**: Host `./packs/` → `init-packs` service → `packs_data` volume → mounted in all services
- **Benefits**: Update packs with restart (~5 sec) instead of rebuild (~5 min)
-- **Pack Binaries**: Built separately with `./scripts/build-pack-binaries.sh` (GLIBC compatibility)
+- **Pack Binaries**: Automatically built and deployed via the `init-pack-binaries` Docker Compose service (statically-linked musl binaries via cargo-zigbuild, supports cross-compilation via `PACK_BINARIES_RUST_TARGET` env var). Can also be built manually with `./scripts/build-pack-binaries.sh` or `make docker-build-pack-binaries`. The `init-packs` service depends on `init-pack-binaries` and preserves any ELF binaries already present in the target `sensors/` directory (detected via ELF magic bytes with `od`) — it backs them up before copying host pack files and restores them afterward, preventing the host's stale dynamically-linked binary from overwriting the freshly-built static one.
- **Development**: Use `./packs.dev/` for instant testing (direct bind mount, no restart needed)
- **Documentation**: See `docs/QUICKREF-packs-volumes.md`
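The ELF-preservation step described in this hunk is implemented in shell (via `od`) inside the `init-packs` service. Purely as an illustration of the same magic-byte check, here is a minimal Rust sketch; the `is_elf` helper and the file path are hypothetical, not the project's actual code:

```rust
use std::fs::File;
use std::io::Read;
use std::path::Path;

/// Hypothetical helper mirroring the `od`-based check: a file counts as a
/// compiled pack binary if it starts with the ELF magic bytes 0x7F 'E' 'L' 'F'.
fn is_elf(path: &Path) -> std::io::Result<bool> {
    let mut magic = [0u8; 4];
    let n = File::open(path)?.read(&mut magic)?;
    Ok(n == 4 && magic == [0x7F, b'E', b'L', b'F'])
}

fn main() -> std::io::Result<()> {
    // Example: decide whether a host pack file may overwrite an
    // already-deployed static binary (path is illustrative).
    let deployed = Path::new("/opt/attune/packs/core/sensors/attune-core-timer-sensor");
    if deployed.exists() && is_elf(deployed)? {
        println!("keeping deployed static binary: {}", deployed.display());
    }
    Ok(())
}
```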
@@ -273,7 +274,7 @@ Completion listener advances workflow → Schedules successor tasks → Complete
- **Pack Volume Strategy**: Packs are mounted as volumes (NOT copied into Docker images)
- Host `./packs/` → `packs_data` volume via `init-packs` service → mounted at `/opt/attune/packs` in all services
- Development packs in `./packs.dev/` are bind-mounted directly for instant updates
-- **Pack Binaries**: Native binaries (sensors) built separately with `./scripts/build-pack-binaries.sh`
+- **Pack Binaries**: Native binaries (sensors) automatically built by the `init-pack-binaries` Docker Compose service (statically-linked musl, cross-arch via `PACK_BINARIES_RUST_TARGET`). Can also be built manually with `./scripts/build-pack-binaries.sh` or `make docker-build-pack-binaries`.
- **Action Script Resolution**: Worker constructs file paths as `{packs_base_dir}/{pack_ref}/actions/{entrypoint}`
- **Workflow Action YAML (`workflow_file` field)**: An action YAML may include a `workflow_file` field (e.g., `workflow_file: workflows/timeline_demo.yaml`) pointing to a workflow definition file relative to the `actions/` directory. When present, the `PackComponentLoader` reads and parses the referenced workflow YAML, creates/updates a `workflow_definition` record, and links the action to it via `action.workflow_def`. This separates action-level metadata (ref, label, parameters, policies) from the workflow graph (tasks, transitions, variables), and allows **multiple actions to reference the same workflow file** with different parameter schemas or policy configurations. Workflow actions have no `runner_type` (runtime is `None`) — the executor orchestrates child task executions rather than sending to a worker.
- **Action-linked workflow files omit action-level metadata**: Workflow files referenced via `workflow_file` should contain **only the execution graph**: `version`, `vars`, `tasks`, `output_map`. The `ref`, `label`, `description`, `parameters`, `output`, and `tags` fields are omitted — the action YAML is the single authoritative source for those values. The `WorkflowDefinition` parser accepts empty `ref`/`label` (defaults to `""`), and the loader / registrar fall back to the action YAML (or filename-derived values) when they are missing. Standalone workflow files (in `workflows/`) still carry their own `ref`/`label` since they have no companion action YAML.
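To make the fallback chain above concrete, here is a hedged Rust sketch of how a loader could resolve a workflow's `ref`; `resolve_workflow_ref` and its argument layout are hypothetical stand-ins for whatever `PackComponentLoader` actually does:

```rust
/// Hypothetical sketch of the fallback described above: an action-linked
/// workflow file may leave `ref` empty; the loader then falls back to the
/// companion action YAML's ref, and finally to a filename-derived value.
fn resolve_workflow_ref(
    workflow_ref: &str,       // `ref` parsed from the workflow YAML (may be "")
    action_ref: Option<&str>, // `ref` from the companion action YAML, if any
    file_stem: &str,          // e.g. "timeline_demo" from workflows/timeline_demo.yaml
) -> String {
    if !workflow_ref.is_empty() {
        return workflow_ref.to_string();
    }
    if let Some(r) = action_ref {
        if !r.is_empty() {
            return r.to_string();
        }
    }
    file_stem.to_string()
}

fn main() {
    // Standalone workflow files keep their own ref; action-linked ones fall back.
    assert_eq!(resolve_workflow_ref("demo", None, "timeline_demo"), "demo");
    assert_eq!(resolve_workflow_ref("", Some("pack.timeline"), "timeline_demo"), "pack.timeline");
    assert_eq!(resolve_workflow_ref("", None, "timeline_demo"), "timeline_demo");
}
```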
@@ -683,7 +684,7 @@ When reporting, ask: "Should I fix this first or continue with [original task]?"
- `docker/Dockerfile.optimized` - Optimized service builds (api, executor, notifier)
- `docker/Dockerfile.agent` - Statically-linked agent binary (musl, for injection into any container)
- `docker/Dockerfile.web` - Web UI build
-- `docker/Dockerfile.pack-binaries` - Separate pack binary builder
+- `docker/Dockerfile.pack-binaries` - Separate pack binary builder (cargo-zigbuild + musl static linking, 3 stages: builder, output, pack-binaries-init)
- `scripts/build-pack-binaries.sh` - Build pack binaries script
## Common Pitfalls to Avoid
@@ -703,7 +704,7 @@ When reporting, ask: "Should I fix this first or continue with [original task]?"
14. **REMEMBER** schema is determined by `search_path`, not hardcoded in queries (production uses `attune`, development uses `public`)
15. **REMEMBER** to regenerate SQLx metadata after schema-related changes: `cargo sqlx prepare`
16. **REMEMBER** packs are volumes - update with restart, not rebuild
-17. **REMEMBER** to build pack binaries separately: `./scripts/build-pack-binaries.sh`
+17. **REMEMBER** pack binaries are automatically built by `init-pack-binaries` in Docker Compose. For manual builds use `make docker-build-pack-binaries` or `./scripts/build-pack-binaries.sh`.
18. **REMEMBER** when adding mutable columns to `execution` or `worker`, add a corresponding `IS DISTINCT FROM` check to the entity's history trigger function in the TimescaleDB migration. Events and enforcements are hypertables without history tables — do NOT add frequently-mutated columns to them. Execution is both a hypertable AND has an `execution_history` table (because it is mutable with ~4 updates per row).
19. **REMEMBER** for large JSONB columns in history triggers (like `execution.result`), use `_jsonb_digest_summary()` instead of storing the raw value — see migration `000009_timescaledb_history`
20. **NEVER** use `SELECT *` on tables that have DB-only columns not in the Rust `FromRow` struct (e.g., `execution.is_workflow`, `execution.workflow_def` exist in SQL but not in the `Execution` model). Define a `SELECT_COLUMNS` constant in the repository (see `execution.rs`, `pack.rs`, `runtime_version.rs` for examples) and reference it from all queries — including queries outside the repository (e.g., `timeout_monitor.rs` imports `execution::SELECT_COLUMNS`). Column mismatches cause runtime deserialization failures.
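As an illustration of pitfall 20, a minimal sketch of the `SELECT_COLUMNS` pattern, assuming sqlx with the `postgres` and `chrono` features; the column set is reduced here, and the real constant in `execution.rs` covers far more columns:

```rust
use sqlx::PgPool;

/// Reduced sketch of the pattern: enumerate exactly the columns the Rust
/// struct maps, so DB-only columns (e.g. `is_workflow`) never reach FromRow.
pub const SELECT_COLUMNS: &str = "id, status, created_at";

#[derive(sqlx::FromRow)]
pub struct Execution {
    pub id: i64,
    pub status: String,
    pub created_at: chrono::DateTime<chrono::Utc>,
}

pub async fn fetch_execution(pool: &PgPool, id: i64) -> sqlx::Result<Execution> {
    // Reuse the constant everywhere instead of `SELECT *`, including from
    // other modules (the doc cites timeout_monitor.rs importing it).
    let sql = format!("SELECT {SELECT_COLUMNS} FROM execution WHERE id = $1");
    sqlx::query_as::<_, Execution>(&sql).bind(id).fetch_one(pool).await
}
```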

Cargo.lock (generated, 98 changes)

@@ -528,6 +528,7 @@ dependencies = [
"mockito",
"predicates",
"reqwest 0.13.2",
"reqwest-eventsource",
"serde",
"serde_json",
"serde_yaml_ng",
@@ -579,6 +580,7 @@ dependencies = [
"tokio",
"tracing",
"tracing-subscriber",
"url",
"utoipa",
"uuid",
"validator",
@@ -2150,21 +2152,6 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
-"foreign-types-shared",
-]
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
version = "1.2.2"
@@ -3065,15 +3052,17 @@ dependencies = [
"futures-util",
"lber",
"log",
"native-tls",
"nom 7.1.3",
"percent-encoding",
"rustls",
"rustls-native-certs",
"thiserror 2.0.18",
"tokio",
"tokio-native-tls",
"tokio-rustls",
"tokio-stream",
"tokio-util",
"url",
"x509-parser",
]
[[package]]
@@ -3314,23 +3303,6 @@ dependencies = [
"version_check",
]
-[[package]]
-name = "native-tls"
-version = "0.2.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "465500e14ea162429d264d44189adc38b199b62b1c21eea9f69e4b73cb03bbf2"
-dependencies = [
-"libc",
-"log",
-"openssl",
-"openssl-probe",
-"openssl-sys",
-"schannel",
-"security-framework",
-"security-framework-sys",
-"tempfile",
-]
[[package]]
name = "nom"
version = "7.1.3"
@@ -3576,50 +3548,12 @@ dependencies = [
"url",
]
-[[package]]
-name = "openssl"
-version = "0.10.76"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "951c002c75e16ea2c65b8c7e4d3d51d5530d8dfa7d060b4776828c88cfb18ecf"
-dependencies = [
-"bitflags",
-"cfg-if",
-"foreign-types",
-"libc",
-"once_cell",
-"openssl-macros",
-"openssl-sys",
-]
-[[package]]
-name = "openssl-macros"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
-dependencies = [
-"proc-macro2",
-"quote",
-"syn",
-]
-[[package]]
-name = "openssl-probe"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
-[[package]]
-name = "openssl-sys"
-version = "0.9.112"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb"
-dependencies = [
-"cc",
-"libc",
-"pkg-config",
-"vcpkg",
-]
[[package]]
name = "option-ext"
version = "0.2.0"
@@ -4642,6 +4576,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4"
dependencies = [
"aws-lc-rs",
"log",
"once_cell",
"ring",
"rustls-pki-types",
@@ -5698,16 +5633,6 @@ dependencies = [
"syn",
]
-[[package]]
-name = "tokio-native-tls"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2"
-dependencies = [
-"native-tls",
-"tokio",
-]
[[package]]
name = "tokio-rustls"
version = "0.26.4"
@@ -5749,9 +5674,11 @@ checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857"
dependencies = [
"futures-util",
"log",
"native-tls",
"rustls",
"rustls-native-certs",
"rustls-pki-types",
"tokio",
"tokio-native-tls",
"tokio-rustls",
"tungstenite",
]
@@ -5938,8 +5865,9 @@ dependencies = [
"http",
"httparse",
"log",
"native-tls",
"rand 0.9.2",
"rustls",
"rustls-pki-types",
"sha1",
"thiserror 2.0.18",
"utf-8",


@@ -101,7 +101,7 @@ tar = "0.4"
flate2 = "1.1"
# WebSocket client
-tokio-tungstenite = { version = "0.28", features = ["native-tls"] }
+tokio-tungstenite = { version = "0.28", features = ["rustls-tls-native-roots"] }
# URL parsing
url = "2.5"


@@ -5,8 +5,10 @@
docker-build-worker-node docker-build-worker-full deny ci-rust ci-web-blocking ci-web-advisory \
ci-security-blocking ci-security-advisory ci-blocking ci-advisory \
fmt-check pre-commit install-git-hooks \
-build-agent docker-build-agent run-agent run-agent-release \
-docker-up-agent docker-down-agent
+build-agent docker-build-agent docker-build-agent-arm64 docker-build-agent-all \
+run-agent run-agent-release \
+docker-up-agent docker-down-agent \
+docker-build-pack-binaries docker-build-pack-binaries-arm64 docker-build-pack-binaries-all
# Default target
help:
@@ -64,12 +66,19 @@ help:
@echo ""
@echo "Agent (Universal Worker):"
@echo " make build-agent - Build statically-linked agent binary (musl)"
@echo " make docker-build-agent - Build agent Docker image"
@echo " make docker-build-agent - Build agent Docker image (amd64, default)"
@echo " make docker-build-agent-arm64 - Build agent Docker image (arm64)"
@echo " make docker-build-agent-all - Build agent Docker images (amd64 + arm64)"
@echo " make run-agent - Run agent in development mode"
@echo " make run-agent-release - Run agent in release mode"
@echo " make docker-up-agent - Start all services + agent workers (ruby, etc.)"
@echo " make docker-down-agent - Stop agent stack"
@echo ""
@echo "Pack Binaries:"
@echo " make docker-build-pack-binaries - Build pack binaries Docker image (amd64, default)"
@echo " make docker-build-pack-binaries-arm64 - Build pack binaries Docker image (arm64)"
@echo " make docker-build-pack-binaries-all - Build pack binaries Docker images (amd64 + arm64)"
@echo ""
@echo "Development:"
@echo " make watch - Watch and rebuild on changes"
@echo " make install-tools - Install development tools"
@@ -238,23 +247,39 @@ docker-build-web:
docker compose build web
# Agent binary (statically-linked for injection into any container)
+AGENT_RUST_TARGET ?= x86_64-unknown-linux-musl
+# Pack binaries (statically-linked for packs volume)
+PACK_BINARIES_RUST_TARGET ?= x86_64-unknown-linux-musl
build-agent:
@echo "Installing musl target (if not already installed)..."
-rustup target add x86_64-unknown-linux-musl 2>/dev/null || true
+rustup target add $(AGENT_RUST_TARGET) 2>/dev/null || true
@echo "Building statically-linked worker and sensor agent binaries..."
-SQLX_OFFLINE=true cargo build --release --target x86_64-unknown-linux-musl --bin attune-agent --bin attune-sensor-agent
-strip target/x86_64-unknown-linux-musl/release/attune-agent
-strip target/x86_64-unknown-linux-musl/release/attune-sensor-agent
+SQLX_OFFLINE=true cargo build --release --target $(AGENT_RUST_TARGET) --bin attune-agent --bin attune-sensor-agent
+strip target/$(AGENT_RUST_TARGET)/release/attune-agent
+strip target/$(AGENT_RUST_TARGET)/release/attune-sensor-agent
@echo "✅ Agent binaries built:"
@echo " - target/x86_64-unknown-linux-musl/release/attune-agent"
@echo " - target/x86_64-unknown-linux-musl/release/attune-sensor-agent"
@ls -lh target/x86_64-unknown-linux-musl/release/attune-agent
@ls -lh target/x86_64-unknown-linux-musl/release/attune-sensor-agent
@echo " - target/$(AGENT_RUST_TARGET)/release/attune-agent"
@echo " - target/$(AGENT_RUST_TARGET)/release/attune-sensor-agent"
@ls -lh target/$(AGENT_RUST_TARGET)/release/attune-agent
@ls -lh target/$(AGENT_RUST_TARGET)/release/attune-sensor-agent
docker-build-agent:
@echo "Building agent Docker image (statically-linked binary)..."
DOCKER_BUILDKIT=1 docker buildx build --target agent-init -f docker/Dockerfile.agent -t attune-agent:latest .
@echo "✅ Agent image built: attune-agent:latest"
@echo "Building agent Docker image ($(AGENT_RUST_TARGET))..."
DOCKER_BUILDKIT=1 docker buildx build --build-arg RUST_TARGET=$(AGENT_RUST_TARGET) --target agent-init -f docker/Dockerfile.agent -t attune-agent:latest .
@echo "✅ Agent image built: attune-agent:latest ($(AGENT_RUST_TARGET))"
+docker-build-agent-arm64:
+@echo "Building arm64 agent Docker image..."
+DOCKER_BUILDKIT=1 docker buildx build --build-arg RUST_TARGET=aarch64-unknown-linux-musl --target agent-init -f docker/Dockerfile.agent -t attune-agent:arm64 .
+@echo "✅ Agent image built: attune-agent:arm64"
+docker-build-agent-all:
+@echo "Building agent Docker images for all architectures..."
+$(MAKE) docker-build-agent
+$(MAKE) docker-build-agent-arm64
+@echo "✅ All agent images built: attune-agent:latest (amd64), attune-agent:arm64"
run-agent:
cargo run --bin attune-agent
@@ -262,6 +287,23 @@ run-agent:
run-agent-release:
cargo run --bin attune-agent --release
+# Pack binaries (statically-linked for packs volume)
+docker-build-pack-binaries:
+@echo "Building pack binaries Docker image ($(PACK_BINARIES_RUST_TARGET))..."
+DOCKER_BUILDKIT=1 docker buildx build --build-arg RUST_TARGET=$(PACK_BINARIES_RUST_TARGET) --target pack-binaries-init -f docker/Dockerfile.pack-binaries -t attune-pack-builder:latest .
+@echo "✅ Pack binaries image built: attune-pack-builder:latest ($(PACK_BINARIES_RUST_TARGET))"
+docker-build-pack-binaries-arm64:
+@echo "Building arm64 pack binaries Docker image..."
+DOCKER_BUILDKIT=1 docker buildx build --build-arg RUST_TARGET=aarch64-unknown-linux-musl --target pack-binaries-init -f docker/Dockerfile.pack-binaries -t attune-pack-builder:arm64 .
+@echo "✅ Pack binaries image built: attune-pack-builder:arm64"
+docker-build-pack-binaries-all:
+@echo "Building pack binaries Docker images for all architectures..."
+$(MAKE) docker-build-pack-binaries
+$(MAKE) docker-build-pack-binaries-arm64
+@echo "✅ All pack binary images built: attune-pack-builder:latest (amd64), attune-pack-builder:arm64"
run-sensor-agent:
cargo run --bin attune-sensor-agent


@@ -11,7 +11,7 @@ stringData:
ATTUNE__SECURITY__ENCRYPTION_KEY: {{ .Values.security.encryptionKey | quote }}
ATTUNE__DATABASE__URL: {{ include "attune.databaseUrl" . | quote }}
ATTUNE__MESSAGE_QUEUE__URL: {{ include "attune.rabbitmqUrl" . | quote }}
-ATTUNE__CACHE__URL: {{ include "attune.redisUrl" . | quote }}
+ATTUNE__REDIS__URL: {{ include "attune.redisUrl" . | quote }}
DB_HOST: {{ include "attune.postgresqlServiceName" . | quote }}
DB_PORT: {{ .Values.database.port | quote }}
DB_USER: {{ .Values.database.username | quote }}
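
Editor's note: the ATTUNE__REDIS__URL rename follows the double-underscore convention that maps one environment variable onto a nested configuration section (the cache section becoming redis). A minimal sketch of that mapping, assuming the services read these variables with something like the config crate (an assumption; the actual loader is not shown in this diff):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct RedisSettings {
        url: String,
    }

    #[derive(Debug, Deserialize)]
    struct Settings {
        redis: RedisSettings,
    }

    // ATTUNE__REDIS__URL=redis://... deserializes into settings.redis.url:
    // the ATTUNE prefix is stripped and "__" separates nesting levels.
    fn load() -> Result<Settings, config::ConfigError> {
        config::Config::builder()
            .add_source(
                config::Environment::with_prefix("ATTUNE")
                    .prefix_separator("__")
                    .separator("__"),
            )
            .build()?
            .try_deserialize()
    }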

View File

@@ -62,6 +62,8 @@ pack_registry:
enabled: true
default_registry: https://registry.attune.example.com
cache_ttl: 300
allowed_source_hosts:
- registry.attune.example.com
# Test worker configuration
# worker:
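
Editor's note: the new allowed_source_hosts list implies an allow-list check on pack source URLs. A hypothetical sketch of that kind of guard (helper name and exact semantics are illustrative, not taken from the code; the url crate is already a dependency):

    // Reject pack sources whose host is not explicitly allow-listed.
    fn source_host_allowed(source_url: &str, allowed: &[String]) -> bool {
        url::Url::parse(source_url)
            .ok()
            .and_then(|u| u.host_str().map(str::to_owned))
            .map(|host| allowed.iter().any(|a| a.eq_ignore_ascii_case(&host)))
            .unwrap_or(false)
    }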

View File

@@ -70,7 +70,7 @@ jsonschema = { workspace = true }
# HTTP client
reqwest = { workspace = true }
openidconnect = "4.0"
ldap3 = "0.12"
ldap3 = { version = "0.12", default-features = false, features = ["sync", "tls-rustls-ring"] }
url = { workspace = true }
# Archive/compression

View File

@@ -3,7 +3,10 @@
use attune_common::{
config::LdapConfig,
repositories::{
-identity::{CreateIdentityInput, IdentityRepository, UpdateIdentityInput},
+identity::{
+CreateIdentityInput, IdentityRepository, IdentityRoleAssignmentRepository,
+UpdateIdentityInput,
+},
Create, Update,
},
};
@@ -63,6 +66,11 @@ pub async fn authenticate(
// Upsert identity in DB and issue JWT tokens
let identity = upsert_identity(state, &claims).await?;
if identity.frozen {
return Err(ApiError::Forbidden(
"Identity is frozen and cannot authenticate".to_string(),
));
}
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
@@ -131,7 +139,8 @@ fn conn_settings(config: &LdapConfig) -> LdapConnSettings {
/// Open a new LDAP connection.
async fn connect(config: &LdapConfig) -> Result<Ldap, ApiError> {
let settings = conn_settings(config);
-let (conn, ldap) = LdapConnAsync::with_settings(settings, &config.url)
+let url = config.url.as_deref().unwrap_or_default();
+let (conn, ldap) = LdapConnAsync::with_settings(settings, url)
.await
.map_err(|err| {
ApiError::InternalServerError(format!("Failed to connect to LDAP server: {err}"))
@@ -325,7 +334,7 @@ fn extract_claims(config: &LdapConfig, entry: &SearchEntry) -> LdapUserClaims {
.unwrap_or_default();
LdapUserClaims {
-server_url: config.url.clone(),
+server_url: config.url.clone().unwrap_or_default(),
dn: entry.dn.clone(),
login: first_attr(&config.login_attr),
email: first_attr(&config.email_attr),
@@ -351,10 +360,13 @@ async fn upsert_identity(
display_name,
password_hash: None,
attributes: Some(attributes),
frozen: None,
};
-IdentityRepository::update(&state.db, identity.id, updated)
+let identity = IdentityRepository::update(&state.db, identity.id, updated)
.await
-.map_err(Into::into)
+.map_err(ApiError::from)?;
sync_roles(&state.db, identity.id, "ldap", &claims.groups).await?;
Ok(identity)
}
None => {
// Avoid login collisions
@@ -363,7 +375,7 @@ async fn upsert_identity(
None => desired_login,
};
-IdentityRepository::create(
+let identity = IdentityRepository::create(
&state.db,
CreateIdentityInput {
login,
@@ -372,11 +384,24 @@ async fn upsert_identity(
attributes,
},
)
.await
.map_err(ApiError::from)?;
sync_roles(&state.db, identity.id, "ldap", &claims.groups).await?;
Ok(identity)
}
}
}
async fn sync_roles(
db: &sqlx::PgPool,
identity_id: i64,
source: &str,
roles: &[String],
) -> Result<(), ApiError> {
IdentityRoleAssignmentRepository::replace_managed_roles(db, identity_id, source, roles)
.await
.map_err(Into::into)
}
}
}
/// Derive the login name from LDAP claims.
fn derive_login(claims: &LdapUserClaims) -> String {

View File

@@ -3,7 +3,10 @@
use attune_common::{
config::OidcConfig,
repositories::{
-identity::{CreateIdentityInput, IdentityRepository, UpdateIdentityInput},
+identity::{
+CreateIdentityInput, IdentityRepository, IdentityRoleAssignmentRepository,
+UpdateIdentityInput,
+},
Create, Update,
},
};
@@ -123,15 +126,17 @@ pub async fn build_login_redirect(
.map_err(|err| {
ApiError::InternalServerError(format!("Failed to build OIDC HTTP client: {err}"))
})?;
-let redirect_uri = RedirectUrl::new(oidc.redirect_uri.clone()).map_err(|err| {
+let redirect_uri_str = oidc.redirect_uri.clone().unwrap_or_default();
+let redirect_uri = RedirectUrl::new(redirect_uri_str).map_err(|err| {
ApiError::InternalServerError(format!("Invalid OIDC redirect URI: {err}"))
})?;
let client_secret = oidc.client_secret.clone().ok_or_else(|| {
ApiError::InternalServerError("OIDC client secret is missing".to_string())
})?;
let client_id = oidc.client_id.clone().unwrap_or_default();
let client = CoreClient::from_provider_metadata(
discovery.metadata.clone(),
-ClientId::new(oidc.client_id.clone()),
+ClientId::new(client_id),
Some(ClientSecret::new(client_secret)),
)
.set_redirect_uri(redirect_uri);
@@ -235,15 +240,17 @@ pub async fn handle_callback(
.map_err(|err| {
ApiError::InternalServerError(format!("Failed to build OIDC HTTP client: {err}"))
})?;
-let redirect_uri = RedirectUrl::new(oidc.redirect_uri.clone()).map_err(|err| {
+let redirect_uri_str = oidc.redirect_uri.clone().unwrap_or_default();
+let redirect_uri = RedirectUrl::new(redirect_uri_str).map_err(|err| {
ApiError::InternalServerError(format!("Invalid OIDC redirect URI: {err}"))
})?;
let client_secret = oidc.client_secret.clone().ok_or_else(|| {
ApiError::InternalServerError("OIDC client secret is missing".to_string())
})?;
let client_id = oidc.client_id.clone().unwrap_or_default();
let client = CoreClient::from_provider_metadata(
discovery.metadata.clone(),
-ClientId::new(oidc.client_id.clone()),
+ClientId::new(client_id),
Some(ClientSecret::new(client_secret)),
)
.set_redirect_uri(redirect_uri);
@@ -282,6 +289,11 @@ pub async fn handle_callback(
}
let identity = upsert_identity(state, &oidc_claims).await?;
if identity.frozen {
return Err(ApiError::Forbidden(
"Identity is frozen and cannot authenticate".to_string(),
));
}
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
@@ -328,7 +340,7 @@ pub async fn build_logout_redirect(
pairs.append_pair("id_token_hint", &id_token_hint);
}
pairs.append_pair("post_logout_redirect_uri", &post_logout_redirect_uri);
pairs.append_pair("client_id", &oidc.client_id);
pairs.append_pair("client_id", oidc.client_id.as_deref().unwrap_or_default());
}
String::from(url)
} else {
@@ -473,7 +485,8 @@ fn oidc_config(state: &SharedState) -> Result<OidcConfig, ApiError> {
}
async fn fetch_discovery_document(oidc: &OidcConfig) -> Result<OidcDiscoveryDocument, ApiError> {
-let discovery = reqwest::get(&oidc.discovery_url).await.map_err(|err| {
+let discovery_url = oidc.discovery_url.as_deref().unwrap_or_default();
+let discovery = reqwest::get(discovery_url).await.map_err(|err| {
ApiError::InternalServerError(format!("Failed to fetch OIDC discovery document: {err}"))
})?;
@@ -511,10 +524,13 @@ async fn upsert_identity(
display_name,
password_hash: None,
attributes: Some(attributes.clone()),
frozen: None,
};
-IdentityRepository::update(&state.db, identity.id, updated)
+let identity = IdentityRepository::update(&state.db, identity.id, updated)
.await
-.map_err(Into::into)
+.map_err(ApiError::from)?;
sync_roles(&state.db, identity.id, "oidc", &oidc_claims.groups).await?;
Ok(identity)
}
None => {
let login = match IdentityRepository::find_by_login(&state.db, &desired_login).await? {
@@ -522,7 +538,7 @@ async fn upsert_identity(
None => desired_login,
};
-IdentityRepository::create(
+let identity = IdentityRepository::create(
&state.db,
CreateIdentityInput {
login,
@@ -531,11 +547,24 @@ async fn upsert_identity(
attributes,
},
)
.await
.map_err(ApiError::from)?;
sync_roles(&state.db, identity.id, "oidc", &oidc_claims.groups).await?;
Ok(identity)
}
}
}
async fn sync_roles(
db: &sqlx::PgPool,
identity_id: i64,
source: &str,
roles: &[String],
) -> Result<(), ApiError> {
IdentityRoleAssignmentRepository::replace_managed_roles(db, identity_id, source, roles)
.await
.map_err(Into::into)
}
}
}
fn derive_login(oidc_claims: &OidcIdentityClaims) -> String {
oidc_claims
@@ -597,7 +626,7 @@ async fn verify_id_token(
let issuer = discovery.metadata.issuer().to_string();
let mut validation = Validation::new(algorithm);
validation.set_issuer(&[issuer.as_str()]);
-validation.set_audience(&[oidc.client_id.as_str()]);
+validation.set_audience(&[oidc.client_id.as_deref().unwrap_or_default()]);
validation.set_required_spec_claims(&["exp", "iat", "iss", "sub", "aud"]);
validation.validate_nbf = false;
@@ -716,7 +745,8 @@ fn should_use_secure_cookies(state: &SharedState) -> bool {
.security
.oidc
.as_ref()
-.map(|oidc| oidc.redirect_uri.starts_with("https://"))
+.and_then(|oidc| oidc.redirect_uri.as_deref())
+.map(|uri| uri.starts_with("https://"))
.unwrap_or(false)
}

View File

@@ -10,7 +10,7 @@ use crate::{
use attune_common::{
rbac::{Action, AuthorizationContext, Grant, Resource},
repositories::{
-identity::{IdentityRepository, PermissionSetRepository},
+identity::{IdentityRepository, IdentityRoleAssignmentRepository, PermissionSetRepository},
FindById,
},
};
@@ -95,8 +95,16 @@ impl AuthorizationService {
}
async fn load_effective_grants(&self, identity_id: i64) -> Result<Vec<Grant>, ApiError> {
-let permission_sets =
+let mut permission_sets =
PermissionSetRepository::find_by_identity(&self.db, identity_id).await?;
let roles =
IdentityRoleAssignmentRepository::find_role_names_by_identity(&self.db, identity_id)
.await?;
let role_permission_sets = PermissionSetRepository::find_by_roles(&self.db, &roles).await?;
permission_sets.extend(role_permission_sets);
let mut seen_permission_sets = std::collections::HashSet::new();
permission_sets.retain(|permission_set| seen_permission_sets.insert(permission_set.id));
let mut grants = Vec::new();
for permission_set in permission_sets {
@@ -126,10 +134,6 @@ fn resource_name(resource: Resource) -> &'static str {
Resource::Inquiries => "inquiries",
Resource::Keys => "keys",
Resource::Artifacts => "artifacts",
Resource::Workflows => "workflows",
Resource::Webhooks => "webhooks",
Resource::Analytics => "analytics",
Resource::History => "history",
Resource::Identities => "identities",
Resource::Permissions => "permissions",
}
@@ -145,5 +149,6 @@ fn action_name(action: Action) -> &'static str {
Action::Cancel => "cancel",
Action::Respond => "respond",
Action::Manage => "manage",
Action::Decrypt => "decrypt",
}
}
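
Editor's note: the reworked load_effective_grants merges directly assigned permission sets with role-derived ones, then deduplicates by id, so a set reachable both ways contributes its grants once. The retain-with-HashSet step in isolation, with made-up ids:

    // First occurrence of each permission set id wins; later duplicates drop out.
    let mut sets = vec![1i64, 2, 3]; // direct assignments
    sets.extend([2i64, 4]); // reached via role assignments
    let mut seen = std::collections::HashSet::new();
    sets.retain(|id| seen.insert(*id));
    assert_eq!(sets, vec![1, 2, 3, 4]);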

View File

@@ -25,9 +25,8 @@ pub struct CreateActionRequest {
pub label: String,
/// Action description
#[validate(length(min = 1))]
#[schema(example = "Posts a message to a Slack channel")]
pub description: String,
pub description: Option<String>,
/// Entry point for action execution (e.g., path to script, function name)
#[validate(length(min = 1, max = 1024))]
@@ -63,7 +62,6 @@ pub struct UpdateActionRequest {
pub label: Option<String>,
/// Action description
#[validate(length(min = 1))]
#[schema(example = "Posts a message to a Slack channel with enhanced features")]
pub description: Option<String>,
@@ -121,7 +119,7 @@ pub struct ActionResponse {
/// Action description
#[schema(example = "Posts a message to a Slack channel")]
-pub description: String,
+pub description: Option<String>,
/// Entry point
#[schema(example = "/actions/slack/post_message.py")]
@@ -183,7 +181,7 @@ pub struct ActionSummary {
/// Action description
#[schema(example = "Posts a message to a Slack channel")]
-pub description: String,
+pub description: Option<String>,
/// Entry point
#[schema(example = "/actions/slack/post_message.py")]
@@ -321,7 +319,7 @@ mod tests {
r#ref: "".to_string(), // Invalid: empty
pack_ref: "test-pack".to_string(),
label: "Test Action".to_string(),
description: "Test description".to_string(),
description: Some("Test description".to_string()),
entrypoint: "/actions/test.py".to_string(),
runtime: None,
runtime_version_constraint: None,
@@ -338,7 +336,7 @@ mod tests {
r#ref: "test.action".to_string(),
pack_ref: "test-pack".to_string(),
label: "Test Action".to_string(),
description: "Test description".to_string(),
description: Some("Test description".to_string()),
entrypoint: "/actions/test.py".to_string(),
runtime: None,
runtime_version_constraint: None,

View File

@@ -51,9 +51,10 @@ pub use inquiry::{
pub use key::{CreateKeyRequest, KeyQueryParams, KeyResponse, KeySummary, UpdateKeyRequest};
pub use pack::{CreatePackRequest, PackResponse, PackSummary, UpdatePackRequest};
pub use permission::{
-CreateIdentityRequest, CreatePermissionAssignmentRequest, IdentityResponse, IdentitySummary,
-PermissionAssignmentResponse, PermissionSetQueryParams, PermissionSetSummary,
-UpdateIdentityRequest,
+CreateIdentityRequest, CreateIdentityRoleAssignmentRequest, CreatePermissionAssignmentRequest,
+CreatePermissionSetRoleAssignmentRequest, IdentityResponse, IdentityRoleAssignmentResponse,
+IdentitySummary, PermissionAssignmentResponse, PermissionSetQueryParams,
+PermissionSetRoleAssignmentResponse, PermissionSetSummary, UpdateIdentityRequest,
};
pub use rule::{CreateRuleRequest, RuleResponse, RuleSummary, UpdateRuleRequest};
pub use runtime::{CreateRuntimeRequest, RuntimeResponse, RuntimeSummary, UpdateRuntimeRequest};

View File

@@ -14,10 +14,32 @@ pub struct IdentitySummary {
pub id: i64,
pub login: String,
pub display_name: Option<String>,
pub frozen: bool,
pub attributes: JsonValue,
pub roles: Vec<String>,
}
-pub type IdentityResponse = IdentitySummary;
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct IdentityRoleAssignmentResponse {
pub id: i64,
pub identity_id: i64,
pub role: String,
pub source: String,
pub managed: bool,
pub created: chrono::DateTime<chrono::Utc>,
pub updated: chrono::DateTime<chrono::Utc>,
}
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct IdentityResponse {
pub id: i64,
pub login: String,
pub display_name: Option<String>,
pub frozen: bool,
pub attributes: JsonValue,
pub roles: Vec<IdentityRoleAssignmentResponse>,
pub direct_permissions: Vec<PermissionAssignmentResponse>,
}
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct PermissionSetSummary {
@@ -27,6 +49,7 @@ pub struct PermissionSetSummary {
pub label: Option<String>,
pub description: Option<String>,
pub grants: JsonValue,
pub roles: Vec<PermissionSetRoleAssignmentResponse>,
}
#[derive(Debug, Clone, Serialize, ToSchema)]
@@ -38,6 +61,15 @@ pub struct PermissionAssignmentResponse {
pub created: chrono::DateTime<chrono::Utc>,
}
#[derive(Debug, Clone, Serialize, ToSchema)]
pub struct PermissionSetRoleAssignmentResponse {
pub id: i64,
pub permission_set_id: i64,
pub permission_set_ref: Option<String>,
pub role: String,
pub created: chrono::DateTime<chrono::Utc>,
}
#[derive(Debug, Clone, Deserialize, ToSchema)]
pub struct CreatePermissionAssignmentRequest {
pub identity_id: Option<i64>,
@@ -45,6 +77,18 @@ pub struct CreatePermissionAssignmentRequest {
pub permission_set_ref: String,
}
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct CreateIdentityRoleAssignmentRequest {
#[validate(length(min = 1, max = 255))]
pub role: String,
}
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct CreatePermissionSetRoleAssignmentRequest {
#[validate(length(min = 1, max = 255))]
pub role: String,
}
#[derive(Debug, Clone, Deserialize, Validate, ToSchema)]
pub struct CreateIdentityRequest {
#[validate(length(min = 3, max = 255))]
@@ -62,4 +106,5 @@ pub struct UpdateIdentityRequest {
pub display_name: Option<String>,
pub password: Option<String>,
pub attributes: Option<JsonValue>,
pub frozen: Option<bool>,
}

View File

@@ -25,9 +25,8 @@ pub struct CreateRuleRequest {
pub label: String,
/// Rule description
#[validate(length(min = 1))]
#[schema(example = "Send Slack notification when an error occurs")]
pub description: String,
pub description: Option<String>,
/// Action reference to execute when rule matches
#[validate(length(min = 1, max = 255))]
@@ -69,7 +68,6 @@ pub struct UpdateRuleRequest {
pub label: Option<String>,
/// Rule description
#[validate(length(min = 1))]
#[schema(example = "Enhanced error notification with filtering")]
pub description: Option<String>,
@@ -115,7 +113,7 @@ pub struct RuleResponse {
/// Rule description
#[schema(example = "Send Slack notification when an error occurs")]
-pub description: String,
+pub description: Option<String>,
/// Action ID (null if the referenced action has been deleted)
#[schema(example = 1)]
@@ -183,7 +181,7 @@ pub struct RuleSummary {
/// Rule description
#[schema(example = "Send Slack notification when an error occurs")]
-pub description: String,
+pub description: Option<String>,
/// Action reference
#[schema(example = "slack.post_message")]
@@ -297,7 +295,7 @@ mod tests {
r#ref: "".to_string(), // Invalid: empty
pack_ref: "test-pack".to_string(),
label: "Test Rule".to_string(),
description: "Test description".to_string(),
description: Some("Test description".to_string()),
action_ref: "test.action".to_string(),
trigger_ref: "test.trigger".to_string(),
conditions: default_empty_object(),
@@ -315,7 +313,7 @@ mod tests {
r#ref: "test.rule".to_string(),
pack_ref: "test-pack".to_string(),
label: "Test Rule".to_string(),
description: "Test description".to_string(),
description: Some("Test description".to_string()),
action_ref: "test.action".to_string(),
trigger_ref: "test.trigger".to_string(),
conditions: serde_json::json!({

View File

@@ -203,9 +203,8 @@ pub struct CreateSensorRequest {
pub label: String,
/// Sensor description
#[validate(length(min = 1))]
#[schema(example = "Monitors CPU usage and generates events")]
pub description: String,
pub description: Option<String>,
/// Entry point for sensor execution (e.g., path to script, function name)
#[validate(length(min = 1, max = 1024))]
@@ -247,7 +246,6 @@ pub struct UpdateSensorRequest {
pub label: Option<String>,
/// Sensor description
#[validate(length(min = 1))]
#[schema(example = "Enhanced CPU monitoring with alerts")]
pub description: Option<String>,
@@ -297,7 +295,7 @@ pub struct SensorResponse {
/// Sensor description
#[schema(example = "Monitors CPU usage and generates events")]
-pub description: String,
+pub description: Option<String>,
/// Entry point
#[schema(example = "/sensors/monitoring/cpu_monitor.py")]
@@ -357,7 +355,7 @@ pub struct SensorSummary {
/// Sensor description
#[schema(example = "Monitors CPU usage and generates events")]
-pub description: String,
+pub description: Option<String>,
/// Trigger reference
#[schema(example = "monitoring.cpu_threshold")]
@@ -499,7 +497,7 @@ mod tests {
r#ref: "test.sensor".to_string(),
pack_ref: "test-pack".to_string(),
label: "Test Sensor".to_string(),
description: "Test description".to_string(),
description: Some("Test description".to_string()),
entrypoint: "/sensors/test.py".to_string(),
runtime_ref: "python3".to_string(),
trigger_ref: "test.trigger".to_string(),

View File

@@ -27,8 +27,11 @@ use crate::dto::{
UpdatePackRequest, WorkflowSyncResult,
},
permission::{
-CreateIdentityRequest, CreatePermissionAssignmentRequest, IdentityResponse,
-IdentitySummary, PermissionAssignmentResponse, PermissionSetSummary, UpdateIdentityRequest,
+CreateIdentityRequest, CreateIdentityRoleAssignmentRequest,
+CreatePermissionAssignmentRequest, CreatePermissionSetRoleAssignmentRequest,
+IdentityResponse, IdentityRoleAssignmentResponse, IdentitySummary,
+PermissionAssignmentResponse, PermissionSetRoleAssignmentResponse, PermissionSetSummary,
+UpdateIdentityRequest,
},
rule::{CreateRuleRequest, RuleResponse, RuleSummary, UpdateRuleRequest},
runtime::{CreateRuntimeRequest, RuntimeResponse, RuntimeSummary, UpdateRuntimeRequest},
@@ -185,6 +188,12 @@ use crate::dto::{
crate::routes::permissions::list_identity_permissions,
crate::routes::permissions::create_permission_assignment,
crate::routes::permissions::delete_permission_assignment,
crate::routes::permissions::create_identity_role_assignment,
crate::routes::permissions::delete_identity_role_assignment,
crate::routes::permissions::create_permission_set_role_assignment,
crate::routes::permissions::delete_permission_set_role_assignment,
crate::routes::permissions::freeze_identity,
crate::routes::permissions::unfreeze_identity,
// Workflows
crate::routes::workflows::list_workflows,
@@ -277,6 +286,10 @@ use crate::dto::{
PermissionSetSummary,
PermissionAssignmentResponse,
CreatePermissionAssignmentRequest,
CreateIdentityRoleAssignmentRequest,
IdentityRoleAssignmentResponse,
CreatePermissionSetRoleAssignmentRequest,
PermissionSetRoleAssignmentResponse,
// Runtime DTOs
CreateRuntimeRequest,

View File

@@ -277,7 +277,7 @@ pub async fn update_action(
// Create update input
let update_input = UpdateActionInput {
label: request.label,
-description: request.description,
+description: request.description.map(Patch::Set),
entrypoint: request.entrypoint,
runtime: request.runtime,
runtime_version_constraint: request.runtime_version_constraint.map(|patch| match patch {
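
Editor's note: the map(Patch::Set) line lifts the request's Option<String> into the repository's patch type, so an omitted description leaves the stored value untouched while a provided one overwrites it. A rough sketch of the semantics assumed here (the real Patch enum lives in attune_common and may carry more variants):

    enum Patch<T> {
        Set(T),
    }

    // None means "field absent from the request, keep the current value";
    // Some(Patch::Set(v)) means "overwrite with v".
    fn apply_description(current: Option<String>, patch: Option<Patch<String>>) -> Option<String> {
        match patch {
            Some(Patch::Set(value)) => Some(value),
            None => current,
        }
    }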

View File

@@ -40,7 +40,8 @@ use attune_common::repositories::{
};
use crate::{
-auth::middleware::RequireAuth,
+auth::{jwt::TokenType, middleware::AuthenticatedUser, middleware::RequireAuth},
authz::{AuthorizationCheck, AuthorizationService},
dto::{
artifact::{
AllocateFileVersionByRefRequest, AppendProgressRequest, ArtifactExecutionPatch,
@@ -55,6 +56,7 @@ use crate::{
middleware::{ApiError, ApiResult},
state::AppState,
};
use attune_common::rbac::{Action, AuthorizationContext, Resource};
// ============================================================================
// Artifact CRUD
@@ -72,7 +74,7 @@ use crate::{
security(("bearer_auth" = []))
)]
pub async fn list_artifacts(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Query(query): Query<ArtifactQueryParams>,
) -> ApiResult<impl IntoResponse> {
@@ -88,8 +90,16 @@ pub async fn list_artifacts(
};
let result = ArtifactRepository::search(&state.db, &filters).await?;
+let mut rows = result.rows;
-let items: Vec<ArtifactSummary> = result.rows.into_iter().map(ArtifactSummary::from).collect();
+if let Some((identity_id, grants)) = ensure_can_read_any_artifact(&state, &user).await? {
+rows.retain(|artifact| {
+let ctx = artifact_authorization_context(identity_id, artifact);
+AuthorizationService::is_allowed(&grants, Resource::Artifacts, Action::Read, &ctx)
+});
+}
+let items: Vec<ArtifactSummary> = rows.into_iter().map(ArtifactSummary::from).collect();
let pagination = PaginationParams {
page: query.page,
@@ -113,7 +123,7 @@ pub async fn list_artifacts(
security(("bearer_auth" = []))
)]
pub async fn get_artifact(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
@@ -121,6 +131,10 @@ pub async fn get_artifact(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
Ok((
StatusCode::OK,
Json(ApiResponse::new(ArtifactResponse::from(artifact))),
@@ -140,7 +154,7 @@ pub async fn get_artifact(
security(("bearer_auth" = []))
)]
pub async fn get_artifact_by_ref(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(artifact_ref): Path<String>,
) -> ApiResult<impl IntoResponse> {
@@ -148,6 +162,10 @@ pub async fn get_artifact_by_ref(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact '{}' not found", artifact_ref)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact '{}' not found", artifact_ref)))?;
Ok((
StatusCode::OK,
Json(ApiResponse::new(ArtifactResponse::from(artifact))),
@@ -168,7 +186,7 @@ pub async fn get_artifact_by_ref(
security(("bearer_auth" = []))
)]
pub async fn create_artifact(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Json(request): Json<CreateArtifactRequest>,
) -> ApiResult<impl IntoResponse> {
@@ -200,6 +218,16 @@ pub async fn create_artifact(
}
});
authorize_artifact_create(
&state,
&user,
&request.r#ref,
request.scope,
&request.owner,
visibility,
)
.await?;
let input = CreateArtifactInput {
r#ref: request.r#ref,
scope: request.scope,
@@ -240,16 +268,18 @@ pub async fn create_artifact(
security(("bearer_auth" = []))
)]
pub async fn update_artifact(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
Json(request): Json<UpdateArtifactRequest>,
) -> ApiResult<impl IntoResponse> {
// Verify artifact exists
-ArtifactRepository::find_by_id(&state.db, id)
+let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
let input = UpdateArtifactInput {
r#ref: None, // Ref is immutable after creation
scope: request.scope,
@@ -305,7 +335,7 @@ pub async fn update_artifact(
security(("bearer_auth" = []))
)]
pub async fn delete_artifact(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
@@ -313,6 +343,8 @@ pub async fn delete_artifact(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Delete, &artifact).await?;
// Before deleting DB rows, clean up any file-backed versions on disk
let file_versions =
ArtifactVersionRepository::find_file_versions_by_artifact(&state.db, id).await?;
@@ -355,11 +387,17 @@ pub async fn delete_artifact(
security(("bearer_auth" = []))
)]
pub async fn list_artifacts_by_execution(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(execution_id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
-let artifacts = ArtifactRepository::find_by_execution(&state.db, execution_id).await?;
+let mut artifacts = ArtifactRepository::find_by_execution(&state.db, execution_id).await?;
if let Some((identity_id, grants)) = ensure_can_read_any_artifact(&state, &user).await? {
artifacts.retain(|artifact| {
let ctx = artifact_authorization_context(identity_id, artifact);
AuthorizationService::is_allowed(&grants, Resource::Artifacts, Action::Read, &ctx)
});
}
let items: Vec<ArtifactSummary> = artifacts.into_iter().map(ArtifactSummary::from).collect();
Ok((StatusCode::OK, Json(ApiResponse::new(items))))
@@ -387,7 +425,7 @@ pub async fn list_artifacts_by_execution(
security(("bearer_auth" = []))
)]
pub async fn append_progress(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
Json(request): Json<AppendProgressRequest>,
@@ -396,6 +434,8 @@ pub async fn append_progress(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
if artifact.r#type != ArtifactType::Progress {
return Err(ApiError::BadRequest(format!(
"Artifact '{}' is type {:?}, not progress. Use version endpoints for file artifacts.",
@@ -430,16 +470,18 @@ pub async fn append_progress(
security(("bearer_auth" = []))
)]
pub async fn set_artifact_data(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
Json(request): Json<SetDataRequest>,
) -> ApiResult<impl IntoResponse> {
// Verify exists
-ArtifactRepository::find_by_id(&state.db, id)
+let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
let updated = ArtifactRepository::set_data(&state.db, id, &request.data).await?;
Ok((
@@ -468,15 +510,19 @@ pub async fn set_artifact_data(
security(("bearer_auth" = []))
)]
pub async fn list_versions(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
// Verify artifact exists
-ArtifactRepository::find_by_id(&state.db, id)
+let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
let versions = ArtifactVersionRepository::list_by_artifact(&state.db, id).await?;
let items: Vec<ArtifactVersionSummary> = versions
.into_iter()
@@ -502,15 +548,19 @@ pub async fn list_versions(
security(("bearer_auth" = []))
)]
pub async fn get_version(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path((id, version)): Path<(i64, i32)>,
) -> ApiResult<impl IntoResponse> {
// Verify artifact exists
-ArtifactRepository::find_by_id(&state.db, id)
+let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
.await?
.ok_or_else(|| {
@@ -536,14 +586,18 @@ pub async fn get_version(
security(("bearer_auth" = []))
)]
pub async fn get_latest_version(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
-ArtifactRepository::find_by_id(&state.db, id)
+let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("No versions found for artifact {}", id)))?;
@@ -568,15 +622,17 @@ pub async fn get_latest_version(
security(("bearer_auth" = []))
)]
pub async fn create_version_json(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
Json(request): Json<CreateVersionJsonRequest>,
) -> ApiResult<impl IntoResponse> {
-ArtifactRepository::find_by_id(&state.db, id)
+let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
let input = CreateArtifactVersionInput {
artifact: id,
content_type: Some(
@@ -624,7 +680,7 @@ pub async fn create_version_json(
security(("bearer_auth" = []))
)]
pub async fn create_version_file(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
Json(request): Json<CreateFileVersionRequest>,
@@ -633,6 +689,8 @@ pub async fn create_version_file(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
// Validate this is a file-type artifact
if !is_file_backed_type(artifact.r#type) {
return Err(ApiError::BadRequest(format!(
@@ -726,15 +784,17 @@ pub async fn create_version_file(
security(("bearer_auth" = []))
)]
pub async fn upload_version(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
mut multipart: Multipart,
) -> ApiResult<impl IntoResponse> {
-ArtifactRepository::find_by_id(&state.db, id)
+let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Update, &artifact).await?;
let mut file_data: Option<Vec<u8>> = None;
let mut content_type: Option<String> = None;
let mut meta: Option<serde_json::Value> = None;
@@ -854,7 +914,7 @@ pub async fn upload_version(
security(("bearer_auth" = []))
)]
pub async fn download_version(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path((id, version)): Path<(i64, i32)>,
) -> ApiResult<impl IntoResponse> {
@@ -862,6 +922,10 @@ pub async fn download_version(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
.await?
@@ -904,7 +968,7 @@ pub async fn download_version(
security(("bearer_auth" = []))
)]
pub async fn download_latest(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
@@ -912,6 +976,10 @@ pub async fn download_latest(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
// First try without content (cheaper query) to check for file_path
let ver = ArtifactVersionRepository::find_latest(&state.db, id)
.await?
@@ -955,7 +1023,7 @@ pub async fn download_latest(
security(("bearer_auth" = []))
)]
pub async fn delete_version(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path((id, version)): Path<(i64, i32)>,
) -> ApiResult<impl IntoResponse> {
@@ -964,6 +1032,8 @@ pub async fn delete_version(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Delete, &artifact).await?;
// Find the version by artifact + version number
let ver = ArtifactVersionRepository::find_by_version(&state.db, id, version)
.await?
@@ -1042,7 +1112,7 @@ pub async fn delete_version(
security(("bearer_auth" = []))
)]
pub async fn upload_version_by_ref(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(artifact_ref): Path<String>,
mut multipart: Multipart,
@@ -1157,6 +1227,8 @@ pub async fn upload_version_by_ref(
// Upsert: find existing artifact or create a new one
let artifact = match ArtifactRepository::find_by_ref(&state.db, &artifact_ref).await? {
Some(existing) => {
authorize_artifact_action(&state, &user, Action::Update, &existing).await?;
// Update execution link if a new execution ID was provided
if execution_id.is_some() && execution_id != existing.execution {
let update_input = UpdateArtifactInput {
@@ -1211,6 +1283,16 @@ pub async fn upload_version_by_ref(
}
};
authorize_artifact_create(
&state,
&user,
&artifact_ref,
a_scope,
owner.as_deref().unwrap_or_default(),
a_visibility,
)
.await?;
// Parse retention
let a_retention_policy: RetentionPolicyType = match &retention_policy {
Some(rp) if !rp.is_empty() => {
@@ -1297,7 +1379,7 @@ pub async fn upload_version_by_ref(
security(("bearer_auth" = []))
)]
pub async fn allocate_file_version_by_ref(
-RequireAuth(_user): RequireAuth,
+RequireAuth(user): RequireAuth,
State(state): State<Arc<AppState>>,
Path(artifact_ref): Path<String>,
Json(request): Json<AllocateFileVersionByRefRequest>,
@@ -1305,6 +1387,8 @@ pub async fn allocate_file_version_by_ref(
// Upsert: find existing artifact or create a new one
let artifact = match ArtifactRepository::find_by_ref(&state.db, &artifact_ref).await? {
Some(existing) => {
authorize_artifact_action(&state, &user, Action::Update, &existing).await?;
// Update execution link if a new execution ID was provided
if request.execution.is_some() && request.execution != existing.execution {
let update_input = UpdateArtifactInput {
@@ -1347,6 +1431,16 @@ pub async fn allocate_file_version_by_ref(
.unwrap_or(RetentionPolicyType::Versions);
let a_retention_limit = request.retention_limit.unwrap_or(10);
authorize_artifact_create(
&state,
&user,
&artifact_ref,
a_scope,
request.owner.as_deref().unwrap_or_default(),
a_visibility,
)
.await?;
let create_input = CreateArtifactInput {
r#ref: artifact_ref.clone(),
scope: a_scope,
@@ -1437,6 +1531,105 @@ pub async fn allocate_file_version_by_ref(
// Helpers
// ============================================================================
async fn authorize_artifact_action(
state: &Arc<AppState>,
user: &AuthenticatedUser,
action: Action,
artifact: &attune_common::models::artifact::Artifact,
) -> Result<(), ApiError> {
if user.claims.token_type != TokenType::Access {
return Ok(());
}
let identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
authz
.authorize(
user,
AuthorizationCheck {
resource: Resource::Artifacts,
action,
context: artifact_authorization_context(identity_id, artifact),
},
)
.await
}
async fn authorize_artifact_create(
state: &Arc<AppState>,
user: &AuthenticatedUser,
artifact_ref: &str,
scope: OwnerType,
owner: &str,
visibility: ArtifactVisibility,
) -> Result<(), ApiError> {
if user.claims.token_type != TokenType::Access {
return Ok(());
}
let identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
let mut ctx = AuthorizationContext::new(identity_id);
ctx.target_ref = Some(artifact_ref.to_string());
ctx.owner_type = Some(scope);
ctx.owner_ref = Some(owner.to_string());
ctx.visibility = Some(visibility);
authz
.authorize(
user,
AuthorizationCheck {
resource: Resource::Artifacts,
action: Action::Create,
context: ctx,
},
)
.await
}
async fn ensure_can_read_any_artifact(
state: &Arc<AppState>,
user: &AuthenticatedUser,
) -> Result<Option<(i64, Vec<attune_common::rbac::Grant>)>, ApiError> {
if user.claims.token_type != TokenType::Access {
return Ok(None);
}
let identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
let grants = authz.effective_grants(user).await?;
let can_read_any_artifact = grants
.iter()
.any(|g| g.resource == Resource::Artifacts && g.actions.contains(&Action::Read));
if !can_read_any_artifact {
return Err(ApiError::Forbidden(
"Insufficient permissions: artifacts:read".to_string(),
));
}
Ok(Some((identity_id, grants)))
}
fn artifact_authorization_context(
identity_id: i64,
artifact: &attune_common::models::artifact::Artifact,
) -> AuthorizationContext {
let mut ctx = AuthorizationContext::new(identity_id);
ctx.target_id = Some(artifact.id);
ctx.target_ref = Some(artifact.r#ref.clone());
ctx.owner_type = Some(artifact.scope);
ctx.owner_ref = Some(artifact.owner.clone());
ctx.visibility = Some(artifact.visibility);
ctx
}
/// Returns true for artifact types that should use file-backed storage on disk.
fn is_file_backed_type(artifact_type: ArtifactType) -> bool {
matches!(
@@ -1775,14 +1968,19 @@ pub async fn stream_artifact(
let token = params.token.as_ref().ok_or(ApiError::Unauthorized(
"Missing authentication token".to_string(),
))?;
-validate_token(token, &state.jwt_config)
+let claims = validate_token(token, &state.jwt_config)
.map_err(|_| ApiError::Unauthorized("Invalid authentication token".to_string()))?;
let user = AuthenticatedUser { claims };
// --- resolve artifact + latest version ---------------------------------
let artifact = ArtifactRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
authorize_artifact_action(&state, &user, Action::Read, &artifact)
.await
.map_err(|_| ApiError::NotFound(format!("Artifact with ID {} not found", id)))?;
if !is_file_backed_type(artifact.r#type) {
return Err(ApiError::BadRequest(format!(
"Artifact '{}' is type {:?} which is not file-backed. \

View File

@@ -169,6 +169,12 @@ pub async fn login(
.await?
.ok_or_else(|| ApiError::Unauthorized("Invalid login or password".to_string()))?;
if identity.frozen {
return Err(ApiError::Forbidden(
"Identity is frozen and cannot authenticate".to_string(),
));
}
// Check if identity has a password set
let password_hash = identity
.password_hash
@@ -324,6 +330,12 @@ pub async fn refresh_token(
.await?
.ok_or_else(|| ApiError::Unauthorized("Identity not found".to_string()))?;
if identity.frozen {
return Err(ApiError::Forbidden(
"Identity is frozen and cannot authenticate".to_string(),
));
}
// Generate new tokens
let access_token = generate_access_token(identity.id, &identity.login, &state.jwt_config)?;
let refresh_token = generate_refresh_token(identity.id, &identity.login, &state.jwt_config)?;
@@ -380,6 +392,12 @@ pub async fn get_current_user(
.await?
.ok_or_else(|| ApiError::NotFound("Identity not found".to_string()))?;
if identity.frozen {
return Err(ApiError::Forbidden(
"Identity is frozen and cannot authenticate".to_string(),
));
}
let response = CurrentUserResponse {
id: identity.id,
login: identity.login,
@@ -551,6 +569,7 @@ pub async fn change_password(
display_name: None,
password_hash: Some(new_password_hash),
attributes: None,
frozen: None,
};
IdentityRepository::update(&state.db, identity_id, update_input).await?;

View File

@@ -82,6 +82,17 @@ pub async fn create_event(
State(state): State<Arc<AppState>>,
Json(payload): Json<CreateEventRequest>,
) -> ApiResult<impl IntoResponse> {
// Only sensor and execution tokens may create events directly.
// User sessions must go through the webhook receiver instead.
use crate::auth::jwt::TokenType;
if user.0.claims.token_type == TokenType::Access {
return Err(ApiError::Forbidden(
"Events may only be created by sensor services. To fire an event as a user, \
enable webhooks on the trigger and POST to its webhook URL."
.to_string(),
));
}
// Validate request
payload
.validate()
@@ -128,7 +139,6 @@ pub async fn create_event(
};
// Determine source (sensor) from authenticated user if it's a sensor token
-use crate::auth::jwt::TokenType;
let (source_id, source_ref) = match user.0.claims.token_type {
TokenType::Sensor => {
// Extract sensor reference from login

View File

@@ -2,6 +2,7 @@
use axum::{
extract::{Path, Query, State},
http::HeaderMap,
http::StatusCode,
response::{
sse::{Event, KeepAlive, Sse},
@@ -13,6 +14,7 @@ use axum::{
use chrono::Utc;
use futures::stream::{Stream, StreamExt};
use std::sync::Arc;
use std::time::Duration;
use tokio_stream::wrappers::BroadcastStream;
use attune_common::models::enums::ExecutionStatus;
@@ -32,7 +34,10 @@ use attune_common::workflow::{CancellationPolicy, WorkflowDefinition};
use sqlx::Row;
use crate::{
-auth::middleware::RequireAuth,
+auth::{
+jwt::{validate_token, Claims, JwtConfig, TokenType},
+middleware::{AuthenticatedUser, RequireAuth},
+},
authz::{AuthorizationCheck, AuthorizationService},
dto::{
common::{PaginatedResponse, PaginationParams},
@@ -46,6 +51,9 @@ use crate::{
};
use attune_common::rbac::{Action, AuthorizationContext, Resource};
const LOG_STREAM_POLL_INTERVAL: Duration = Duration::from_millis(250);
const LOG_STREAM_READ_CHUNK_SIZE: usize = 64 * 1024;
/// Create a new execution (manual execution)
///
/// This endpoint allows directly executing an action without a trigger or rule.
@@ -93,19 +101,6 @@ pub async fn create_execution(
},
)
.await?;
-let mut execution_ctx = AuthorizationContext::new(identity_id);
-execution_ctx.pack_ref = Some(action.pack_ref.clone());
-authz
-.authorize(
-&user,
-AuthorizationCheck {
-resource: Resource::Executions,
-action: Action::Create,
-context: execution_ctx,
-},
-)
-.await?;
}
// Create execution input
@@ -938,6 +933,398 @@ pub async fn stream_execution_updates(
Ok(Sse::new(filtered_stream).keep_alive(KeepAlive::default()))
}
#[derive(serde::Deserialize)]
pub struct StreamExecutionLogParams {
pub token: Option<String>,
pub offset: Option<u64>,
}
#[derive(Clone, Copy)]
enum ExecutionLogStream {
Stdout,
Stderr,
}
impl ExecutionLogStream {
fn parse(name: &str) -> Result<Self, ApiError> {
match name {
"stdout" => Ok(Self::Stdout),
"stderr" => Ok(Self::Stderr),
_ => Err(ApiError::BadRequest(format!(
"Unsupported log stream '{}'. Expected 'stdout' or 'stderr'.",
name
))),
}
}
fn file_name(self) -> &'static str {
match self {
Self::Stdout => "stdout.log",
Self::Stderr => "stderr.log",
}
}
}
enum ExecutionLogTailState {
WaitingForFile {
full_path: std::path::PathBuf,
execution_id: i64,
},
SendInitial {
full_path: std::path::PathBuf,
execution_id: i64,
offset: u64,
pending_utf8: Vec<u8>,
},
Tail {
full_path: std::path::PathBuf,
execution_id: i64,
offset: u64,
idle_polls: u32,
pending_utf8: Vec<u8>,
},
Finished,
}
/// Stream stdout/stderr for an execution as SSE.
///
/// This tails the worker's live log files directly from the shared artifacts
/// volume. The file may not exist yet when the worker has not emitted any
/// output, so the stream waits briefly for it to appear.
#[utoipa::path(
get,
path = "/api/v1/executions/{id}/logs/{stream}/stream",
tag = "executions",
params(
("id" = i64, Path, description = "Execution ID"),
("stream" = String, Path, description = "Log stream name: stdout or stderr"),
("token" = String, Query, description = "JWT access token for authentication"),
),
responses(
(status = 200, description = "SSE stream of execution log content", content_type = "text/event-stream"),
(status = 401, description = "Unauthorized"),
(status = 404, description = "Execution not found"),
),
)]
pub async fn stream_execution_log(
State(state): State<Arc<AppState>>,
headers: HeaderMap,
Path((id, stream_name)): Path<(i64, String)>,
Query(params): Query<StreamExecutionLogParams>,
user: Result<RequireAuth, crate::auth::middleware::AuthError>,
) -> Result<Sse<impl Stream<Item = Result<Event, std::convert::Infallible>>>, ApiError> {
let authenticated_user =
authenticate_execution_log_stream_user(&state, &headers, user, params.token.as_deref())?;
validate_execution_log_stream_user(&authenticated_user, id)?;
let execution = ExecutionRepository::find_by_id(&state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Execution with ID {} not found", id)))?;
authorize_execution_log_stream(&state, &authenticated_user, &execution).await?;
let stream_name = ExecutionLogStream::parse(&stream_name)?;
let full_path = std::path::PathBuf::from(&state.config.artifacts_dir)
.join(format!("execution_{}", id))
.join(stream_name.file_name());
let db = state.db.clone();
let initial_state = ExecutionLogTailState::WaitingForFile {
full_path,
execution_id: id,
};
let start_offset = params.offset.unwrap_or(0);
let stream = futures::stream::unfold(initial_state, move |state| {
let db = db.clone();
async move {
match state {
ExecutionLogTailState::Finished => None,
ExecutionLogTailState::WaitingForFile {
full_path,
execution_id,
} => {
if full_path.exists() {
Some((
Ok(Event::default().event("waiting").data("Log file found")),
ExecutionLogTailState::SendInitial {
full_path,
execution_id,
offset: start_offset,
pending_utf8: Vec::new(),
},
))
} else if execution_log_execution_terminal(&db, execution_id).await {
Some((
Ok(Event::default().event("done").data("")),
ExecutionLogTailState::Finished,
))
} else {
tokio::time::sleep(LOG_STREAM_POLL_INTERVAL).await;
Some((
Ok(Event::default()
.event("waiting")
.data("Waiting for log output")),
ExecutionLogTailState::WaitingForFile {
full_path,
execution_id,
},
))
}
}
ExecutionLogTailState::SendInitial {
full_path,
execution_id,
offset,
pending_utf8,
} => {
let pending_utf8_on_empty = pending_utf8.clone();
match read_log_chunk(
&full_path,
offset,
LOG_STREAM_READ_CHUNK_SIZE,
pending_utf8,
)
.await
{
Some((content, new_offset, pending_utf8)) => Some((
Ok(Event::default()
.id(new_offset.to_string())
.event("content")
.data(content)),
ExecutionLogTailState::SendInitial {
full_path,
execution_id,
offset: new_offset,
pending_utf8,
},
)),
None => Some((
Ok(Event::default().comment("initial-catchup-complete")),
ExecutionLogTailState::Tail {
full_path,
execution_id,
offset,
idle_polls: 0,
pending_utf8: pending_utf8_on_empty,
},
)),
}
}
ExecutionLogTailState::Tail {
full_path,
execution_id,
offset,
idle_polls,
pending_utf8,
} => {
let pending_utf8_on_empty = pending_utf8.clone();
match read_log_chunk(
&full_path,
offset,
LOG_STREAM_READ_CHUNK_SIZE,
pending_utf8,
)
.await
{
Some((append, new_offset, pending_utf8)) => Some((
Ok(Event::default()
.id(new_offset.to_string())
.event("append")
.data(append)),
ExecutionLogTailState::Tail {
full_path,
execution_id,
offset: new_offset,
idle_polls: 0,
pending_utf8,
},
)),
None => {
let terminal =
execution_log_execution_terminal(&db, execution_id).await;
if terminal && idle_polls >= 2 {
Some((
Ok(Event::default().event("done").data("Execution complete")),
ExecutionLogTailState::Finished,
))
} else {
tokio::time::sleep(LOG_STREAM_POLL_INTERVAL).await;
Some((
Ok(Event::default()
.event("waiting")
.data("Waiting for log output")),
ExecutionLogTailState::Tail {
full_path,
execution_id,
offset,
idle_polls: idle_polls + 1,
pending_utf8: pending_utf8_on_empty,
},
))
}
}
}
}
}
}
});
Ok(Sse::new(stream).keep_alive(KeepAlive::default()))
}
async fn read_log_chunk(
path: &std::path::Path,
offset: u64,
max_bytes: usize,
mut pending_utf8: Vec<u8>,
) -> Option<(String, u64, Vec<u8>)> {
use tokio::io::{AsyncReadExt, AsyncSeekExt};
let mut file = tokio::fs::File::open(path).await.ok()?;
let metadata = file.metadata().await.ok()?;
if metadata.len() <= offset {
return None;
}
file.seek(std::io::SeekFrom::Start(offset)).await.ok()?;
let bytes_to_read = ((metadata.len() - offset) as usize).min(max_bytes);
let mut buf = vec![0u8; bytes_to_read];
let read = file.read(&mut buf).await.ok()?;
buf.truncate(read);
if buf.is_empty() {
return None;
}
pending_utf8.extend_from_slice(&buf);
let (content, pending_utf8) = decode_utf8_chunk(pending_utf8);
Some((content, offset + read as u64, pending_utf8))
}
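Editor's note: a hypothetical caller (not part of the change) showing how the returned triple threads through a loop: the offset advances by bytes read, and the carry buffer holds any trailing bytes of a split UTF-8 sequence for the next call.

    async fn drain_log(path: &std::path::Path) -> String {
        let mut offset = 0u64;
        let mut pending: Vec<u8> = Vec::new();
        let mut out = String::new();
        // read_log_chunk returns None once the file has no bytes past `offset`.
        while let Some((text, next_offset, carry)) =
            read_log_chunk(path, offset, 64 * 1024, std::mem::take(&mut pending)).await
        {
            out.push_str(&text);
            offset = next_offset;
            pending = carry;
        }
        out
    }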
async fn execution_log_execution_terminal(db: &sqlx::PgPool, execution_id: i64) -> bool {
match ExecutionRepository::find_by_id(db, execution_id).await {
Ok(Some(execution)) => matches!(
execution.status,
ExecutionStatus::Completed
| ExecutionStatus::Failed
| ExecutionStatus::Cancelled
| ExecutionStatus::Timeout
| ExecutionStatus::Abandoned
),
_ => true,
}
}
fn decode_utf8_chunk(mut bytes: Vec<u8>) -> (String, Vec<u8>) {
match std::str::from_utf8(&bytes) {
Ok(valid) => (valid.to_string(), Vec::new()),
Err(err) if err.error_len().is_none() => {
let pending = bytes.split_off(err.valid_up_to());
(String::from_utf8_lossy(&bytes).into_owned(), pending)
}
Err(_) => (String::from_utf8_lossy(&bytes).into_owned(), Vec::new()),
}
}
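Editor's note: the pending buffer exists for exactly one boundary case, a multi-byte code point split across two reads. A quick check of that behavior (hypothetical test, not in the diff):

    #[test]
    fn decode_utf8_chunk_defers_split_code_points() {
        // "é" is 0xC3 0xA9; feed the first byte alone, then complete it.
        let (text, pending) = decode_utf8_chunk(vec![0xC3]);
        assert_eq!(text, "");
        assert_eq!(pending, vec![0xC3]);
        let mut bytes = pending;
        bytes.push(0xA9);
        let (text, pending) = decode_utf8_chunk(bytes);
        assert_eq!(text, "é");
        assert!(pending.is_empty());
    }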
async fn authorize_execution_log_stream(
state: &Arc<AppState>,
user: &AuthenticatedUser,
execution: &attune_common::models::Execution,
) -> Result<(), ApiError> {
if user.claims.token_type != TokenType::Access {
return Ok(());
}
let identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
let mut ctx = AuthorizationContext::new(identity_id);
ctx.target_id = Some(execution.id);
ctx.target_ref = Some(execution.action_ref.clone());
authz
.authorize(
user,
AuthorizationCheck {
resource: Resource::Executions,
action: Action::Read,
context: ctx,
},
)
.await
}
fn authenticate_execution_log_stream_user(
state: &Arc<AppState>,
headers: &HeaderMap,
user: Result<RequireAuth, crate::auth::middleware::AuthError>,
query_token: Option<&str>,
) -> Result<AuthenticatedUser, ApiError> {
match user {
Ok(RequireAuth(user)) => Ok(user),
Err(_) => {
if let Some(user) = crate::auth::oidc::cookie_authenticated_user(headers, state)? {
return Ok(user);
}
let token = query_token.ok_or(ApiError::Unauthorized(
"Missing authentication token".to_string(),
))?;
authenticate_execution_log_stream_query_token(token, &state.jwt_config)
}
}
}
fn authenticate_execution_log_stream_query_token(
token: &str,
jwt_config: &JwtConfig,
) -> Result<AuthenticatedUser, ApiError> {
let claims = validate_token(token, jwt_config)
.map_err(|_| ApiError::Unauthorized("Invalid authentication token".to_string()))?;
Ok(AuthenticatedUser { claims })
}
fn validate_execution_log_stream_user(
user: &AuthenticatedUser,
execution_id: i64,
) -> Result<(), ApiError> {
let claims = &user.claims;
match claims.token_type {
TokenType::Access => Ok(()),
TokenType::Execution => validate_execution_token_scope(claims, execution_id),
TokenType::Sensor | TokenType::Refresh => Err(ApiError::Unauthorized(
"Invalid authentication token".to_string(),
)),
}
}
fn validate_execution_token_scope(claims: &Claims, execution_id: i64) -> Result<(), ApiError> {
if claims.scope.as_deref() != Some("execution") {
return Err(ApiError::Unauthorized(
"Invalid authentication token".to_string(),
));
}
let token_execution_id = claims
.metadata
.as_ref()
.and_then(|metadata| metadata.get("execution_id"))
.and_then(|value| value.as_i64())
.ok_or_else(|| ApiError::Unauthorized("Invalid authentication token".to_string()))?;
if token_execution_id != execution_id {
return Err(ApiError::Forbidden(format!(
"Execution token is not valid for execution {}",
execution_id
)));
}
Ok(())
}
#[derive(serde::Deserialize)]
pub struct StreamExecutionParams {
pub execution_id: Option<i64>,
@@ -950,6 +1337,10 @@ pub fn routes() -> Router<Arc<AppState>> {
.route("/executions/execute", axum::routing::post(create_execution))
.route("/executions/stats", get(get_execution_stats))
.route("/executions/stream", get(stream_execution_updates))
.route(
"/executions/{id}/logs/{stream}/stream",
get(stream_execution_log),
)
.route("/executions/{id}", get(get_execution))
.route(
"/executions/{id}/cancel",
@@ -968,10 +1359,26 @@ pub fn routes() -> Router<Arc<AppState>> {
#[cfg(test)]
mod tests {
use super::*;
use attune_common::auth::jwt::generate_execution_token;
#[test]
fn test_execution_routes_structure() {
// Just verify the router can be constructed
let _router = routes();
}
#[test]
fn execution_token_scope_must_match_requested_execution() {
let jwt_config = JwtConfig {
secret: "test_secret_key_for_testing".to_string(),
access_token_expiration: 3600,
refresh_token_expiration: 604800,
};
let token = generate_execution_token(42, 123, "core.echo", &jwt_config, None).unwrap();
let user = authenticate_execution_log_stream_query_token(&token, &jwt_config).unwrap();
let err = validate_execution_log_stream_user(&user, 456).unwrap_err();
assert!(matches!(err, ApiError::Forbidden(_)));
}
}

View File

@@ -120,12 +120,16 @@ pub async fn get_key(
.await?
.ok_or_else(|| ApiError::NotFound(format!("Key '{}' not found", key_ref)))?;
-if user.0.claims.token_type == TokenType::Access {
+// For encrypted keys, track whether this caller is permitted to see the value.
+// Non-Access tokens (sensor, execution) always get full access.
+let can_decrypt = if user.0.claims.token_type == TokenType::Access {
let identity_id = user
.0
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
// Basic read check — hide behind 404 to prevent enumeration.
authz
.authorize(
&user.0,
@@ -136,19 +140,43 @@ pub async fn get_key(
},
)
.await
// Hide unauthorized records behind 404 to reduce enumeration leakage.
.map_err(|_| ApiError::NotFound(format!("Key '{}' not found", key_ref)))?;
-}
-// Decrypt value if encrypted
+// For encrypted keys, separately check Keys::Decrypt.
+// Failing this is not an error — we just return the value as null.
if key.encrypted {
-let encryption_key = state
authz
.authorize(
&user.0,
AuthorizationCheck {
resource: Resource::Keys,
action: Action::Decrypt,
context: key_authorization_context(identity_id, &key),
},
)
.await
.is_ok()
} else {
true
}
} else {
true
};
+// Decrypt value if encrypted and caller has permission.
+// If they lack Keys::Decrypt, return null rather than the ciphertext.
+if key.encrypted {
+if can_decrypt {
+let encryption_key =
+state
.config
.security
.encryption_key
.as_ref()
.ok_or_else(|| {
ApiError::InternalServerError("Encryption key not configured on server".to_string())
ApiError::InternalServerError(
"Encryption key not configured on server".to_string(),
)
})?;
let decrypted_value = attune_common::crypto::decrypt_json(&key.value, encryption_key)
@@ -158,6 +186,9 @@ pub async fn get_key(
})?;
key.value = decrypted_value;
} else {
key.value = serde_json::Value::Null;
}
}
let response = ApiResponse::new(KeyResponse::from(key));
@@ -195,6 +226,7 @@ pub async fn create_key(
let mut ctx = AuthorizationContext::new(identity_id);
ctx.owner_identity_id = request.owner_identity;
ctx.owner_type = Some(request.owner_type);
ctx.owner_ref = requested_key_owner_ref(&request);
ctx.encrypted = Some(request.encrypted);
ctx.target_ref = Some(request.r#ref.clone());
@@ -541,6 +573,38 @@ fn key_authorization_context(identity_id: i64, key: &Key) -> AuthorizationContex
ctx.target_ref = Some(key.r#ref.clone());
ctx.owner_identity_id = key.owner_identity;
ctx.owner_type = Some(key.owner_type);
ctx.owner_ref = key_owner_ref(
key.owner_type,
key.owner.as_deref(),
key.owner_pack_ref.as_deref(),
key.owner_action_ref.as_deref(),
key.owner_sensor_ref.as_deref(),
);
ctx.encrypted = Some(key.encrypted);
ctx
}
fn requested_key_owner_ref(request: &CreateKeyRequest) -> Option<String> {
key_owner_ref(
request.owner_type,
request.owner.as_deref(),
request.owner_pack_ref.as_deref(),
request.owner_action_ref.as_deref(),
request.owner_sensor_ref.as_deref(),
)
}
fn key_owner_ref(
owner_type: OwnerType,
owner: Option<&str>,
owner_pack_ref: Option<&str>,
owner_action_ref: Option<&str>,
owner_sensor_ref: Option<&str>,
) -> Option<String> {
match owner_type {
OwnerType::Pack => owner_pack_ref.map(str::to_string),
OwnerType::Action => owner_action_ref.map(str::to_string),
OwnerType::Sensor => owner_sensor_ref.map(str::to_string),
_ => owner.map(str::to_string),
}
}
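Editor's note: the get_key changes above split authorization into two stages: a read check whose failure is masked as 404, and a separate Keys::Decrypt check whose failure only nulls out the value. Condensed into one block for readability (a sketch, not the handler itself; the read check and the decrypt_json call are elided, and names mirror the diff):
// Condensed sketch of the resulting decrypt gate in get_key.
let can_decrypt = if user.0.claims.token_type == TokenType::Access && key.encrypted {
    authz
        .authorize(
            &user.0,
            AuthorizationCheck {
                resource: Resource::Keys,
                action: Action::Decrypt,
                context: key_authorization_context(identity_id, &key),
            },
        )
        .await
        .is_ok() // denial is not an error here
} else {
    true // non-Access tokens and plaintext keys are unaffected
};
if key.encrypted && !can_decrypt {
    key.value = serde_json::Value::Null; // never echo ciphertext back
}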

View File

@@ -9,12 +9,14 @@ use std::sync::Arc;
use validator::Validate;
use attune_common::{
models::identity::{Identity, PermissionSet},
models::identity::{Identity, IdentityRoleAssignment},
rbac::{Action, AuthorizationContext, Resource},
repositories::{
identity::{
CreateIdentityInput, CreatePermissionAssignmentInput, IdentityRepository,
PermissionAssignmentRepository, PermissionSetRepository, UpdateIdentityInput,
CreateIdentityInput, CreateIdentityRoleAssignmentInput,
CreatePermissionAssignmentInput, CreatePermissionSetRoleAssignmentInput,
IdentityRepository, IdentityRoleAssignmentRepository, PermissionAssignmentRepository,
PermissionSetRepository, PermissionSetRoleAssignmentRepository, UpdateIdentityInput,
},
Create, Delete, FindById, FindByRef, List, Update,
},
@@ -26,9 +28,12 @@ use crate::{
authz::{AuthorizationCheck, AuthorizationService},
dto::{
common::{PaginatedResponse, PaginationParams},
ApiResponse, CreateIdentityRequest, CreatePermissionAssignmentRequest, IdentityResponse,
IdentitySummary, PermissionAssignmentResponse, PermissionSetQueryParams,
PermissionSetSummary, SuccessResponse, UpdateIdentityRequest,
ApiResponse, CreateIdentityRequest, CreateIdentityRoleAssignmentRequest,
CreatePermissionAssignmentRequest, CreatePermissionSetRoleAssignmentRequest,
IdentityResponse, IdentityRoleAssignmentResponse, IdentitySummary,
PermissionAssignmentResponse, PermissionSetQueryParams,
PermissionSetRoleAssignmentResponse, PermissionSetSummary, SuccessResponse,
UpdateIdentityRequest,
},
middleware::{ApiError, ApiResult},
state::AppState,
@@ -58,16 +63,22 @@ pub async fn list_identities(
let page_items = if start >= identities.len() {
Vec::new()
} else {
identities[start..end]
.iter()
.cloned()
.map(IdentitySummary::from)
.collect()
identities[start..end].to_vec()
};
let mut summaries = Vec::with_capacity(page_items.len());
for identity in page_items {
let role_assignments =
IdentityRoleAssignmentRepository::find_by_identity(&state.db, identity.id).await?;
let roles = role_assignments.into_iter().map(|ra| ra.role).collect();
let mut summary = IdentitySummary::from(identity);
summary.roles = roles;
summaries.push(summary);
}
Ok((
StatusCode::OK,
Json(PaginatedResponse::new(page_items, &query, total)),
Json(PaginatedResponse::new(summaries, &query, total)),
))
}
@@ -94,10 +105,42 @@ pub async fn get_identity(
let identity = IdentityRepository::find_by_id(&state.db, identity_id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Identity '{}' not found", identity_id)))?;
let roles = IdentityRoleAssignmentRepository::find_by_identity(&state.db, identity_id).await?;
let assignments =
PermissionAssignmentRepository::find_by_identity(&state.db, identity_id).await?;
let permission_sets = PermissionSetRepository::find_by_identity(&state.db, identity_id).await?;
let permission_set_refs = permission_sets
.into_iter()
.map(|ps| (ps.id, ps.r#ref))
.collect::<std::collections::HashMap<_, _>>();
Ok((
StatusCode::OK,
Json(ApiResponse::new(IdentityResponse::from(identity))),
Json(ApiResponse::new(IdentityResponse {
id: identity.id,
login: identity.login,
display_name: identity.display_name,
frozen: identity.frozen,
attributes: identity.attributes,
roles: roles
.into_iter()
.map(IdentityRoleAssignmentResponse::from)
.collect(),
direct_permissions: assignments
.into_iter()
.filter_map(|assignment| {
permission_set_refs.get(&assignment.permset).cloned().map(
|permission_set_ref| PermissionAssignmentResponse {
id: assignment.id,
identity_id: assignment.identity,
permission_set_id: assignment.permset,
permission_set_ref,
created: assignment.created,
},
)
})
.collect(),
})),
))
}
@@ -180,6 +223,7 @@ pub async fn update_identity(
display_name: request.display_name,
password_hash,
attributes: request.attributes,
frozen: request.frozen,
},
)
.await?;
@@ -257,10 +301,33 @@ pub async fn list_permission_sets(
permission_sets.retain(|ps| ps.pack_ref.as_deref() == Some(pack_ref.as_str()));
}
let response: Vec<PermissionSetSummary> = permission_sets
let mut response = Vec::with_capacity(permission_sets.len());
for permission_set in permission_sets {
let permission_set_ref = permission_set.r#ref.clone();
let roles = PermissionSetRoleAssignmentRepository::find_by_permission_set(
&state.db,
permission_set.id,
)
.await?;
response.push(PermissionSetSummary {
id: permission_set.id,
r#ref: permission_set.r#ref,
pack_ref: permission_set.pack_ref,
label: permission_set.label,
description: permission_set.description,
grants: permission_set.grants,
roles: roles
.into_iter()
.map(PermissionSetSummary::from)
.collect();
.map(|assignment| PermissionSetRoleAssignmentResponse {
id: assignment.id,
permission_set_id: assignment.permset,
permission_set_ref: Some(permission_set_ref.clone()),
role: assignment.role,
created: assignment.created,
})
.collect(),
});
}
Ok((StatusCode::OK, Json(response)))
}
@@ -412,6 +479,229 @@ pub async fn delete_permission_assignment(
))
}
#[utoipa::path(
post,
path = "/api/v1/identities/{id}/roles",
tag = "permissions",
params(
("id" = i64, Path, description = "Identity ID")
),
request_body = CreateIdentityRoleAssignmentRequest,
responses(
(status = 201, description = "Identity role assignment created", body = inline(ApiResponse<IdentityRoleAssignmentResponse>)),
(status = 404, description = "Identity not found")
),
security(("bearer_auth" = []))
)]
pub async fn create_identity_role_assignment(
State(state): State<Arc<AppState>>,
RequireAuth(user): RequireAuth,
Path(identity_id): Path<i64>,
Json(request): Json<CreateIdentityRoleAssignmentRequest>,
) -> ApiResult<impl IntoResponse> {
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
request.validate()?;
IdentityRepository::find_by_id(&state.db, identity_id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Identity '{}' not found", identity_id)))?;
let assignment = IdentityRoleAssignmentRepository::create(
&state.db,
CreateIdentityRoleAssignmentInput {
identity: identity_id,
role: request.role,
source: "manual".to_string(),
managed: false,
},
)
.await?;
Ok((
StatusCode::CREATED,
Json(ApiResponse::new(IdentityRoleAssignmentResponse::from(
assignment,
))),
))
}
#[utoipa::path(
delete,
path = "/api/v1/identities/roles/{id}",
tag = "permissions",
params(
("id" = i64, Path, description = "Identity role assignment ID")
),
responses(
(status = 200, description = "Identity role assignment deleted", body = inline(ApiResponse<SuccessResponse>)),
(status = 404, description = "Identity role assignment not found")
),
security(("bearer_auth" = []))
)]
pub async fn delete_identity_role_assignment(
State(state): State<Arc<AppState>>,
RequireAuth(user): RequireAuth,
Path(assignment_id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
let assignment = IdentityRoleAssignmentRepository::find_by_id(&state.db, assignment_id)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!(
"Identity role assignment '{}' not found",
assignment_id
))
})?;
if assignment.managed {
return Err(ApiError::BadRequest(
"Managed role assignments must be updated through the identity provider sync"
.to_string(),
));
}
IdentityRoleAssignmentRepository::delete(&state.db, assignment_id).await?;
Ok((
StatusCode::OK,
Json(ApiResponse::new(SuccessResponse::new(
"Identity role assignment deleted successfully",
))),
))
}
#[utoipa::path(
post,
path = "/api/v1/permissions/sets/{id}/roles",
tag = "permissions",
params(
("id" = i64, Path, description = "Permission set ID")
),
request_body = CreatePermissionSetRoleAssignmentRequest,
responses(
(status = 201, description = "Permission set role assignment created", body = inline(ApiResponse<PermissionSetRoleAssignmentResponse>)),
(status = 404, description = "Permission set not found")
),
security(("bearer_auth" = []))
)]
pub async fn create_permission_set_role_assignment(
State(state): State<Arc<AppState>>,
RequireAuth(user): RequireAuth,
Path(permission_set_id): Path<i64>,
Json(request): Json<CreatePermissionSetRoleAssignmentRequest>,
) -> ApiResult<impl IntoResponse> {
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
request.validate()?;
let permission_set = PermissionSetRepository::find_by_id(&state.db, permission_set_id)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!("Permission set '{}' not found", permission_set_id))
})?;
let assignment = PermissionSetRoleAssignmentRepository::create(
&state.db,
CreatePermissionSetRoleAssignmentInput {
permset: permission_set_id,
role: request.role,
},
)
.await?;
Ok((
StatusCode::CREATED,
Json(ApiResponse::new(PermissionSetRoleAssignmentResponse {
id: assignment.id,
permission_set_id: assignment.permset,
permission_set_ref: Some(permission_set.r#ref),
role: assignment.role,
created: assignment.created,
})),
))
}
#[utoipa::path(
delete,
path = "/api/v1/permissions/sets/roles/{id}",
tag = "permissions",
params(
("id" = i64, Path, description = "Permission set role assignment ID")
),
responses(
(status = 200, description = "Permission set role assignment deleted", body = inline(ApiResponse<SuccessResponse>)),
(status = 404, description = "Permission set role assignment not found")
),
security(("bearer_auth" = []))
)]
pub async fn delete_permission_set_role_assignment(
State(state): State<Arc<AppState>>,
RequireAuth(user): RequireAuth,
Path(assignment_id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
authorize_permissions(&state, &user, Resource::Permissions, Action::Manage).await?;
PermissionSetRoleAssignmentRepository::find_by_id(&state.db, assignment_id)
.await?
.ok_or_else(|| {
ApiError::NotFound(format!(
"Permission set role assignment '{}' not found",
assignment_id
))
})?;
PermissionSetRoleAssignmentRepository::delete(&state.db, assignment_id).await?;
Ok((
StatusCode::OK,
Json(ApiResponse::new(SuccessResponse::new(
"Permission set role assignment deleted successfully",
))),
))
}
#[utoipa::path(
post,
path = "/api/v1/identities/{id}/freeze",
tag = "permissions",
params(
("id" = i64, Path, description = "Identity ID")
),
responses(
(status = 200, description = "Identity frozen", body = inline(ApiResponse<SuccessResponse>)),
(status = 404, description = "Identity not found")
),
security(("bearer_auth" = []))
)]
pub async fn freeze_identity(
State(state): State<Arc<AppState>>,
RequireAuth(user): RequireAuth,
Path(identity_id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
set_identity_frozen(&state, &user, identity_id, true).await
}
#[utoipa::path(
post,
path = "/api/v1/identities/{id}/unfreeze",
tag = "permissions",
params(
("id" = i64, Path, description = "Identity ID")
),
responses(
(status = 200, description = "Identity unfrozen", body = inline(ApiResponse<SuccessResponse>)),
(status = 404, description = "Identity not found")
),
security(("bearer_auth" = []))
)]
pub async fn unfreeze_identity(
State(state): State<Arc<AppState>>,
RequireAuth(user): RequireAuth,
Path(identity_id): Path<i64>,
) -> ApiResult<impl IntoResponse> {
set_identity_frozen(&state, &user, identity_id, false).await
}
pub fn routes() -> Router<Arc<AppState>> {
Router::new()
.route("/identities", get(list_identities).post(create_identity))
@@ -421,11 +711,29 @@ pub fn routes() -> Router<Arc<AppState>> {
.put(update_identity)
.delete(delete_identity),
)
.route(
"/identities/{id}/roles",
post(create_identity_role_assignment),
)
.route(
"/identities/{id}/permissions",
get(list_identity_permissions),
)
.route("/identities/{id}/freeze", post(freeze_identity))
.route("/identities/{id}/unfreeze", post(unfreeze_identity))
.route(
"/identities/roles/{id}",
delete(delete_identity_role_assignment),
)
.route("/permissions/sets", get(list_permission_sets))
.route(
"/permissions/sets/{id}/roles",
post(create_permission_set_role_assignment),
)
.route(
"/permissions/sets/roles/{id}",
delete(delete_permission_set_role_assignment),
)
.route(
"/permissions/assignments",
post(create_permission_assignment),
@@ -488,20 +796,82 @@ impl From<Identity> for IdentitySummary {
id: value.id,
login: value.login,
display_name: value.display_name,
frozen: value.frozen,
attributes: value.attributes,
roles: Vec::new(),
}
}
}
impl From<PermissionSet> for PermissionSetSummary {
fn from(value: PermissionSet) -> Self {
impl From<IdentityRoleAssignment> for IdentityRoleAssignmentResponse {
fn from(value: IdentityRoleAssignment) -> Self {
Self {
id: value.id,
r#ref: value.r#ref,
pack_ref: value.pack_ref,
label: value.label,
description: value.description,
grants: value.grants,
identity_id: value.identity,
role: value.role,
source: value.source,
managed: value.managed,
created: value.created,
updated: value.updated,
}
}
}
impl From<Identity> for IdentityResponse {
fn from(value: Identity) -> Self {
Self {
id: value.id,
login: value.login,
display_name: value.display_name,
frozen: value.frozen,
attributes: value.attributes,
roles: Vec::new(),
direct_permissions: Vec::new(),
}
}
}
async fn set_identity_frozen(
state: &Arc<AppState>,
user: &crate::auth::middleware::AuthenticatedUser,
identity_id: i64,
frozen: bool,
) -> ApiResult<impl IntoResponse> {
authorize_permissions(state, user, Resource::Identities, Action::Update).await?;
let caller_identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
if caller_identity_id == identity_id && frozen {
return Err(ApiError::BadRequest(
"Refusing to freeze the currently authenticated identity".to_string(),
));
}
IdentityRepository::find_by_id(&state.db, identity_id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Identity '{}' not found", identity_id)))?;
IdentityRepository::update(
&state.db,
identity_id,
UpdateIdentityInput {
display_name: None,
password_hash: None,
attributes: None,
frozen: Some(frozen),
},
)
.await?;
let message = if frozen {
"Identity frozen successfully"
} else {
"Identity unfrozen successfully"
};
Ok((
StatusCode::OK,
Json(ApiResponse::new(SuccessResponse::new(message))),
))
}
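Editor's note: the freeze/unfreeze routes above are plain POSTs guarded by Identities::Update plus a self-freeze refusal. An illustrative call against the new endpoint (reqwest; host and token are placeholders):
// Illustrative only: freezing identity 42 via the new endpoint.
let resp = reqwest::Client::new()
    .post("https://attune.example.com/api/v1/identities/42/freeze")
    .bearer_auth(&token)
    .send()
    .await?;
assert_eq!(resp.status(), reqwest::StatusCode::OK); // 400 if freezing yourself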

View File

@@ -20,7 +20,7 @@ use attune_common::repositories::{
pack::PackRepository,
rule::{CreateRuleInput, RuleRepository, RuleSearchFilters, UpdateRuleInput},
trigger::TriggerRepository,
Create, Delete, FindByRef, Update,
Create, Delete, FindByRef, Patch, Update,
};
use crate::{
@@ -474,7 +474,7 @@ pub async fn update_rule(
// Create update input
let update_input = UpdateRuleInput {
label: request.label,
description: request.description,
description: request.description.map(Patch::Set),
conditions: request.conditions,
action_params: request.action_params,
trigger_params: request.trigger_params,

View File

@@ -724,7 +724,7 @@ pub async fn update_sensor(
// Create update input
let update_input = UpdateSensorInput {
label: request.label,
description: request.description,
description: request.description.map(Patch::Set),
entrypoint: request.entrypoint,
runtime: None,
runtime_ref: None,

View File

@@ -20,8 +20,11 @@ use attune_common::{
},
};
use attune_common::rbac::{Action, AuthorizationContext, Resource};
use crate::{
auth::middleware::RequireAuth,
authz::{AuthorizationCheck, AuthorizationService},
dto::{
trigger::TriggerResponse,
webhook::{WebhookReceiverRequest, WebhookReceiverResponse},
@@ -170,7 +173,7 @@ fn get_webhook_config_array(
)]
pub async fn enable_webhook(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
RequireAuth(user): RequireAuth,
Path(trigger_ref): Path<String>,
) -> ApiResult<impl IntoResponse> {
// First, find the trigger by ref to get its ID
@@ -179,6 +182,26 @@ pub async fn enable_webhook(
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
if user.claims.token_type == crate::auth::jwt::TokenType::Access {
let identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
let mut ctx = AuthorizationContext::new(identity_id);
ctx.target_ref = Some(trigger.r#ref.clone());
ctx.pack_ref = trigger.pack_ref.clone();
authz
.authorize(
&user,
AuthorizationCheck {
resource: Resource::Triggers,
action: Action::Update,
context: ctx,
},
)
.await?;
}
// Enable webhooks for this trigger
let _webhook_info = TriggerRepository::enable_webhook(&state.db, trigger.id)
.await
@@ -213,7 +236,7 @@ pub async fn enable_webhook(
)]
pub async fn disable_webhook(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
RequireAuth(user): RequireAuth,
Path(trigger_ref): Path<String>,
) -> ApiResult<impl IntoResponse> {
// First, find the trigger by ref to get its ID
@@ -222,6 +245,26 @@ pub async fn disable_webhook(
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
if user.claims.token_type == crate::auth::jwt::TokenType::Access {
let identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
let mut ctx = AuthorizationContext::new(identity_id);
ctx.target_ref = Some(trigger.r#ref.clone());
ctx.pack_ref = trigger.pack_ref.clone();
authz
.authorize(
&user,
AuthorizationCheck {
resource: Resource::Triggers,
action: Action::Update,
context: ctx,
},
)
.await?;
}
// Disable webhooks for this trigger
TriggerRepository::disable_webhook(&state.db, trigger.id)
.await
@@ -257,7 +300,7 @@ pub async fn disable_webhook(
)]
pub async fn regenerate_webhook_key(
State(state): State<Arc<AppState>>,
RequireAuth(_user): RequireAuth,
RequireAuth(user): RequireAuth,
Path(trigger_ref): Path<String>,
) -> ApiResult<impl IntoResponse> {
// First, find the trigger by ref to get its ID
@@ -266,6 +309,26 @@ pub async fn regenerate_webhook_key(
.map_err(|e| ApiError::InternalServerError(e.to_string()))?
.ok_or_else(|| ApiError::NotFound(format!("Trigger '{}' not found", trigger_ref)))?;
if user.claims.token_type == crate::auth::jwt::TokenType::Access {
let identity_id = user
.identity_id()
.map_err(|_| ApiError::Unauthorized("Invalid user identity".to_string()))?;
let authz = AuthorizationService::new(state.db.clone());
let mut ctx = AuthorizationContext::new(identity_id);
ctx.target_ref = Some(trigger.r#ref.clone());
ctx.pack_ref = trigger.pack_ref.clone();
authz
.authorize(
&user,
AuthorizationCheck {
resource: Resource::Triggers,
action: Action::Update,
context: ctx,
},
)
.await?;
}
// Check if webhooks are enabled
if !trigger.webhook_enabled {
return Err(ApiError::BadRequest(

View File

@@ -18,7 +18,7 @@ use attune_common::repositories::{
CreateWorkflowDefinitionInput, UpdateWorkflowDefinitionInput, WorkflowDefinitionRepository,
WorkflowSearchFilters,
},
Create, Delete, FindByRef, Update,
Create, Delete, FindByRef, Patch, Update,
};
use crate::{
@@ -217,7 +217,7 @@ pub async fn create_workflow(
pack.id,
&pack.r#ref,
&request.label,
&request.description.clone().unwrap_or_default(),
request.description.as_deref(),
"workflow",
request.param_schema.as_ref(),
request.out_schema.as_ref(),
@@ -416,7 +416,7 @@ pub async fn save_workflow_file(
pack.id,
&pack.r#ref,
&request.label,
&request.description.clone().unwrap_or_default(),
request.description.as_deref(),
&entrypoint,
request.param_schema.as_ref(),
request.out_schema.as_ref(),
@@ -499,7 +499,7 @@ pub async fn update_workflow_file(
pack.id,
&pack.r#ref,
&request.label,
&request.description.unwrap_or_default(),
request.description.as_deref(),
&entrypoint,
request.param_schema.as_ref(),
request.out_schema.as_ref(),
@@ -702,7 +702,7 @@ async fn create_companion_action(
pack_id: i64,
pack_ref: &str,
label: &str,
description: &str,
description: Option<&str>,
entrypoint: &str,
param_schema: Option<&serde_json::Value>,
out_schema: Option<&serde_json::Value>,
@@ -713,7 +713,7 @@ async fn create_companion_action(
pack: pack_id,
pack_ref: pack_ref.to_string(),
label: label.to_string(),
description: description.to_string(),
description: description.map(|s| s.to_string()),
entrypoint: entrypoint.to_string(),
runtime: None,
runtime_version_constraint: None,
@@ -787,7 +787,7 @@ async fn update_companion_action(
if let Some(action) = existing_action {
let update_input = UpdateActionInput {
label: label.map(|s| s.to_string()),
description: description.map(|s| s.to_string()),
description: description.map(|s| Patch::Set(s.to_string())),
entrypoint: None,
runtime: None,
runtime_version_constraint: None,
@@ -838,7 +838,7 @@ async fn ensure_companion_action(
pack_id: i64,
pack_ref: &str,
label: &str,
description: &str,
description: Option<&str>,
entrypoint: &str,
param_schema: Option<&serde_json::Value>,
out_schema: Option<&serde_json::Value>,
@@ -853,7 +853,10 @@ async fn ensure_companion_action(
// Update existing companion action
let update_input = UpdateActionInput {
label: Some(label.to_string()),
description: Some(description.to_string()),
description: Some(match description {
Some(description) => Patch::Set(description.to_string()),
None => Patch::Clear,
}),
entrypoint: Some(entrypoint.to_string()),
runtime: None,
runtime_version_constraint: None,

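Editor's note: the recurring `description: ... Patch::Set / Patch::Clear` changes in this commit move optional text fields in the update inputs from `Option<String>` to `Option<Patch<String>>`, so callers can distinguish "leave unchanged" from "set" from "clear". A minimal sketch of the pattern; the real `Patch` type lives in attune_common::repositories and may differ in shape:
// Tri-state update sketch (assumed shape, not the actual definition).
enum Patch<T> {
    Set(T), // write a new value
    Clear,  // explicitly null the column
}

fn apply_description(current: Option<String>, patch: Option<Patch<String>>) -> Option<String> {
    match patch {
        None => current,                        // field omitted: keep as-is
        Some(Patch::Set(value)) => Some(value), // overwrite
        Some(Patch::Clear) => None,             // clear
    }
}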
View File

@@ -362,7 +362,7 @@ mod tests {
pack: 1,
pack_ref: "test".to_string(),
label: "Test Action".to_string(),
description: "Test action".to_string(),
description: Some("Test action".to_string()),
entrypoint: "test.sh".to_string(),
runtime: Some(1),
runtime_version_constraint: None,

View File

@@ -241,6 +241,7 @@ impl TestContext {
}
/// Create and authenticate a test user
#[allow(dead_code)]
pub async fn with_auth(mut self) -> Result<Self> {
// Generate unique username to avoid conflicts in parallel tests
let unique_id = uuid::Uuid::new_v4().to_string().replace("-", "")[..8].to_string();
@@ -394,6 +395,7 @@ impl TestContext {
}
/// Get authenticated token
#[allow(dead_code)]
pub fn token(&self) -> Option<&str> {
self.token.as_deref()
}
@@ -495,7 +497,7 @@ pub async fn create_test_action(pool: &PgPool, pack_id: i64, ref_name: &str) ->
pack: pack_id,
pack_ref: format!("pack_{}", pack_id),
label: format!("Test Action {}", ref_name),
description: format!("Test action for {}", ref_name),
description: Some(format!("Test action for {}", ref_name)),
entrypoint: "main.py".to_string(),
runtime: None,
runtime_version_constraint: None,

View File

@@ -0,0 +1,276 @@
use axum::http::StatusCode;
use helpers::*;
use serde_json::json;
use attune_common::{
models::enums::{ArtifactType, ArtifactVisibility, OwnerType, RetentionPolicyType},
repositories::{
artifact::{ArtifactRepository, CreateArtifactInput},
identity::{
CreatePermissionAssignmentInput, CreatePermissionSetInput, IdentityRepository,
PermissionAssignmentRepository, PermissionSetRepository,
},
key::{CreateKeyInput, KeyRepository},
Create,
},
};
mod helpers;
async fn register_scoped_user(
ctx: &TestContext,
login: &str,
grants: serde_json::Value,
) -> Result<String> {
let response = ctx
.post(
"/auth/register",
json!({
"login": login,
"password": "TestPassword123!",
"display_name": format!("Scoped User {}", login),
}),
None,
)
.await?;
assert_eq!(response.status(), StatusCode::CREATED);
let body: serde_json::Value = response.json().await?;
let token = body["data"]["access_token"]
.as_str()
.expect("missing access token")
.to_string();
let identity = IdentityRepository::find_by_login(&ctx.pool, login)
.await?
.expect("registered identity should exist");
let permset = PermissionSetRepository::create(
&ctx.pool,
CreatePermissionSetInput {
r#ref: format!("test.scoped_{}", uuid::Uuid::new_v4().simple()),
pack: None,
pack_ref: None,
label: Some("Scoped Test Permission Set".to_string()),
description: Some("Scoped test grants".to_string()),
grants,
},
)
.await?;
PermissionAssignmentRepository::create(
&ctx.pool,
CreatePermissionAssignmentInput {
identity: identity.id,
permset: permset.id,
},
)
.await?;
Ok(token)
}
#[tokio::test]
#[ignore = "integration test — requires database"]
async fn test_pack_scoped_key_permissions_enforce_owner_refs() {
let ctx = TestContext::new()
.await
.expect("Failed to create test context");
let token = register_scoped_user(
&ctx,
&format!("scoped_keys_{}", uuid::Uuid::new_v4().simple()),
json!([
{
"resource": "keys",
"actions": ["read"],
"constraints": {
"owner_types": ["pack"],
"owner_refs": ["python_example"]
}
}
]),
)
.await
.expect("Failed to register scoped user");
KeyRepository::create(
&ctx.pool,
CreateKeyInput {
r#ref: format!("python_example_key_{}", uuid::Uuid::new_v4().simple()),
owner_type: OwnerType::Pack,
owner: Some("python_example".to_string()),
owner_identity: None,
owner_pack: None,
owner_pack_ref: Some("python_example".to_string()),
owner_action: None,
owner_action_ref: None,
owner_sensor: None,
owner_sensor_ref: None,
name: "Python Example Key".to_string(),
encrypted: false,
encryption_key_hash: None,
value: json!("allowed"),
},
)
.await
.expect("Failed to create scoped key");
let blocked_key = KeyRepository::create(
&ctx.pool,
CreateKeyInput {
r#ref: format!("other_pack_key_{}", uuid::Uuid::new_v4().simple()),
owner_type: OwnerType::Pack,
owner: Some("other_pack".to_string()),
owner_identity: None,
owner_pack: None,
owner_pack_ref: Some("other_pack".to_string()),
owner_action: None,
owner_action_ref: None,
owner_sensor: None,
owner_sensor_ref: None,
name: "Other Pack Key".to_string(),
encrypted: false,
encryption_key_hash: None,
value: json!("blocked"),
},
)
.await
.expect("Failed to create blocked key");
let allowed_list = ctx
.get("/api/v1/keys", Some(&token))
.await
.expect("Failed to list keys");
assert_eq!(allowed_list.status(), StatusCode::OK);
let allowed_body: serde_json::Value = allowed_list.json().await.expect("Invalid key list");
assert_eq!(
allowed_body["data"]
.as_array()
.expect("expected list")
.len(),
1
);
assert_eq!(allowed_body["data"][0]["owner"], "python_example");
let blocked_get = ctx
.get(&format!("/api/v1/keys/{}", blocked_key.r#ref), Some(&token))
.await
.expect("Failed to fetch blocked key");
assert_eq!(blocked_get.status(), StatusCode::NOT_FOUND);
}
#[tokio::test]
#[ignore = "integration test — requires database"]
async fn test_pack_scoped_artifact_permissions_enforce_owner_refs() {
let ctx = TestContext::new()
.await
.expect("Failed to create test context");
let token = register_scoped_user(
&ctx,
&format!("scoped_artifacts_{}", uuid::Uuid::new_v4().simple()),
json!([
{
"resource": "artifacts",
"actions": ["read", "create"],
"constraints": {
"owner_types": ["pack"],
"owner_refs": ["python_example"]
}
}
]),
)
.await
.expect("Failed to register scoped user");
let allowed_artifact = ArtifactRepository::create(
&ctx.pool,
CreateArtifactInput {
r#ref: format!("python_example.allowed_{}", uuid::Uuid::new_v4().simple()),
scope: OwnerType::Pack,
owner: "python_example".to_string(),
r#type: ArtifactType::FileText,
visibility: ArtifactVisibility::Private,
retention_policy: RetentionPolicyType::Versions,
retention_limit: 5,
name: Some("Allowed Artifact".to_string()),
description: None,
content_type: Some("text/plain".to_string()),
execution: None,
data: None,
},
)
.await
.expect("Failed to create allowed artifact");
let blocked_artifact = ArtifactRepository::create(
&ctx.pool,
CreateArtifactInput {
r#ref: format!("other_pack.blocked_{}", uuid::Uuid::new_v4().simple()),
scope: OwnerType::Pack,
owner: "other_pack".to_string(),
r#type: ArtifactType::FileText,
visibility: ArtifactVisibility::Private,
retention_policy: RetentionPolicyType::Versions,
retention_limit: 5,
name: Some("Blocked Artifact".to_string()),
description: None,
content_type: Some("text/plain".to_string()),
execution: None,
data: None,
},
)
.await
.expect("Failed to create blocked artifact");
let allowed_get = ctx
.get(
&format!("/api/v1/artifacts/{}", allowed_artifact.id),
Some(&token),
)
.await
.expect("Failed to fetch allowed artifact");
assert_eq!(allowed_get.status(), StatusCode::OK);
let blocked_get = ctx
.get(
&format!("/api/v1/artifacts/{}", blocked_artifact.id),
Some(&token),
)
.await
.expect("Failed to fetch blocked artifact");
assert_eq!(blocked_get.status(), StatusCode::NOT_FOUND);
let create_allowed = ctx
.post(
"/api/v1/artifacts",
json!({
"ref": format!("python_example.created_{}", uuid::Uuid::new_v4().simple()),
"scope": "pack",
"owner": "python_example",
"type": "file_text",
"name": "Created Artifact"
}),
Some(&token),
)
.await
.expect("Failed to create allowed artifact");
assert_eq!(create_allowed.status(), StatusCode::CREATED);
let create_blocked = ctx
.post(
"/api/v1/artifacts",
json!({
"ref": format!("other_pack.created_{}", uuid::Uuid::new_v4().simple()),
"scope": "pack",
"owner": "other_pack",
"type": "file_text",
"name": "Blocked Artifact"
}),
Some(&token),
)
.await
.expect("Failed to create blocked artifact");
assert_eq!(create_blocked.status(), StatusCode::FORBIDDEN);
}
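Editor's note: these tests exercise grants constrained by `owner_types`/`owner_refs`, the constraint that the earlier key_owner_ref/key_authorization_context changes feed through `ctx.owner_ref`. Hypothetically, that constraint reduces to a membership test like the following; the actual evaluation lives in the authorization service and is not part of this diff:
// Hypothetical shape of the owner_refs constraint check (illustrative).
fn owner_ref_allowed(allowed_refs: &[String], ctx_owner_ref: Option<&str>) -> bool {
    match ctx_owner_ref {
        Some(owner_ref) => allowed_refs.iter().any(|r| r.as_str() == owner_ref),
        None => false, // resource has no owner ref: a scoped grant cannot match
    }
}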

View File

@@ -52,7 +52,7 @@ async fn setup_test_pack_and_action(pool: &PgPool) -> Result<(Pack, Action)> {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Action".to_string(),
description: "Test action for SSE tests".to_string(),
description: Some("Test action for SSE tests".to_string()),
entrypoint: "test.sh".to_string(),
runtime: None,
runtime_version_constraint: None,

View File

@@ -23,6 +23,7 @@ clap = { workspace = true, features = ["derive", "env", "string"] }
# HTTP client
reqwest = { workspace = true, features = ["multipart", "stream"] }
reqwest-eventsource = { workspace = true }
# Serialization
serde = { workspace = true }

View File

@@ -21,6 +21,11 @@ pub struct ApiResponse<T> {
pub data: T,
}
#[derive(Debug, serde::Deserialize)]
struct PaginatedResponse<T> {
data: Vec<T>,
}
/// API error response
#[derive(Debug, serde::Deserialize)]
pub struct ApiError {
@@ -55,6 +60,10 @@ impl ApiClient {
&self.base_url
}
pub fn auth_token(&self) -> Option<&str> {
self.auth_token.as_deref()
}
#[cfg(test)]
pub fn new(base_url: String, auth_token: Option<String>) -> Self {
let client = HttpClient::builder()
@@ -255,6 +264,31 @@ impl ApiClient {
}
}
async fn handle_paginated_response<T: DeserializeOwned>(
&self,
response: reqwest::Response,
) -> Result<Vec<T>> {
let status = response.status();
if status.is_success() {
let paginated: PaginatedResponse<T> = response
.json()
.await
.context("Failed to parse paginated API response")?;
Ok(paginated.data)
} else {
let error_text = response
.text()
.await
.unwrap_or_else(|_| "Unknown error".to_string());
if let Ok(api_error) = serde_json::from_str::<ApiError>(&error_text) {
anyhow::bail!("API error ({}): {}", status, api_error.error);
} else {
anyhow::bail!("API error ({}): {}", status, error_text);
}
}
}
/// Handle a response where we only care about success/failure, not a body.
async fn handle_empty_response(&self, response: reqwest::Response) -> Result<()> {
let status = response.status();
@@ -281,6 +315,25 @@ impl ApiClient {
self.execute_json::<T, ()>(Method::GET, path, None).await
}
pub async fn get_paginated<T: DeserializeOwned>(&mut self, path: &str) -> Result<Vec<T>> {
let req = self.build_request(Method::GET, path);
let response = req.send().await.context("Failed to send request to API")?;
if response.status() == StatusCode::UNAUTHORIZED
&& self.refresh_token.is_some()
&& self.refresh_auth_token().await?
{
let req = self.build_request(Method::GET, path);
let response = req
.send()
.await
.context("Failed to send request to API (retry)")?;
return self.handle_paginated_response(response).await;
}
self.handle_paginated_response(response).await
}
/// GET request with query parameters (query string must be in path)
///
/// Part of REST client API - reserved for future advanced filtering/search features.

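Editor's note: a usage sketch for the new paginated helper, mirroring the child-execution listing added to the CLI wait logic later in this change. `config` and `api_url` come from the surrounding command context, and `ExecutionListItem` is the CLI-side row type:
// Drain one page of child executions through the paginated helper.
let mut client = ApiClient::from_config(&config, api_url);
let children: Vec<ExecutionListItem> = client
    .get_paginated("/executions?parent=42&page=1&per_page=100")
    .await?;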
View File

@@ -6,7 +6,7 @@ use std::collections::HashMap;
use crate::client::ApiClient;
use crate::config::CliConfig;
use crate::output::{self, OutputFormat};
use crate::wait::{wait_for_execution, WaitOptions};
use crate::wait::{extract_stdout, spawn_execution_output_watch, wait_for_execution, WaitOptions};
#[derive(Subcommand)]
pub enum ActionCommands {
@@ -90,7 +90,7 @@ struct Action {
action_ref: String,
pack_ref: String,
label: String,
description: String,
description: Option<String>,
entrypoint: String,
runtime: Option<i64>,
created: String,
@@ -105,7 +105,7 @@ struct ActionDetail {
pack: i64,
pack_ref: String,
label: String,
description: String,
description: Option<String>,
entrypoint: String,
runtime: Option<i64>,
param_schema: Option<serde_json::Value>,
@@ -253,7 +253,7 @@ async fn handle_list(
.runtime
.map(|r| r.to_string())
.unwrap_or_else(|| "none".to_string()),
output::truncate(&action.description, 40),
output::truncate(&action.description.unwrap_or_default(), 40),
]);
}
@@ -288,7 +288,10 @@ async fn handle_show(
("Reference", action.action_ref.clone()),
("Pack", action.pack_ref.clone()),
("Label", action.label.clone()),
("Description", action.description.clone()),
(
"Description",
action.description.unwrap_or_else(|| "None".to_string()),
),
("Entry Point", action.entrypoint.clone()),
(
"Runtime",
@@ -356,7 +359,10 @@ async fn handle_update(
("Ref", action.action_ref.clone()),
("Pack", action.pack_ref.clone()),
("Label", action.label.clone()),
("Description", action.description.clone()),
(
"Description",
action.description.unwrap_or_else(|| "None".to_string()),
),
("Entrypoint", action.entrypoint.clone()),
(
"Runtime",
@@ -487,6 +493,15 @@ async fn handle_execute(
}
let verbose = matches!(output_format, OutputFormat::Table);
let watch_task = if verbose {
Some(spawn_execution_output_watch(
ApiClient::from_config(&config, api_url),
execution.id,
verbose,
))
} else {
None
};
let summary = wait_for_execution(WaitOptions {
execution_id: execution.id,
timeout_secs: timeout,
@@ -495,6 +510,13 @@ async fn handle_execute(
verbose,
})
.await?;
let suppress_final_stdout = watch_task
.as_ref()
.is_some_and(|task| task.delivered_output() && task.root_stdout_completed());
if let Some(task) = watch_task {
let _ = tokio::time::timeout(tokio::time::Duration::from_secs(2), task.handle).await;
}
match output_format {
OutputFormat::Json | OutputFormat::Yaml => {
@@ -511,7 +533,20 @@ async fn handle_execute(
("Updated", output::format_timestamp(&summary.updated)),
]);
if let Some(result) = summary.result {
let stdout = extract_stdout(&summary.result);
if !suppress_final_stdout {
if let Some(stdout) = &stdout {
output::print_section("Stdout");
println!("{}", stdout);
}
}
if let Some(mut result) = summary.result {
if stdout.is_some() {
if let Some(obj) = result.as_object_mut() {
obj.remove("stdout");
}
}
if !result.is_null() {
output::print_section("Result");
println!("{}", serde_json::to_string_pretty(&result)?);

View File

@@ -803,6 +803,7 @@ async fn handle_upload(
api_url: &Option<String>,
output_format: OutputFormat,
) -> Result<()> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- CLI users explicitly choose a local file to upload; this is not a server-side path sink.
let file_path = Path::new(&file);
if !file_path.exists() {
anyhow::bail!("File not found: {}", file);
@@ -811,6 +812,7 @@ async fn handle_upload(
anyhow::bail!("Not a file: {}", file);
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The validated CLI-selected upload path is intentionally read and sent to the API.
let file_bytes = tokio::fs::read(file_path).await?;
let file_name = file_path
.file_name()

View File

@@ -840,6 +840,7 @@ async fn handle_upload(
api_url: &Option<String>,
output_format: OutputFormat,
) -> Result<()> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- CLI pack commands intentionally operate on operator-supplied local paths.
let pack_dir = Path::new(&path);
// Validate the directory exists and contains pack.yaml
@@ -855,6 +856,7 @@ async fn handle_upload(
}
// Read pack ref from pack.yaml so we can display it
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Reading local pack metadata from the user-selected pack directory is expected CLI behavior.
let pack_yaml_content =
std::fs::read_to_string(&pack_yaml_path).context("Failed to read pack.yaml")?;
let pack_yaml: serde_yaml_ng::Value =
@@ -957,6 +959,7 @@ fn append_dir_to_tar<W: std::io::Write>(
base: &Path,
dir: &Path,
) -> Result<()> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The archiver walks a validated local directory selected by the CLI operator.
for entry in std::fs::read_dir(dir).context("Failed to read directory")? {
let entry = entry.context("Failed to read directory entry")?;
let entry_path = entry.path();
@@ -1061,6 +1064,7 @@ async fn handle_test(
use std::path::{Path, PathBuf};
// Determine if pack is a path or a pack name
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Pack test targets are local CLI inputs, not remote request paths.
let pack_path = Path::new(&pack);
let (pack_dir, pack_ref, pack_version) = if pack_path.exists() && pack_path.is_dir() {
// Local pack directory
@@ -1072,6 +1076,7 @@ async fn handle_test(
anyhow::bail!("pack.yaml not found in directory: {}", pack);
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- This reads pack.yaml from a local directory explicitly selected by the CLI operator.
let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)?;
let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)?;
@@ -1107,6 +1112,7 @@ async fn handle_test(
anyhow::bail!("pack.yaml not found for pack: {}", pack);
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Installed pack tests intentionally read local metadata from the workspace packs directory.
let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)?;
let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)?;
@@ -1120,6 +1126,7 @@ async fn handle_test(
// Load pack.yaml and extract test configuration
let pack_yaml_path = pack_dir.join("pack.yaml");
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Test configuration is loaded from the validated local pack directory.
let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)?;
let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)?;
@@ -1484,6 +1491,7 @@ fn detect_source_type(source: &str, ref_spec: Option<&str>, no_registry: bool) -
async fn handle_checksum(path: String, json: bool, output_format: OutputFormat) -> Result<()> {
use attune_common::pack_registry::{calculate_directory_checksum, calculate_file_checksum};
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Checksum generation intentionally accepts arbitrary local paths from the CLI operator.
let path_obj = Path::new(&path);
if !path_obj.exists() {
@@ -1581,6 +1589,7 @@ async fn handle_index_entry(
) -> Result<()> {
use attune_common::pack_registry::calculate_directory_checksum;
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Index-entry generation intentionally inspects a local pack directory chosen by the CLI operator.
let path_obj = Path::new(&path);
if !path_obj.exists() {
@@ -1606,6 +1615,7 @@ async fn handle_index_entry(
}
// Read and parse pack.yaml
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Reading local pack metadata for index generation is expected CLI behavior.
let pack_yaml_content = std::fs::read_to_string(&pack_yaml_path)?;
let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)?;

View File

@@ -19,11 +19,13 @@ pub async fn handle_index_update(
output_format: OutputFormat,
) -> Result<()> {
// Load existing index
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Registry index maintenance is a local CLI/admin operation over operator-supplied files.
let index_file_path = Path::new(&index_path);
if !index_file_path.exists() {
return Err(anyhow::anyhow!("Index file not found: {}", index_path));
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The CLI intentionally reads the local index file selected by the operator.
let index_content = fs::read_to_string(index_file_path)?;
let mut index: JsonValue = serde_json::from_str(&index_content)?;
@@ -34,6 +36,7 @@ pub async fn handle_index_update(
.ok_or_else(|| anyhow::anyhow!("Invalid index format: missing 'packs' array"))?;
// Load pack.yaml from the pack directory
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Local pack directories are explicit CLI inputs, not remote taint.
let pack_dir = Path::new(&pack_path);
if !pack_dir.exists() || !pack_dir.is_dir() {
return Err(anyhow::anyhow!("Pack directory not found: {}", pack_path));
@@ -47,6 +50,7 @@ pub async fn handle_index_update(
));
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Reading pack.yaml from a local operator-selected pack directory is expected CLI behavior.
let pack_yaml_content = fs::read_to_string(&pack_yaml_path)?;
let pack_yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(&pack_yaml_content)?;
@@ -250,6 +254,7 @@ pub async fn handle_index_merge(
output_format: OutputFormat,
) -> Result<()> {
// Check if output file exists
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Index merge output is a local CLI path controlled by the operator.
let output_file_path = Path::new(&output_path);
if output_file_path.exists() && !force {
return Err(anyhow::anyhow!(
@@ -265,6 +270,7 @@ pub async fn handle_index_merge(
// Load and merge all input files
for input_path in &input_paths {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Index merge inputs are local operator-selected files.
let input_file_path = Path::new(input_path);
if !input_file_path.exists() {
if output_format == OutputFormat::Table {
@@ -277,6 +283,7 @@ pub async fn handle_index_merge(
output::print_info(&format!("Loading: {}", input_path));
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The CLI intentionally reads each local input index file during merge.
let index_content = fs::read_to_string(input_file_path)?;
let index: JsonValue = serde_json::from_str(&index_content)?;

View File

@@ -112,7 +112,7 @@ struct Rule {
pack: Option<i64>,
pack_ref: String,
label: String,
description: String,
description: Option<String>,
#[serde(default)]
trigger: Option<i64>,
trigger_ref: String,
@@ -133,7 +133,7 @@ struct RuleDetail {
pack: Option<i64>,
pack_ref: String,
label: String,
description: String,
description: Option<String>,
#[serde(default)]
trigger: Option<i64>,
trigger_ref: String,
@@ -321,7 +321,10 @@ async fn handle_show(
("Ref", rule.rule_ref.clone()),
("Pack", rule.pack_ref.clone()),
("Label", rule.label.clone()),
("Description", rule.description.clone()),
(
"Description",
rule.description.unwrap_or_else(|| "None".to_string()),
),
("Trigger", rule.trigger_ref.clone()),
("Action", rule.action_ref.clone()),
("Enabled", output::format_bool(rule.enabled)),
@@ -440,7 +443,10 @@ async fn handle_update(
("Ref", rule.rule_ref.clone()),
("Pack", rule.pack_ref.clone()),
("Label", rule.label.clone()),
("Description", rule.description.clone()),
(
"Description",
rule.description.unwrap_or_else(|| "None".to_string()),
),
("Trigger", rule.trigger_ref.clone()),
("Action", rule.action_ref.clone()),
("Enabled", output::format_bool(rule.enabled)),

View File

@@ -172,6 +172,7 @@ async fn handle_upload(
api_url: &Option<String>,
output_format: OutputFormat,
) -> Result<()> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Workflow upload reads local files chosen by the CLI operator; it is not a server-side path sink.
let action_path = Path::new(&action_file);
// ── 1. Validate & read the action YAML ──────────────────────────────
@@ -182,6 +183,7 @@ async fn handle_upload(
anyhow::bail!("Path is not a file: {}", action_file);
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The action YAML is intentionally read from the validated local CLI path.
let action_yaml_content =
std::fs::read_to_string(action_path).context("Failed to read action YAML file")?;
@@ -216,6 +218,7 @@ async fn handle_upload(
}
// ── 4. Read and parse the workflow YAML ─────────────────────────────
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The workflow file path is confined to the pack directory before this local read occurs.
let workflow_yaml_content =
std::fs::read_to_string(&workflow_path).context("Failed to read workflow YAML file")?;
@@ -616,12 +619,41 @@ fn split_action_ref(action_ref: &str) -> Result<(String, String)> {
/// resolved relative to the action YAML's parent directory.
fn resolve_workflow_path(action_yaml_path: &Path, workflow_file: &str) -> Result<PathBuf> {
let action_dir = action_yaml_path.parent().unwrap_or(Path::new("."));
let pack_root = action_dir
.parent()
.ok_or_else(|| anyhow::anyhow!("Action YAML must live inside a pack actions/ directory"))?;
let canonical_pack_root = pack_root
.canonicalize()
.context("Failed to resolve pack root for workflow file")?;
let canonical_action_dir = action_dir
.canonicalize()
.context("Failed to resolve action directory for workflow file")?;
let canonical_workflow_path = normalize_path_from_base(&canonical_action_dir, workflow_file);
let resolved = action_dir.join(workflow_file);
if !canonical_workflow_path.starts_with(&canonical_pack_root) {
anyhow::bail!(
"Workflow file resolves outside the pack directory: {}",
workflow_file
);
}
// Canonicalize if possible (for better error messages), but don't fail
// if the file doesn't exist yet — we'll check existence later.
Ok(resolved)
Ok(canonical_workflow_path)
}
fn normalize_path_from_base(base: &Path, relative_path: &str) -> PathBuf {
let mut normalized = PathBuf::new();
for component in base.join(relative_path).components() {
match component {
std::path::Component::Prefix(prefix) => normalized.push(prefix.as_os_str()),
std::path::Component::RootDir => normalized.push(std::path::MAIN_SEPARATOR.to_string()),
std::path::Component::CurDir => {}
std::path::Component::ParentDir => {
normalized.pop();
}
std::path::Component::Normal(part) => normalized.push(part),
}
}
normalized
}
#[cfg(test)]
@@ -655,23 +687,62 @@ mod tests {
#[test]
fn test_resolve_workflow_path() {
let action_path = Path::new("/packs/mypack/actions/deploy.yaml");
let temp = tempfile::tempdir().unwrap();
let pack_dir = temp.path().join("mypack");
let actions_dir = pack_dir.join("actions");
let workflow_dir = actions_dir.join("workflows");
std::fs::create_dir_all(&workflow_dir).unwrap();
let action_path = actions_dir.join("deploy.yaml");
let workflow_path = workflow_dir.join("deploy.workflow.yaml");
std::fs::write(
&action_path,
"ref: mypack.deploy\nworkflow_file: workflows/deploy.workflow.yaml\n",
)
.unwrap();
std::fs::write(&workflow_path, "version: 1.0.0\n").unwrap();
let resolved =
resolve_workflow_path(action_path, "workflows/deploy.workflow.yaml").unwrap();
assert_eq!(
resolved,
PathBuf::from("/packs/mypack/actions/workflows/deploy.workflow.yaml")
);
resolve_workflow_path(&action_path, "workflows/deploy.workflow.yaml").unwrap();
assert_eq!(resolved, workflow_path.canonicalize().unwrap());
}
#[test]
fn test_resolve_workflow_path_relative() {
let action_path = Path::new("actions/deploy.yaml");
let temp = tempfile::tempdir().unwrap();
let pack_dir = temp.path().join("mypack");
let actions_dir = pack_dir.join("actions");
let workflows_dir = pack_dir.join("workflows");
std::fs::create_dir_all(&actions_dir).unwrap();
std::fs::create_dir_all(&workflows_dir).unwrap();
let action_path = actions_dir.join("deploy.yaml");
let workflow_path = workflows_dir.join("deploy.workflow.yaml");
std::fs::write(
&action_path,
"ref: mypack.deploy\nworkflow_file: ../workflows/deploy.workflow.yaml\n",
)
.unwrap();
std::fs::write(&workflow_path, "version: 1.0.0\n").unwrap();
let resolved =
resolve_workflow_path(action_path, "workflows/deploy.workflow.yaml").unwrap();
assert_eq!(
resolved,
PathBuf::from("actions/workflows/deploy.workflow.yaml")
);
resolve_workflow_path(&action_path, "../workflows/deploy.workflow.yaml").unwrap();
assert_eq!(resolved, workflow_path.canonicalize().unwrap());
}
#[test]
fn test_resolve_workflow_path_rejects_traversal_outside_pack() {
let temp = tempfile::tempdir().unwrap();
let pack_dir = temp.path().join("mypack");
let actions_dir = pack_dir.join("actions");
std::fs::create_dir_all(&actions_dir).unwrap();
let action_path = actions_dir.join("deploy.yaml");
let outside = temp.path().join("outside.yaml");
std::fs::write(&action_path, "ref: mypack.deploy\n").unwrap();
std::fs::write(&outside, "version: 1.0.0\n").unwrap();
let err = resolve_workflow_path(&action_path, "../../outside.yaml").unwrap_err();
assert!(err.to_string().contains("outside the pack directory"));
}
}
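Editor's note: the traversal guard above is purely lexical: `..` components are resolved by popping the accumulated path, and the result must then remain under the canonicalized pack root. A worked example of normalize_path_from_base(), with illustrative paths and std only:
// Staying inside the pack: actions/../workflows collapses to workflows.
let base = Path::new("/packs/mypack/actions");
assert_eq!(
    normalize_path_from_base(base, "../workflows/deploy.workflow.yaml"),
    PathBuf::from("/packs/mypack/workflows/deploy.workflow.yaml")
);
// Escaping the pack: the result is no longer under /packs/mypack,
// so the starts_with() guard in resolve_workflow_path rejects it.
assert_eq!(
    normalize_path_from_base(base, "../../outside.yaml"),
    PathBuf::from("/packs/outside.yaml")
);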

View File

@@ -11,7 +11,13 @@
use anyhow::Result;
use futures::{SinkExt, StreamExt};
use reqwest_eventsource::{Event as SseEvent, EventSource};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc,
};
use std::time::{Duration, Instant};
use tokio_tungstenite::{connect_async, tungstenite::Message};
@@ -54,6 +60,22 @@ pub struct WaitOptions<'a> {
pub verbose: bool,
}
pub struct OutputWatchTask {
pub handle: tokio::task::JoinHandle<()>,
delivered_output: Arc<AtomicBool>,
root_stdout_completed: Arc<AtomicBool>,
}
impl OutputWatchTask {
pub fn delivered_output(&self) -> bool {
self.delivered_output.load(Ordering::Relaxed)
}
pub fn root_stdout_completed(&self) -> bool {
self.root_stdout_completed.load(Ordering::Relaxed)
}
}
// ── notifier WebSocket messages (mirrors websocket_server.rs) ────────────────
#[derive(Debug, Serialize)]
@@ -102,6 +124,58 @@ struct RestExecution {
updated: String,
}
#[derive(Debug, Clone, Deserialize)]
struct WorkflowTaskMetadata {
task_name: String,
#[serde(default)]
task_index: Option<i32>,
}
#[derive(Debug, Clone, Deserialize)]
struct ExecutionListItem {
id: i64,
action_ref: String,
status: String,
#[serde(default)]
workflow_task: Option<WorkflowTaskMetadata>,
}
#[derive(Debug)]
struct ChildWatchState {
label: String,
status: String,
announced_terminal: bool,
stream_handles: Vec<StreamWatchHandle>,
}
struct RootWatchState {
stream_handles: Vec<StreamWatchHandle>,
}
#[derive(Debug)]
struct StreamWatchHandle {
stream_name: &'static str,
offset: Arc<AtomicU64>,
handle: tokio::task::JoinHandle<()>,
}
#[derive(Clone)]
struct StreamWatchConfig {
base_url: String,
token: String,
execution_id: i64,
prefix: Option<String>,
verbose: bool,
delivered_output: Arc<AtomicBool>,
root_stdout_completed: Option<Arc<AtomicBool>>,
}
struct StreamLogTask {
stream_name: &'static str,
offset: Arc<AtomicU64>,
config: StreamWatchConfig,
}
impl From<RestExecution> for ExecutionSummary {
fn from(e: RestExecution) -> Self {
Self {
@@ -177,6 +251,260 @@ pub async fn wait_for_execution(opts: WaitOptions<'_>) -> Result<ExecutionSummar
.await
}
pub fn spawn_execution_output_watch(
mut client: ApiClient,
execution_id: i64,
verbose: bool,
) -> OutputWatchTask {
let delivered_output = Arc::new(AtomicBool::new(false));
let root_stdout_completed = Arc::new(AtomicBool::new(false));
let delivered_output_for_task = delivered_output.clone();
let root_stdout_completed_for_task = root_stdout_completed.clone();
let handle = tokio::spawn(async move {
if let Err(err) = watch_execution_output(
&mut client,
execution_id,
verbose,
delivered_output_for_task,
root_stdout_completed_for_task,
)
.await
{
if verbose {
eprintln!(" [watch] {}", err);
}
}
});
OutputWatchTask {
handle,
delivered_output,
root_stdout_completed,
}
}
async fn watch_execution_output(
client: &mut ApiClient,
execution_id: i64,
verbose: bool,
delivered_output: Arc<AtomicBool>,
root_stdout_completed: Arc<AtomicBool>,
) -> Result<()> {
let base_url = client.base_url().to_string();
let mut root_watch: Option<RootWatchState> = None;
let mut children: HashMap<i64, ChildWatchState> = HashMap::new();
loop {
let execution: RestExecution = client.get(&format!("/executions/{}", execution_id)).await?;
if root_watch
.as_ref()
.is_none_or(|state| streams_need_restart(&state.stream_handles))
{
if let Some(token) = client.auth_token().map(str::to_string) {
match root_watch.as_mut() {
Some(state) => restart_finished_streams(
&mut state.stream_handles,
&StreamWatchConfig {
base_url: base_url.clone(),
token,
execution_id,
prefix: None,
verbose,
delivered_output: delivered_output.clone(),
root_stdout_completed: Some(root_stdout_completed.clone()),
},
),
None => {
root_watch = Some(RootWatchState {
stream_handles: spawn_execution_log_streams(StreamWatchConfig {
base_url: base_url.clone(),
token,
execution_id,
verbose,
prefix: None,
delivered_output: delivered_output.clone(),
root_stdout_completed: Some(root_stdout_completed.clone()),
}),
});
}
}
}
}
let child_items = list_child_executions(client, execution_id)
.await
.unwrap_or_default();
for child in child_items {
let label = format_task_label(&child.workflow_task, &child.action_ref, child.id);
let entry = children.entry(child.id).or_insert_with(|| {
if verbose {
eprintln!(" [{}] started ({})", label, child.action_ref);
}
let stream_handles = client
.auth_token()
.map(str::to_string)
.map(|token| {
spawn_execution_log_streams(StreamWatchConfig {
base_url: base_url.clone(),
token,
execution_id: child.id,
prefix: Some(label.clone()),
verbose,
delivered_output: delivered_output.clone(),
root_stdout_completed: None,
})
})
.unwrap_or_default();
ChildWatchState {
label,
status: child.status.clone(),
announced_terminal: false,
stream_handles,
}
});
if entry.status != child.status {
entry.status = child.status.clone();
}
let child_is_terminal = is_terminal(&entry.status);
if !child_is_terminal && streams_need_restart(&entry.stream_handles) {
if let Some(token) = client.auth_token().map(str::to_string) {
restart_finished_streams(
&mut entry.stream_handles,
&StreamWatchConfig {
base_url: base_url.clone(),
token,
execution_id: child.id,
prefix: Some(entry.label.clone()),
verbose,
delivered_output: delivered_output.clone(),
root_stdout_completed: None,
},
);
}
}
if !entry.announced_terminal && is_terminal(&child.status) {
entry.announced_terminal = true;
if verbose {
eprintln!(" [{}] {}", entry.label, child.status);
}
}
}
if is_terminal(&execution.status) {
break;
}
tokio::time::sleep(Duration::from_millis(500)).await;
}
if let Some(root_watch) = root_watch {
wait_for_stream_handles(root_watch.stream_handles).await;
}
for child in children.into_values() {
wait_for_stream_handles(child.stream_handles).await;
}
Ok(())
}
fn spawn_execution_log_streams(config: StreamWatchConfig) -> Vec<StreamWatchHandle> {
["stdout", "stderr"]
.into_iter()
.map(|stream_name| {
let offset = Arc::new(AtomicU64::new(0));
let completion_flag = if stream_name == "stdout" {
config.root_stdout_completed.clone()
} else {
None
};
StreamWatchHandle {
stream_name,
handle: tokio::spawn(stream_execution_log(StreamLogTask {
stream_name,
offset: offset.clone(),
config: StreamWatchConfig {
base_url: config.base_url.clone(),
token: config.token.clone(),
execution_id: config.execution_id,
prefix: config.prefix.clone(),
verbose: config.verbose,
delivered_output: config.delivered_output.clone(),
root_stdout_completed: completion_flag,
},
})),
offset,
}
})
.collect()
}
fn streams_need_restart(handles: &[StreamWatchHandle]) -> bool {
handles.is_empty() || handles.iter().any(|handle| handle.handle.is_finished())
}
fn restart_finished_streams(handles: &mut [StreamWatchHandle], config: &StreamWatchConfig) {
for stream in handles.iter_mut() {
if stream.handle.is_finished() {
let offset = stream.offset.clone();
let completion_flag = if stream.stream_name == "stdout" {
config.root_stdout_completed.clone()
} else {
None
};
stream.handle = tokio::spawn(stream_execution_log(StreamLogTask {
stream_name: stream.stream_name,
offset,
config: StreamWatchConfig {
base_url: config.base_url.clone(),
token: config.token.clone(),
execution_id: config.execution_id,
prefix: config.prefix.clone(),
verbose: config.verbose,
delivered_output: config.delivered_output.clone(),
root_stdout_completed: completion_flag,
},
}));
}
}
}
async fn wait_for_stream_handles(handles: Vec<StreamWatchHandle>) {
for handle in handles {
let _ = handle.handle.await;
}
}
async fn list_child_executions(
client: &mut ApiClient,
execution_id: i64,
) -> Result<Vec<ExecutionListItem>> {
const PER_PAGE: u32 = 100;
let mut page = 1;
let mut all_children = Vec::new();
loop {
let path = format!("/executions?parent={execution_id}&page={page}&per_page={PER_PAGE}");
let mut page_items: Vec<ExecutionListItem> = client.get_paginated(&path).await?;
let page_len = page_items.len();
all_children.append(&mut page_items);
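// A page shorter than PER_PAGE can only be the final page, so stop here.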
if page_len < PER_PAGE as usize {
break;
}
page += 1;
}
Ok(all_children)
}
// ── WebSocket path ────────────────────────────────────────────────────────────
async fn wait_via_websocket(
@@ -482,6 +810,7 @@ fn resolve_ws_url(opts: &WaitOptions<'_>) -> Option<String> {
/// - `https://api.example.com` → `wss://api.example.com:8081`
/// - `http://api.example.com:9000` → `ws://api.example.com:8081`
fn derive_notifier_url(api_url: &str) -> Option<String> {
// nosemgrep: javascript.lang.security.detect-insecure-websocket.detect-insecure-websocket -- The function upgrades https->wss and only returns ws for explicit http base URLs or test examples.
let url = url::Url::parse(api_url).ok()?;
let ws_scheme = match url.scheme() {
"https" => "wss",
@@ -491,6 +820,148 @@ fn derive_notifier_url(api_url: &str) -> Option<String> {
Some(format!("{}://{}:8081", ws_scheme, host))
}
pub fn extract_stdout(result: &Option<serde_json::Value>) -> Option<String> {
result
.as_ref()
.and_then(|value| value.get("stdout"))
.and_then(|stdout| stdout.as_str())
.filter(|stdout| !stdout.is_empty())
.map(ToOwned::to_owned)
}
fn format_task_label(
workflow_task: &Option<WorkflowTaskMetadata>,
action_ref: &str,
execution_id: i64,
) -> String {
if let Some(workflow_task) = workflow_task {
if let Some(index) = workflow_task.task_index {
format!("{}[{}]", workflow_task.task_name, index)
} else {
workflow_task.task_name.clone()
}
} else {
format!("{}#{}", action_ref, execution_id)
}
}
async fn stream_execution_log(task: StreamLogTask) {
let StreamLogTask {
stream_name,
offset,
config:
StreamWatchConfig {
base_url,
token,
execution_id,
prefix,
verbose,
delivered_output,
root_stdout_completed,
},
} = task;
let mut stream_url = match url::Url::parse(&format!(
"{}/api/v1/executions/{}/logs/{}/stream",
base_url.trim_end_matches('/'),
execution_id,
stream_name
)) {
Ok(url) => url,
Err(err) => {
if verbose {
eprintln!(" [watch] failed to build stream URL: {}", err);
}
return;
}
};
let current_offset = offset.load(Ordering::Relaxed).to_string();
stream_url
.query_pairs_mut()
.append_pair("token", &token)
.append_pair("offset", &current_offset);
let mut event_source = EventSource::get(stream_url);
let mut carry = String::new();
while let Some(event) = event_source.next().await {
match event {
Ok(SseEvent::Open) => {}
Ok(SseEvent::Message(message)) => match message.event.as_str() {
"content" | "append" => {
if let Ok(server_offset) = message.id.parse::<u64>() {
offset.store(server_offset, Ordering::Relaxed);
}
if !message.data.is_empty() {
delivered_output.store(true, Ordering::Relaxed);
}
print_stream_chunk(prefix.as_deref(), &message.data, &mut carry);
}
"done" => {
if let Some(flag) = &root_stdout_completed {
flag.store(true, Ordering::Relaxed);
}
flush_stream_chunk(prefix.as_deref(), &mut carry);
break;
}
"error" => {
if verbose && !message.data.is_empty() {
eprintln!(" [watch] {}", message.data);
}
break;
}
_ => {}
},
Err(err) => {
flush_stream_chunk(prefix.as_deref(), &mut carry);
if verbose {
eprintln!(
" [watch] stream error for execution {}: {}",
execution_id, err
);
}
break;
}
}
}
flush_stream_chunk(prefix.as_deref(), &mut carry);
event_source.close();
}
fn print_stream_chunk(prefix: Option<&str>, chunk: &str, carry: &mut String) {
carry.push_str(chunk);
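// Emit only complete lines; a trailing partial line stays in `carry` until
// the next chunk (or a final flush) completes it.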
while let Some(idx) = carry.find('\n') {
let mut line = carry.drain(..=idx).collect::<String>();
if line.ends_with('\n') {
line.pop();
}
if line.ends_with('\r') {
line.pop();
}
if let Some(prefix) = prefix {
eprintln!("[{}] {}", prefix, line);
} else {
eprintln!("{}", line);
}
}
}
fn flush_stream_chunk(prefix: Option<&str>, carry: &mut String) {
if carry.is_empty() {
return;
}
if let Some(prefix) = prefix {
eprintln!("[{}] {}", prefix, carry);
} else {
eprintln!("{}", carry);
}
carry.clear();
}
#[cfg(test)]
mod tests {
use super::*;
@@ -553,4 +1024,26 @@ mod tests {
assert_eq!(summary.status, "failed");
assert_eq!(summary.action_ref, "");
}
#[test]
fn test_extract_stdout() {
let result = Some(serde_json::json!({
"stdout": "hello world",
"stderr_log": "/tmp/stderr.log"
}));
assert_eq!(extract_stdout(&result).as_deref(), Some("hello world"));
}
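#[test]
fn test_derive_notifier_url_mappings() {
// Hedged sketch pinning the mappings documented on `derive_notifier_url`;
// the URL fixtures come from that doc comment, not from the real suite.
assert_eq!(
derive_notifier_url("https://api.example.com").as_deref(),
Some("wss://api.example.com:8081")
);
assert_eq!(
derive_notifier_url("http://api.example.com:9000").as_deref(),
Some("ws://api.example.com:8081")
);
}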
#[test]
fn test_format_task_label() {
let workflow_task = Some(WorkflowTaskMetadata {
task_name: "build".to_string(),
task_index: Some(2),
});
assert_eq!(
format_task_label(&workflow_task, "core.echo", 42),
"build[2]"
);
assert_eq!(format_task_label(&None, "core.echo", 42), "core.echo#42");
}
}

@@ -73,6 +73,7 @@ regex = { workspace = true }
# Version matching
semver = { workspace = true }
url = { workspace = true }
[dev-dependencies]
mockall = { workspace = true }

@@ -355,10 +355,14 @@ pub struct OidcConfig {
pub enabled: bool,
/// OpenID Provider discovery document URL.
pub discovery_url: String,
/// Required when `enabled` is true; ignored otherwise.
#[serde(default)]
pub discovery_url: Option<String>,
/// Confidential client ID.
pub client_id: String,
/// Required when `enabled` is true; ignored otherwise.
#[serde(default)]
pub client_id: Option<String>,
/// Provider name used in login-page overrides such as `?auth=<provider_name>`.
#[serde(default = "default_oidc_provider_name")]
@@ -374,7 +378,9 @@ pub struct OidcConfig {
pub client_secret: Option<String>,
/// Redirect URI registered with the provider.
pub redirect_uri: String,
/// Required when `enabled` is true; ignored otherwise.
#[serde(default)]
pub redirect_uri: Option<String>,
/// Optional post-logout redirect URI.
pub post_logout_redirect_uri: Option<String>,
@@ -396,7 +402,9 @@ pub struct LdapConfig {
pub enabled: bool,
/// LDAP server URL (e.g., "ldap://ldap.example.com:389" or "ldaps://ldap.example.com:636").
pub url: String,
/// Required when `enabled` is true; ignored otherwise.
#[serde(default)]
pub url: Option<String>,
/// Bind DN template. Use `{login}` as placeholder for the user-supplied login.
/// Example: "uid={login},ou=users,dc=example,dc=com"
@@ -650,6 +658,11 @@ pub struct PackRegistryConfig {
#[serde(default = "default_true")]
pub verify_checksums: bool,
/// Additional remote hosts allowed for pack archive/git downloads.
/// Hosts from enabled registry indices are implicitly allowed.
#[serde(default)]
pub allowed_source_hosts: Vec<String>,
/// Allow HTTP (non-HTTPS) registries
#[serde(default)]
pub allow_http: bool,
@@ -672,6 +685,7 @@ impl Default for PackRegistryConfig {
cache_enabled: true,
timeout: default_registry_timeout(),
verify_checksums: true,
allowed_source_hosts: Vec::new(),
allow_http: false,
}
}
@@ -985,14 +999,20 @@ impl Config {
if let Some(oidc) = &self.security.oidc {
if oidc.enabled {
if oidc.discovery_url.trim().is_empty() {
if oidc
.discovery_url
.as_deref()
.unwrap_or("")
.trim()
.is_empty()
{
return Err(crate::Error::validation(
"OIDC discovery URL cannot be empty when OIDC is enabled",
"OIDC discovery URL is required when OIDC is enabled",
));
}
if oidc.client_id.trim().is_empty() {
if oidc.client_id.as_deref().unwrap_or("").trim().is_empty() {
return Err(crate::Error::validation(
"OIDC client ID cannot be empty when OIDC is enabled",
"OIDC client ID is required when OIDC is enabled",
));
}
if oidc
@@ -1006,14 +1026,22 @@ impl Config {
"OIDC client secret is required when OIDC is enabled",
));
}
if oidc.redirect_uri.trim().is_empty() {
if oidc.redirect_uri.as_deref().unwrap_or("").trim().is_empty() {
return Err(crate::Error::validation(
"OIDC redirect URI cannot be empty when OIDC is enabled",
"OIDC redirect URI is required when OIDC is enabled",
));
}
}
}
if let Some(ldap) = &self.security.ldap {
if ldap.enabled && ldap.url.as_deref().unwrap_or("").trim().is_empty() {
return Err(crate::Error::validation(
"LDAP server URL is required when LDAP is enabled",
));
}
}
// Validate encryption key if provided
if let Some(ref key) = self.security.encryption_key {
if key.len() < 32 {
@@ -1172,6 +1200,31 @@ mod tests {
assert!(config.validate().is_err());
}
#[test]
fn test_oidc_config_disabled_no_urls_required() {
let yaml = r#"
enabled: false
"#;
let cfg: OidcConfig = serde_yaml_ng::from_str(yaml).unwrap();
assert!(!cfg.enabled);
assert!(cfg.discovery_url.is_none());
assert!(cfg.client_id.is_none());
assert!(cfg.redirect_uri.is_none());
assert!(cfg.client_secret.is_none());
assert_eq!(cfg.provider_name, "oidc");
}
#[test]
fn test_ldap_config_disabled_no_url_required() {
let yaml = r#"
enabled: false
"#;
let cfg: LdapConfig = serde_yaml_ng::from_str(yaml).unwrap();
assert!(!cfg.enabled);
assert!(cfg.url.is_none());
assert_eq!(cfg.provider_name, "ldap");
}
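#[test]
fn test_oidc_config_enabled_with_required_fields() {
// Hedged companion sketch: with `enabled: true` the now-optional fields are
// supplied; the URLs and client ID here are illustrative values only.
let yaml = r#"
enabled: true
discovery_url: "https://idp.example.com/.well-known/openid-configuration"
client_id: "attune"
redirect_uri: "https://attune.example.com/auth/callback"
"#;
let cfg: OidcConfig = serde_yaml_ng::from_str(yaml).unwrap();
assert!(cfg.enabled);
assert!(cfg.discovery_url.is_some());
assert!(cfg.client_id.is_some());
assert!(cfg.redirect_uri.is_some());
}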
#[test]
fn test_ldap_config_defaults() {
let yaml = r#"
@@ -1182,7 +1235,7 @@ client_id: "test"
let cfg: LdapConfig = serde_yaml_ng::from_str(yaml).unwrap();
assert!(cfg.enabled);
assert_eq!(cfg.url, "ldap://localhost:389");
assert_eq!(cfg.url.as_deref(), Some("ldap://localhost:389"));
assert_eq!(cfg.user_filter, "(uid={login})");
assert_eq!(cfg.login_attr, "uid");
assert_eq!(cfg.email_attr, "mail");
@@ -1222,7 +1275,7 @@ provider_icon_url: "https://corp.com/icon.svg"
let cfg: LdapConfig = serde_yaml_ng::from_str(yaml).unwrap();
assert!(cfg.enabled);
assert_eq!(cfg.url, "ldaps://ldap.corp.com:636");
assert_eq!(cfg.url.as_deref(), Some("ldaps://ldap.corp.com:636"));
assert_eq!(
cfg.bind_dn_template.as_deref(),
Some("uid={login},ou=people,dc=corp,dc=com")

@@ -444,13 +444,55 @@ pub mod runtime {
/// Optional environment variables to set during action execution.
///
/// Values support the same template variables as other fields:
/// Entries support the same template variables as other fields:
/// `{pack_dir}`, `{env_dir}`, `{interpreter}`, `{manifest_path}`.
///
/// Example: `{"NODE_PATH": "{env_dir}/node_modules"}` ensures Node.js
/// can find packages installed in the isolated runtime environment.
/// The shorthand string form replaces the variable entirely:
/// `{"NODE_PATH": "{env_dir}/node_modules"}`
///
/// The object form supports declarative merge semantics:
/// `{"PYTHONPATH": {"value": "{pack_dir}/lib", "operation": "prepend"}}`
#[serde(default)]
pub env_vars: HashMap<String, String>,
pub env_vars: HashMap<String, RuntimeEnvVarConfig>,
}
/// Declarative configuration for a single runtime environment variable.
///
/// The string form is shorthand for `{ "value": "...", "operation": "set" }`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(untagged)]
pub enum RuntimeEnvVarConfig {
Value(String),
Spec(RuntimeEnvVarSpec),
}
/// Full configuration for a runtime environment variable.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct RuntimeEnvVarSpec {
/// Template value to resolve for this variable.
pub value: String,
/// How the resolved value should be merged with any existing value.
#[serde(default)]
pub operation: RuntimeEnvVarOperation,
/// Separator used for prepend/append operations.
#[serde(default = "default_env_var_separator")]
pub separator: String,
}
/// Merge behavior for runtime-provided environment variables.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Default)]
#[serde(rename_all = "snake_case")]
pub enum RuntimeEnvVarOperation {
#[default]
Set,
Prepend,
Append,
}
fn default_env_var_separator() -> String {
":".to_string()
}
/// Controls how inline code is materialized before execution.
@@ -768,6 +810,43 @@ pub mod runtime {
}
}
impl RuntimeEnvVarConfig {
/// Resolve this environment variable against the current template
/// variables and any existing value already present in the process env.
pub fn resolve(
&self,
vars: &HashMap<&str, String>,
existing_value: Option<&str>,
) -> String {
match self {
Self::Value(value) => RuntimeExecutionConfig::resolve_template(value, vars),
Self::Spec(spec) => {
let resolved = RuntimeExecutionConfig::resolve_template(&spec.value, vars);
match spec.operation {
RuntimeEnvVarOperation::Set => resolved,
RuntimeEnvVarOperation::Prepend => {
join_env_var_values(&resolved, existing_value, &spec.separator)
}
RuntimeEnvVarOperation::Append => join_env_var_values(
existing_value.unwrap_or_default(),
Some(&resolved),
&spec.separator,
),
}
}
}
}
}
fn join_env_var_values(left: &str, right: Option<&str>, separator: &str) -> String {
match (left.is_empty(), right.unwrap_or_default().is_empty()) {
(true, true) => String::new(),
(false, true) => left.to_string(),
(true, false) => right.unwrap_or_default().to_string(),
(false, false) => format!("{}{}{}", left, separator, right.unwrap_or_default()),
}
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct Runtime {
pub id: Id,
@@ -887,7 +966,7 @@ pub mod trigger {
pub pack: Option<Id>,
pub pack_ref: Option<String>,
pub label: String,
pub description: String,
pub description: Option<String>,
pub entrypoint: String,
pub runtime: Id,
pub runtime_ref: String,
@@ -915,7 +994,7 @@ pub mod action {
pub pack: Id,
pub pack_ref: String,
pub label: String,
pub description: String,
pub description: Option<String>,
pub entrypoint: String,
pub runtime: Option<Id>,
/// Optional semver version constraint for the runtime
@@ -965,7 +1044,7 @@ pub mod rule {
pub pack: Id,
pub pack_ref: String,
pub label: String,
pub description: String,
pub description: Option<String>,
pub action: Option<Id>,
pub action_ref: String,
pub trigger: Option<Id>,
@@ -1221,6 +1300,7 @@ pub mod identity {
pub display_name: Option<String>,
pub password_hash: Option<String>,
pub attributes: JsonDict,
pub frozen: bool,
pub created: DateTime<Utc>,
pub updated: DateTime<Utc>,
}
@@ -1245,6 +1325,25 @@ pub mod identity {
pub permset: Id,
pub created: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct IdentityRoleAssignment {
pub id: Id,
pub identity: Id,
pub role: String,
pub source: String,
pub managed: bool,
pub created: DateTime<Utc>,
pub updated: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
pub struct PermissionSetRoleAssignment {
pub id: Id,
pub permset: Id,
pub role: String,
pub created: DateTime<Utc>,
}
}
/// Key/Value storage
@@ -1313,7 +1412,7 @@ pub mod artifact {
pub content_type: Option<String>,
/// Size of the latest version's content in bytes
pub size_bytes: Option<i64>,
/// Execution that produced this artifact (no FK — execution is a hypertable)
/// Execution that produced this artifact (no FK by design)
pub execution: Option<Id>,
/// Structured JSONB data for progress artifacts or metadata
pub data: Option<serde_json::Value>,
@@ -1620,3 +1719,68 @@ pub mod entity_history {
}
}
}
#[cfg(test)]
mod tests {
use super::runtime::{
RuntimeEnvVarConfig, RuntimeEnvVarOperation, RuntimeEnvVarSpec, RuntimeExecutionConfig,
};
use serde_json::json;
use std::collections::HashMap;
#[test]
fn runtime_execution_config_env_vars_accept_string_and_object_forms() {
let config: RuntimeExecutionConfig = serde_json::from_value(json!({
"env_vars": {
"NODE_PATH": "{env_dir}/node_modules",
"PYTHONPATH": {
"value": "{pack_dir}/lib",
"operation": "prepend",
"separator": ":"
}
}
}))
.expect("runtime execution config should deserialize");
assert!(matches!(
config.env_vars.get("NODE_PATH"),
Some(RuntimeEnvVarConfig::Value(value)) if value == "{env_dir}/node_modules"
));
assert!(matches!(
config.env_vars.get("PYTHONPATH"),
Some(RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
value,
operation: RuntimeEnvVarOperation::Prepend,
separator,
})) if value == "{pack_dir}/lib" && separator == ":"
));
}
#[test]
fn runtime_env_var_config_resolves_prepend_and_append_against_existing_values() {
let mut vars = HashMap::new();
vars.insert("pack_dir", "/packs/example".to_string());
vars.insert("env_dir", "/runtime_envs/example/python".to_string());
let prepend = RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
value: "{pack_dir}/lib".to_string(),
operation: RuntimeEnvVarOperation::Prepend,
separator: ":".to_string(),
});
assert_eq!(
prepend.resolve(&vars, Some("/already/set")),
"/packs/example/lib:/already/set"
);
let append = RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
value: "{env_dir}/node_modules".to_string(),
operation: RuntimeEnvVarOperation::Append,
separator: ":".to_string(),
});
assert_eq!(
append.resolve(&vars, Some("/base/modules")),
"/base/modules:/runtime_envs/example/python/node_modules"
);
}
}

@@ -102,7 +102,12 @@ impl MqError {
pub fn is_retriable(&self) -> bool {
matches!(
self,
MqError::Connection(_) | MqError::Channel(_) | MqError::Timeout(_) | MqError::Pool(_)
MqError::Connection(_)
| MqError::Channel(_)
| MqError::Publish(_)
| MqError::Timeout(_)
| MqError::Pool(_)
| MqError::Lapin(_)
)
}
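// Illustrative retry loop at a hypothetical call site; `MqClient`,
// `publish_raw`, and the backoff schedule are assumptions. Only the
// `is_retriable` classification above is real.
async fn publish_with_retry(mq: &MqClient, payload: &[u8]) -> Result<(), MqError> {
let mut attempt = 0u32;
loop {
match mq.publish_raw(payload).await {
Ok(()) => return Ok(()),
Err(err) if err.is_retriable() && attempt < 3 => {
// back off 100ms, 200ms, 400ms before giving up for good
tokio::time::sleep(std::time::Duration::from_millis(100 * 2u64.pow(attempt))).await;
attempt += 1;
}
Err(err) => return Err(err),
}
}
}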

@@ -12,6 +12,7 @@ use crate::models::Runtime;
use crate::repositories::action::ActionRepository;
use crate::repositories::runtime::{self, RuntimeRepository};
use crate::repositories::FindById as _;
use regex::Regex;
use serde_json::Value as JsonValue;
use sqlx::{PgPool, Row};
use std::collections::{HashMap, HashSet};
@@ -94,10 +95,7 @@ pub struct PackEnvironmentManager {
impl PackEnvironmentManager {
/// Create a new pack environment manager
pub fn new(pool: PgPool, config: &Config) -> Self {
let base_path = PathBuf::from(&config.packs_base_dir)
.parent()
.map(|p| p.join("packenvs"))
.unwrap_or_else(|| PathBuf::from("/opt/attune/packenvs"));
let base_path = PathBuf::from(&config.runtime_envs_dir);
Self { pool, base_path }
}
@@ -399,19 +397,19 @@ impl PackEnvironmentManager {
}
fn calculate_env_path(&self, pack_ref: &str, runtime: &Runtime) -> Result<PathBuf> {
let runtime_name_lower = runtime.name.to_lowercase();
let template = runtime
.installers
.get("base_path_template")
.and_then(|v| v.as_str())
.unwrap_or("/opt/attune/packenvs/{pack_ref}/{runtime_name_lower}");
.unwrap_or("{pack_ref}/{runtime_name_lower}");
let runtime_name_lower = runtime.name.to_lowercase();
let path_str = template
.replace("{pack_ref}", pack_ref)
.replace("{runtime_ref}", &runtime.r#ref)
.replace("{runtime_name_lower}", &runtime_name_lower);
Ok(PathBuf::from(path_str))
resolve_env_path(&self.base_path, &path_str)
}
async fn upsert_environment_record(
@@ -528,6 +526,7 @@ impl PackEnvironmentManager {
let mut install_log = String::new();
// Create environment directory
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- env_path comes from validated runtime-env path construction under runtime_envs_dir.
let env_path = PathBuf::from(&pack_env.env_path);
if env_path.exists() {
warn!(
@@ -659,6 +658,8 @@ impl PackEnvironmentManager {
env_path,
&pack_path_str,
)?;
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The candidate command path is validated and confined before any execution is attempted.
let command = validate_installer_command(&command, pack_path, Path::new(env_path))?;
let args_template = installer
.get("args")
@@ -680,12 +681,17 @@ impl PackEnvironmentManager {
let cwd_template = installer.get("cwd").and_then(|v| v.as_str());
let cwd = if let Some(cwd_t) = cwd_template {
Some(self.resolve_template(
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Installer cwd values are validated to stay under the pack root or environment directory.
Some(validate_installer_path(
&self.resolve_template(
cwd_t,
pack_ref,
runtime_ref,
env_path,
&pack_path_str,
)?,
pack_path,
Path::new(env_path),
)?)
} else {
None
@@ -763,6 +769,7 @@ impl PackEnvironmentManager {
async fn execute_installer_action(&self, action: &InstallerAction) -> Result<String> {
debug!("Executing: {} {:?}", action.command, action.args);
// nosemgrep: rust.actix.command-injection.rust-actix-command-injection.rust-actix-command-injection -- action.command is accepted only after strict validation of executable shape and allowed path roots.
let mut cmd = Command::new(&action.command);
cmd.args(&action.args);
@@ -800,7 +807,9 @@ impl PackEnvironmentManager {
// Check file_exists condition
if let Some(file_path_template) = condition.get("file_exists").and_then(|v| v.as_str()) {
let file_path = file_path_template.replace("{pack_path}", &pack_path.to_string_lossy());
return Ok(PathBuf::from(file_path).exists());
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Conditional file checks are validated to stay under trusted pack/environment roots before filesystem access.
let validated = validate_installer_path(&file_path, pack_path, &self.base_path)?;
return Ok(PathBuf::from(validated).exists());
}
// Default: condition is true
@@ -816,6 +825,93 @@ impl PackEnvironmentManager {
}
}
fn resolve_env_path(base_path: &Path, path_str: &str) -> Result<PathBuf> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- This helper normalizes env paths and preserves legacy absolute templates while still rejecting parent traversal.
let raw_path = Path::new(path_str);
if raw_path.is_absolute() {
return normalize_relative_or_absolute_path(raw_path);
}
let joined = base_path.join(raw_path);
normalize_relative_or_absolute_path(&joined)
}
fn normalize_relative_or_absolute_path(path: &Path) -> Result<PathBuf> {
let mut normalized = PathBuf::new();
for component in path.components() {
match component {
std::path::Component::Prefix(prefix) => normalized.push(prefix.as_os_str()),
std::path::Component::RootDir => normalized.push(std::path::MAIN_SEPARATOR.to_string()),
std::path::Component::CurDir => {}
std::path::Component::ParentDir => {
return Err(Error::validation(format!(
"Parent-directory traversal is not allowed in installer paths: {}",
path.display()
)));
}
std::path::Component::Normal(part) => normalized.push(part),
}
}
Ok(normalized)
}
fn validate_installer_command(command: &str, pack_path: &Path, env_path: &Path) -> Result<String> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Command validation inspects the path form before enforcing allowed executable rules.
let command_path = Path::new(command);
if command_path.is_absolute() {
return validate_installer_path(command, pack_path, env_path);
}
if command.contains(std::path::MAIN_SEPARATOR) {
return Err(Error::validation(format!(
"Installer command must be a bare executable name or an allowed absolute path: {}",
command
)));
}
let command_name_re = Regex::new(r"^[A-Za-z0-9._+-]+$").expect("valid installer regex");
if !command_name_re.is_match(command) {
return Err(Error::validation(format!(
"Installer command contains invalid characters: {}",
command
)));
}
Ok(command.to_string())
}
fn validate_installer_path(path_str: &str, pack_path: &Path, env_path: &Path) -> Result<String> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Path validation normalizes candidate installer paths before enforcing root confinement.
let path = normalize_path(Path::new(path_str));
let normalized_pack_path = normalize_path(pack_path);
let normalized_env_path = normalize_path(env_path);
if path.starts_with(&normalized_pack_path) || path.starts_with(&normalized_env_path) {
Ok(path.to_string_lossy().to_string())
} else {
Err(Error::validation(format!(
"Installer path must remain under the pack or environment directory: {}",
path_str
)))
}
}
fn normalize_path(path: &Path) -> PathBuf {
let mut normalized = PathBuf::new();
for component in path.components() {
match component {
std::path::Component::Prefix(prefix) => normalized.push(prefix.as_os_str()),
std::path::Component::RootDir => normalized.push(std::path::MAIN_SEPARATOR.to_string()),
std::path::Component::CurDir => {}
std::path::Component::ParentDir => {
normalized.pop();
}
std::path::Component::Normal(part) => normalized.push(part),
}
}
normalized
}
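// Hedged test sketch for the confinement helpers above; the paths are
// illustrative (Unix-style) and only exercise the pure path logic.
#[cfg(test)]
mod installer_path_sketch {
use super::*;
#[test]
fn resolve_env_path_rejects_parent_traversal() {
let base = Path::new("/opt/attune/runtime_envs");
assert!(resolve_env_path(base, "../escape").is_err());
assert!(resolve_env_path(base, "example/python").is_ok());
}
#[test]
fn validate_installer_path_confines_to_allowed_roots() {
let pack = Path::new("/packs/example");
let env = Path::new("/opt/attune/runtime_envs/example/python");
assert!(validate_installer_path("/packs/example/bin/setup", pack, env).is_ok());
assert!(validate_installer_path("/etc/passwd", pack, env).is_err());
}
}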
/// Collect the lowercase runtime names that require environment setup for a pack.
///
/// This queries the pack's actions, resolves their runtimes, and returns the names of the runtimes that need environment setup.

@@ -349,6 +349,7 @@ mod tests {
cache_enabled: true,
timeout: 120,
verify_checksums: true,
allowed_source_hosts: Vec::new(),
allow_http: false,
};

@@ -11,10 +11,14 @@
use super::{Checksum, InstallSource, PackIndexEntry, RegistryClient};
use crate::config::PackRegistryConfig;
use crate::error::{Error, Result};
use std::collections::HashSet;
use std::net::{IpAddr, Ipv6Addr};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::fs;
use tokio::net::lookup_host;
use tokio::process::Command;
use url::Url;
/// Progress callback type
pub type ProgressCallback = Arc<dyn Fn(ProgressEvent) + Send + Sync>;
@@ -53,6 +57,12 @@ pub struct PackInstaller {
/// Whether to verify checksums
verify_checksums: bool,
/// Whether HTTP remote sources are allowed
allow_http: bool,
/// Remote hosts allowed for archive/git installs
allowed_remote_hosts: Option<HashSet<String>>,
/// Progress callback (optional)
progress_callback: Option<ProgressCallback>,
}
@@ -106,17 +116,32 @@ impl PackInstaller {
.await
.map_err(|e| Error::internal(format!("Failed to create temp directory: {}", e)))?;
let (registry_client, verify_checksums) = if let Some(config) = registry_config {
let (registry_client, verify_checksums, allow_http, allowed_remote_hosts) =
if let Some(config) = registry_config {
let verify_checksums = config.verify_checksums;
(Some(RegistryClient::new(config)?), verify_checksums)
let allow_http = config.allow_http;
let allowed_remote_hosts = collect_allowed_remote_hosts(&config)?;
let allowed_remote_hosts = if allowed_remote_hosts.is_empty() {
None
} else {
(None, false)
Some(allowed_remote_hosts)
};
(
Some(RegistryClient::new(config)?),
verify_checksums,
allow_http,
allowed_remote_hosts,
)
} else {
(None, false, false, None)
};
Ok(Self {
temp_dir,
registry_client,
verify_checksums,
allow_http,
allowed_remote_hosts,
progress_callback: None,
})
}
@@ -152,6 +177,7 @@ impl PackInstaller {
/// Install from git repository
async fn install_from_git(&self, url: &str, git_ref: Option<&str>) -> Result<InstalledPack> {
self.validate_git_source(url).await?;
tracing::info!("Installing pack from git: {} (ref: {:?})", url, git_ref);
self.report_progress(ProgressEvent::StepStarted {
@@ -405,10 +431,12 @@ impl PackInstaller {
/// Download an archive from a URL
async fn download_archive(&self, url: &str) -> Result<PathBuf> {
let parsed_url = self.validate_remote_url(url).await?;
let client = reqwest::Client::new();
// nosemgrep: rust.actix.ssrf.reqwest-taint.reqwest-taint -- Remote source URLs are restricted to configured allowlisted hosts, HTTPS, and public IPs before request execution.
let response = client
.get(url)
.get(parsed_url.clone())
.send()
.await
.map_err(|e| Error::internal(format!("Failed to download archive: {}", e)))?;
@@ -421,11 +449,7 @@ impl PackInstaller {
}
// Determine filename from URL
let filename = url
.split('/')
.next_back()
.unwrap_or("archive.zip")
.to_string();
let filename = archive_filename_from_url(&parsed_url);
let archive_path = self.temp_dir.join(&filename);
@@ -442,6 +466,116 @@ impl PackInstaller {
Ok(archive_path)
}
async fn validate_remote_url(&self, raw_url: &str) -> Result<Url> {
let parsed = Url::parse(raw_url)
.map_err(|e| Error::validation(format!("Invalid remote URL '{}': {}", raw_url, e)))?;
if parsed.scheme() != "https" && !(self.allow_http && parsed.scheme() == "http") {
return Err(Error::validation(format!(
"Remote URL must use https{}: {}",
if self.allow_http {
" or http when pack_registry.allow_http is enabled"
} else {
""
},
raw_url
)));
}
if !parsed.username().is_empty() || parsed.password().is_some() {
return Err(Error::validation(
"Remote URLs with embedded credentials are not allowed".to_string(),
));
}
let host = parsed.host_str().ok_or_else(|| {
Error::validation(format!("Remote URL is missing a host: {}", raw_url))
})?;
let normalized_host = host.to_ascii_lowercase();
if normalized_host == "localhost" {
return Err(Error::validation(format!(
"Remote URL host is not allowed: {}",
host
)));
}
if let Some(allowed_remote_hosts) = &self.allowed_remote_hosts {
if !allowed_remote_hosts.contains(&normalized_host) {
return Err(Error::validation(format!(
"Remote URL host '{}' is not in the configured allowlist. Add it to pack_registry.allowed_source_hosts.",
host
)));
}
}
if let Some(ip) = parsed.host().and_then(|host| match host {
url::Host::Ipv4(ip) => Some(IpAddr::V4(ip)),
url::Host::Ipv6(ip) => Some(IpAddr::V6(ip)),
url::Host::Domain(_) => None,
}) {
ensure_public_ip(ip)?;
}
let port = parsed.port_or_known_default().ok_or_else(|| {
Error::validation(format!("Remote URL is missing a usable port: {}", raw_url))
})?;
let resolved = lookup_host((host, port))
.await
.map_err(|e| Error::validation(format!("Failed to resolve host '{}': {}", host, e)))?;
let mut saw_address = false;
for addr in resolved {
saw_address = true;
ensure_public_ip(addr.ip())?;
}
if !saw_address {
return Err(Error::validation(format!(
"Remote URL host did not resolve to any addresses: {}",
host
)));
}
Ok(parsed)
}
async fn validate_git_source(&self, raw_url: &str) -> Result<()> {
if raw_url.starts_with("http://") || raw_url.starts_with("https://") {
self.validate_remote_url(raw_url).await?;
return Ok(());
}
if let Some(host) = extract_git_host(raw_url) {
self.validate_remote_host(&host)?;
}
Ok(())
}
fn validate_remote_host(&self, host: &str) -> Result<()> {
let normalized_host = host.to_ascii_lowercase();
if normalized_host == "localhost" {
return Err(Error::validation(format!(
"Remote host is not allowed: {}",
host
)));
}
if let Some(allowed_remote_hosts) = &self.allowed_remote_hosts {
if !allowed_remote_hosts.contains(&normalized_host) {
return Err(Error::validation(format!(
"Remote host '{}' is not in the configured allowlist. Add it to pack_registry.allowed_source_hosts.",
host
)));
}
}
Ok(())
}
/// Extract an archive (zip or tar.gz)
async fn extract_archive(&self, archive_path: &Path) -> Result<PathBuf> {
let extract_dir = self.create_temp_dir().await?;
@@ -583,6 +717,7 @@ impl PackInstaller {
}
// Check in first subdirectory (common for GitHub archives)
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Archive inspection is limited to the temporary extraction directory created by this installer.
let mut entries = fs::read_dir(base_dir)
.await
.map_err(|e| Error::internal(format!("Failed to read directory: {}", e)))?;
@@ -618,6 +753,7 @@ impl PackInstaller {
})?;
// Read source directory
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Directory copy operates on installer-managed local paths, not request-derived paths.
let mut entries = fs::read_dir(src)
.await
.map_err(|e| Error::internal(format!("Failed to read source directory: {}", e)))?;
@@ -674,6 +810,111 @@ impl PackInstaller {
}
}
fn collect_allowed_remote_hosts(config: &PackRegistryConfig) -> Result<HashSet<String>> {
let mut hosts = HashSet::new();
for index in &config.indices {
if !index.enabled {
continue;
}
let parsed = Url::parse(&index.url).map_err(|e| {
Error::validation(format!("Invalid registry index URL '{}': {}", index.url, e))
})?;
let host = parsed.host_str().ok_or_else(|| {
Error::validation(format!(
"Registry index URL '{}' is missing a host",
index.url
))
})?;
hosts.insert(host.to_ascii_lowercase());
}
for host in &config.allowed_source_hosts {
let normalized = host.trim().to_ascii_lowercase();
if !normalized.is_empty() {
hosts.insert(normalized);
}
}
Ok(hosts)
}
fn extract_git_host(raw_url: &str) -> Option<String> {
if let Ok(parsed) = Url::parse(raw_url) {
return parsed.host_str().map(|host| host.to_ascii_lowercase());
}
raw_url.split_once('@').and_then(|(_, rest)| {
rest.split_once(':')
.map(|(host, _)| host.to_ascii_lowercase())
})
}
fn archive_filename_from_url(url: &Url) -> String {
let raw_name = url
.path_segments()
.and_then(|mut segments| segments.rfind(|segment| !segment.is_empty()))
.unwrap_or("archive.bin");
let sanitized: String = raw_name
.chars()
.map(|ch| match ch {
'a'..='z' | 'A'..='Z' | '0'..='9' | '.' | '-' | '_' => ch,
_ => '_',
})
.collect();
let filename = sanitized.trim_matches('.');
if filename.is_empty() {
"archive.bin".to_string()
} else {
filename.to_string()
}
}
fn ensure_public_ip(ip: IpAddr) -> Result<()> {
let is_blocked = match ip {
IpAddr::V4(ip) => {
let octets = ip.octets();
let is_documentation_range = matches!(
octets,
[192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _]
);
ip.is_private()
|| ip.is_loopback()
|| ip.is_link_local()
|| ip.is_multicast()
|| ip.is_broadcast()
|| is_documentation_range
|| ip.is_unspecified()
|| octets[0] == 0
}
IpAddr::V6(ip) => {
let segments = ip.segments();
let is_documentation_range = segments[0] == 0x2001 && segments[1] == 0x0db8;
ip.is_loopback()
|| ip.is_unspecified()
|| ip.is_multicast()
|| ip.is_unique_local()
|| ip.is_unicast_link_local()
|| is_documentation_range
|| ip == Ipv6Addr::LOCALHOST
}
};
if is_blocked {
return Err(Error::validation(format!(
"Remote URL resolved to a non-public address: {}",
ip
)));
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
@@ -721,4 +962,52 @@ mod tests {
assert!(matches!(source, InstallSource::Git { .. }));
}
#[test]
fn test_archive_filename_from_url_sanitizes_path_segments() {
let url = Url::parse("https://example.com/releases/../../pack.zip?token=x").unwrap();
assert_eq!(archive_filename_from_url(&url), "pack.zip");
}
#[test]
fn test_ensure_public_ip_rejects_private_ipv4() {
let err = ensure_public_ip(IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1))).unwrap_err();
assert!(err.to_string().contains("non-public"));
}
#[test]
fn test_collect_allowed_remote_hosts_includes_indices_and_overrides() {
let config = PackRegistryConfig {
indices: vec![crate::config::RegistryIndexConfig {
url: "https://registry.example.com/index.json".to_string(),
priority: 1,
enabled: true,
name: None,
headers: std::collections::HashMap::new(),
}],
allowed_source_hosts: vec!["github.com".to_string(), "cdn.example.com".to_string()],
..Default::default()
};
let hosts = collect_allowed_remote_hosts(&config).unwrap();
assert!(hosts.contains("registry.example.com"));
assert!(hosts.contains("github.com"));
assert!(hosts.contains("cdn.example.com"));
}
#[test]
fn test_extract_git_host_from_scp_style_source() {
assert_eq!(
extract_git_host("git@github.com:org/repo.git"),
Some("github.com".to_string())
);
}
#[test]
fn test_extract_git_host_from_git_scheme_source() {
assert_eq!(
extract_git_host("git://github.com/org/repo.git"),
Some("github.com".to_string())
);
}
}

@@ -31,7 +31,7 @@
//! can reference the same workflow file with different configurations.
use std::collections::HashMap;
use std::path::Path;
use std::path::{Path, PathBuf};
use sqlx::PgPool;
use tracing::{debug, info, warn};
@@ -725,8 +725,7 @@ impl<'a> PackComponentLoader<'a> {
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
.map(|s| s.to_string());
let enabled = data
.get("enabled")
@@ -745,7 +744,10 @@ impl<'a> PackComponentLoader<'a> {
if let Some(existing) = TriggerRepository::find_by_ref(self.pool, &trigger_ref).await? {
let update_input = UpdateTriggerInput {
label: Some(label),
description: Some(Patch::Set(description)),
description: Some(match description {
Some(description) => Patch::Set(description),
None => Patch::Clear,
}),
enabled: Some(enabled),
param_schema: Some(match param_schema {
Some(value) => Patch::Set(value),
@@ -778,7 +780,7 @@ impl<'a> PackComponentLoader<'a> {
pack: Some(self.pack_id),
pack_ref: Some(self.pack_ref.clone()),
label,
description: Some(description),
description,
enabled,
param_schema,
out_schema,
@@ -858,8 +860,7 @@ impl<'a> PackComponentLoader<'a> {
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
.map(|s| s.to_string());
// ── Workflow file handling ──────────────────────────────────
// If the action declares `workflow_file`, load the referenced
@@ -876,7 +877,7 @@ impl<'a> PackComponentLoader<'a> {
wf_path,
&action_ref,
&label,
&description,
description.as_deref().unwrap_or(""),
&data,
)
.await
@@ -956,7 +957,10 @@ impl<'a> PackComponentLoader<'a> {
if let Some(existing) = ActionRepository::find_by_ref(self.pool, &action_ref).await? {
let update_input = UpdateActionInput {
label: Some(label),
description: Some(description),
description: Some(match description {
Some(description) => Patch::Set(description),
None => Patch::Clear,
}),
entrypoint: Some(entrypoint),
runtime: runtime_id,
runtime_version_constraint: Some(match runtime_version_constraint {
@@ -1087,7 +1091,10 @@ impl<'a> PackComponentLoader<'a> {
action_description: &str,
action_data: &serde_yaml_ng::Value,
) -> Result<Id> {
let full_path = actions_dir.join(workflow_file_path);
let pack_root = actions_dir.parent().ok_or_else(|| {
Error::validation("Actions directory must live inside a pack directory".to_string())
})?;
let full_path = resolve_pack_relative_path(pack_root, actions_dir, workflow_file_path)?;
if !full_path.exists() {
return Err(Error::validation(format!(
"Workflow file '{}' not found at '{}'",
@@ -1096,6 +1103,7 @@ impl<'a> PackComponentLoader<'a> {
)));
}
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- The workflow path is normalized and confined to the pack root before this local read.
let content = std::fs::read_to_string(&full_path).map_err(|e| {
Error::io(format!(
"Failed to read workflow file '{}': {}",
@@ -1310,8 +1318,7 @@ impl<'a> PackComponentLoader<'a> {
let description = data
.get("description")
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string();
.map(|s| s.to_string());
let enabled = data
.get("enabled")
@@ -1347,7 +1354,10 @@ impl<'a> PackComponentLoader<'a> {
if let Some(existing) = SensorRepository::find_by_ref(self.pool, &sensor_ref).await? {
let update_input = UpdateSensorInput {
label: Some(label),
description: Some(description),
description: Some(match description {
Some(description) => Patch::Set(description),
None => Patch::Clear,
}),
entrypoint: Some(entrypoint),
runtime: Some(sensor_runtime_id),
runtime_ref: Some(sensor_runtime_ref.clone()),
@@ -1643,11 +1653,60 @@ impl<'a> PackComponentLoader<'a> {
}
}
fn resolve_pack_relative_path(
pack_root: &Path,
base_dir: &Path,
relative_path: &str,
) -> Result<PathBuf> {
let canonical_pack_root = pack_root.canonicalize().map_err(|e| {
Error::io(format!(
"Failed to resolve pack root '{}': {}",
pack_root.display(),
e
))
})?;
let canonical_base_dir = base_dir.canonicalize().map_err(|e| {
Error::io(format!(
"Failed to resolve base directory '{}': {}",
base_dir.display(),
e
))
})?;
let canonical_candidate = normalize_path_from_base(&canonical_base_dir, relative_path);
if !canonical_candidate.starts_with(&canonical_pack_root) {
return Err(Error::validation(format!(
"Resolved path '{}' escapes pack root '{}'",
canonical_candidate.display(),
canonical_pack_root.display()
)));
}
Ok(canonical_candidate)
}
fn normalize_path_from_base(base: &Path, relative_path: &str) -> PathBuf {
let mut normalized = PathBuf::new();
for component in base.join(relative_path).components() {
match component {
std::path::Component::Prefix(prefix) => normalized.push(prefix.as_os_str()),
std::path::Component::RootDir => normalized.push(std::path::MAIN_SEPARATOR.to_string()),
std::path::Component::CurDir => {}
std::path::Component::ParentDir => {
normalized.pop();
}
std::path::Component::Normal(part) => normalized.push(part),
}
}
normalized
}
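// Sketch of the traversal-collapsing behavior (Unix-style paths assumed;
// `resolve_pack_relative_path` itself needs real directories to canonicalize,
// so only the pure helper is exercised here).
#[cfg(test)]
mod pack_path_sketch {
use super::*;
#[test]
fn normalize_path_from_base_collapses_parent_components() {
let base = Path::new("/packs/example/actions");
assert_eq!(
normalize_path_from_base(base, "../workflows/deploy.yaml"),
PathBuf::from("/packs/example/workflows/deploy.yaml")
);
}
}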
/// Read all YAML files from a directory, returning `(filename, content)` pairs
/// sorted by filename for deterministic ordering.
fn read_yaml_files(dir: &Path) -> Result<Vec<(String, String)>> {
let mut files = Vec::new();
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Pack loader scans pack-owned directories on disk after selecting the pack root.
let entries = std::fs::read_dir(dir)
.map_err(|e| Error::io(format!("Failed to read directory {}: {}", dir.display(), e)))?;
@@ -1670,6 +1729,7 @@ fn read_yaml_files(dir: &Path) -> Result<Vec<(String, String)>> {
let path = entry.path();
let filename = entry.file_name().to_string_lossy().to_string();
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- YAML files are read only after being discovered under the selected pack directory.
let content = std::fs::read_to_string(&path)
.map_err(|e| Error::io(format!("Failed to read file {}: {}", path.display(), e)))?;

@@ -292,6 +292,7 @@ fn copy_dir_all(src: &Path, dst: &Path) -> Result<()> {
))
})?;
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Pack storage copy recursively processes validated local directories under the configured pack store.
for entry in fs::read_dir(src).map_err(|e| {
Error::io(format!(
"Failed to read source directory {}: {}",

@@ -21,10 +21,6 @@ pub enum Resource {
Inquiries,
Keys,
Artifacts,
Workflows,
Webhooks,
Analytics,
History,
Identities,
Permissions,
}
@@ -40,6 +36,7 @@ pub enum Action {
Cancel,
Respond,
Manage,
Decrypt,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
@@ -69,6 +66,8 @@ pub struct GrantConstraints {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub owner_types: Option<Vec<OwnerType>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub owner_refs: Option<Vec<String>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub visibility: Option<Vec<ArtifactVisibility>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub execution_scope: Option<ExecutionScopeConstraint>,
@@ -99,6 +98,7 @@ pub struct AuthorizationContext {
pub pack_ref: Option<String>,
pub owner_identity_id: Option<Id>,
pub owner_type: Option<OwnerType>,
pub owner_ref: Option<String>,
pub visibility: Option<ArtifactVisibility>,
pub encrypted: Option<bool>,
pub execution_owner_identity_id: Option<Id>,
@@ -115,6 +115,7 @@ impl AuthorizationContext {
pack_ref: None,
owner_identity_id: None,
owner_type: None,
owner_ref: None,
visibility: None,
encrypted: None,
execution_owner_identity_id: None,
@@ -162,6 +163,15 @@ impl Grant {
}
}
if let Some(owner_refs) = &constraints.owner_refs {
let Some(owner_ref) = &ctx.owner_ref else {
return false;
};
if !owner_refs.contains(owner_ref) {
return false;
}
}
if let Some(visibility) = &constraints.visibility {
let Some(target_visibility) = ctx.visibility else {
return false;
@@ -289,4 +299,28 @@ mod tests {
.insert("team".to_string(), json!("infra"));
assert!(!grant.allows(Resource::Packs, Action::Read, &ctx));
}
#[test]
fn owner_ref_constraint_requires_exact_value_match() {
let grant = Grant {
resource: Resource::Artifacts,
actions: vec![Action::Read],
constraints: Some(GrantConstraints {
owner_types: Some(vec![OwnerType::Pack]),
owner_refs: Some(vec!["python_example".to_string()]),
..Default::default()
}),
};
let mut ctx = AuthorizationContext::new(1);
ctx.owner_type = Some(OwnerType::Pack);
ctx.owner_ref = Some("python_example".to_string());
assert!(grant.allows(Resource::Artifacts, Action::Read, &ctx));
ctx.owner_ref = Some("other_pack".to_string());
assert!(!grant.allows(Resource::Artifacts, Action::Read, &ctx));
ctx.owner_ref = None;
assert!(!grant.allows(Resource::Artifacts, Action::Read, &ctx));
}
}

@@ -51,7 +51,7 @@ pub struct CreateActionInput {
pub pack: Id,
pub pack_ref: String,
pub label: String,
pub description: String,
pub description: Option<String>,
pub entrypoint: String,
pub runtime: Option<Id>,
pub runtime_version_constraint: Option<String>,
@@ -64,7 +64,7 @@ pub struct CreateActionInput {
#[derive(Debug, Clone, Default)]
pub struct UpdateActionInput {
pub label: Option<String>,
pub description: Option<String>,
pub description: Option<Patch<String>>,
pub entrypoint: Option<String>,
pub runtime: Option<Id>,
pub runtime_version_constraint: Option<Patch<String>>,
@@ -210,7 +210,10 @@ impl Update for ActionRepository {
query.push(", ");
}
query.push("description = ");
query.push_bind(description);
match description {
Patch::Set(value) => query.push_bind(value),
Patch::Clear => query.push_bind(Option::<String>::None),
};
has_updates = true;
}
@@ -568,7 +571,7 @@ impl Repository for PolicyRepository {
type Entity = Policy;
fn table_name() -> &'static str {
"policies"
"policy"
}
}
@@ -609,7 +612,7 @@ impl FindById for PolicyRepository {
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policies
FROM policy
WHERE id = $1
"#,
)
@@ -631,7 +634,7 @@ impl FindByRef for PolicyRepository {
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policies
FROM policy
WHERE ref = $1
"#,
)
@@ -653,7 +656,7 @@ impl List for PolicyRepository {
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policies
FROM policy
ORDER BY ref ASC
"#,
)
@@ -675,7 +678,7 @@ impl Create for PolicyRepository {
// Try to insert - database will enforce uniqueness constraint
let policy = sqlx::query_as::<_, Policy>(
r#"
INSERT INTO policies (ref, pack, pack_ref, action, action_ref, parameters,
INSERT INTO policy (ref, pack, pack_ref, action, action_ref, parameters,
method, threshold, name, description, tags)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
RETURNING id, ref, pack, pack_ref, action, action_ref, parameters, method,
@@ -717,7 +720,7 @@ impl Update for PolicyRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
let mut query = QueryBuilder::new("UPDATE policies SET ");
let mut query = QueryBuilder::new("UPDATE policy SET ");
let mut has_updates = false;
if let Some(parameters) = &input.parameters {
@@ -795,7 +798,7 @@ impl Delete for PolicyRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("DELETE FROM policies WHERE id = $1")
let result = sqlx::query("DELETE FROM policy WHERE id = $1")
.bind(id)
.execute(executor)
.await?;
@@ -814,7 +817,7 @@ impl PolicyRepository {
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policies
FROM policy
WHERE action = $1
ORDER BY ref ASC
"#,
@@ -835,7 +838,7 @@ impl PolicyRepository {
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policies
FROM policy
WHERE $1 = ANY(tags)
ORDER BY ref ASC
"#,
@@ -846,4 +849,69 @@ impl PolicyRepository {
Ok(policies)
}
/// Find the most recent action-specific policy.
pub async fn find_latest_by_action<'e, E>(executor: E, action_id: Id) -> Result<Option<Policy>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let policy = sqlx::query_as::<_, Policy>(
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policy
WHERE action = $1
ORDER BY created DESC
LIMIT 1
"#,
)
.bind(action_id)
.fetch_optional(executor)
.await?;
Ok(policy)
}
/// Find the most recent pack-specific policy.
pub async fn find_latest_by_pack<'e, E>(executor: E, pack_id: Id) -> Result<Option<Policy>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let policy = sqlx::query_as::<_, Policy>(
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policy
WHERE pack = $1 AND action IS NULL
ORDER BY created DESC
LIMIT 1
"#,
)
.bind(pack_id)
.fetch_optional(executor)
.await?;
Ok(policy)
}
/// Find the most recent global policy.
pub async fn find_latest_global<'e, E>(executor: E) -> Result<Option<Policy>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let policy = sqlx::query_as::<_, Policy>(
r#"
SELECT id, ref, pack, pack_ref, action, action_ref, parameters, method,
threshold, name, description, tags, created, updated
FROM policy
WHERE pack IS NULL AND action IS NULL
ORDER BY created DESC
LIMIT 1
"#,
)
.fetch_optional(executor)
.await?;
Ok(policy)
}
}
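// How the three lookups compose into an action -> pack -> global fallback;
// `resolve_effective_policy` is a hypothetical helper sketched for
// illustration, not part of this change.
pub async fn resolve_effective_policy(
pool: &PgPool,
action_id: Id,
pack_id: Id,
) -> Result<Option<Policy>> {
if let Some(policy) = PolicyRepository::find_latest_by_action(pool, action_id).await? {
return Ok(Some(policy));
}
if let Some(policy) = PolicyRepository::find_latest_by_pack(pool, pack_id).await? {
return Ok(Some(policy));
}
PolicyRepository::find_latest_global(pool).await
}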

@@ -80,7 +80,7 @@ pub struct EnforcementVolumeBucket {
pub enforcement_count: i64,
}
/// A single hourly bucket of execution volume (from execution hypertable directly).
/// A single hourly bucket of execution volume (from the execution table directly).
#[derive(Debug, Clone, Serialize, FromRow)]
pub struct ExecutionVolumeBucket {
/// Start of the 1-hour bucket
@@ -468,7 +468,7 @@ impl AnalyticsRepository {
}
// =======================================================================
// Execution volume (from execution hypertable directly)
// Execution volume (from the execution table directly)
// =======================================================================
/// Query the `execution_volume_hourly` continuous aggregate for execution

@@ -577,6 +577,14 @@ pub struct CreateArtifactVersionInput {
}
impl ArtifactVersionRepository {
fn select_columns_with_alias(alias: &str) -> String {
format!(
"{alias}.id, {alias}.artifact, {alias}.version, {alias}.content_type, \
{alias}.size_bytes, NULL::bytea AS content, {alias}.content_json, \
{alias}.file_path, {alias}.meta, {alias}.created_by, {alias}.created"
)
}
/// Find a version by ID (without binary content for performance)
pub async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<ArtifactVersion>>
where
@@ -812,14 +820,11 @@ impl ArtifactVersionRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
let query = format!(
"SELECT av.{} \
"SELECT {} \
FROM artifact_version av \
JOIN artifact a ON av.artifact = a.id \
WHERE a.execution = $1 AND av.file_path IS NOT NULL",
artifact_version::SELECT_COLUMNS
.split(", ")
.collect::<Vec<_>>()
.join(", av.")
Self::select_columns_with_alias("av")
);
sqlx::query_as::<_, ArtifactVersion>(&query)
.bind(execution_id)
@@ -847,3 +852,18 @@ impl ArtifactVersionRepository {
.map_err(Into::into)
}
}
#[cfg(test)]
mod tests {
use super::ArtifactVersionRepository;
#[test]
fn aliased_select_columns_keep_null_content_expression_unqualified() {
let columns = ArtifactVersionRepository::select_columns_with_alias("av");
assert!(columns.contains("av.id"));
assert!(columns.contains("av.file_path"));
assert!(columns.contains("NULL::bytea AS content"));
assert!(!columns.contains("av.NULL::bytea AS content"));
}
}

@@ -65,6 +65,12 @@ pub struct EnforcementSearchResult {
pub total: u64,
}
#[derive(Debug, Clone)]
pub struct EnforcementCreateOrGetResult {
pub enforcement: Enforcement,
pub created: bool,
}
/// Repository for Event operations
pub struct EventRepository;
@@ -416,7 +422,115 @@ impl Update for EnforcementRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
// Build update query
if input.status.is_none() && input.payload.is_none() && input.resolved_at.is_none() {
return Self::get_by_id(executor, id).await;
}
Self::update_with_locator(executor, input, |query| {
query.push(" WHERE id = ");
query.push_bind(id);
})
.await
}
}
#[async_trait::async_trait]
impl Delete for EnforcementRepository {
async fn delete<'e, E>(executor: E, id: i64) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("DELETE FROM enforcement WHERE id = $1")
.bind(id)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
}
}
impl EnforcementRepository {
async fn update_with_locator<'e, E, F>(
executor: E,
input: UpdateEnforcementInput,
where_clause: F,
) -> Result<Enforcement>
where
E: Executor<'e, Database = Postgres> + 'e,
F: FnOnce(&mut QueryBuilder<'_, Postgres>),
{
let mut query = QueryBuilder::new("UPDATE enforcement SET ");
let mut has_updates = false;
if let Some(status) = input.status {
query.push("status = ");
query.push_bind(status);
has_updates = true;
}
if let Some(payload) = &input.payload {
if has_updates {
query.push(", ");
}
query.push("payload = ");
query.push_bind(payload);
has_updates = true;
}
if let Some(resolved_at) = input.resolved_at {
if has_updates {
query.push(", ");
}
query.push("resolved_at = ");
query.push_bind(resolved_at);
}
where_clause(&mut query);
query.push(
" RETURNING id, rule, rule_ref, trigger_ref, config, event, status, payload, \
condition, conditions, created, resolved_at",
);
let enforcement = query
.build_query_as::<Enforcement>()
.fetch_one(executor)
.await?;
Ok(enforcement)
}
/// Update an enforcement using the loaded row's primary key.
pub async fn update_loaded<'e, E>(
executor: E,
enforcement: &Enforcement,
input: UpdateEnforcementInput,
) -> Result<Enforcement>
where
E: Executor<'e, Database = Postgres> + 'e,
{
if input.status.is_none() && input.payload.is_none() && input.resolved_at.is_none() {
return Ok(enforcement.clone());
}
Self::update_with_locator(executor, input, |query| {
query.push(" WHERE id = ");
query.push_bind(enforcement.id);
})
.await
}
pub async fn update_loaded_if_status<'e, E>(
executor: E,
enforcement: &Enforcement,
expected_status: EnforcementStatus,
input: UpdateEnforcementInput,
) -> Result<Option<Enforcement>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
if input.status.is_none() && input.payload.is_none() && input.resolved_at.is_none() {
return Ok(Some(enforcement.clone()));
}
let mut query = QueryBuilder::new("UPDATE enforcement SET ");
let mut has_updates = false;
@@ -446,39 +560,25 @@ impl Update for EnforcementRepository {
}
if !has_updates {
// No updates requested; return the already-loaded entity
return Self::get_by_id(executor, id).await;
return Ok(Some(enforcement.clone()));
}
query.push(" WHERE id = ");
query.push_bind(id);
query.push(" RETURNING id, rule, rule_ref, trigger_ref, config, event, status, payload, condition, conditions, created, resolved_at");
query.push_bind(enforcement.id);
query.push(" AND status = ");
query.push_bind(expected_status);
query.push(
" RETURNING id, rule, rule_ref, trigger_ref, config, event, status, payload, \
condition, conditions, created, resolved_at",
);
let enforcement = query
query
.build_query_as::<Enforcement>()
.fetch_one(executor)
.await?;
Ok(enforcement)
}
.fetch_optional(executor)
.await
.map_err(Into::into)
}
#[async_trait::async_trait]
impl Delete for EnforcementRepository {
async fn delete<'e, E>(executor: E, id: i64) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("DELETE FROM enforcement WHERE id = $1")
.bind(id)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
}
}
impl EnforcementRepository {
/// Find enforcements by rule ID
pub async fn find_by_rule<'e, E>(executor: E, rule_id: Id) -> Result<Vec<Enforcement>>
where
@@ -545,6 +645,90 @@ impl EnforcementRepository {
Ok(enforcements)
}
pub async fn find_by_rule_and_event<'e, E>(
executor: E,
rule_id: Id,
event_id: Id,
) -> Result<Option<Enforcement>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Enforcement>(
r#"
SELECT id, rule, rule_ref, trigger_ref, config, event, status, payload,
condition, conditions, created, resolved_at
FROM enforcement
WHERE rule = $1 AND event = $2
LIMIT 1
"#,
)
.bind(rule_id)
.bind(event_id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
pub async fn create_or_get_by_rule_event<'e, E>(
executor: E,
input: CreateEnforcementInput,
) -> Result<EnforcementCreateOrGetResult>
where
E: Executor<'e, Database = Postgres> + Copy + 'e,
{
let (Some(rule_id), Some(event_id)) = (input.rule, input.event) else {
let enforcement = Self::create(executor, input).await?;
return Ok(EnforcementCreateOrGetResult {
enforcement,
created: true,
});
};
let inserted = sqlx::query_as::<_, Enforcement>(
r#"
INSERT INTO enforcement (rule, rule_ref, trigger_ref, config, event, status,
payload, condition, conditions)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (rule, event) WHERE rule IS NOT NULL AND event IS NOT NULL DO NOTHING
RETURNING id, rule, rule_ref, trigger_ref, config, event, status, payload,
condition, conditions, created, resolved_at
"#,
)
.bind(input.rule)
.bind(&input.rule_ref)
.bind(&input.trigger_ref)
.bind(&input.config)
.bind(input.event)
.bind(input.status)
.bind(&input.payload)
.bind(input.condition)
.bind(&input.conditions)
.fetch_optional(executor)
.await?;
if let Some(enforcement) = inserted {
return Ok(EnforcementCreateOrGetResult {
enforcement,
created: true,
});
}
let enforcement = Self::find_by_rule_and_event(executor, rule_id, event_id)
.await?
.ok_or_else(|| {
anyhow::anyhow!(
"enforcement for rule {} and event {} disappeared after dedupe conflict",
rule_id,
event_id
)
})?;
Ok(EnforcementCreateOrGetResult {
enforcement,
created: false,
})
}
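// Hypothetical call site: concurrent evaluators race to create the
// enforcement and only the first writer sees `created == true`:
//
//     let outcome = EnforcementRepository::create_or_get_by_rule_event(&pool, input).await?;
//     if !outcome.created {
//         // another evaluator already recorded this rule/event pair
//     }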
/// Search enforcements with all filters pushed into SQL.
///
/// All filter fields are combinable (AND). Pagination is server-side.

@@ -4,7 +4,8 @@ use chrono::{DateTime, Utc};
use crate::models::{enums::ExecutionStatus, execution::*, Id, JsonDict};
use crate::Result;
use sqlx::{Executor, Postgres, QueryBuilder};
use sqlx::{Executor, PgConnection, PgPool, Postgres, QueryBuilder};
use tokio::time::{sleep, Duration};
use super::{Create, Delete, FindById, List, Repository, Update};
@@ -41,6 +42,18 @@ pub struct ExecutionSearchResult {
pub total: u64,
}
#[derive(Debug, Clone)]
pub struct WorkflowTaskExecutionCreateOrGetResult {
pub execution: Execution,
pub created: bool,
}
#[derive(Debug, Clone)]
pub struct EnforcementExecutionCreateOrGetResult {
pub execution: Execution,
pub created: bool,
}
/// An execution row with optional `rule_ref` / `trigger_ref` populated from
/// the joined `enforcement` table. This avoids a separate in-memory lookup.
#[derive(Debug, Clone, sqlx::FromRow)]
@@ -191,7 +204,577 @@ impl Update for ExecutionRepository {
where
E: Executor<'e, Database = Postgres> + 'e,
{
// Build update query
if input.status.is_none()
&& input.result.is_none()
&& input.executor.is_none()
&& input.worker.is_none()
&& input.started_at.is_none()
&& input.workflow_task.is_none()
{
return Self::get_by_id(executor, id).await;
}
Self::update_with_locator(executor, input, |query| {
query.push(" WHERE id = ").push_bind(id);
})
.await
}
}
impl ExecutionRepository {
pub async fn find_top_level_by_enforcement<'e, E>(
executor: E,
enforcement_id: Id,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let sql = format!(
"SELECT {SELECT_COLUMNS} \
FROM execution \
WHERE enforcement = $1 \
AND parent IS NULL \
AND (config IS NULL OR NOT (config ? 'retry_of')) \
ORDER BY created ASC \
LIMIT 1"
);
sqlx::query_as::<_, Execution>(&sql)
.bind(enforcement_id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
pub async fn create_top_level_for_enforcement_if_absent<'e, E>(
executor: E,
input: CreateExecutionInput,
enforcement_id: Id,
) -> Result<EnforcementExecutionCreateOrGetResult>
where
E: Executor<'e, Database = Postgres> + Copy + 'e,
{
let inserted = sqlx::query_as::<_, Execution>(&format!(
"INSERT INTO execution \
(action, action_ref, config, env_vars, parent, enforcement, executor, worker, status, result, workflow_task) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) \
ON CONFLICT (enforcement) \
WHERE enforcement IS NOT NULL \
AND parent IS NULL \
AND (config IS NULL OR NOT (config ? 'retry_of')) \
DO NOTHING \
RETURNING {SELECT_COLUMNS}"
))
.bind(input.action)
.bind(&input.action_ref)
.bind(&input.config)
.bind(&input.env_vars)
.bind(input.parent)
.bind(input.enforcement)
.bind(input.executor)
.bind(input.worker)
.bind(input.status)
.bind(&input.result)
.bind(sqlx::types::Json(&input.workflow_task))
.fetch_optional(executor)
.await?;
if let Some(execution) = inserted {
return Ok(EnforcementExecutionCreateOrGetResult {
execution,
created: true,
});
}
let execution = Self::find_top_level_by_enforcement(executor, enforcement_id)
.await?
.ok_or_else(|| {
anyhow::anyhow!(
"top-level execution for enforcement {} disappeared after dedupe conflict",
enforcement_id
)
})?;
Ok(EnforcementExecutionCreateOrGetResult {
execution,
created: false,
})
}
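// [Editor's sketch] Assumed pairing with the enforcement dedupe: once an
// enforcement row is settled, the top-level execution is created with the same
// if-absent semantics, so a restarted executor replaying an event cannot spawn a
// second execution tree. `pool` and the inputs here are illustrative.
#[allow(dead_code)]
async fn example_top_level_if_absent(
pool: &sqlx::PgPool,
input: CreateExecutionInput,
enforcement_id: Id,
) -> Result<Execution> {
let outcome = ExecutionRepository::create_top_level_for_enforcement_if_absent(
pool,
input,
enforcement_id,
)
.await?;
Ok(outcome.execution)
}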
async fn claim_workflow_task_dispatch<'e, E>(
executor: E,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let inserted: Option<(i64,)> = sqlx::query_as(
"INSERT INTO workflow_task_dispatch (workflow_execution, task_name, task_index)
VALUES ($1, $2, $3)
ON CONFLICT (workflow_execution, task_name, COALESCE(task_index, -1)) DO NOTHING
RETURNING id",
)
.bind(workflow_execution_id)
.bind(task_name)
.bind(task_index)
.fetch_optional(executor)
.await?;
Ok(inserted.is_some())
}
async fn assign_workflow_task_dispatch_execution<'e, E>(
executor: E,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
execution_id: Id,
) -> Result<()>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query(
"UPDATE workflow_task_dispatch
SET execution_id = COALESCE(execution_id, $4)
WHERE workflow_execution = $1
AND task_name = $2
AND task_index IS NOT DISTINCT FROM $3",
)
.bind(workflow_execution_id)
.bind(task_name)
.bind(task_index)
.bind(execution_id)
.execute(executor)
.await?;
Ok(())
}
async fn lock_workflow_task_dispatch<'e, E>(
executor: E,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
) -> Result<Option<Option<Id>>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let row: Option<(Option<i64>,)> = sqlx::query_as(
"SELECT execution_id
FROM workflow_task_dispatch
WHERE workflow_execution = $1
AND task_name = $2
AND task_index IS NOT DISTINCT FROM $3
FOR UPDATE",
)
.bind(workflow_execution_id)
.bind(task_name)
.bind(task_index)
.fetch_optional(executor)
.await?;
// Map the outer Option to distinguish three cases:
// - None → no row exists
// - Some(None) → row exists but execution_id is still NULL (mid-creation)
// - Some(Some(id)) → row exists with a completed execution_id
Ok(row.map(|(execution_id,)| execution_id))
}
async fn create_workflow_task_if_absent_in_conn(
conn: &mut PgConnection,
input: CreateExecutionInput,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
) -> Result<WorkflowTaskExecutionCreateOrGetResult> {
let claimed = Self::claim_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
if claimed {
let execution = Self::create(&mut *conn, input).await?;
Self::assign_workflow_task_dispatch_execution(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
execution.id,
)
.await?;
return Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: true,
});
}
let dispatch_state = Self::lock_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
match dispatch_state {
Some(Some(existing_execution_id)) => {
// Row exists with execution_id — return the existing execution.
let execution = Self::find_by_id(&mut *conn, existing_execution_id)
.await?
.ok_or_else(|| {
anyhow::anyhow!(
"workflow child execution {} missing for workflow_execution {} task '{}' index {:?}",
existing_execution_id,
workflow_execution_id,
task_name,
task_index
)
})?;
Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: false,
})
}
Some(None) => {
// Row exists but execution_id is still NULL: another transaction is
// mid-creation (between claim and assign). Retry until it's filled in.
// If the original creator's transaction rolled back, the row also
// disappears — handled by the `None` branch inside the loop.
'wait: {
for _ in 0..20_u32 {
sleep(Duration::from_millis(50)).await;
match Self::lock_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?
{
Some(Some(execution_id)) => {
let execution =
Self::find_by_id(&mut *conn, execution_id).await?.ok_or_else(
|| {
anyhow::anyhow!(
"workflow child execution {} missing for workflow_execution {} task '{}' index {:?}",
execution_id,
workflow_execution_id,
task_name,
task_index
)
},
)?;
return Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: false,
});
}
Some(None) => {} // still NULL, keep waiting
None => break 'wait, // row rolled back; fall through to re-claim
}
}
// Exhausted all retries without the execution_id being set.
return Err(anyhow::anyhow!(
"Timed out waiting for workflow task dispatch execution_id to be set \
for workflow_execution {} task '{}' index {:?}",
workflow_execution_id,
task_name,
task_index
)
.into());
}
// Row disappeared (original creator rolled back) — re-claim and create.
let re_claimed = Self::claim_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
if !re_claimed {
return Err(anyhow::anyhow!(
"Workflow task dispatch for workflow_execution {} task '{}' index {:?} \
was reclaimed by another executor after rollback",
workflow_execution_id,
task_name,
task_index
)
.into());
}
let execution = Self::create(&mut *conn, input).await?;
Self::assign_workflow_task_dispatch_execution(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
execution.id,
)
.await?;
Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: true,
})
}
None => {
// No row at all — the original INSERT was rolled back before we arrived.
// Attempt to re-claim and create as if this were a fresh dispatch.
let re_claimed = Self::claim_workflow_task_dispatch(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
)
.await?;
if !re_claimed {
return Err(anyhow::anyhow!(
"Workflow task dispatch for workflow_execution {} task '{}' index {:?} \
was claimed by another executor",
workflow_execution_id,
task_name,
task_index
)
.into());
}
let execution = Self::create(&mut *conn, input).await?;
Self::assign_workflow_task_dispatch_execution(
&mut *conn,
workflow_execution_id,
task_name,
task_index,
execution.id,
)
.await?;
Ok(WorkflowTaskExecutionCreateOrGetResult {
execution,
created: true,
})
}
}
}
pub async fn create_workflow_task_if_absent(
pool: &PgPool,
input: CreateExecutionInput,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
) -> Result<WorkflowTaskExecutionCreateOrGetResult> {
// Use sqlx's transaction API instead of raw BEGIN/COMMIT/ROLLBACK statements;
// dropping `tx` on an early return or panic then rolls the work back.
let mut tx = pool.begin().await?;
let result = Self::create_workflow_task_if_absent_in_conn(
&mut *tx,
input,
workflow_execution_id,
task_name,
task_index,
)
.await;
match result {
Ok(result) => {
tx.commit().await?;
Ok(result)
}
Err(err) => {
tx.rollback().await?;
Err(err)
}
}
}
pub async fn create_workflow_task_if_absent_with_conn(
conn: &mut PgConnection,
input: CreateExecutionInput,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
) -> Result<WorkflowTaskExecutionCreateOrGetResult> {
Self::create_workflow_task_if_absent_in_conn(
conn,
input,
workflow_execution_id,
task_name,
task_index,
)
.await
}
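// [Editor's sketch] Assumed dispatch-side call into the claim/assign protocol
// above: each (workflow_execution, task_name, task_index) triple maps to exactly
// one child execution across concurrent executors. The task name and index here
// are hypothetical.
#[allow(dead_code)]
async fn example_dispatch_workflow_task(
pool: &PgPool,
input: CreateExecutionInput,
workflow_execution_id: Id,
) -> Result<()> {
let result = ExecutionRepository::create_workflow_task_if_absent(
pool,
input,
workflow_execution_id,
"build", // hypothetical task name
Some(0), // first fan-out slot
)
.await?;
if result.created {
// This executor won the claim and is responsible for scheduling the task.
}
Ok(())
}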
pub async fn claim_for_scheduling<'e, E>(
executor: E,
id: Id,
claiming_executor: Option<Id>,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let sql = format!(
"UPDATE execution \
SET status = $2, executor = COALESCE($3, executor), updated = NOW() \
WHERE id = $1 AND status = $4 \
RETURNING {SELECT_COLUMNS}"
);
sqlx::query_as::<_, Execution>(&sql)
.bind(id)
.bind(ExecutionStatus::Scheduling)
.bind(claiming_executor)
.bind(ExecutionStatus::Requested)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
pub async fn reclaim_stale_scheduling<'e, E>(
executor: E,
id: Id,
claiming_executor: Option<Id>,
stale_before: DateTime<Utc>,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let sql = format!(
"UPDATE execution \
SET executor = COALESCE($2, executor), updated = NOW() \
WHERE id = $1 AND status = $3 AND updated <= $4 \
RETURNING {SELECT_COLUMNS}"
);
sqlx::query_as::<_, Execution>(&sql)
.bind(id)
.bind(claiming_executor)
.bind(ExecutionStatus::Scheduling)
.bind(stale_before)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
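// [Editor's sketch] Assumed scheduler loop combining the two guarded updates
// above: first attempt a plain Requested -> Scheduling claim, then fall back to
// taking over a claim whose `updated` timestamp has gone stale. The 60-second
// window is illustrative.
#[allow(dead_code)]
async fn example_claim_or_reclaim(
pool: &PgPool,
id: Id,
my_executor_id: Id,
) -> Result<Option<Execution>> {
if let Some(execution) =
ExecutionRepository::claim_for_scheduling(pool, id, Some(my_executor_id)).await?
{
return Ok(Some(execution));
}
let stale_before = Utc::now() - chrono::Duration::seconds(60);
ExecutionRepository::reclaim_stale_scheduling(pool, id, Some(my_executor_id), stale_before)
.await
}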
pub async fn update_if_status<'e, E>(
executor: E,
id: Id,
expected_status: ExecutionStatus,
input: UpdateExecutionInput,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
if input.status.is_none()
&& input.result.is_none()
&& input.executor.is_none()
&& input.worker.is_none()
&& input.started_at.is_none()
&& input.workflow_task.is_none()
{
return Self::find_by_id(executor, id).await;
}
Self::update_with_locator_optional(executor, input, |query| {
query.push(" WHERE id = ").push_bind(id);
query.push(" AND status = ").push_bind(expected_status);
})
.await
}
pub async fn update_if_status_and_updated_before<'e, E>(
executor: E,
id: Id,
expected_status: ExecutionStatus,
stale_before: DateTime<Utc>,
input: UpdateExecutionInput,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
if input.status.is_none()
&& input.result.is_none()
&& input.executor.is_none()
&& input.worker.is_none()
&& input.started_at.is_none()
&& input.workflow_task.is_none()
{
return Self::find_by_id(executor, id).await;
}
Self::update_with_locator_optional(executor, input, |query| {
query.push(" WHERE id = ").push_bind(id);
query.push(" AND status = ").push_bind(expected_status);
query.push(" AND updated < ").push_bind(stale_before);
})
.await
}
pub async fn update_if_status_and_updated_at<'e, E>(
executor: E,
id: Id,
expected_status: ExecutionStatus,
expected_updated: DateTime<Utc>,
input: UpdateExecutionInput,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
if input.status.is_none()
&& input.result.is_none()
&& input.executor.is_none()
&& input.worker.is_none()
&& input.started_at.is_none()
&& input.workflow_task.is_none()
{
return Self::find_by_id(executor, id).await;
}
Self::update_with_locator_optional(executor, input, |query| {
query.push(" WHERE id = ").push_bind(id);
query.push(" AND status = ").push_bind(expected_status);
query.push(" AND updated = ").push_bind(expected_updated);
})
.await
}
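// [Editor's sketch] The guarded updates above are compare-and-swap transitions:
// a `None` return means another executor changed the row first and this one must
// back off. Assuming UpdateExecutionInput derives Default like the other
// Update*Input types in this crate, a Scheduling -> Scheduled transition might
// look like this (names illustrative):
#[allow(dead_code)]
async fn example_guarded_transition(pool: &PgPool, id: Id) -> Result<bool> {
let input = UpdateExecutionInput {
status: Some(ExecutionStatus::Scheduled),
..Default::default()
};
let updated =
ExecutionRepository::update_if_status(pool, id, ExecutionStatus::Scheduling, input)
.await?;
Ok(updated.is_some())
}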
pub async fn revert_scheduled_to_requested<'e, E>(
executor: E,
id: Id,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let sql = format!(
"UPDATE execution \
SET status = $2, worker = NULL, executor = NULL, updated = NOW() \
WHERE id = $1 AND status = $3 \
RETURNING {SELECT_COLUMNS}"
);
sqlx::query_as::<_, Execution>(&sql)
.bind(id)
.bind(ExecutionStatus::Requested)
.bind(ExecutionStatus::Scheduled)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
async fn update_with_locator<'e, E, F>(
executor: E,
input: UpdateExecutionInput,
where_clause: F,
) -> Result<Execution>
where
E: Executor<'e, Database = Postgres> + 'e,
F: FnOnce(&mut QueryBuilder<'_, Postgres>),
{
let mut query = QueryBuilder::new("UPDATE execution SET ");
let mut has_updates = false;
@@ -234,15 +817,10 @@ impl Update for ExecutionRepository {
query
.push("workflow_task = ")
.push_bind(sqlx::types::Json(workflow_task));
has_updates = true;
}
if !has_updates {
// No updates requested, fetch and return existing entity
return Self::get_by_id(executor, id).await;
}
query.push(", updated = NOW() WHERE id = ").push_bind(id);
query.push(", updated = NOW()");
where_clause(&mut query);
query.push(" RETURNING ");
query.push(SELECT_COLUMNS);
@@ -252,6 +830,96 @@ impl Update for ExecutionRepository {
.await
.map_err(Into::into)
}
async fn update_with_locator_optional<'e, E, F>(
executor: E,
input: UpdateExecutionInput,
where_clause: F,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
F: FnOnce(&mut QueryBuilder<'_, Postgres>),
{
let mut query = QueryBuilder::new("UPDATE execution SET ");
let mut has_updates = false;
if let Some(status) = input.status {
query.push("status = ").push_bind(status);
has_updates = true;
}
if let Some(result) = &input.result {
if has_updates {
query.push(", ");
}
query.push("result = ").push_bind(result);
has_updates = true;
}
if let Some(executor_id) = input.executor {
if has_updates {
query.push(", ");
}
query.push("executor = ").push_bind(executor_id);
has_updates = true;
}
if let Some(worker_id) = input.worker {
if has_updates {
query.push(", ");
}
query.push("worker = ").push_bind(worker_id);
has_updates = true;
}
if let Some(started_at) = input.started_at {
if has_updates {
query.push(", ");
}
query.push("started_at = ").push_bind(started_at);
has_updates = true;
}
if let Some(workflow_task) = &input.workflow_task {
if has_updates {
query.push(", ");
}
query
.push("workflow_task = ")
.push_bind(sqlx::types::Json(workflow_task));
}
query.push(", updated = NOW()");
where_clause(&mut query);
query.push(" RETURNING ");
query.push(SELECT_COLUMNS);
query
.build_query_as::<Execution>()
.fetch_optional(executor)
.await
.map_err(Into::into)
}
/// Update an execution using the loaded row's primary key.
pub async fn update_loaded<'e, E>(
executor: E,
execution: &Execution,
input: UpdateExecutionInput,
) -> Result<Execution>
where
E: Executor<'e, Database = Postgres> + 'e,
{
if input.status.is_none()
&& input.result.is_none()
&& input.executor.is_none()
&& input.worker.is_none()
&& input.started_at.is_none()
&& input.workflow_task.is_none()
{
return Ok(execution.clone());
}
Self::update_with_locator(executor, input, |query| {
query.push(" WHERE id = ").push_bind(execution.id);
})
.await
}
}
#[async_trait::async_trait]
@@ -303,6 +971,34 @@ impl ExecutionRepository {
.map_err(Into::into)
}
pub async fn find_by_workflow_task<'e, E>(
executor: E,
workflow_execution_id: Id,
task_name: &str,
task_index: Option<i32>,
) -> Result<Option<Execution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let sql = format!(
"SELECT {SELECT_COLUMNS} \
FROM execution \
WHERE workflow_task->>'workflow_execution' = $1::text \
AND workflow_task->>'task_name' = $2 \
AND (workflow_task->>'task_index')::int IS NOT DISTINCT FROM $3 \
ORDER BY created ASC \
LIMIT 1"
);
sqlx::query_as::<_, Execution>(&sql)
.bind(workflow_execution_id.to_string())
.bind(task_name)
.bind(task_index)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
/// Find all child executions for a given parent execution ID.
///
/// Returns child executions ordered by creation time (ascending),

View File

@@ -0,0 +1,909 @@
use chrono::{DateTime, Utc};
use sqlx::{PgPool, Postgres, Row, Transaction};
use crate::error::Result;
use crate::models::Id;
use crate::repositories::queue_stats::{QueueStatsRepository, UpsertQueueStatsInput};
#[derive(Debug, Clone)]
pub struct AdmissionSlotAcquireOutcome {
pub acquired: bool,
pub current_count: u32,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AdmissionEnqueueOutcome {
Acquired,
Enqueued,
}
#[derive(Debug, Clone)]
pub struct AdmissionSlotReleaseOutcome {
pub action_id: Id,
pub group_key: Option<String>,
pub next_execution_id: Option<Id>,
}
#[derive(Debug, Clone)]
pub struct AdmissionQueuedRemovalOutcome {
pub action_id: Id,
pub group_key: Option<String>,
pub next_execution_id: Option<Id>,
pub execution_id: Id,
pub queue_order: i64,
pub enqueued_at: DateTime<Utc>,
pub removed_index: usize,
}
#[derive(Debug, Clone)]
pub struct AdmissionQueueStats {
pub action_id: Id,
pub queue_length: usize,
pub active_count: u32,
pub max_concurrent: u32,
pub oldest_enqueued_at: Option<DateTime<Utc>>,
pub total_enqueued: u64,
pub total_completed: u64,
}
#[derive(Debug, Clone)]
struct AdmissionState {
id: Id,
action_id: Id,
group_key: Option<String>,
max_concurrent: i32,
}
#[derive(Debug, Clone)]
struct ExecutionEntry {
state_id: Id,
action_id: Id,
group_key: Option<String>,
status: String,
queue_order: i64,
enqueued_at: DateTime<Utc>,
}
pub struct ExecutionAdmissionRepository;
impl ExecutionAdmissionRepository {
pub async fn enqueue(
pool: &PgPool,
max_queue_length: usize,
action_id: Id,
execution_id: Id,
max_concurrent: u32,
group_key: Option<String>,
) -> Result<AdmissionEnqueueOutcome> {
let mut tx = pool.begin().await?;
let state = Self::lock_state(&mut tx, action_id, group_key, max_concurrent).await?;
let outcome =
Self::enqueue_in_state(&mut tx, &state, max_queue_length, execution_id, true).await?;
Self::refresh_queue_stats(&mut tx, action_id).await?;
tx.commit().await?;
Ok(outcome)
}
pub async fn wait_status(pool: &PgPool, execution_id: Id) -> Result<Option<bool>> {
let row = sqlx::query_scalar::<Postgres, bool>(
r#"
SELECT status = 'active'
FROM execution_admission_entry
WHERE execution_id = $1
"#,
)
.bind(execution_id)
.fetch_optional(pool)
.await?;
Ok(row)
}
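// [Editor's sketch] Assumed caller flow for the admission queue: enqueue, then
// poll `wait_status` until the entry is promoted. The capacity, interval, and
// retry bound are illustrative; a real caller might prefer LISTEN/NOTIFY over
// polling.
#[allow(dead_code)]
async fn example_enqueue_and_wait(
pool: &PgPool,
action_id: Id,
execution_id: Id,
) -> Result<bool> {
let outcome = Self::enqueue(pool, 100, action_id, execution_id, 4, None).await?;
if outcome == AdmissionEnqueueOutcome::Acquired {
return Ok(true);
}
for _ in 0..600 {
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
match Self::wait_status(pool, execution_id).await? {
Some(true) => return Ok(true), // promoted to 'active'
Some(false) => {} // still queued
None => return Ok(false), // entry was removed
}
}
Ok(false)
}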
pub async fn try_acquire(
pool: &PgPool,
action_id: Id,
execution_id: Id,
max_concurrent: u32,
group_key: Option<String>,
) -> Result<AdmissionSlotAcquireOutcome> {
let mut tx = pool.begin().await?;
let state = Self::lock_state(&mut tx, action_id, group_key, max_concurrent).await?;
let active_count = Self::active_count(&mut tx, state.id).await? as u32;
let outcome = match Self::find_execution_entry(&mut tx, execution_id).await? {
Some(entry) if entry.status == "active" => AdmissionSlotAcquireOutcome {
acquired: true,
current_count: active_count,
},
Some(entry) if entry.status == "queued" && entry.state_id == state.id => {
let promoted =
Self::maybe_promote_existing_queued(&mut tx, &state, execution_id).await?;
AdmissionSlotAcquireOutcome {
acquired: promoted,
current_count: active_count,
}
}
Some(_) => AdmissionSlotAcquireOutcome {
acquired: false,
current_count: active_count,
},
None => {
if active_count < max_concurrent
&& Self::queued_count(&mut tx, state.id).await? == 0
{
let queue_order = Self::allocate_queue_order(&mut tx, state.id).await?;
Self::insert_entry(
&mut tx,
state.id,
execution_id,
"active",
queue_order,
Utc::now(),
)
.await?;
Self::increment_total_enqueued(&mut tx, state.id).await?;
Self::refresh_queue_stats(&mut tx, action_id).await?;
AdmissionSlotAcquireOutcome {
acquired: true,
current_count: active_count,
}
} else {
AdmissionSlotAcquireOutcome {
acquired: false,
current_count: active_count,
}
}
}
};
tx.commit().await?;
Ok(outcome)
}
pub async fn release_active_slot(
pool: &PgPool,
execution_id: Id,
) -> Result<Option<AdmissionSlotReleaseOutcome>> {
let mut tx = pool.begin().await?;
let Some(entry) = Self::find_execution_entry_for_update(&mut tx, execution_id).await?
else {
tx.commit().await?;
return Ok(None);
};
if entry.status != "active" {
tx.commit().await?;
return Ok(None);
}
let state = Self::lock_existing_state(&mut tx, entry.action_id, entry.group_key.clone())
.await?
.ok_or_else(|| {
crate::Error::internal("missing execution_admission_state for active execution")
})?;
sqlx::query("DELETE FROM execution_admission_entry WHERE execution_id = $1")
.bind(execution_id)
.execute(&mut *tx)
.await?;
Self::increment_total_completed(&mut tx, state.id).await?;
let next_execution_id = Self::promote_next_queued(&mut tx, &state).await?;
Self::refresh_queue_stats(&mut tx, state.action_id).await?;
tx.commit().await?;
Ok(Some(AdmissionSlotReleaseOutcome {
action_id: state.action_id,
group_key: state.group_key,
next_execution_id,
}))
}
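// [Editor's sketch] The release/restore pair above is shaped for compensation:
// release the slot, hand off to the promoted execution, and restore the previous
// state if the hand-off cannot be delivered. `notify` stands in for whatever
// wake-up mechanism the executor uses and is purely illustrative.
#[allow(dead_code)]
async fn example_release_with_compensation<F>(
pool: &PgPool,
execution_id: Id,
notify: F,
) -> Result<()>
where
F: Fn(Id) -> std::result::Result<(), ()>,
{
let Some(outcome) = Self::release_active_slot(pool, execution_id).await? else {
return Ok(());
};
if let Some(next_id) = outcome.next_execution_id {
if notify(next_id).is_err() {
// Undo: demote the promoted entry and re-activate ours.
Self::restore_active_slot(pool, execution_id, &outcome).await?;
}
}
Ok(())
}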
pub async fn restore_active_slot(
pool: &PgPool,
execution_id: Id,
outcome: &AdmissionSlotReleaseOutcome,
) -> Result<()> {
let mut tx = pool.begin().await?;
let state =
Self::lock_existing_state(&mut tx, outcome.action_id, outcome.group_key.clone())
.await?
.ok_or_else(|| {
crate::Error::internal("missing execution_admission_state on restore")
})?;
if let Some(next_execution_id) = outcome.next_execution_id {
sqlx::query(
r#"
UPDATE execution_admission_entry
SET status = 'queued', activated_at = NULL
WHERE execution_id = $1
AND state_id = $2
AND status = 'active'
"#,
)
.bind(next_execution_id)
.bind(state.id)
.execute(&mut *tx)
.await?;
}
sqlx::query(
r#"
INSERT INTO execution_admission_entry (
state_id, execution_id, status, queue_order, enqueued_at, activated_at
) VALUES ($1, $2, 'active', $3, NOW(), NOW())
ON CONFLICT (execution_id) DO UPDATE
SET state_id = EXCLUDED.state_id,
status = 'active',
activated_at = EXCLUDED.activated_at
"#,
)
.bind(state.id)
.bind(execution_id)
.bind(Self::allocate_queue_order(&mut tx, state.id).await?)
.execute(&mut *tx)
.await?;
sqlx::query(
r#"
UPDATE execution_admission_state
SET total_completed = GREATEST(total_completed - 1, 0)
WHERE id = $1
"#,
)
.bind(state.id)
.execute(&mut *tx)
.await?;
Self::refresh_queue_stats(&mut tx, state.action_id).await?;
tx.commit().await?;
Ok(())
}
pub async fn remove_queued_execution(
pool: &PgPool,
execution_id: Id,
) -> Result<Option<AdmissionQueuedRemovalOutcome>> {
let mut tx = pool.begin().await?;
let Some(entry) = Self::find_execution_entry_for_update(&mut tx, execution_id).await?
else {
tx.commit().await?;
return Ok(None);
};
if entry.status != "queued" {
tx.commit().await?;
return Ok(None);
}
let state = Self::lock_existing_state(&mut tx, entry.action_id, entry.group_key.clone())
.await?
.ok_or_else(|| {
crate::Error::internal("missing execution_admission_state for queued execution")
})?;
let removed_index = sqlx::query_scalar::<Postgres, i64>(
r#"
SELECT COUNT(*)
FROM execution_admission_entry
WHERE state_id = $1
AND status = 'queued'
AND (enqueued_at, id) < (
SELECT enqueued_at, id
FROM execution_admission_entry
WHERE execution_id = $2
)
"#,
)
.bind(state.id)
.bind(execution_id)
.fetch_one(&mut *tx)
.await? as usize;
sqlx::query("DELETE FROM execution_admission_entry WHERE execution_id = $1")
.bind(execution_id)
.execute(&mut *tx)
.await?;
let next_execution_id =
if Self::active_count(&mut tx, state.id).await? < state.max_concurrent as i64 {
Self::promote_next_queued(&mut tx, &state).await?
} else {
None
};
Self::refresh_queue_stats(&mut tx, state.action_id).await?;
tx.commit().await?;
Ok(Some(AdmissionQueuedRemovalOutcome {
action_id: state.action_id,
group_key: state.group_key,
next_execution_id,
execution_id,
queue_order: entry.queue_order,
enqueued_at: entry.enqueued_at,
removed_index,
}))
}
pub async fn restore_queued_execution(
pool: &PgPool,
outcome: &AdmissionQueuedRemovalOutcome,
) -> Result<()> {
let mut tx = pool.begin().await?;
let state =
Self::lock_existing_state(&mut tx, outcome.action_id, outcome.group_key.clone())
.await?
.ok_or_else(|| {
crate::Error::internal("missing execution_admission_state on queued restore")
})?;
if let Some(next_execution_id) = outcome.next_execution_id {
sqlx::query(
r#"
UPDATE execution_admission_entry
SET status = 'queued', activated_at = NULL
WHERE execution_id = $1
AND state_id = $2
AND status = 'active'
"#,
)
.bind(next_execution_id)
.bind(state.id)
.execute(&mut *tx)
.await?;
}
sqlx::query(
r#"
INSERT INTO execution_admission_entry (
state_id, execution_id, status, queue_order, enqueued_at, activated_at
) VALUES ($1, $2, 'queued', $3, $4, NULL)
ON CONFLICT (execution_id) DO NOTHING
"#,
)
.bind(state.id)
.bind(outcome.execution_id)
.bind(outcome.queue_order)
.bind(outcome.enqueued_at)
.execute(&mut *tx)
.await?;
Self::refresh_queue_stats(&mut tx, state.action_id).await?;
tx.commit().await?;
Ok(())
}
pub async fn get_queue_stats(
pool: &PgPool,
action_id: Id,
) -> Result<Option<AdmissionQueueStats>> {
let row = sqlx::query(
r#"
WITH state_rows AS (
SELECT
COUNT(*) AS state_count,
COALESCE(SUM(max_concurrent), 0) AS max_concurrent,
COALESCE(SUM(total_enqueued), 0) AS total_enqueued,
COALESCE(SUM(total_completed), 0) AS total_completed
FROM execution_admission_state
WHERE action_id = $1
),
entry_rows AS (
SELECT
COUNT(*) FILTER (WHERE e.status = 'queued') AS queue_length,
COUNT(*) FILTER (WHERE e.status = 'active') AS active_count,
MIN(e.enqueued_at) FILTER (WHERE e.status = 'queued') AS oldest_enqueued_at
FROM execution_admission_state s
LEFT JOIN execution_admission_entry e ON e.state_id = s.id
WHERE s.action_id = $1
)
SELECT
sr.state_count,
er.queue_length,
er.active_count,
sr.max_concurrent,
er.oldest_enqueued_at,
sr.total_enqueued,
sr.total_completed
FROM state_rows sr
CROSS JOIN entry_rows er
"#,
)
.bind(action_id)
.fetch_one(pool)
.await?;
let state_count: i64 = row.try_get("state_count")?;
if state_count == 0 {
return Ok(None);
}
Ok(Some(AdmissionQueueStats {
action_id,
queue_length: row.try_get::<i64, _>("queue_length")? as usize,
active_count: row.try_get::<i64, _>("active_count")? as u32,
max_concurrent: row.try_get::<i64, _>("max_concurrent")? as u32,
oldest_enqueued_at: row.try_get("oldest_enqueued_at")?,
total_enqueued: row.try_get::<i64, _>("total_enqueued")? as u64,
total_completed: row.try_get::<i64, _>("total_completed")? as u64,
}))
}
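// [Editor's sketch] Assumed monitoring read. Note the stats aggregate across all
// group keys of an action, so `active_count` can legitimately exceed one group's
// `max_concurrent` when several groups exist.
#[allow(dead_code)]
async fn example_log_queue_depth(pool: &PgPool, action_id: Id) -> Result<()> {
if let Some(stats) = Self::get_queue_stats(pool, action_id).await? {
println!(
"action {}: {} queued / {} active (cap {})",
stats.action_id, stats.queue_length, stats.active_count, stats.max_concurrent
);
}
Ok(())
}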
async fn enqueue_in_state(
tx: &mut Transaction<'_, Postgres>,
state: &AdmissionState,
max_queue_length: usize,
execution_id: Id,
allow_queue: bool,
) -> Result<AdmissionEnqueueOutcome> {
if let Some(entry) = Self::find_execution_entry(tx, execution_id).await? {
if entry.status == "active" {
return Ok(AdmissionEnqueueOutcome::Acquired);
}
if entry.status == "queued" && entry.state_id == state.id {
if Self::maybe_promote_existing_queued(tx, state, execution_id).await? {
return Ok(AdmissionEnqueueOutcome::Acquired);
}
return Ok(AdmissionEnqueueOutcome::Enqueued);
}
return Ok(AdmissionEnqueueOutcome::Enqueued);
}
let active_count = Self::active_count(tx, state.id).await?;
let queued_count = Self::queued_count(tx, state.id).await?;
if active_count < state.max_concurrent as i64 && queued_count == 0 {
let queue_order = Self::allocate_queue_order(tx, state.id).await?;
Self::insert_entry(
tx,
state.id,
execution_id,
"active",
queue_order,
Utc::now(),
)
.await?;
Self::increment_total_enqueued(tx, state.id).await?;
return Ok(AdmissionEnqueueOutcome::Acquired);
}
if !allow_queue {
return Ok(AdmissionEnqueueOutcome::Enqueued);
}
if queued_count >= max_queue_length as i64 {
return Err(anyhow::anyhow!(
"Queue full for action {}: maximum {} entries",
state.action_id,
max_queue_length
)
.into());
}
let queue_order = Self::allocate_queue_order(tx, state.id).await?;
Self::insert_entry(
tx,
state.id,
execution_id,
"queued",
queue_order,
Utc::now(),
)
.await?;
Self::increment_total_enqueued(tx, state.id).await?;
Ok(AdmissionEnqueueOutcome::Enqueued)
}
async fn maybe_promote_existing_queued(
tx: &mut Transaction<'_, Postgres>,
state: &AdmissionState,
execution_id: Id,
) -> Result<bool> {
let active_count = Self::active_count(tx, state.id).await?;
if active_count >= state.max_concurrent as i64 {
return Ok(false);
}
let front_execution_id = sqlx::query_scalar::<Postgres, Id>(
r#"
SELECT execution_id
FROM execution_admission_entry
WHERE state_id = $1
AND status = 'queued'
ORDER BY queue_order ASC
LIMIT 1
"#,
)
.bind(state.id)
.fetch_optional(&mut **tx)
.await?;
if front_execution_id != Some(execution_id) {
return Ok(false);
}
sqlx::query(
r#"
UPDATE execution_admission_entry
SET status = 'active',
activated_at = NOW()
WHERE execution_id = $1
AND state_id = $2
AND status = 'queued'
"#,
)
.bind(execution_id)
.bind(state.id)
.execute(&mut **tx)
.await?;
Ok(true)
}
async fn promote_next_queued(
tx: &mut Transaction<'_, Postgres>,
state: &AdmissionState,
) -> Result<Option<Id>> {
let next_execution_id = sqlx::query_scalar::<Postgres, Id>(
r#"
SELECT execution_id
FROM execution_admission_entry
WHERE state_id = $1
AND status = 'queued'
ORDER BY queue_order ASC
LIMIT 1
"#,
)
.bind(state.id)
.fetch_optional(&mut **tx)
.await?;
if let Some(next_execution_id) = next_execution_id {
sqlx::query(
r#"
UPDATE execution_admission_entry
SET status = 'active',
activated_at = NOW()
WHERE execution_id = $1
AND state_id = $2
AND status = 'queued'
"#,
)
.bind(next_execution_id)
.bind(state.id)
.execute(&mut **tx)
.await?;
}
Ok(next_execution_id)
}
async fn lock_state(
tx: &mut Transaction<'_, Postgres>,
action_id: Id,
group_key: Option<String>,
max_concurrent: u32,
) -> Result<AdmissionState> {
sqlx::query(
r#"
INSERT INTO execution_admission_state (action_id, group_key, max_concurrent)
VALUES ($1, $2, $3)
ON CONFLICT (action_id, group_key_normalized)
DO UPDATE SET max_concurrent = EXCLUDED.max_concurrent
"#,
)
.bind(action_id)
.bind(group_key.clone())
.bind(max_concurrent as i32)
.execute(&mut **tx)
.await?;
let state = sqlx::query(
r#"
SELECT id, action_id, group_key, max_concurrent
FROM execution_admission_state
WHERE action_id = $1
AND group_key_normalized = COALESCE($2, '')
FOR UPDATE
"#,
)
.bind(action_id)
.bind(group_key)
.fetch_one(&mut **tx)
.await?;
Ok(AdmissionState {
id: state.try_get("id")?,
action_id: state.try_get("action_id")?,
group_key: state.try_get("group_key")?,
max_concurrent: state.try_get("max_concurrent")?,
})
}
async fn lock_existing_state(
tx: &mut Transaction<'_, Postgres>,
action_id: Id,
group_key: Option<String>,
) -> Result<Option<AdmissionState>> {
let row = sqlx::query(
r#"
SELECT id, action_id, group_key, max_concurrent
FROM execution_admission_state
WHERE action_id = $1
AND group_key_normalized = COALESCE($2, '')
FOR UPDATE
"#,
)
.bind(action_id)
.bind(group_key)
.fetch_optional(&mut **tx)
.await?;
Ok(row.map(|state| AdmissionState {
id: state.try_get("id").expect("state.id"),
action_id: state.try_get("action_id").expect("state.action_id"),
group_key: state.try_get("group_key").expect("state.group_key"),
max_concurrent: state
.try_get("max_concurrent")
.expect("state.max_concurrent"),
}))
}
async fn find_execution_entry(
tx: &mut Transaction<'_, Postgres>,
execution_id: Id,
) -> Result<Option<ExecutionEntry>> {
let row = sqlx::query(
r#"
SELECT
e.state_id,
s.action_id,
s.group_key,
e.execution_id,
e.status,
e.queue_order,
e.enqueued_at
FROM execution_admission_entry e
JOIN execution_admission_state s ON s.id = e.state_id
WHERE e.execution_id = $1
"#,
)
.bind(execution_id)
.fetch_optional(&mut **tx)
.await?;
Ok(row.map(|entry| ExecutionEntry {
state_id: entry.try_get("state_id").expect("entry.state_id"),
action_id: entry.try_get("action_id").expect("entry.action_id"),
group_key: entry.try_get("group_key").expect("entry.group_key"),
status: entry.try_get("status").expect("entry.status"),
queue_order: entry.try_get("queue_order").expect("entry.queue_order"),
enqueued_at: entry.try_get("enqueued_at").expect("entry.enqueued_at"),
}))
}
async fn find_execution_entry_for_update(
tx: &mut Transaction<'_, Postgres>,
execution_id: Id,
) -> Result<Option<ExecutionEntry>> {
let row = sqlx::query(
r#"
SELECT
e.state_id,
s.action_id,
s.group_key,
e.execution_id,
e.status,
e.queue_order,
e.enqueued_at
FROM execution_admission_entry e
JOIN execution_admission_state s ON s.id = e.state_id
WHERE e.execution_id = $1
FOR UPDATE OF e, s
"#,
)
.bind(execution_id)
.fetch_optional(&mut **tx)
.await?;
Ok(row.map(|entry| ExecutionEntry {
state_id: entry.try_get("state_id").expect("entry.state_id"),
action_id: entry.try_get("action_id").expect("entry.action_id"),
group_key: entry.try_get("group_key").expect("entry.group_key"),
status: entry.try_get("status").expect("entry.status"),
queue_order: entry.try_get("queue_order").expect("entry.queue_order"),
enqueued_at: entry.try_get("enqueued_at").expect("entry.enqueued_at"),
}))
}
async fn active_count(tx: &mut Transaction<'_, Postgres>, state_id: Id) -> Result<i64> {
Ok(sqlx::query_scalar::<Postgres, i64>(
r#"
SELECT COUNT(*)
FROM execution_admission_entry
WHERE state_id = $1
AND status = 'active'
"#,
)
.bind(state_id)
.fetch_one(&mut **tx)
.await?)
}
async fn queued_count(tx: &mut Transaction<'_, Postgres>, state_id: Id) -> Result<i64> {
Ok(sqlx::query_scalar::<Postgres, i64>(
r#"
SELECT COUNT(*)
FROM execution_admission_entry
WHERE state_id = $1
AND status = 'queued'
"#,
)
.bind(state_id)
.fetch_one(&mut **tx)
.await?)
}
async fn insert_entry(
tx: &mut Transaction<'_, Postgres>,
state_id: Id,
execution_id: Id,
status: &str,
queue_order: i64,
enqueued_at: DateTime<Utc>,
) -> Result<()> {
sqlx::query(
r#"
INSERT INTO execution_admission_entry (
state_id, execution_id, status, queue_order, enqueued_at, activated_at
) VALUES (
$1, $2, $3, $4, $5,
CASE WHEN $3 = 'active' THEN NOW() ELSE NULL END
)
"#,
)
.bind(state_id)
.bind(execution_id)
.bind(status)
.bind(queue_order)
.bind(enqueued_at)
.execute(&mut **tx)
.await?;
Ok(())
}
async fn allocate_queue_order(tx: &mut Transaction<'_, Postgres>, state_id: Id) -> Result<i64> {
let queue_order = sqlx::query_scalar::<Postgres, i64>(
r#"
UPDATE execution_admission_state
SET next_queue_order = next_queue_order + 1
WHERE id = $1
RETURNING next_queue_order - 1
"#,
)
.bind(state_id)
.fetch_one(&mut **tx)
.await?;
Ok(queue_order)
}
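// [Editor's note] Callers of allocate_queue_order already hold the FOR UPDATE
// lock on the state row, so this UPDATE ... RETURNING counter hands out strictly
// increasing per-state positions without needing a separate sequence object.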
async fn increment_total_enqueued(
tx: &mut Transaction<'_, Postgres>,
state_id: Id,
) -> Result<()> {
sqlx::query(
r#"
UPDATE execution_admission_state
SET total_enqueued = total_enqueued + 1
WHERE id = $1
"#,
)
.bind(state_id)
.execute(&mut **tx)
.await?;
Ok(())
}
async fn increment_total_completed(
tx: &mut Transaction<'_, Postgres>,
state_id: Id,
) -> Result<()> {
sqlx::query(
r#"
UPDATE execution_admission_state
SET total_completed = total_completed + 1
WHERE id = $1
"#,
)
.bind(state_id)
.execute(&mut **tx)
.await?;
Ok(())
}
async fn refresh_queue_stats(tx: &mut Transaction<'_, Postgres>, action_id: Id) -> Result<()> {
let Some(stats) = Self::get_queue_stats_from_tx(tx, action_id).await? else {
QueueStatsRepository::delete(&mut **tx, action_id).await?;
return Ok(());
};
QueueStatsRepository::upsert(
&mut **tx,
UpsertQueueStatsInput {
action_id,
queue_length: stats.queue_length as i32,
active_count: stats.active_count as i32,
max_concurrent: stats.max_concurrent as i32,
oldest_enqueued_at: stats.oldest_enqueued_at,
total_enqueued: stats.total_enqueued as i64,
total_completed: stats.total_completed as i64,
},
)
.await?;
Ok(())
}
async fn get_queue_stats_from_tx(
tx: &mut Transaction<'_, Postgres>,
action_id: Id,
) -> Result<Option<AdmissionQueueStats>> {
let row = sqlx::query(
r#"
WITH state_rows AS (
SELECT
COUNT(*) AS state_count,
COALESCE(SUM(max_concurrent), 0) AS max_concurrent,
COALESCE(SUM(total_enqueued), 0) AS total_enqueued,
COALESCE(SUM(total_completed), 0) AS total_completed
FROM execution_admission_state
WHERE action_id = $1
),
entry_rows AS (
SELECT
COUNT(*) FILTER (WHERE e.status = 'queued') AS queue_length,
COUNT(*) FILTER (WHERE e.status = 'active') AS active_count,
MIN(e.enqueued_at) FILTER (WHERE e.status = 'queued') AS oldest_enqueued_at
FROM execution_admission_state s
LEFT JOIN execution_admission_entry e ON e.state_id = s.id
WHERE s.action_id = $1
)
SELECT
sr.state_count,
er.queue_length,
er.active_count,
sr.max_concurrent,
er.oldest_enqueued_at,
sr.total_enqueued,
sr.total_completed
FROM state_rows sr
CROSS JOIN entry_rows er
"#,
)
.bind(action_id)
.fetch_one(&mut **tx)
.await?;
let state_count: i64 = row.try_get("state_count")?;
if state_count == 0 {
return Ok(None);
}
Ok(Some(AdmissionQueueStats {
action_id,
queue_length: row.try_get::<i64, _>("queue_length")? as usize,
active_count: row.try_get::<i64, _>("active_count")? as u32,
max_concurrent: row.try_get::<i64, _>("max_concurrent")? as u32,
oldest_enqueued_at: row.try_get("oldest_enqueued_at")?,
total_enqueued: row.try_get::<i64, _>("total_enqueued")? as u64,
total_completed: row.try_get::<i64, _>("total_completed")? as u64,
}))
}
}

View File

@@ -28,6 +28,7 @@ pub struct UpdateIdentityInput {
pub display_name: Option<String>,
pub password_hash: Option<String>,
pub attributes: Option<JsonDict>,
pub frozen: Option<bool>,
}
#[async_trait::async_trait]
@@ -37,7 +38,7 @@ impl FindById for IdentityRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Identity>(
"SELECT id, login, display_name, password_hash, attributes, created, updated FROM identity WHERE id = $1"
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated FROM identity WHERE id = $1"
).bind(id).fetch_optional(executor).await.map_err(Into::into)
}
}
@@ -49,7 +50,7 @@ impl List for IdentityRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Identity>(
"SELECT id, login, display_name, password_hash, attributes, created, updated FROM identity ORDER BY login ASC"
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated FROM identity ORDER BY login ASC"
).fetch_all(executor).await.map_err(Into::into)
}
}
@@ -62,7 +63,7 @@ impl Create for IdentityRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Identity>(
"INSERT INTO identity (login, display_name, password_hash, attributes) VALUES ($1, $2, $3, $4) RETURNING id, login, display_name, password_hash, attributes, created, updated"
"INSERT INTO identity (login, display_name, password_hash, attributes) VALUES ($1, $2, $3, $4) RETURNING id, login, display_name, password_hash, attributes, frozen, created, updated"
)
.bind(&input.login)
.bind(&input.display_name)
@@ -111,6 +112,13 @@ impl Update for IdentityRepository {
query.push("attributes = ").push_bind(attributes);
has_updates = true;
}
if let Some(frozen) = input.frozen {
if has_updates {
query.push(", ");
}
query.push("frozen = ").push_bind(frozen);
has_updates = true;
}
if !has_updates {
// No updates requested, fetch and return existing entity
@@ -119,7 +127,7 @@ impl Update for IdentityRepository {
query.push(", updated = NOW() WHERE id = ").push_bind(id);
query.push(
" RETURNING id, login, display_name, password_hash, attributes, created, updated",
" RETURNING id, login, display_name, password_hash, attributes, frozen, created, updated",
);
query
@@ -156,7 +164,7 @@ impl IdentityRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Identity>(
"SELECT id, login, display_name, password_hash, attributes, created, updated FROM identity WHERE login = $1"
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated FROM identity WHERE login = $1"
).bind(login).fetch_optional(executor).await.map_err(Into::into)
}
@@ -169,7 +177,7 @@ impl IdentityRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Identity>(
"SELECT id, login, display_name, password_hash, attributes, created, updated
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated
FROM identity
WHERE attributes->'oidc'->>'issuer' = $1
AND attributes->'oidc'->>'sub' = $2",
@@ -190,7 +198,7 @@ impl IdentityRepository {
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, Identity>(
"SELECT id, login, display_name, password_hash, attributes, created, updated
"SELECT id, login, display_name, password_hash, attributes, frozen, created, updated
FROM identity
WHERE attributes->'ldap'->>'server_url' = $1
AND attributes->'ldap'->>'dn' = $2",
@@ -363,6 +371,27 @@ impl PermissionSetRepository {
.map_err(Into::into)
}
pub async fn find_by_roles<'e, E>(executor: E, roles: &[String]) -> Result<Vec<PermissionSet>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
if roles.is_empty() {
return Ok(Vec::new());
}
sqlx::query_as::<_, PermissionSet>(
"SELECT DISTINCT ps.id, ps.ref, ps.pack, ps.pack_ref, ps.label, ps.description, ps.grants, ps.created, ps.updated
FROM permission_set ps
INNER JOIN permission_set_role_assignment psra ON psra.permset = ps.id
WHERE psra.role = ANY($1)
ORDER BY ps.ref ASC",
)
.bind(roles)
.fetch_all(executor)
.await
.map_err(Into::into)
}
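// [Editor's sketch] Assumed resolution flow for role-based permissions: fetch the
// identity's role names, then collect every permission set mapped to any of them.
// `IdentityRoleAssignmentRepository` is defined later in this file; the pairing
// here is illustrative.
#[allow(dead_code)]
async fn example_effective_permission_sets(
pool: &sqlx::PgPool,
identity_id: Id,
) -> Result<Vec<PermissionSet>> {
let roles =
IdentityRoleAssignmentRepository::find_role_names_by_identity(pool, identity_id)
.await?;
PermissionSetRepository::find_by_roles(pool, &roles).await
}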
/// Delete permission sets belonging to a pack whose refs are NOT in the given set.
///
/// Used during pack reinstallation to clean up permission sets that were
@@ -481,3 +510,231 @@ impl PermissionAssignmentRepository {
.map_err(Into::into)
}
}
pub struct IdentityRoleAssignmentRepository;
impl Repository for IdentityRoleAssignmentRepository {
type Entity = IdentityRoleAssignment;
fn table_name() -> &'static str {
"identity_role_assignment"
}
}
#[derive(Debug, Clone)]
pub struct CreateIdentityRoleAssignmentInput {
pub identity: Id,
pub role: String,
pub source: String,
pub managed: bool,
}
#[async_trait::async_trait]
impl FindById for IdentityRoleAssignmentRepository {
async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<Self::Entity>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, IdentityRoleAssignment>(
"SELECT id, identity, role, source, managed, created, updated FROM identity_role_assignment WHERE id = $1"
)
.bind(id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
}
#[async_trait::async_trait]
impl Create for IdentityRoleAssignmentRepository {
type CreateInput = CreateIdentityRoleAssignmentInput;
async fn create<'e, E>(executor: E, input: Self::CreateInput) -> Result<Self::Entity>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, IdentityRoleAssignment>(
"INSERT INTO identity_role_assignment (identity, role, source, managed)
VALUES ($1, $2, $3, $4)
RETURNING id, identity, role, source, managed, created, updated",
)
.bind(input.identity)
.bind(&input.role)
.bind(&input.source)
.bind(input.managed)
.fetch_one(executor)
.await
.map_err(Into::into)
}
}
#[async_trait::async_trait]
impl Delete for IdentityRoleAssignmentRepository {
async fn delete<'e, E>(executor: E, id: i64) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("DELETE FROM identity_role_assignment WHERE id = $1")
.bind(id)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
}
}
impl IdentityRoleAssignmentRepository {
pub async fn find_by_identity<'e, E>(
executor: E,
identity_id: Id,
) -> Result<Vec<IdentityRoleAssignment>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, IdentityRoleAssignment>(
"SELECT id, identity, role, source, managed, created, updated
FROM identity_role_assignment
WHERE identity = $1
ORDER BY role ASC",
)
.bind(identity_id)
.fetch_all(executor)
.await
.map_err(Into::into)
}
pub async fn find_role_names_by_identity<'e, E>(
executor: E,
identity_id: Id,
) -> Result<Vec<String>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_scalar::<_, String>(
"SELECT role FROM identity_role_assignment WHERE identity = $1 ORDER BY role ASC",
)
.bind(identity_id)
.fetch_all(executor)
.await
.map_err(Into::into)
}
pub async fn replace_managed_roles<'e, E>(
executor: E,
identity_id: Id,
source: &str,
roles: &[String],
) -> Result<()>
where
E: Executor<'e, Database = Postgres> + Copy + 'e,
{
sqlx::query(
"DELETE FROM identity_role_assignment WHERE identity = $1 AND source = $2 AND managed = true",
)
.bind(identity_id)
.bind(source)
.execute(executor)
.await?;
for role in roles {
sqlx::query(
"INSERT INTO identity_role_assignment (identity, role, source, managed)
VALUES ($1, $2, $3, true)
ON CONFLICT (identity, role) DO UPDATE
SET source = EXCLUDED.source,
managed = EXCLUDED.managed,
updated = NOW()",
)
.bind(identity_id)
.bind(role)
.bind(source)
.execute(executor)
.await?;
}
Ok(())
}
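// [Editor's sketch] Assumed sync-on-login flow: roles asserted by an external
// provider replace the previously managed assignments from the same source, while
// manually granted (managed = false) roles are left untouched. The "oidc" source
// tag and role names are illustrative.
#[allow(dead_code)]
async fn example_sync_external_roles(pool: &sqlx::PgPool, identity_id: Id) -> Result<()> {
let asserted = vec!["operator".to_string(), "viewer".to_string()];
IdentityRoleAssignmentRepository::replace_managed_roles(pool, identity_id, "oidc", &asserted)
.await
}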
}
pub struct PermissionSetRoleAssignmentRepository;
impl Repository for PermissionSetRoleAssignmentRepository {
type Entity = PermissionSetRoleAssignment;
fn table_name() -> &'static str {
"permission_set_role_assignment"
}
}
#[derive(Debug, Clone)]
pub struct CreatePermissionSetRoleAssignmentInput {
pub permset: Id,
pub role: String,
}
#[async_trait::async_trait]
impl FindById for PermissionSetRoleAssignmentRepository {
async fn find_by_id<'e, E>(executor: E, id: i64) -> Result<Option<Self::Entity>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, PermissionSetRoleAssignment>(
"SELECT id, permset, role, created FROM permission_set_role_assignment WHERE id = $1",
)
.bind(id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
}
#[async_trait::async_trait]
impl Create for PermissionSetRoleAssignmentRepository {
type CreateInput = CreatePermissionSetRoleAssignmentInput;
async fn create<'e, E>(executor: E, input: Self::CreateInput) -> Result<Self::Entity>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, PermissionSetRoleAssignment>(
"INSERT INTO permission_set_role_assignment (permset, role)
VALUES ($1, $2)
RETURNING id, permset, role, created",
)
.bind(input.permset)
.bind(&input.role)
.fetch_one(executor)
.await
.map_err(Into::into)
}
}
#[async_trait::async_trait]
impl Delete for PermissionSetRoleAssignmentRepository {
async fn delete<'e, E>(executor: E, id: i64) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query("DELETE FROM permission_set_role_assignment WHERE id = $1")
.bind(id)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
}
}
impl PermissionSetRoleAssignmentRepository {
pub async fn find_by_permission_set<'e, E>(
executor: E,
permset_id: Id,
) -> Result<Vec<PermissionSetRoleAssignment>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, PermissionSetRoleAssignment>(
"SELECT id, permset, role, created
FROM permission_set_role_assignment
WHERE permset = $1
ORDER BY role ASC",
)
.bind(permset_id)
.fetch_all(executor)
.await
.map_err(Into::into)
}
}

View File

@@ -33,6 +33,7 @@ pub mod artifact;
pub mod entity_history;
pub mod event;
pub mod execution;
pub mod execution_admission;
pub mod identity;
pub mod inquiry;
pub mod key;
@@ -53,6 +54,7 @@ pub use artifact::{ArtifactRepository, ArtifactVersionRepository};
pub use entity_history::EntityHistoryRepository;
pub use event::{EnforcementRepository, EventRepository};
pub use execution::ExecutionRepository;
pub use execution_admission::ExecutionAdmissionRepository;
pub use identity::{IdentityRepository, PermissionAssignmentRepository, PermissionSetRepository};
pub use inquiry::InquiryRepository;
pub use key::KeyRepository;

View File

@@ -3,7 +3,7 @@
//! Provides database operations for queue statistics persistence.
use chrono::{DateTime, Utc};
use sqlx::{PgPool, Postgres, QueryBuilder};
use sqlx::{Executor, PgPool, Postgres, QueryBuilder};
use crate::error::Result;
use crate::models::Id;
@@ -38,7 +38,10 @@ pub struct QueueStatsRepository;
impl QueueStatsRepository {
/// Upsert queue statistics (insert or update)
pub async fn upsert(pool: &PgPool, input: UpsertQueueStatsInput) -> Result<QueueStats> {
pub async fn upsert<'e, E>(executor: E, input: UpsertQueueStatsInput) -> Result<QueueStats>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
INSERT INTO queue_stats (
@@ -69,14 +72,17 @@ impl QueueStatsRepository {
.bind(input.oldest_enqueued_at)
.bind(input.total_enqueued)
.bind(input.total_completed)
.fetch_one(pool)
.fetch_one(executor)
.await?;
Ok(stats)
}
/// Get queue statistics for a specific action
pub async fn find_by_action(pool: &PgPool, action_id: Id) -> Result<Option<QueueStats>> {
pub async fn find_by_action<'e, E>(executor: E, action_id: Id) -> Result<Option<QueueStats>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
SELECT
@@ -93,14 +99,17 @@ impl QueueStatsRepository {
"#,
)
.bind(action_id)
.fetch_optional(pool)
.fetch_optional(executor)
.await?;
Ok(stats)
}
/// List all queue statistics with active queues (queue_length > 0 or active_count > 0)
pub async fn list_active(pool: &PgPool) -> Result<Vec<QueueStats>> {
pub async fn list_active<'e, E>(executor: E) -> Result<Vec<QueueStats>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
SELECT
@@ -117,14 +126,17 @@ impl QueueStatsRepository {
ORDER BY last_updated DESC
"#,
)
.fetch_all(pool)
.fetch_all(executor)
.await?;
Ok(stats)
}
/// List all queue statistics
pub async fn list_all(pool: &PgPool) -> Result<Vec<QueueStats>> {
pub async fn list_all<'e, E>(executor: E) -> Result<Vec<QueueStats>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let stats = sqlx::query_as::<Postgres, QueueStats>(
r#"
SELECT
@@ -140,14 +152,17 @@ impl QueueStatsRepository {
ORDER BY last_updated DESC
"#,
)
.fetch_all(pool)
.fetch_all(executor)
.await?;
Ok(stats)
}
/// Delete queue statistics for a specific action
pub async fn delete(pool: &PgPool, action_id: Id) -> Result<bool> {
pub async fn delete<'e, E>(executor: E, action_id: Id) -> Result<bool>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query(
r#"
DELETE FROM queue_stats
@@ -155,7 +170,7 @@ impl QueueStatsRepository {
"#,
)
.bind(action_id)
.execute(pool)
.execute(executor)
.await?;
Ok(result.rows_affected() > 0)
@@ -163,7 +178,7 @@ impl QueueStatsRepository {
/// Batch upsert multiple queue statistics
pub async fn batch_upsert(
pool: &PgPool,
executor: &PgPool,
inputs: Vec<UpsertQueueStatsInput>,
) -> Result<Vec<QueueStats>> {
if inputs.is_empty() {
@@ -213,14 +228,17 @@ impl QueueStatsRepository {
let stats = query_builder
.build_query_as::<QueueStats>()
.fetch_all(pool)
.fetch_all(executor)
.await?;
Ok(stats)
}
/// Clear stale statistics (older than specified duration)
pub async fn clear_stale(pool: &PgPool, older_than_seconds: i64) -> Result<u64> {
pub async fn clear_stale<'e, E>(executor: E, older_than_seconds: i64) -> Result<u64>
where
E: Executor<'e, Database = Postgres> + 'e,
{
let result = sqlx::query(
r#"
DELETE FROM queue_stats
@@ -230,7 +248,7 @@ impl QueueStatsRepository {
"#,
)
.bind(older_than_seconds)
.execute(pool)
.execute(executor)
.await?;
Ok(result.rows_affected())

View File

@@ -6,7 +6,7 @@ use crate::models::{rule::*, Id};
use crate::{Error, Result};
use sqlx::{Executor, Postgres, QueryBuilder};
use super::{Create, Delete, FindById, FindByRef, List, Repository, Update};
use super::{Create, Delete, FindById, FindByRef, List, Patch, Repository, Update};
/// Filters for [`RuleRepository::list_search`].
///
@@ -41,7 +41,7 @@ pub struct RestoreRuleInput {
pub pack: Id,
pub pack_ref: String,
pub label: String,
pub description: String,
pub description: Option<String>,
pub action: Option<Id>,
pub action_ref: String,
pub trigger: Option<Id>,
@@ -70,7 +70,7 @@ pub struct CreateRuleInput {
pub pack: Id,
pub pack_ref: String,
pub label: String,
pub description: String,
pub description: Option<String>,
pub action: Id,
pub action_ref: String,
pub trigger: Id,
@@ -86,7 +86,7 @@ pub struct CreateRuleInput {
#[derive(Debug, Clone, Default)]
pub struct UpdateRuleInput {
pub label: Option<String>,
pub description: Option<String>,
pub description: Option<Patch<String>>,
pub conditions: Option<serde_json::Value>,
pub action_params: Option<serde_json::Value>,
pub trigger_params: Option<serde_json::Value>,
@@ -228,7 +228,10 @@ impl Update for RuleRepository {
query.push(", ");
}
query.push("description = ");
query.push_bind(description);
match description {
Patch::Set(value) => query.push_bind(value),
Patch::Clear => query.push_bind(Option::<String>::None),
};
has_updates = true;
}
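// [Editor's note] `Patch` gives the update three states: leaving `description`
// as `None` on the input keeps the column untouched, `Patch::Set(v)` writes `v`,
// and `Patch::Clear` binds SQL NULL. E.g. (illustrative):
// UpdateRuleInput { description: Some(Patch::Clear), ..Default::default() }
// clears the description while leaving every other column alone.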

View File

@@ -237,7 +237,7 @@ impl Update for RuntimeRepository {
query.push(", updated = NOW() WHERE id = ");
query.push_bind(id);
query.push(&format!(" RETURNING {}", SELECT_COLUMNS));
query.push(format!(" RETURNING {}", SELECT_COLUMNS));
let runtime = query
.build_query_as::<Runtime>()

View File

@@ -665,7 +665,7 @@ pub struct CreateSensorInput {
pub pack: Option<Id>,
pub pack_ref: Option<String>,
pub label: String,
pub description: String,
pub description: Option<String>,
pub entrypoint: String,
pub runtime: Id,
pub runtime_ref: String,
@@ -681,7 +681,7 @@ pub struct CreateSensorInput {
#[derive(Debug, Clone, Default)]
pub struct UpdateSensorInput {
pub label: Option<String>,
pub description: Option<String>,
pub description: Option<Patch<String>>,
pub entrypoint: Option<String>,
pub runtime: Option<Id>,
pub runtime_ref: Option<String>,
@@ -830,7 +830,10 @@ impl Update for SensorRepository {
query.push(", ");
}
query.push("description = ");
query.push_bind(description);
match description {
Patch::Set(value) => query.push_bind(value),
Patch::Clear => query.push_bind(Option::<String>::None),
};
has_updates = true;
}

View File

@@ -411,6 +411,12 @@ impl WorkflowDefinitionRepository {
pub struct WorkflowExecutionRepository;
#[derive(Debug, Clone)]
pub struct WorkflowExecutionCreateOrGetResult {
pub workflow_execution: WorkflowExecution,
pub created: bool,
}
impl Repository for WorkflowExecutionRepository {
type Entity = WorkflowExecution;
fn table_name() -> &'static str {
@@ -606,6 +612,71 @@ impl Delete for WorkflowExecutionRepository {
}
impl WorkflowExecutionRepository {
pub async fn find_by_id_for_update<'e, E>(
executor: E,
id: Id,
) -> Result<Option<WorkflowExecution>>
where
E: Executor<'e, Database = Postgres> + 'e,
{
sqlx::query_as::<_, WorkflowExecution>(
"SELECT id, execution, workflow_def, current_tasks, completed_tasks, failed_tasks, skipped_tasks,
variables, task_graph, status, error_message, paused, pause_reason, created, updated
FROM workflow_execution
WHERE id = $1
FOR UPDATE"
)
.bind(id)
.fetch_optional(executor)
.await
.map_err(Into::into)
}
pub async fn create_or_get_by_execution<'e, E>(
executor: E,
input: CreateWorkflowExecutionInput,
) -> Result<WorkflowExecutionCreateOrGetResult>
where
E: Executor<'e, Database = Postgres> + Copy + 'e,
{
let inserted = sqlx::query_as::<_, WorkflowExecution>(
"INSERT INTO workflow_execution
(execution, workflow_def, task_graph, variables, status)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (execution) DO NOTHING
RETURNING id, execution, workflow_def, current_tasks, completed_tasks, failed_tasks, skipped_tasks,
variables, task_graph, status, error_message, paused, pause_reason, created, updated"
)
.bind(input.execution)
.bind(input.workflow_def)
.bind(&input.task_graph)
.bind(&input.variables)
.bind(input.status)
.fetch_optional(executor)
.await?;
if let Some(workflow_execution) = inserted {
return Ok(WorkflowExecutionCreateOrGetResult {
workflow_execution,
created: true,
});
}
let workflow_execution = Self::find_by_execution(executor, input.execution)
.await?
.ok_or_else(|| {
anyhow::anyhow!(
"workflow_execution for parent execution {} disappeared after conflict",
input.execution
)
})?;
Ok(WorkflowExecutionCreateOrGetResult {
workflow_execution,
created: false,
})
}
/// Find workflow execution by the parent execution ID
pub async fn find_by_execution<'e, E>(
executor: E,

View File

@@ -172,6 +172,7 @@ impl WorkflowLoader {
}
// Read and parse YAML
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Workflow files come from previously discovered pack directories under packs_base_dir.
let content = fs::read_to_string(&file.path)
.await
.map_err(|e| Error::validation(format!("Failed to read workflow file: {}", e)))?;
@@ -292,6 +293,7 @@ impl WorkflowLoader {
pack_name: &str,
) -> Result<Vec<WorkflowFile>> {
let mut workflow_files = Vec::new();
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Workflow scanning only traverses pack workflow directories derived from packs_base_dir.
let mut entries = fs::read_dir(workflows_dir)
.await
.map_err(|e| Error::validation(format!("Failed to read workflows directory: {}", e)))?;

View File

@@ -13,6 +13,7 @@
use crate::error::{Error, Result};
use crate::repositories::action::{ActionRepository, CreateActionInput, UpdateActionInput};
use crate::repositories::workflow::{CreateWorkflowDefinitionInput, UpdateWorkflowDefinitionInput};
use crate::repositories::Patch;
use crate::repositories::{
Create, Delete, FindByRef, PackRepository, Update, WorkflowDefinitionRepository,
};
@@ -270,7 +271,7 @@ impl WorkflowRegistrar {
pack: pack_id,
pack_ref: pack_ref.to_string(),
label: effective_label.to_string(),
description: workflow.description.clone().unwrap_or_default(),
description: workflow.description.clone(),
entrypoint,
runtime: None,
runtime_version_constraint: None,
@@ -317,7 +318,10 @@ impl WorkflowRegistrar {
// Update the existing companion action to stay in sync
let update_input = UpdateActionInput {
label: Some(effective_label.to_string()),
description: workflow.description.clone(),
description: Some(match workflow.description.clone() {
Some(description) => Patch::Set(description),
None => Patch::Clear,
}),
entrypoint: Some(format!("workflows/{}.workflow.yaml", workflow_name)),
runtime: None,
runtime_version_constraint: None,

View File

@@ -66,7 +66,10 @@ async fn test_create_action_with_optional_fields() {
.unwrap();
assert_eq!(action.label, "Full Test Action");
assert_eq!(action.description, "Action with all optional fields");
assert_eq!(
action.description,
Some("Action with all optional fields".to_string())
);
assert_eq!(action.entrypoint, "custom.py");
assert!(action.param_schema.is_some());
assert!(action.out_schema.is_some());
@@ -204,7 +207,9 @@ async fn test_update_action() {
let update = UpdateActionInput {
label: Some("Updated Label".to_string()),
description: Some("Updated description".to_string()),
description: Some(attune_common::repositories::Patch::Set(
"Updated description".to_string(),
)),
..Default::default()
};
@@ -214,7 +219,7 @@ async fn test_update_action() {
assert_eq!(updated.id, action.id);
assert_eq!(updated.label, "Updated Label");
assert_eq!(updated.description, "Updated description");
assert_eq!(updated.description, Some("Updated description".to_string()));
assert_eq!(updated.entrypoint, action.entrypoint); // Unchanged
assert!(updated.updated > original_updated);
}
@@ -338,7 +343,7 @@ async fn test_action_foreign_key_constraint() {
pack: 99999,
pack_ref: "nonexistent.pack".to_string(),
label: "Test Action".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
entrypoint: "main.py".to_string(),
runtime: None,
runtime_version_constraint: None,

View File

@@ -49,7 +49,7 @@ async fn test_create_enforcement_minimal() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -121,7 +121,7 @@ async fn test_create_enforcement_with_event() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -189,7 +189,7 @@ async fn test_create_enforcement_with_conditions() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -255,7 +255,7 @@ async fn test_create_enforcement_with_any_condition() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -397,7 +397,7 @@ async fn test_find_enforcement_by_id() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -471,7 +471,7 @@ async fn test_get_enforcement_by_id() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -552,7 +552,7 @@ async fn test_list_enforcements() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -624,7 +624,7 @@ async fn test_update_enforcement_status() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -690,7 +690,7 @@ async fn test_update_enforcement_status_transitions() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -769,7 +769,7 @@ async fn test_update_enforcement_payload() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -832,7 +832,7 @@ async fn test_update_enforcement_both_fields() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -896,7 +896,7 @@ async fn test_update_enforcement_no_changes() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -981,7 +981,7 @@ async fn test_delete_enforcement() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1056,7 +1056,7 @@ async fn test_find_enforcements_by_rule() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Rule 1".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1078,7 +1078,7 @@ async fn test_find_enforcements_by_rule() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Rule 2".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1149,7 +1149,7 @@ async fn test_find_enforcements_by_status() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1239,7 +1239,7 @@ async fn test_find_enforcements_by_event() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1324,7 +1324,7 @@ async fn test_delete_rule_sets_enforcement_rule_to_null() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1390,7 +1390,7 @@ async fn test_enforcement_resolved_at_lifecycle() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1430,3 +1430,70 @@ async fn test_enforcement_resolved_at_lifecycle() {
assert!(updated.resolved_at.is_some());
assert!(updated.resolved_at.unwrap() >= enforcement.created);
}
#[tokio::test]
#[ignore = "integration test — requires database"]
async fn test_update_loaded_enforcement_uses_loaded_locator() {
let pool = create_test_pool().await.unwrap();
let pack = PackFixture::new_unique("targeted_update_pack")
.create(&pool)
.await
.unwrap();
let trigger = TriggerFixture::new_unique(Some(pack.id), Some(pack.r#ref.clone()), "webhook")
.create(&pool)
.await
.unwrap();
let action = ActionFixture::new_unique(pack.id, &pack.r#ref, "action")
.create(&pool)
.await
.unwrap();
use attune_common::repositories::rule::{CreateRuleInput, RuleRepository};
let rule = RuleRepository::create(
&pool,
CreateRuleInput {
r#ref: format!("{}.test_rule", pack.r#ref),
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
trigger_ref: trigger.r#ref.clone(),
conditions: json!({}),
action_params: json!({}),
trigger_params: json!({}),
enabled: true,
is_adhoc: false,
},
)
.await
.unwrap();
let enforcement = EnforcementFixture::new_unique(Some(rule.id), &rule.r#ref, &trigger.r#ref)
.create(&pool)
.await
.unwrap();
let updated = EnforcementRepository::update_loaded(
&pool,
&enforcement,
UpdateEnforcementInput {
status: Some(EnforcementStatus::Processed),
payload: None,
resolved_at: Some(chrono::Utc::now()),
},
)
.await
.unwrap();
assert_eq!(updated.id, enforcement.id);
assert_eq!(updated.created, enforcement.created);
assert_eq!(updated.rule_ref, enforcement.rule_ref);
assert_eq!(updated.status, EnforcementStatus::Processed);
assert!(updated.resolved_at.is_some());
}

View File

@@ -449,7 +449,7 @@ async fn test_delete_event_enforcement_retains_event_id() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,

View File

@@ -1153,3 +1153,108 @@ async fn test_execution_result_json() {
assert_eq!(updated.result, Some(complex_result));
}
#[tokio::test]
#[ignore = "integration test — requires database"]
async fn test_claim_for_scheduling_succeeds_once() {
let pool = create_test_pool().await.unwrap();
let pack = PackFixture::new_unique("claim_pack")
.create(&pool)
.await
.unwrap();
let action = ActionFixture::new_unique(pack.id, &pack.r#ref, "claim_action")
.create(&pool)
.await
.unwrap();
let created = ExecutionRepository::create(
&pool,
CreateExecutionInput {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
worker: None,
status: ExecutionStatus::Requested,
result: None,
workflow_task: None,
},
)
.await
.unwrap();
let first = ExecutionRepository::claim_for_scheduling(&pool, created.id, None)
.await
.unwrap();
let second = ExecutionRepository::claim_for_scheduling(&pool, created.id, None)
.await
.unwrap();
assert_eq!(first.unwrap().status, ExecutionStatus::Scheduling);
assert!(second.is_none());
}
#[tokio::test]
#[ignore = "integration test — requires database"]
async fn test_update_if_status_only_updates_matching_row() {
let pool = create_test_pool().await.unwrap();
let pack = PackFixture::new_unique("conditional_pack")
.create(&pool)
.await
.unwrap();
let action = ActionFixture::new_unique(pack.id, &pack.r#ref, "conditional_action")
.create(&pool)
.await
.unwrap();
let created = ExecutionRepository::create(
&pool,
CreateExecutionInput {
action: Some(action.id),
action_ref: action.r#ref.clone(),
config: None,
env_vars: None,
parent: None,
enforcement: None,
executor: None,
worker: None,
status: ExecutionStatus::Scheduling,
result: None,
workflow_task: None,
},
)
.await
.unwrap();
let updated = ExecutionRepository::update_if_status(
&pool,
created.id,
ExecutionStatus::Scheduling,
UpdateExecutionInput {
status: Some(ExecutionStatus::Scheduled),
worker: Some(77),
..Default::default()
},
)
.await
.unwrap();
let skipped = ExecutionRepository::update_if_status(
&pool,
created.id,
ExecutionStatus::Scheduling,
UpdateExecutionInput {
status: Some(ExecutionStatus::Failed),
..Default::default()
},
)
.await
.unwrap();
assert_eq!(updated.unwrap().status, ExecutionStatus::Scheduled);
assert!(skipped.is_none());
}
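Both tests above exercise the same compare-and-set idiom: a guarded `UPDATE ... WHERE status = $expected` that at most one concurrent caller can win. The repository internals are not part of this diff; a minimal sketch of the assumed shape (the status string encoding is illustrative only):

// Assumed shape of claim_for_scheduling: a conditional UPDATE where the
// WHERE clause doubles as the mutual-exclusion check.
async fn claim_for_scheduling_sketch(pool: &sqlx::PgPool, id: i64) -> anyhow::Result<bool> {
    let rows = sqlx::query(
        "UPDATE execution
         SET status = 'scheduling', updated = now()
         WHERE id = $1 AND status = 'requested'",
    )
    .bind(id)
    .execute(pool)
    .await?
    .rows_affected();
    Ok(rows == 1) // exactly one concurrent caller wins the transition
}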

View File

@@ -454,7 +454,7 @@ impl ActionFixture {
pack_ref: self.pack_ref,
r#ref: self.r#ref,
label: self.label,
description: self.description,
description: Some(self.description),
entrypoint: self.entrypoint,
runtime: self.runtime,
runtime_version_constraint: None,
@@ -1088,7 +1088,7 @@ impl SensorFixture {
pack: self.pack_id,
pack_ref: self.pack_ref,
label: self.label,
description: self.description,
description: Some(self.description),
entrypoint: self.entrypoint,
runtime: self.runtime_id,
runtime_ref: self.runtime_ref,

View File

@@ -219,6 +219,7 @@ async fn test_update_identity() {
display_name: Some("Updated Name".to_string()),
password_hash: None,
attributes: Some(json!({"key": "updated", "new_key": "new_value"})),
frozen: None,
};
let updated = IdentityRepository::update(&pool, identity.id, update_input)
@@ -252,6 +253,7 @@ async fn test_update_identity_partial() {
display_name: Some("Only Display Name Changed".to_string()),
password_hash: None,
attributes: None,
frozen: None,
};
let updated = IdentityRepository::update(&pool, identity.id, update_input)
@@ -274,6 +276,7 @@ async fn test_update_identity_not_found() {
display_name: Some("Updated Name".to_string()),
password_hash: None,
attributes: None,
frozen: None,
};
let result = IdentityRepository::update(&pool, 999999, update_input).await;
@@ -380,6 +383,7 @@ async fn test_identity_updated_changes_on_update() {
display_name: Some("Updated".to_string()),
password_hash: None,
attributes: None,
frozen: None,
};
let updated = IdentityRepository::update(&pool, identity.id, update_input)

View File

@@ -8,7 +8,7 @@ mod helpers;
use attune_common::{
repositories::{
rule::{CreateRuleInput, RuleRepository, UpdateRuleInput},
Create, Delete, FindById, FindByRef, List, Update,
Create, Delete, FindById, FindByRef, List, Patch, Update,
},
Error,
};
@@ -48,7 +48,7 @@ async fn test_create_rule() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test Rule".to_string(),
description: "A test rule".to_string(),
description: Some("A test rule".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -66,7 +66,7 @@ async fn test_create_rule() {
assert_eq!(rule.pack, pack.id);
assert_eq!(rule.pack_ref, pack.r#ref);
assert_eq!(rule.label, "Test Rule");
assert_eq!(rule.description, "A test rule");
assert_eq!(rule.description, Some("A test rule".to_string()));
assert_eq!(rule.action, Some(action.id));
assert_eq!(rule.action_ref, action.r#ref);
assert_eq!(rule.trigger, Some(trigger.id));
@@ -105,7 +105,7 @@ async fn test_create_rule_disabled() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Disabled Rule".to_string(),
description: "A disabled rule".to_string(),
description: Some("A disabled rule".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -155,7 +155,7 @@ async fn test_create_rule_with_complex_conditions() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Complex Rule".to_string(),
description: "Rule with complex conditions".to_string(),
description: Some("Rule with complex conditions".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -200,7 +200,7 @@ async fn test_create_rule_duplicate_ref() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "First Rule".to_string(),
description: "First".to_string(),
description: Some("First".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -220,7 +220,7 @@ async fn test_create_rule_duplicate_ref() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Second Rule".to_string(),
description: "Second".to_string(),
description: Some("Second".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -274,7 +274,7 @@ async fn test_create_rule_invalid_ref_format_uppercase() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Upper Rule".to_string(),
description: "Invalid uppercase ref".to_string(),
description: Some("Invalid uppercase ref".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -316,7 +316,7 @@ async fn test_create_rule_invalid_ref_format_no_dot() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "No Dot Rule".to_string(),
description: "Invalid ref without dot".to_string(),
description: Some("Invalid ref without dot".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -362,7 +362,7 @@ async fn test_find_rule_by_id() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Find Rule".to_string(),
description: "Rule to find".to_string(),
description: Some("Rule to find".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -422,7 +422,7 @@ async fn test_find_rule_by_ref() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Find By Ref Rule".to_string(),
description: "Find by ref".to_string(),
description: Some("Find by ref".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -484,7 +484,7 @@ async fn test_list_rules() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: format!("List Rule {}", i),
description: format!("Rule {}", i),
description: Some(format!("Rule {}", i)),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -538,7 +538,7 @@ async fn test_list_rules_ordered_by_ref() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: name.to_string(),
description: name.to_string(),
description: Some(name.to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -594,7 +594,7 @@ async fn test_update_rule_label() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Original Label".to_string(),
description: "Original".to_string(),
description: Some("Original".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -618,7 +618,7 @@ async fn test_update_rule_label() {
.unwrap();
assert_eq!(updated.label, "Updated Label");
assert_eq!(updated.description, "Original"); // unchanged
assert_eq!(updated.description, Some("Original".to_string())); // unchanged
assert!(updated.updated > created.updated);
}
@@ -647,7 +647,7 @@ async fn test_update_rule_description() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test".to_string(),
description: "Old description".to_string(),
description: Some("Old description".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -662,7 +662,7 @@ async fn test_update_rule_description() {
let created = RuleRepository::create(&pool, input).await.unwrap();
let update = UpdateRuleInput {
description: Some("New description".to_string()),
description: Some(Patch::Set("New description".to_string())),
..Default::default()
};
@@ -670,7 +670,7 @@ async fn test_update_rule_description() {
.await
.unwrap();
assert_eq!(updated.description, "New description");
assert_eq!(updated.description, Some("New description".to_string()));
}
#[tokio::test]
@@ -698,7 +698,7 @@ async fn test_update_rule_conditions() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -750,7 +750,7 @@ async fn test_update_rule_enabled() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -803,7 +803,7 @@ async fn test_update_rule_multiple_fields() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Old".to_string(),
description: "Old".to_string(),
description: Some("Old".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -819,7 +819,7 @@ async fn test_update_rule_multiple_fields() {
let update = UpdateRuleInput {
label: Some("New Label".to_string()),
description: Some("New Description".to_string()),
description: Some(Patch::Set("New Description".to_string())),
conditions: Some(json!({"updated": true})),
action_params: None,
trigger_params: None,
@@ -831,7 +831,7 @@ async fn test_update_rule_multiple_fields() {
.unwrap();
assert_eq!(updated.label, "New Label");
assert_eq!(updated.description, "New Description");
assert_eq!(updated.description, Some("New Description".to_string()));
assert_eq!(updated.conditions, json!({"updated": true}));
assert!(!updated.enabled);
}
@@ -861,7 +861,7 @@ async fn test_update_rule_no_changes() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Test".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -914,7 +914,7 @@ async fn test_delete_rule() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "To Delete".to_string(),
description: "Will be deleted".to_string(),
description: Some("Will be deleted".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -995,7 +995,7 @@ async fn test_find_rules_by_pack() {
pack: pack1.id,
pack_ref: pack1.r#ref.clone(),
label: format!("Rule {}", i),
description: format!("Rule {}", i),
description: Some(format!("Rule {}", i)),
action: action1.id,
action_ref: action1.r#ref.clone(),
trigger: trigger1.id,
@@ -1016,7 +1016,7 @@ async fn test_find_rules_by_pack() {
pack: pack2.id,
pack_ref: pack2.r#ref.clone(),
label: "Pack2 Rule".to_string(),
description: "Pack2".to_string(),
description: Some("Pack2".to_string()),
action: action2.id,
action_ref: action2.r#ref.clone(),
trigger: trigger2.id,
@@ -1073,7 +1073,7 @@ async fn test_find_rules_by_action() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: format!("Action1 Rule {}", i),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action1.id,
action_ref: action1.r#ref.clone(),
trigger: trigger.id,
@@ -1094,7 +1094,7 @@ async fn test_find_rules_by_action() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Action2 Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action2.id,
action_ref: action2.r#ref.clone(),
trigger: trigger.id,
@@ -1155,7 +1155,7 @@ async fn test_find_rules_by_trigger() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: format!("Trigger1 Rule {}", i),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger1.id,
@@ -1176,7 +1176,7 @@ async fn test_find_rules_by_trigger() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Trigger2 Rule".to_string(),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger2.id,
@@ -1234,7 +1234,7 @@ async fn test_find_enabled_rules() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: format!("Enabled {}", i),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1256,7 +1256,7 @@ async fn test_find_enabled_rules() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: format!("Disabled {}", i),
description: "Test".to_string(),
description: Some("Test".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1312,7 +1312,7 @@ async fn test_cascade_delete_pack_deletes_rules() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Cascade Rule".to_string(),
description: "Will be cascade deleted".to_string(),
description: Some("Will be cascade deleted".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,
@@ -1368,7 +1368,7 @@ async fn test_rule_timestamps() {
pack: pack.id,
pack_ref: pack.r#ref.clone(),
label: "Timestamp Rule".to_string(),
description: "Test timestamps".to_string(),
description: Some("Test timestamps".to_string()),
action: action.id,
action_ref: action.r#ref.clone(),
trigger: trigger.id,

View File

@@ -179,7 +179,7 @@ async fn test_create_sensor_duplicate_ref_fails() {
pack: Some(pack.id),
pack_ref: Some(pack.r#ref.clone()),
label: "Duplicate Sensor".to_string(),
description: "Test sensor".to_string(),
description: Some("Test sensor".to_string()),
entrypoint: "sensors/dup.py".to_string(),
runtime: runtime.id,
runtime_ref: runtime.r#ref.clone(),
@@ -235,7 +235,7 @@ async fn test_create_sensor_invalid_ref_format_fails() {
pack: Some(pack.id),
pack_ref: Some(pack.r#ref.clone()),
label: "Invalid Sensor".to_string(),
description: "Test sensor".to_string(),
description: Some("Test sensor".to_string()),
entrypoint: "sensors/invalid.py".to_string(),
runtime: runtime.id,
runtime_ref: runtime.r#ref.clone(),
@@ -276,7 +276,7 @@ async fn test_create_sensor_invalid_pack_fails() {
pack: Some(99999), // Non-existent pack
pack_ref: Some("invalid".to_string()),
label: "Invalid Pack Sensor".to_string(),
description: "Test sensor".to_string(),
description: Some("Test sensor".to_string()),
entrypoint: "sensors/invalid.py".to_string(),
runtime: runtime.id,
runtime_ref: runtime.r#ref.clone(),
@@ -308,7 +308,7 @@ async fn test_create_sensor_invalid_trigger_fails() {
pack: None,
pack_ref: None,
label: "Invalid Trigger Sensor".to_string(),
description: "Test sensor".to_string(),
description: Some("Test sensor".to_string()),
entrypoint: "sensors/invalid.py".to_string(),
runtime: runtime.id,
runtime_ref: runtime.r#ref.clone(),
@@ -340,7 +340,7 @@ async fn test_create_sensor_invalid_runtime_fails() {
pack: None,
pack_ref: None,
label: "Invalid Runtime Sensor".to_string(),
description: "Test sensor".to_string(),
description: Some("Test sensor".to_string()),
entrypoint: "sensors/invalid.py".to_string(),
runtime: 99999, // Non-existent runtime
runtime_ref: "invalid.runtime".to_string(),
@@ -728,7 +728,7 @@ async fn test_update_description() {
.unwrap();
let input = UpdateSensorInput {
description: Some("New description for the sensor".to_string()),
description: Some(Patch::Set("New description for the sensor".to_string())),
..Default::default()
};
@@ -736,7 +736,10 @@ async fn test_update_description() {
.await
.unwrap();
assert_eq!(updated.description, "New description for the sensor");
assert_eq!(
updated.description,
Some("New description for the sensor".to_string())
);
}
#[tokio::test]
@@ -934,7 +937,7 @@ async fn test_update_multiple_fields() {
let input = UpdateSensorInput {
label: Some("Multi Update".to_string()),
description: Some("Updated multiple fields".to_string()),
description: Some(Patch::Set("Updated multiple fields".to_string())),
entrypoint: Some("sensors/multi.py".to_string()),
enabled: Some(false),
param_schema: Some(Patch::Set(json!({"type": "object"}))),
@@ -946,7 +949,10 @@ async fn test_update_multiple_fields() {
.unwrap();
assert_eq!(updated.label, "Multi Update");
assert_eq!(updated.description, "Updated multiple fields");
assert_eq!(
updated.description,
Some("Updated multiple fields".to_string())
);
assert_eq!(updated.entrypoint, "sensors/multi.py");
assert!(!updated.enabled);
assert_eq!(updated.param_schema, Some(json!({"type": "object"})));

View File

@@ -182,6 +182,7 @@ mod tests {
#[test]
fn test_decode_valid_token() {
// Valid JWT with exp and iat claims
// nosemgrep: generic.secrets.security.detected-jwt-token.detected-jwt-token -- This is a non-secret test fixture with a dummy signature used only for JWT parsing tests.
let token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJzZW5zb3I6Y29yZS50aW1lciIsImlhdCI6MTcwNjM1NjQ5NiwiZXhwIjoxNzE0MTMyNDk2fQ.signature";
let manager = TokenRefreshManager::new(

View File

@@ -11,7 +11,10 @@
use anyhow::Result;
use attune_common::{
mq::{Consumer, ExecutionCompletedPayload, MessageEnvelope, Publisher},
mq::{
Consumer, ExecutionCompletedPayload, ExecutionRequestedPayload, MessageEnvelope,
MessageType, MqError, Publisher,
},
repositories::{execution::ExecutionRepository, FindById},
};
use sqlx::PgPool;
@@ -36,6 +39,19 @@ pub struct CompletionListener {
}
impl CompletionListener {
fn retryable_mq_error(error: &anyhow::Error) -> Option<MqError> {
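        // `downcast_ref` only yields a borrowed `&MqError`, so the retryable
        // variants are rebuilt by cloning their message payloads; anything
        // that falls through to `_` is treated as permanent by the caller.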
let mq_error = error.downcast_ref::<MqError>()?;
Some(match mq_error {
MqError::Connection(msg) => MqError::Connection(msg.clone()),
MqError::Channel(msg) => MqError::Channel(msg.clone()),
MqError::Publish(msg) => MqError::Publish(msg.clone()),
MqError::Timeout(msg) => MqError::Timeout(msg.clone()),
MqError::Pool(msg) => MqError::Pool(msg.clone()),
MqError::Lapin(err) => MqError::Connection(err.to_string()),
_ => return None,
})
}
/// Create a new completion listener
pub fn new(
pool: PgPool,
@@ -82,6 +98,9 @@ impl CompletionListener {
{
error!("Error processing execution completion: {}", e);
// Return error to trigger nack with requeue
if let Some(mq_err) = Self::retryable_mq_error(&e) {
return Err(mq_err);
}
return Err(
format!("Failed to process execution completion: {}", e).into()
);
@@ -138,7 +157,11 @@ impl CompletionListener {
"Failed to advance workflow for execution {}: {}",
execution_id, e
);
// Continue processing — don't fail the entire completion
if let Some(mq_err) = Self::retryable_mq_error(&e) {
return Err(mq_err.into());
}
// Non-retryable workflow advancement errors are logged but
// do not fail the entire completion processing path.
}
}
@@ -187,19 +210,39 @@ impl CompletionListener {
action_id, execution_id
);
match queue_manager.notify_completion(action_id).await {
Ok(notified) => {
if notified {
match queue_manager.release_active_slot(execution_id).await {
Ok(release) => {
if let Some(release) = release {
if let Some(next_execution_id) = release.next_execution_id {
info!(
"Queue slot released for action {}, next execution notified",
action_id
"Queue slot released for action {}, next execution {} can proceed",
action_id, next_execution_id
);
if let Err(republish_err) = Self::publish_execution_requested(
pool,
publisher,
action_id,
next_execution_id,
)
.await
{
queue_manager
.restore_active_slot(execution_id, &release)
.await?;
return Err(republish_err);
}
} else {
debug!(
"Queue slot released for action {}, no executions waiting",
action_id
);
}
} else {
debug!(
"Execution {} had no active queue slot to release",
execution_id
);
}
}
Err(e) => {
error!(
@@ -225,6 +268,38 @@ impl CompletionListener {
Ok(())
}
async fn publish_execution_requested(
pool: &PgPool,
publisher: &Publisher,
action_id: i64,
execution_id: i64,
) -> Result<()> {
let execution = ExecutionRepository::find_by_id(pool, execution_id)
.await?
.ok_or_else(|| anyhow::anyhow!("Execution {} not found", execution_id))?;
let payload = ExecutionRequestedPayload {
execution_id,
action_id: Some(action_id),
action_ref: execution.action_ref.clone(),
parent_id: execution.parent,
enforcement_id: execution.enforcement,
config: execution.config.clone(),
};
let envelope = MessageEnvelope::new(MessageType::ExecutionRequested, payload)
.with_source("executor-completion-listener");
publisher.publish_envelope(&envelope).await?;
debug!(
"Republished deferred ExecutionRequested for execution {}",
execution_id
);
Ok(())
}
}
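The release/republish sequence above is a small compensating transaction: releasing the active slot is only allowed to stick if the follow-up `ExecutionRequested` publish succeeds; otherwise the slot is restored so the deferred execution is not lost. The same shape in isolation (`queue` and `publish_next` are hypothetical stand-ins for the types used above):

// Compensating-action sketch: undo the slot release if the publish fails.
let release = queue.release_active_slot(done_id).await?;
if let Some(release) = &release {
    if let Some(next_id) = release.next_execution_id {
        if let Err(e) = publish_next(next_id).await {
            queue.restore_active_slot(done_id, release).await?; // undo
            return Err(e);
        }
    }
}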
#[cfg(test)]
@@ -233,13 +308,13 @@ mod tests {
use crate::queue_manager::ExecutionQueueManager;
#[tokio::test]
async fn test_notify_completion_releases_slot() {
async fn test_release_active_slot_releases_slot() {
let queue_manager = Arc::new(ExecutionQueueManager::with_defaults());
let action_id = 1;
// Simulate acquiring a slot
queue_manager
.enqueue_and_wait(action_id, 100, 1)
.enqueue_and_wait(action_id, 100, 1, None)
.await
.unwrap();
@@ -249,8 +324,9 @@ mod tests {
assert_eq!(stats.queue_length, 0);
// Simulate completion by releasing the active slot

let notified = queue_manager.notify_completion(action_id).await.unwrap();
assert!(!notified); // No one waiting
let release = queue_manager.release_active_slot(100).await.unwrap();
assert!(release.is_some());
assert_eq!(release.unwrap().next_execution_id, None);
// Verify slot is released
let stats = queue_manager.get_queue_stats(action_id).await.unwrap();
@@ -258,13 +334,13 @@ mod tests {
}
#[tokio::test]
async fn test_notify_completion_wakes_waiting() {
async fn test_release_active_slot_wakes_waiting() {
let queue_manager = Arc::new(ExecutionQueueManager::with_defaults());
let action_id = 1;
// Fill capacity
queue_manager
.enqueue_and_wait(action_id, 100, 1)
.enqueue_and_wait(action_id, 100, 1, None)
.await
.unwrap();
@@ -272,7 +348,7 @@ mod tests {
let queue_manager_clone = queue_manager.clone();
let handle = tokio::spawn(async move {
queue_manager_clone
.enqueue_and_wait(action_id, 101, 1)
.enqueue_and_wait(action_id, 101, 1, None)
.await
.unwrap();
});
@@ -286,8 +362,8 @@ mod tests {
assert_eq!(stats.queue_length, 1);
// Release the active slot
let notified = queue_manager.notify_completion(action_id).await.unwrap();
assert!(notified); // Should wake the waiting execution
let release = queue_manager.release_active_slot(100).await.unwrap();
assert_eq!(release.unwrap().next_execution_id, Some(101));
// Wait for queued execution to proceed
handle.await.unwrap();
@@ -306,7 +382,7 @@ mod tests {
// Fill capacity
queue_manager
.enqueue_and_wait(action_id, 100, 1)
.enqueue_and_wait(action_id, 100, 1, None)
.await
.unwrap();
@@ -320,7 +396,7 @@ mod tests {
let handle = tokio::spawn(async move {
queue_manager
.enqueue_and_wait(action_id, exec_id, 1)
.enqueue_and_wait(action_id, exec_id, 1, None)
.await
.unwrap();
order.lock().await.push(exec_id);
@@ -333,9 +409,13 @@ mod tests {
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
// Release them one by one
for _ in 0..3 {
for execution_id in 100..103 {
tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
queue_manager.notify_completion(action_id).await.unwrap();
let release = queue_manager
.release_active_slot(execution_id)
.await
.unwrap();
assert!(release.is_some());
}
// Wait for all to complete
@@ -351,11 +431,11 @@ mod tests {
#[tokio::test]
async fn test_completion_with_no_queue() {
let queue_manager = Arc::new(ExecutionQueueManager::with_defaults());
let action_id = 999; // Non-existent action
let execution_id = 999; // Non-existent execution
// Should succeed but find no slot to release
let result = queue_manager.notify_completion(action_id).await;
let result = queue_manager.release_active_slot(execution_id).await;
assert!(result.is_ok());
assert!(!result.unwrap());
assert!(result.unwrap().is_none());
}
}

View File

@@ -14,7 +14,7 @@ use attune_common::{
error::Error,
models::ExecutionStatus,
mq::{Consumer, ConsumerConfig, MessageEnvelope, MessageType, MqResult},
repositories::{execution::UpdateExecutionInput, ExecutionRepository, FindById, Update},
repositories::{execution::UpdateExecutionInput, ExecutionRepository, FindById},
};
use chrono::Utc;
use serde_json::json;
@@ -179,13 +179,12 @@ async fn handle_execution_requested(
}
};
// Only fail if still in a non-terminal state
if !matches!(
execution.status,
ExecutionStatus::Scheduled | ExecutionStatus::Running
) {
// Only scheduled executions are still legitimately owned by the scheduler.
// If the execution already moved to running or a terminal state, this DLQ
// delivery is stale and must not overwrite newer state.
if execution.status != ExecutionStatus::Scheduled {
info!(
"Execution {} already in terminal state {:?}, skipping",
"Execution {} already left Scheduled state ({:?}), skipping stale DLQ handling",
execution_id, execution.status
);
return Ok(()); // Acknowledge to remove from queue
@@ -193,6 +192,12 @@ async fn handle_execution_requested(
// Get worker info from payload for better error message
let worker_id = envelope.payload.get("worker_id").and_then(|v| v.as_i64());
let scheduled_attempt_updated_at = envelope
.payload
.get("scheduled_attempt_updated_at")
.and_then(|v| v.as_str())
.and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok())
.map(|dt| dt.with_timezone(&Utc));
let error_message = if let Some(wid) = worker_id {
format!(
@@ -214,26 +219,87 @@ async fn handle_execution_requested(
..Default::default()
};
match ExecutionRepository::update(pool, execution_id, update_input).await {
Ok(_) => {
if let Some(timestamp) = scheduled_attempt_updated_at {
// Guard on both status and the exact updated_at from when the execution was
// scheduled — prevents overwriting state that changed after this DLQ message
// was enqueued.
match ExecutionRepository::update_if_status_and_updated_at(
pool,
execution_id,
ExecutionStatus::Scheduled,
timestamp,
update_input,
)
.await
{
Ok(Some(_)) => {
info!(
"Successfully failed execution {} due to worker queue expiration",
execution_id
);
Ok(())
}
Ok(None) => {
info!(
"Skipping DLQ failure for execution {} because it already left Scheduled state",
execution_id
);
Ok(())
}
Err(e) => {
error!(
"Failed to update execution {} to failed state: {}",
execution_id, e
);
// Return error to nack and potentially retry
Err(attune_common::mq::MqError::Consume(format!(
"Failed to update execution: {}",
e
)))
}
}
} else {
// Fallback for DLQ messages that predate the scheduled_attempt_updated_at
// field. Use a status-only guard — same safety guarantee as the original code
// (never overwrites terminal or running state).
warn!(
"DLQ message for execution {} lacks scheduled_attempt_updated_at; \
falling back to status-only guard",
execution_id
);
match ExecutionRepository::update_if_status(
pool,
execution_id,
ExecutionStatus::Scheduled,
update_input,
)
.await
{
Ok(Some(_)) => {
info!(
"Successfully failed execution {} due to worker queue expiration (status-only guard)",
execution_id
);
Ok(())
}
Ok(None) => {
info!(
"Skipping DLQ failure for execution {} because it already left Scheduled state",
execution_id
);
Ok(())
}
Err(e) => {
error!(
"Failed to update execution {} to failed state: {}",
execution_id, e
);
Err(attune_common::mq::MqError::Consume(format!(
"Failed to update execution: {}",
e
)))
}
}
}
}
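The `scheduled_attempt_updated_at` guard turns the row's `updated` timestamp into a fencing token: a DLQ message can only fail the exact scheduling attempt that produced it. `update_if_status_and_updated_at` itself is not shown in this diff; an assumed sketch of its shape (column names and status encoding are illustrative):

// Fencing-token sketch: a row rescheduled after the DLQ message was
// enqueued carries a newer `updated` timestamp, so this matches zero rows
// and the stale failure is silently skipped.
async fn fail_if_untouched(
    pool: &sqlx::PgPool,
    id: i64,
    expected_updated: chrono::DateTime<chrono::Utc>,
    error_message: &str,
) -> anyhow::Result<bool> {
    let rows = sqlx::query(
        "UPDATE execution
         SET status = 'failed', error_message = $3, updated = now()
         WHERE id = $1 AND status = 'scheduled' AND updated = $2",
    )
    .bind(id)
    .bind(expected_updated)
    .bind(error_message)
    .execute(pool)
    .await?
    .rows_affected();
    Ok(rows == 1)
}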
/// Create a dead letter consumer configuration

View File

@@ -19,7 +19,7 @@ use attune_common::{
event::{EnforcementRepository, EventRepository, UpdateEnforcementInput},
execution::{CreateExecutionInput, ExecutionRepository},
rule::RuleRepository,
Create, FindById, Update,
FindById,
},
};
@@ -116,6 +116,14 @@ impl EnforcementProcessor {
.await?
.ok_or_else(|| anyhow::anyhow!("Enforcement not found: {}", enforcement_id))?;
if enforcement.status != EnforcementStatus::Created {
debug!(
"Enforcement {} already left Created state ({:?}), skipping duplicate processing",
enforcement_id, enforcement.status
);
return Ok(());
}
// Fetch associated rule
let rule = RuleRepository::find_by_id(
pool,
@@ -135,7 +143,7 @@ impl EnforcementProcessor {
// Evaluate whether to create execution
if Self::should_create_execution(&enforcement, &rule, event.as_ref())? {
Self::create_execution(
let execution_created = Self::create_execution(
pool,
publisher,
policy_enforcer,
@@ -145,10 +153,10 @@ impl EnforcementProcessor {
)
.await?;
// Update enforcement status to Processed after successful execution creation
EnforcementRepository::update(
let updated = EnforcementRepository::update_loaded_if_status(
pool,
enforcement_id,
&enforcement,
EnforcementStatus::Created,
UpdateEnforcementInput {
status: Some(EnforcementStatus::Processed),
payload: None,
@@ -157,17 +165,27 @@ impl EnforcementProcessor {
)
.await?;
debug!("Updated enforcement {} status to Processed", enforcement_id);
if updated.is_some() {
debug!(
"Updated enforcement {} status to Processed after {} execution path",
enforcement_id,
if execution_created {
"new"
} else {
"idempotent"
}
);
}
} else {
info!(
"Skipping execution creation for enforcement: {}",
enforcement_id
);
// Update enforcement status to Disabled since it was not actionable
EnforcementRepository::update(
let updated = EnforcementRepository::update_loaded_if_status(
pool,
enforcement_id,
&enforcement,
EnforcementStatus::Created,
UpdateEnforcementInput {
status: Some(EnforcementStatus::Disabled),
payload: None,
@@ -176,11 +194,13 @@ impl EnforcementProcessor {
)
.await?;
if updated.is_some() {
debug!(
"Updated enforcement {} status to Disabled (skipped)",
enforcement_id
);
}
}
Ok(())
}
@@ -230,11 +250,11 @@ impl EnforcementProcessor {
async fn create_execution(
pool: &PgPool,
publisher: &Publisher,
policy_enforcer: &PolicyEnforcer,
_policy_enforcer: &PolicyEnforcer,
_queue_manager: &ExecutionQueueManager,
enforcement: &Enforcement,
rule: &Rule,
) -> Result<()> {
) -> Result<bool> {
// Extract action ID — should_create_execution already verified it's Some,
// but guard defensively here as well.
let action_id = match rule.action {
@@ -257,33 +277,10 @@ impl EnforcementProcessor {
enforcement.id, rule.id, action_id
);
let pack_id = rule.pack;
let action_ref = &rule.action_ref;
// Enforce policies and wait for queue slot if needed
info!(
"Enforcing policies for action {} (enforcement: {})",
action_id, enforcement.id
);
// Use enforcement ID for queue tracking (execution doesn't exist yet)
if let Err(e) = policy_enforcer
.enforce_and_wait(action_id, Some(pack_id), enforcement.id)
.await
{
error!(
"Policy enforcement failed for enforcement {}: {}",
enforcement.id, e
);
return Err(e);
}
info!(
"Policy check passed and queue slot obtained for enforcement: {}",
enforcement.id
);
// Now create execution in database (we have a queue slot)
// Create the execution row first; scheduler-side policy enforcement
// now handles both rule-triggered and manual executions uniformly.
let execution_input = CreateExecutionInput {
action: Some(action_id),
action_ref: action_ref.clone(),
@@ -298,21 +295,36 @@ impl EnforcementProcessor {
workflow_task: None, // Non-workflow execution
};
let execution = ExecutionRepository::create(pool, execution_input).await?;
let execution_result = ExecutionRepository::create_top_level_for_enforcement_if_absent(
pool,
execution_input,
enforcement.id,
)
.await?;
let execution = execution_result.execution;
if execution_result.created {
info!(
"Created execution: {} for enforcement: {}",
execution.id, enforcement.id
);
} else {
info!(
"Reusing execution: {} for enforcement: {}",
execution.id, enforcement.id
);
}
// Publish ExecutionRequested message
if execution_result.created
|| execution.status == attune_common::models::enums::ExecutionStatus::Requested
{
let payload = ExecutionRequestedPayload {
execution_id: execution.id,
action_id: Some(action_id),
action_ref: action_ref.clone(),
parent_id: None,
enforcement_id: Some(enforcement.id),
config: enforcement.config.clone(),
config: execution.config.clone(),
};
let envelope =
@@ -331,11 +343,12 @@ impl EnforcementProcessor {
"Published execution.requested message for execution: {} (enforcement: {}, action: {})",
execution.id, enforcement.id, action_id
);
}
// NOTE: Queue slot will be released when worker publishes execution.completed
// and CompletionListener calls queue_manager.release_active_slot(execution_id)
Ok(())
Ok(execution_result.created)
}
}
@@ -368,7 +381,7 @@ mod tests {
pack: 1,
pack_ref: "test".to_string(),
label: "Test Rule".to_string(),
description: "Test rule description".to_string(),
description: Some("Test rule description".to_string()),
trigger_ref: "test.trigger".to_string(),
trigger: Some(1),
action_ref: "test.action".to_string(),

View File

@@ -19,7 +19,7 @@ use attune_common::{
event::{CreateEnforcementInput, EnforcementRepository, EventRepository},
pack::PackRepository,
rule::RuleRepository,
Create, FindById, List,
FindById, List,
},
template_resolver::{resolve_templates, TemplateContext},
};
@@ -206,14 +206,23 @@ impl EventProcessor {
conditions: rule.conditions.clone(),
};
let enforcement = EnforcementRepository::create(pool, create_input).await?;
let enforcement_result =
EnforcementRepository::create_or_get_by_rule_event(pool, create_input).await?;
let enforcement = enforcement_result.enforcement;
if enforcement_result.created {
info!(
"Enforcement {} created for rule {} (event: {})",
enforcement.id, rule.r#ref, event.id
);
} else {
info!(
"Reusing enforcement {} for rule {} (event: {})",
enforcement.id, rule.r#ref, event.id
);
}
// Publish EnforcementCreated message
if enforcement_result.created || enforcement.status == EnforcementStatus::Created {
let enforcement_payload = EnforcementCreatedPayload {
enforcement_id: enforcement.id,
rule_id: Some(rule.id),
@@ -223,7 +232,8 @@ impl EventProcessor {
payload: payload.clone(),
};
let envelope = MessageEnvelope::new(MessageType::EnforcementCreated, enforcement_payload)
let envelope =
MessageEnvelope::new(MessageType::EnforcementCreated, enforcement_payload)
.with_source("event-processor");
publisher.publish_envelope(&envelope).await?;
@@ -232,6 +242,7 @@ impl EventProcessor {
"Published EnforcementCreated message for enforcement {}",
enforcement.id
);
}
Ok(())
}

View File

@@ -9,13 +9,14 @@
use anyhow::Result;
use attune_common::{
error::Error as AttuneError,
models::{enums::InquiryStatus, inquiry::Inquiry, Execution, Id},
mq::{
Consumer, InquiryCreatedPayload, InquiryRespondedPayload, MessageEnvelope, MessageType,
Publisher,
},
repositories::{
execution::{ExecutionRepository, UpdateExecutionInput},
execution::{ExecutionRepository, UpdateExecutionInput, SELECT_COLUMNS},
inquiry::{CreateInquiryInput, InquiryRepository},
Create, FindById, Update,
},
@@ -28,6 +29,8 @@ use tracing::{debug, error, info, warn};
/// Special key in action result to indicate an inquiry should be created
pub const INQUIRY_RESULT_KEY: &str = "__inquiry";
const INQUIRY_ID_RESULT_KEY: &str = "__inquiry_id";
const INQUIRY_CREATED_PUBLISHED_RESULT_KEY: &str = "__inquiry_created_published";
/// Structure for inquiry data in action results
#[derive(Debug, Clone, serde::Deserialize)]
@@ -104,26 +107,71 @@ impl InquiryHandler {
let inquiry_request: InquiryRequest = serde_json::from_value(inquiry_value.clone())?;
Ok(inquiry_request)
}
}
/// Returns true when `e` represents a PostgreSQL unique constraint violation (code 23505).
fn is_db_unique_violation(e: &AttuneError) -> bool {
if let AttuneError::Database(sqlx_err) = e {
return sqlx_err
.as_database_error()
.and_then(|db| db.code())
.as_deref()
== Some("23505");
}
false
}
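For reference, SQLSTATE 23505 is PostgreSQL's `unique_violation` class; the helper above unwraps it from the crate's `AttuneError::Database` wrapper. The equivalent check written directly against `sqlx::Error` would look roughly like this (a sketch, not part of the diff):

// Same check, one layer down the error stack.
fn is_unique_violation_sqlx(err: &sqlx::Error) -> bool {
    matches!(
        err.as_database_error().and_then(|db| db.code()).as_deref(),
        Some("23505")
    )
}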
impl InquiryHandler {
/// Create an inquiry for an execution and pause it
pub async fn create_inquiry_from_result(
pool: &PgPool,
publisher: &Publisher,
execution_id: Id,
result: &JsonValue,
_result: &JsonValue,
) -> Result<Inquiry> {
info!("Creating inquiry for execution {}", execution_id);
// Extract inquiry request
let inquiry_request = Self::extract_inquiry_request(result)?;
let mut tx = pool.begin().await?;
let execution = sqlx::query_as::<_, Execution>(&format!(
"SELECT {SELECT_COLUMNS} FROM execution WHERE id = $1 FOR UPDATE"
))
.bind(execution_id)
.fetch_one(&mut *tx)
.await?;
// Calculate timeout if specified
let mut result = execution
.result
.clone()
.ok_or_else(|| anyhow::anyhow!("Execution {} has no result", execution_id))?;
let inquiry_request = Self::extract_inquiry_request(&result)?;
let timeout_at = inquiry_request
.timeout_seconds
.map(|seconds| Utc::now() + chrono::Duration::seconds(seconds));
// Create inquiry in database
let inquiry_input = CreateInquiryInput {
let existing_inquiry_id = result
.get(INQUIRY_ID_RESULT_KEY)
.and_then(|value| value.as_i64());
let published = result
.get(INQUIRY_CREATED_PUBLISHED_RESULT_KEY)
.and_then(|value| value.as_bool())
.unwrap_or(false);
let (inquiry, should_publish) = if let Some(inquiry_id) = existing_inquiry_id {
let inquiry = InquiryRepository::find_by_id(&mut *tx, inquiry_id)
.await?
.ok_or_else(|| {
anyhow::anyhow!(
"Inquiry {} referenced by execution {} result not found",
inquiry_id,
execution_id
)
})?;
let should_publish = !published && inquiry.status == InquiryStatus::Pending;
(inquiry, should_publish)
} else {
let create_result = InquiryRepository::create(
&mut *tx,
CreateInquiryInput {
execution: execution_id,
prompt: inquiry_request.prompt.clone(),
response_schema: inquiry_request.response_schema.clone(),
@@ -131,20 +179,55 @@ impl InquiryHandler {
status: InquiryStatus::Pending,
response: None,
timeout_at,
},
)
.await;
let inquiry = match create_result {
Ok(inq) => inq,
Err(e) => {
// Unique constraint violation (23505): another replica already
// created the inquiry for this execution. Treat as idempotent
// success — drop the aborted transaction and return the existing row.
if is_db_unique_violation(&e) {
info!(
"Inquiry for execution {} already created by another replica \
(unique constraint 23505); treating as idempotent",
execution_id
);
// tx is in an aborted state; dropping it issues ROLLBACK.
drop(tx);
let inquiries =
InquiryRepository::find_by_execution(pool, execution_id).await?;
let existing = inquiries.into_iter().next().ok_or_else(|| {
anyhow::anyhow!(
"Inquiry for execution {} not found after unique constraint violation",
execution_id
)
})?;
return Ok(existing);
}
return Err(e.into());
}
};
let inquiry = InquiryRepository::create(pool, inquiry_input).await?;
Self::set_inquiry_result_metadata(&mut result, inquiry.id, false)?;
ExecutionRepository::update(
&mut *tx,
execution_id,
UpdateExecutionInput {
result: Some(result),
..Default::default()
},
)
.await?;
info!(
"Created inquiry {} for execution {}",
inquiry.id, execution_id
);
(inquiry, true)
};
// Update execution status to paused/waiting
// Note: We use a special status or keep it as "running" with inquiry tracking
// For now, we'll keep status as-is and track via inquiry relationship
tx.commit().await?;
// Publish InquiryCreated message
if should_publish {
let payload = InquiryCreatedPayload {
inquiry_id: inquiry.id,
execution_id,
@@ -158,15 +241,64 @@ impl InquiryHandler {
MessageEnvelope::new(MessageType::InquiryCreated, payload).with_source("executor");
publisher.publish_envelope(&envelope).await?;
Self::mark_inquiry_created_published(pool, execution_id).await?;
debug!(
"Published InquiryCreated message for inquiry {}",
inquiry.id
);
}
Ok(inquiry)
}
fn set_inquiry_result_metadata(
result: &mut JsonValue,
inquiry_id: Id,
published: bool,
) -> Result<()> {
let obj = result
.as_object_mut()
.ok_or_else(|| anyhow::anyhow!("execution result is not a JSON object"))?;
obj.insert(
INQUIRY_ID_RESULT_KEY.to_string(),
JsonValue::Number(inquiry_id.into()),
);
obj.insert(
INQUIRY_CREATED_PUBLISHED_RESULT_KEY.to_string(),
JsonValue::Bool(published),
);
Ok(())
}
async fn mark_inquiry_created_published(pool: &PgPool, execution_id: Id) -> Result<()> {
let execution = ExecutionRepository::find_by_id(pool, execution_id)
.await?
.ok_or_else(|| anyhow::anyhow!("Execution {} not found", execution_id))?;
let mut result = execution
.result
.clone()
.ok_or_else(|| anyhow::anyhow!("Execution {} has no result", execution_id))?;
let inquiry_id = result
.get(INQUIRY_ID_RESULT_KEY)
.and_then(|value| value.as_i64())
.ok_or_else(|| anyhow::anyhow!("Execution {} missing __inquiry_id", execution_id))?;
Self::set_inquiry_result_metadata(&mut result, inquiry_id, true)?;
ExecutionRepository::update(
pool,
execution_id,
UpdateExecutionInput {
result: Some(result),
..Default::default()
},
)
.await?;
Ok(())
}
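The two metadata keys give the handler a small write-ahead state machine inside `execution.result`, making the commit-then-publish sequence replay-safe: a crash between the transaction commit and the publish leaves `__inquiry_created_published` at `false`, so the next delivery re-publishes before flipping it. An illustrative sketch of the two states (field values are made up):

use serde_json::json;

// State after the transaction commits but before the publish:
let after_create = json!({
    "__inquiry": { "prompt": "..." },    // original action output
    "__inquiry_id": 42,                  // row created, publish still pending
    "__inquiry_created_published": false,
});
// State after mark_inquiry_created_published succeeds:
let after_publish = json!({
    "__inquiry": { "prompt": "..." },
    "__inquiry_id": 42,
    "__inquiry_created_published": true, // safe to skip on redelivery
});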
/// Handle an inquiry response message
async fn handle_inquiry_response(
pool: &PgPool,
@@ -235,9 +367,13 @@ impl InquiryHandler {
if let Some(obj) = updated_result.as_object_mut() {
obj.insert("__inquiry_response".to_string(), response.clone());
obj.insert(
"__inquiry_id".to_string(),
INQUIRY_ID_RESULT_KEY.to_string(),
JsonValue::Number(inquiry.id.into()),
);
obj.insert(
INQUIRY_CREATED_PUBLISHED_RESULT_KEY.to_string(),
JsonValue::Bool(true),
);
}
// Update execution with new result

View File

@@ -10,14 +10,23 @@
use anyhow::Result;
use chrono::{DateTime, Duration, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use sqlx::PgPool;
use std::collections::HashMap;
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use tracing::{debug, info, warn};
use attune_common::models::{enums::ExecutionStatus, Id};
use attune_common::{
models::{
enums::{ExecutionStatus, PolicyMethod},
Id, Policy,
},
repositories::action::PolicyRepository,
};
use crate::queue_manager::ExecutionQueueManager;
use crate::queue_manager::{
ExecutionQueueManager, QueuedRemovalOutcome, SlotEnqueueOutcome, SlotReleaseOutcome,
};
/// Policy violation type
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -79,16 +88,38 @@ impl std::fmt::Display for PolicyViolation {
}
/// Execution policy configuration
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutionPolicy {
/// Rate limit: maximum executions per time window
pub rate_limit: Option<RateLimit>,
/// Concurrency limit: maximum concurrent executions
pub concurrency_limit: Option<u32>,
/// How a concurrency violation should be handled.
pub concurrency_method: PolicyMethod,
/// Parameter paths used to scope concurrency grouping.
pub concurrency_parameters: Vec<String>,
/// Resource quotas
pub quotas: Option<HashMap<String, u64>>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SchedulingPolicyOutcome {
Ready,
Queued,
}
impl Default for ExecutionPolicy {
fn default() -> Self {
Self {
rate_limit: None,
concurrency_limit: None,
concurrency_method: PolicyMethod::Enqueue,
concurrency_parameters: Vec::new(),
quotas: None,
}
}
}
/// Rate limit configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimit {
@@ -98,6 +129,25 @@ pub struct RateLimit {
pub window_seconds: u32,
}
#[derive(Debug, Clone)]
struct ResolvedConcurrencyPolicy {
limit: u32,
method: PolicyMethod,
parameters: Vec<String>,
}
impl From<Policy> for ExecutionPolicy {
fn from(policy: Policy) -> Self {
Self {
rate_limit: None,
concurrency_limit: Some(policy.threshold as u32),
concurrency_method: policy.method,
concurrency_parameters: policy.parameters,
quotas: None,
}
}
}
/// Policy enforcement scope
#[derive(Debug, Clone, PartialEq, Eq)]
#[allow(dead_code)] // Used in tests
@@ -185,6 +235,174 @@ impl PolicyEnforcer {
self.action_policies.insert(action_id, policy);
}
/// Best-effort release for a slot acquired during scheduling when the
/// execution never reaches the worker/completion path.
pub async fn release_execution_slot(
&self,
execution_id: Id,
) -> Result<Option<SlotReleaseOutcome>> {
match &self.queue_manager {
Some(queue_manager) => queue_manager.release_active_slot(execution_id).await,
None => Ok(None),
}
}
pub async fn restore_execution_slot(
&self,
execution_id: Id,
outcome: &SlotReleaseOutcome,
) -> Result<()> {
match &self.queue_manager {
Some(queue_manager) => {
queue_manager
.restore_active_slot(execution_id, outcome)
.await
}
None => Ok(()),
}
}
pub async fn remove_queued_execution(
&self,
execution_id: Id,
) -> Result<Option<QueuedRemovalOutcome>> {
match &self.queue_manager {
Some(queue_manager) => queue_manager.remove_queued_execution(execution_id).await,
None => Ok(None),
}
}
pub async fn restore_queued_execution(&self, outcome: &QueuedRemovalOutcome) -> Result<()> {
match &self.queue_manager {
Some(queue_manager) => queue_manager.restore_queued_execution(outcome).await,
None => Ok(()),
}
}
pub async fn enforce_for_scheduling(
&self,
action_id: Id,
pack_id: Option<Id>,
execution_id: Id,
config: Option<&JsonValue>,
) -> Result<SchedulingPolicyOutcome> {
if let Some(violation) = self
.check_policies_except_concurrency(action_id, pack_id)
.await?
{
warn!("Policy violation for action {}: {}", action_id, violation);
return Err(anyhow::anyhow!("Policy violation: {}", violation));
}
if let Some(concurrency) = self.resolve_concurrency_policy(action_id, pack_id).await? {
let group_key = self.build_parameter_group_key(&concurrency.parameters, config);
if let Some(queue_manager) = &self.queue_manager {
match concurrency.method {
PolicyMethod::Enqueue => {
return match queue_manager
.enqueue(action_id, execution_id, concurrency.limit, group_key)
.await?
{
SlotEnqueueOutcome::Acquired => Ok(SchedulingPolicyOutcome::Ready),
SlotEnqueueOutcome::Enqueued => Ok(SchedulingPolicyOutcome::Queued),
};
}
PolicyMethod::Cancel => {
let outcome = queue_manager
.try_acquire(
action_id,
execution_id,
concurrency.limit,
group_key.clone(),
)
.await?;
if !outcome.acquired {
let violation = PolicyViolation::ConcurrencyLimitExceeded {
limit: concurrency.limit,
current_count: outcome.current_count,
};
warn!("Policy violation for action {}: {}", action_id, violation);
return Err(anyhow::anyhow!("Policy violation: {}", violation));
}
}
}
} else {
let scope = PolicyScope::Action(action_id);
if let Some(violation) = self
.check_concurrency_limit(concurrency.limit, &scope)
.await?
{
return Err(anyhow::anyhow!("Policy violation: {}", violation));
}
}
}
Ok(SchedulingPolicyOutcome::Ready)
}
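// Hedged sketch, not part of this diff: how a scheduler might branch on the
// outcome returned above. The surrounding function and the dispatch/park steps
// are assumptions; only the enforcer call and the SchedulingPolicyOutcome
// variants come from this change.
async fn scheduling_sketch(
    enforcer: &PolicyEnforcer,
    action_id: Id,
    pack_id: Option<Id>,
    execution_id: Id,
    config: Option<serde_json::Value>,
) -> anyhow::Result<()> {
    match enforcer
        .enforce_for_scheduling(action_id, pack_id, execution_id, config.as_ref())
        .await?
    {
        // A slot was acquired (or no concurrency policy applies): dispatch now.
        SchedulingPolicyOutcome::Ready => { /* hand off to a worker */ }
        // The Enqueue method parked the execution; it is promoted later when
        // release_active_slot frees a slot for this action/group.
        SchedulingPolicyOutcome::Queued => { /* leave it queued */ }
    }
    Ok(())
}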
async fn resolve_policy(&self, action_id: Id, pack_id: Option<Id>) -> Result<ExecutionPolicy> {
if let Some(policy) = self.action_policies.get(&action_id) {
return Ok(policy.clone());
}
if let Some(policy) = PolicyRepository::find_latest_by_action(&self.pool, action_id).await?
{
return Ok(policy.into());
}
if let Some(pack_id) = pack_id {
if let Some(policy) = self.pack_policies.get(&pack_id) {
return Ok(policy.clone());
}
if let Some(policy) = PolicyRepository::find_latest_by_pack(&self.pool, pack_id).await?
{
return Ok(policy.into());
}
}
if let Some(policy) = PolicyRepository::find_latest_global(&self.pool).await? {
return Ok(policy.into());
}
Ok(self.global_policy.clone())
}
async fn resolve_concurrency_policy(
&self,
action_id: Id,
pack_id: Option<Id>,
) -> Result<Option<ResolvedConcurrencyPolicy>> {
let policy = self.resolve_policy(action_id, pack_id).await?;
Ok(policy
.concurrency_limit
.map(|limit| ResolvedConcurrencyPolicy {
limit,
method: policy.concurrency_method,
parameters: policy.concurrency_parameters,
}))
}
fn build_parameter_group_key(
&self,
parameter_paths: &[String],
config: Option<&JsonValue>,
) -> Option<String> {
if parameter_paths.is_empty() {
return None;
}
let values: BTreeMap<String, JsonValue> = parameter_paths
.iter()
.map(|path| (path.clone(), extract_parameter_value(config, path)))
.collect();
serde_json::to_string(&values).ok()
}
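// Worked example (values mirror the unit test below): with parameter paths
// ["target.region", "environment"] and config
// {"environment": "prod", "target": {"region": "us-east-1"}}, the BTreeMap
// sorts keys lexicographically, so the key always serializes as
// {"environment":"prod","target.region":"us-east-1"} no matter the order the
// paths were configured in.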
/// Get the concurrency limit for a specific action
///
/// Returns the most specific concurrency limit found:
@@ -192,6 +410,7 @@ impl PolicyEnforcer {
/// 2. Pack policy
/// 3. Global policy
/// 4. None (unlimited)
#[allow(dead_code)]
pub fn get_concurrency_limit(&self, action_id: Id, pack_id: Option<Id>) -> Option<u32> {
// Check action-specific policy first
if let Some(policy) = self.action_policies.get(&action_id) {
@@ -213,79 +432,6 @@ impl PolicyEnforcer {
self.global_policy.concurrency_limit
}
-/// Enforce policies and wait in queue if necessary
-///
-/// This method combines policy checking with queue management to ensure:
-/// 1. Policy violations are detected early
-/// 2. FIFO ordering is maintained when capacity is limited
-/// 3. Executions wait efficiently for available slots
-///
-/// # Arguments
-/// * `action_id` - The action to execute
-/// * `pack_id` - The pack containing the action
-/// * `execution_id` - The execution/enforcement ID for queue tracking
-///
-/// # Returns
-/// * `Ok(())` - Policy allows execution and queue slot obtained
-/// * `Err(PolicyViolation)` - Policy prevents execution
-/// * `Err(QueueError)` - Queue timeout or other queue error
-pub async fn enforce_and_wait(
-&self,
-action_id: Id,
-pack_id: Option<Id>,
-execution_id: Id,
-) -> Result<()> {
-// First, check for policy violations (rate limit, quotas, etc.)
-// Note: We skip concurrency check here since queue manages that
-if let Some(violation) = self
-.check_policies_except_concurrency(action_id, pack_id)
-.await?
-{
-warn!("Policy violation for action {}: {}", action_id, violation);
-return Err(anyhow::anyhow!("Policy violation: {}", violation));
-}
-// If queue manager is available, use it for concurrency control
-if let Some(queue_manager) = &self.queue_manager {
-let concurrency_limit = self
-.get_concurrency_limit(action_id, pack_id)
-.unwrap_or(u32::MAX); // Default to unlimited if no policy
-debug!(
-"Enqueuing execution {} for action {} with concurrency limit {}",
-execution_id, action_id, concurrency_limit
-);
-queue_manager
-.enqueue_and_wait(action_id, execution_id, concurrency_limit)
-.await?;
-info!(
-"Execution {} obtained queue slot for action {}",
-execution_id, action_id
-);
-} else {
-// No queue manager - use legacy polling behavior
-debug!(
-"No queue manager configured, using legacy policy wait for action {}",
-action_id
-);
-if let Some(concurrency_limit) = self.get_concurrency_limit(action_id, pack_id) {
-// Check concurrency with old method
-let scope = PolicyScope::Action(action_id);
-if let Some(violation) = self
-.check_concurrency_limit(concurrency_limit, &scope)
-.await?
-{
-return Err(anyhow::anyhow!("Policy violation: {}", violation));
-}
-}
-}
-Ok(())
-}
/// Check policies except concurrency (which is handled by queue)
async fn check_policies_except_concurrency(
&self,
@@ -631,11 +777,28 @@ impl PolicyEnforcer {
}
}
fn extract_parameter_value(config: Option<&JsonValue>, path: &str) -> JsonValue {
let mut current = match config {
Some(value) => value,
None => return JsonValue::Null,
};
for segment in path.split('.') {
match current {
JsonValue::Object(map) => match map.get(segment) {
Some(next) => current = next,
None => return JsonValue::Null,
},
_ => return JsonValue::Null,
}
}
current.clone()
}
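// Worked example: with config {"target": {"region": "us-east-1"}}, the path
// "target.region" is walked one dot-separated segment at a time and yields
// "us-east-1"; a missing segment or a non-object intermediate value
// short-circuits to JsonValue::Null.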
#[cfg(test)]
mod tests {
use super::*;
use crate::queue_manager::QueueConfig;
use tokio::time::{sleep, Duration};
#[test]
fn test_policy_violation_display() {
@@ -665,6 +828,8 @@ mod tests {
let policy = ExecutionPolicy::default();
assert!(policy.rate_limit.is_none());
assert!(policy.concurrency_limit.is_none());
assert_eq!(policy.concurrency_method, PolicyMethod::Enqueue);
assert!(policy.concurrency_parameters.is_empty());
assert!(policy.quotas.is_none());
}
@@ -769,132 +934,25 @@ mod tests {
}
#[tokio::test]
-async fn test_enforce_and_wait_with_queue_manager() {
-let pool = sqlx::PgPool::connect_lazy("postgresql://localhost/test").unwrap();
-let queue_manager = Arc::new(ExecutionQueueManager::with_defaults());
-let mut enforcer = PolicyEnforcer::with_queue_manager(pool, queue_manager.clone());
-// Set concurrency limit
-enforcer.set_action_policy(
-1,
-ExecutionPolicy {
-concurrency_limit: Some(1),
-..Default::default()
-},
-);
-// First execution should proceed immediately
-let result = enforcer.enforce_and_wait(1, None, 100).await;
-assert!(result.is_ok());
-// Check queue stats
-let stats = queue_manager.get_queue_stats(1).await.unwrap();
-assert_eq!(stats.active_count, 1);
-assert_eq!(stats.queue_length, 0);
-}
-#[tokio::test]
-async fn test_enforce_and_wait_fifo_ordering() {
-let pool = sqlx::PgPool::connect_lazy("postgresql://localhost/test").unwrap();
-let queue_manager = Arc::new(ExecutionQueueManager::with_defaults());
-let mut enforcer = PolicyEnforcer::with_queue_manager(pool, queue_manager.clone());
-enforcer.set_action_policy(
-1,
-ExecutionPolicy {
-concurrency_limit: Some(1),
-..Default::default()
-},
-);
-let enforcer = Arc::new(enforcer);
-// First execution
-let result = enforcer.enforce_and_wait(1, None, 100).await;
-assert!(result.is_ok());
-// Queue multiple executions
-let execution_order = Arc::new(tokio::sync::Mutex::new(Vec::new()));
-let mut handles = vec![];
-for exec_id in 101..=103 {
-let enforcer = enforcer.clone();
-let queue_manager = queue_manager.clone();
-let order = execution_order.clone();
-let handle = tokio::spawn(async move {
-enforcer.enforce_and_wait(1, None, exec_id).await.unwrap();
-order.lock().await.push(exec_id);
-// Simulate work
-sleep(Duration::from_millis(10)).await;
-queue_manager.notify_completion(1).await.unwrap();
-});
-handles.push(handle);
-}
-// Give tasks time to queue
-sleep(Duration::from_millis(100)).await;
-// Release first execution
-queue_manager.notify_completion(1).await.unwrap();
-// Wait for all
-for handle in handles {
-handle.await.unwrap();
-}
-// Verify FIFO order
-let order = execution_order.lock().await;
-assert_eq!(*order, vec![101, 102, 103]);
-}
-#[tokio::test]
-async fn test_enforce_and_wait_without_queue_manager() {
-let pool = sqlx::PgPool::connect_lazy("postgresql://localhost/test").unwrap();
-let mut enforcer = PolicyEnforcer::new(pool);
-// Set unlimited concurrency
-enforcer.set_action_policy(
-1,
-ExecutionPolicy {
-concurrency_limit: None,
-..Default::default()
-},
-);
-// Should work without queue manager (legacy behavior)
-let result = enforcer.enforce_and_wait(1, None, 100).await;
-assert!(result.is_ok());
-}
-#[tokio::test]
-async fn test_enforce_and_wait_queue_timeout() {
-let config = QueueConfig {
-max_queue_length: 100,
-queue_timeout_seconds: 1, // Short timeout for test
-enable_metrics: true,
-};
-let pool = sqlx::PgPool::connect_lazy("postgresql://localhost/test").unwrap();
-let queue_manager = Arc::new(ExecutionQueueManager::new(config));
-let mut enforcer = PolicyEnforcer::with_queue_manager(pool, queue_manager.clone());
-// Set concurrency limit
-enforcer.set_action_policy(
-1,
-ExecutionPolicy {
-concurrency_limit: Some(1),
-..Default::default()
-},
-);
-// First execution proceeds
-enforcer.enforce_and_wait(1, None, 100).await.unwrap();
-// Second execution should timeout
-let result = enforcer.enforce_and_wait(1, None, 101).await;
-assert!(result.is_err());
-assert!(result.unwrap_err().to_string().contains("timeout"));
-}
+async fn test_build_parameter_group_key_uses_exact_values() {
+let pool = sqlx::PgPool::connect_lazy("postgresql://localhost/test").unwrap();
+let enforcer = PolicyEnforcer::new(pool);
+let config = serde_json::json!({
+"environment": "prod",
+"target": {
+"region": "us-east-1"
+}
+});
+let group_key = enforcer.build_parameter_group_key(
+&["target.region".to_string(), "environment".to_string()],
+Some(&config),
+);
+assert_eq!(
+group_key.as_deref(),
+Some("{\"environment\":\"prod\",\"target.region\":\"us-east-1\"}")
+);
+}
// Integration tests would require database setup

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -297,6 +297,7 @@ impl ExecutorService {
self.inner.pool.clone(),
self.inner.publisher.clone(),
Arc::new(scheduler_consumer),
self.inner.policy_enforcer.clone(),
);
handles.push(tokio::spawn(async move { scheduler.start().await }));

View File

@@ -12,7 +12,10 @@ use anyhow::Result;
use attune_common::{
models::{enums::ExecutionStatus, Execution},
mq::{MessageEnvelope, MessageType, Publisher},
-repositories::execution::SELECT_COLUMNS as EXECUTION_COLUMNS,
+repositories::{
+execution::{UpdateExecutionInput, SELECT_COLUMNS as EXECUTION_COLUMNS},
+ExecutionRepository,
+},
};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
@@ -178,20 +181,27 @@ impl ExecutionTimeoutMonitor {
"original_status": "scheduled"
});
// Update execution status in database
-sqlx::query(
-"UPDATE execution
-SET status = $1,
-result = $2,
-updated = NOW()
-WHERE id = $3",
+let updated = ExecutionRepository::update_if_status_and_updated_before(
+&self.pool,
+execution_id,
+ExecutionStatus::Scheduled,
+self.calculate_cutoff_time(),
+UpdateExecutionInput {
+status: Some(ExecutionStatus::Failed),
+result: Some(result.clone()),
+..Default::default()
+},
)
-.bind(ExecutionStatus::Failed)
-.bind(&result)
-.bind(execution_id)
-.execute(&self.pool)
.await?;
+if updated.is_none() {
+debug!(
+"Skipping timeout failure for execution {} because it already left Scheduled or is no longer stale",
+execution_id
+);
+return Ok(());
+}
info!("Execution {} marked as failed in database", execution_id);
// Publish completion notification

View File

@@ -155,6 +155,7 @@ impl WorkflowLoader {
}
// Read and parse YAML
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Workflow files come from pack directories already discovered under packs_base_dir.
let content = fs::read_to_string(&file.path)
.await
.map_err(|e| Error::validation(format!("Failed to read workflow file: {}", e)))?;
@@ -265,6 +266,7 @@ impl WorkflowLoader {
pack_name: &str,
) -> Result<Vec<WorkflowFile>> {
let mut workflow_files = Vec::new();
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Executor workflow scanning only traverses pack-owned workflow directories.
let mut entries = fs::read_dir(workflows_dir)
.await
.map_err(|e| Error::validation(format!("Failed to read workflows directory: {}", e)))?;

View File

@@ -26,6 +26,7 @@ use attune_executor::queue_manager::{ExecutionQueueManager, QueueConfig};
use chrono::Utc;
use serde_json::json;
use sqlx::PgPool;
use std::collections::VecDeque;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;
@@ -99,7 +100,7 @@ async fn create_test_action(pool: &PgPool, pack_id: i64, pack_ref: &str, suffix:
pack: pack_id,
pack_ref: pack_ref.to_string(),
label: format!("FIFO Test Action {}", suffix),
-description: format!("Test action {}", suffix),
+description: Some(format!("Test action {}", suffix)),
entrypoint: "echo test".to_string(),
runtime: None,
runtime_version_constraint: None,
@@ -172,6 +173,26 @@ async fn cleanup_test_data(pool: &PgPool, pack_id: i64) {
.ok();
}
async fn release_next_active(
manager: &ExecutionQueueManager,
active_execution_ids: &mut VecDeque<i64>,
) -> Option<i64> {
let execution_id = active_execution_ids
.pop_front()
.expect("Expected an active execution to release");
let release = manager
.release_active_slot(execution_id)
.await
.expect("Release should succeed")
.expect("Active execution should have a tracked slot");
if let Some(next_execution_id) = release.next_execution_id {
active_execution_ids.push_back(next_execution_id);
}
release.next_execution_id
}
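// Worked example: with max_concurrent = 1 and executions [A, B, C] enqueued in
// order, the deque starts as [A]; each call releases the front slot and pushes
// the promoted queue head, so successive calls release A (promoting B), then B
// (promoting C), then C, draining the queue in FIFO order.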
#[tokio::test]
#[ignore] // Requires database
async fn test_fifo_ordering_with_database() {
@@ -198,8 +219,9 @@ async fn test_fifo_ordering_with_database() {
// Create first execution in database and enqueue
let first_exec_id =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
let mut active_execution_ids = VecDeque::from([first_exec_id]);
manager
-.enqueue_and_wait(action_id, first_exec_id, max_concurrent)
+.enqueue_and_wait(action_id, first_exec_id, max_concurrent, None)
.await
.expect("First execution should enqueue");
@@ -222,7 +244,7 @@ async fn test_fifo_ordering_with_database() {
// Enqueue and wait
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.expect("Enqueue should succeed");
@@ -250,10 +272,7 @@ async fn test_fifo_ordering_with_database() {
// Release them one by one
for _ in 0..num_executions {
sleep(Duration::from_millis(50)).await;
-manager
-.notify_completion(action_id)
-.await
-.expect("Notify should succeed");
+release_next_active(&manager, &mut active_execution_ids).await;
}
// Wait for all to complete
@@ -295,6 +314,7 @@ async fn test_high_concurrency_stress() {
let num_executions: i64 = 1000;
let execution_order = Arc::new(Mutex::new(Vec::new()));
let mut handles = vec![];
let execution_ids = Arc::new(Mutex::new(vec![None; num_executions as usize]));
println!("Starting stress test with {} executions...", num_executions);
let start_time = std::time::Instant::now();
@@ -305,6 +325,7 @@ async fn test_high_concurrency_stress() {
let manager_clone = manager.clone();
let action_ref_clone = action_ref.clone();
let order = execution_order.clone();
let ids = execution_ids.clone();
let handle = tokio::spawn(async move {
let exec_id = create_test_execution(
@@ -314,9 +335,10 @@ async fn test_high_concurrency_stress() {
ExecutionStatus::Requested,
)
.await;
ids.lock().await[i as usize] = Some(exec_id);
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.expect("Enqueue should succeed");
@@ -332,6 +354,7 @@ async fn test_high_concurrency_stress() {
let manager_clone = manager.clone();
let action_ref_clone = action_ref.clone();
let order = execution_order.clone();
let ids = execution_ids.clone();
let handle = tokio::spawn(async move {
let exec_id = create_test_execution(
@@ -341,9 +364,10 @@ async fn test_high_concurrency_stress() {
ExecutionStatus::Requested,
)
.await;
ids.lock().await[i as usize] = Some(exec_id);
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.expect("Enqueue should succeed");
@@ -376,15 +400,21 @@ async fn test_high_concurrency_stress() {
);
// Release all executions
let ids = execution_ids.lock().await;
let mut active_execution_ids = VecDeque::from(
ids.iter()
.take(max_concurrent as usize)
.map(|id| id.expect("Initial execution id should be recorded"))
.collect::<Vec<_>>(),
);
drop(ids);
println!("Releasing executions...");
for i in 0..num_executions {
if i % 100 == 0 {
println!("Released {} executions", i);
}
-manager
-.notify_completion(action_id)
-.await
-.expect("Notify should succeed");
+release_next_active(&manager, &mut active_execution_ids).await;
// Small delay to allow queue processing
if i % 50 == 0 {
@@ -416,7 +446,7 @@ async fn test_high_concurrency_stress() {
"All executions should complete"
);
-let expected: Vec<i64> = (0..num_executions).collect();
+let expected: Vec<_> = (0..num_executions).collect();
assert_eq!(
*order, expected,
"Executions should complete in strict FIFO order"
@@ -461,9 +491,31 @@ async fn test_multiple_workers_simulation() {
let num_executions = 30;
let execution_order = Arc::new(Mutex::new(Vec::new()));
let mut handles = vec![];
let mut active_execution_ids = VecDeque::new();
-// Spawn all executions
-for i in 0..num_executions {
+// Fill the initial worker slots deterministically.
+for i in 0..max_concurrent {
let exec_id =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
active_execution_ids.push_back(exec_id);
let manager_clone = manager.clone();
let order = execution_order.clone();
let handle = tokio::spawn(async move {
manager_clone
.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.expect("Enqueue should succeed");
order.lock().await.push(i);
});
handles.push(handle);
}
// Queue the remaining executions.
for i in max_concurrent..num_executions {
let pool_clone = pool.clone();
let manager_clone = manager.clone();
let action_ref_clone = action_ref.clone();
@@ -479,7 +531,7 @@ async fn test_multiple_workers_simulation() {
.await;
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.expect("Enqueue should succeed");
@@ -499,6 +551,8 @@ async fn test_multiple_workers_simulation() {
let worker_completions = Arc::new(Mutex::new(vec![0, 0, 0]));
let worker_completions_clone = worker_completions.clone();
let manager_clone = manager.clone();
let active_execution_ids = Arc::new(Mutex::new(active_execution_ids));
let active_execution_ids_clone = active_execution_ids.clone();
// Spawn worker simulators
let worker_handle = tokio::spawn(async move {
@@ -514,10 +568,8 @@ async fn test_multiple_workers_simulation() {
sleep(Duration::from_millis(delay)).await;
// Worker completes and notifies
-manager_clone
-.notify_completion(action_id)
-.await
-.expect("Notify should succeed");
+let mut active_execution_ids = active_execution_ids_clone.lock().await;
+release_next_active(&manager_clone, &mut active_execution_ids).await;
worker_completions_clone.lock().await[next_worker] += 1;
@@ -536,7 +588,7 @@ async fn test_multiple_workers_simulation() {
// Verify FIFO order maintained despite different worker speeds
let order = execution_order.lock().await;
-let expected: Vec<i64> = (0..num_executions).collect();
+let expected: Vec<_> = (0..num_executions).collect();
assert_eq!(
*order, expected,
"FIFO order should be maintained regardless of worker speed"
@@ -576,27 +628,30 @@ async fn test_cross_action_independence() {
let executions_per_action = 50;
let mut handles = vec![];
let mut action1_active = VecDeque::new();
let mut action2_active = VecDeque::new();
let mut action3_active = VecDeque::new();
// Spawn executions for all three actions simultaneously
for action_id in [action1_id, action2_id, action3_id] {
let action_ref = format!("fifo_test_action_{}_{}", suffix, action_id);
for i in 0..executions_per_action {
-let pool_clone = pool.clone();
-let manager_clone = manager.clone();
-let action_ref_clone = action_ref.clone();
-let handle = tokio::spawn(async move {
-let exec_id = create_test_execution(
-&pool_clone,
-action_id,
-&action_ref_clone,
-ExecutionStatus::Requested,
-)
+let exec_id =
+create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested)
.await;
+match action_id {
+id if id == action1_id && i == 0 => action1_active.push_back(exec_id),
+id if id == action2_id && i == 0 => action2_active.push_back(exec_id),
+id if id == action3_id && i == 0 => action3_active.push_back(exec_id),
+_ => {}
+}
+let manager_clone = manager.clone();
+let handle = tokio::spawn(async move {
manager_clone
-.enqueue_and_wait(action_id, exec_id, 1)
+.enqueue_and_wait(action_id, exec_id, 1, None)
.await
.expect("Enqueue should succeed");
@@ -634,18 +689,9 @@ async fn test_cross_action_independence() {
// Release all actions in an interleaved pattern
for i in 0..executions_per_action {
// Release one from each action
-manager
-.notify_completion(action1_id)
-.await
-.expect("Notify should succeed");
-manager
-.notify_completion(action2_id)
-.await
-.expect("Notify should succeed");
-manager
-.notify_completion(action3_id)
-.await
-.expect("Notify should succeed");
+release_next_active(&manager, &mut action1_active).await;
+release_next_active(&manager, &mut action2_active).await;
+release_next_active(&manager, &mut action3_active).await;
if i % 10 == 0 {
sleep(Duration::from_millis(10)).await;
@@ -698,8 +744,9 @@ async fn test_cancellation_during_queue() {
// Fill capacity
let exec_id =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
let mut active_execution_ids = VecDeque::from([exec_id]);
manager
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.unwrap();
@@ -722,7 +769,7 @@ async fn test_cancellation_during_queue() {
ids.lock().await.push(exec_id);
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
});
@@ -757,7 +804,7 @@ async fn test_cancellation_during_queue() {
// Release remaining
for _ in 0..8 {
-manager.notify_completion(action_id).await.unwrap();
+release_next_active(&manager, &mut active_execution_ids).await;
sleep(Duration::from_millis(20)).await;
}
@@ -798,17 +845,21 @@ async fn test_queue_stats_persistence() {
let max_concurrent = 5;
let num_executions = 50;
let mut active_execution_ids = VecDeque::new();
// Enqueue executions
for i in 0..num_executions {
let exec_id =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
if i < max_concurrent {
active_execution_ids.push_back(exec_id);
}
// Start the enqueue in background
let manager_clone = manager.clone();
tokio::spawn(async move {
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.ok();
});
@@ -838,7 +889,7 @@ async fn test_queue_stats_persistence() {
// Release all
for _ in 0..num_executions {
-manager.notify_completion(action_id).await.unwrap();
+release_next_active(&manager, &mut active_execution_ids).await;
sleep(Duration::from_millis(10)).await;
}
@@ -854,13 +905,122 @@ async fn test_queue_stats_persistence() {
assert_eq!(final_db_stats.queue_length, 0);
assert_eq!(final_mem_stats.queue_length, 0);
-assert_eq!(final_db_stats.total_enqueued, num_executions);
-assert_eq!(final_db_stats.total_completed, num_executions);
+assert_eq!(final_db_stats.total_enqueued, num_executions as i64);
+assert_eq!(final_db_stats.total_completed, num_executions as i64);
// Cleanup
cleanup_test_data(&pool, pack_id).await;
}
#[tokio::test]
#[ignore] // Requires database
async fn test_release_restore_recovers_active_slot_and_next_queue_head() {
let pool = setup_db().await;
let timestamp = Utc::now().timestamp();
let suffix = format!("restore_release_{}", timestamp);
let pack_id = create_test_pack(&pool, &suffix).await;
let pack_ref = format!("fifo_test_pack_{}", suffix);
let action_id = create_test_action(&pool, pack_id, &pack_ref, &suffix).await;
let action_ref = format!("fifo_test_action_{}", suffix);
let manager = ExecutionQueueManager::with_db_pool(QueueConfig::default(), pool.clone());
let first =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
let second =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
let third =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
manager.enqueue(action_id, first, 1, None).await.unwrap();
manager.enqueue(action_id, second, 1, None).await.unwrap();
manager.enqueue(action_id, third, 1, None).await.unwrap();
let stats = manager.get_queue_stats(action_id).await.unwrap();
assert_eq!(stats.active_count, 1);
assert_eq!(stats.queue_length, 2);
let release = manager
.release_active_slot(first)
.await
.unwrap()
.expect("first execution should own an active slot");
assert_eq!(release.next_execution_id, Some(second));
let stats = manager.get_queue_stats(action_id).await.unwrap();
assert_eq!(stats.active_count, 1);
assert_eq!(stats.queue_length, 1);
manager.restore_active_slot(first, &release).await.unwrap();
let stats = manager.get_queue_stats(action_id).await.unwrap();
assert_eq!(stats.active_count, 1);
assert_eq!(stats.queue_length, 2);
assert_eq!(stats.total_completed, 0);
let next = manager
.release_active_slot(first)
.await
.unwrap()
.expect("restored execution should still own the active slot");
assert_eq!(next.next_execution_id, Some(second));
cleanup_test_data(&pool, pack_id).await;
}
#[tokio::test]
#[ignore] // Requires database
async fn test_remove_restore_recovers_queued_execution_position() {
let pool = setup_db().await;
let timestamp = Utc::now().timestamp();
let suffix = format!("restore_queue_{}", timestamp);
let pack_id = create_test_pack(&pool, &suffix).await;
let pack_ref = format!("fifo_test_pack_{}", suffix);
let action_id = create_test_action(&pool, pack_id, &pack_ref, &suffix).await;
let action_ref = format!("fifo_test_action_{}", suffix);
let manager = ExecutionQueueManager::with_db_pool(QueueConfig::default(), pool.clone());
let first =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
let second =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
let third =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
manager.enqueue(action_id, first, 1, None).await.unwrap();
manager.enqueue(action_id, second, 1, None).await.unwrap();
manager.enqueue(action_id, third, 1, None).await.unwrap();
let removal = manager
.remove_queued_execution(second)
.await
.unwrap()
.expect("second execution should be queued");
assert_eq!(removal.next_execution_id, None);
let stats = manager.get_queue_stats(action_id).await.unwrap();
assert_eq!(stats.active_count, 1);
assert_eq!(stats.queue_length, 1);
manager.restore_queued_execution(&removal).await.unwrap();
let stats = manager.get_queue_stats(action_id).await.unwrap();
assert_eq!(stats.active_count, 1);
assert_eq!(stats.queue_length, 2);
let release = manager
.release_active_slot(first)
.await
.unwrap()
.expect("first execution should own the active slot");
assert_eq!(release.next_execution_id, Some(second));
cleanup_test_data(&pool, pack_id).await;
}
#[tokio::test]
#[ignore] // Requires database
async fn test_queue_full_rejection() {
@@ -888,7 +1048,7 @@ async fn test_queue_full_rejection() {
let exec_id =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
manager
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.unwrap();
@@ -900,7 +1060,7 @@ async fn test_queue_full_rejection() {
tokio::spawn(async move {
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.ok();
});
@@ -917,7 +1077,7 @@ async fn test_queue_full_rejection() {
let exec_id =
create_test_execution(&pool, action_id, &action_ref, ExecutionStatus::Requested).await;
let result = manager
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await;
assert!(result.is_err(), "Should reject when queue is full");
@@ -951,6 +1111,7 @@ async fn test_extreme_stress_10k_executions() {
let max_concurrent = 10;
let num_executions: i64 = 10000;
let completed = Arc::new(Mutex::new(0u64));
let execution_ids = Arc::new(Mutex::new(vec![None; num_executions as usize]));
println!(
"Starting extreme stress test with {} executions...",
@@ -965,6 +1126,7 @@ async fn test_extreme_stress_10k_executions() {
let manager_clone = manager.clone();
let action_ref_clone = action_ref.clone();
let completed_clone = completed.clone();
let ids = execution_ids.clone();
let handle = tokio::spawn(async move {
let exec_id = create_test_execution(
@@ -974,9 +1136,10 @@ async fn test_extreme_stress_10k_executions() {
ExecutionStatus::Requested,
)
.await;
ids.lock().await[i as usize] = Some(exec_id);
manager_clone
-.enqueue_and_wait(action_id, exec_id, max_concurrent)
+.enqueue_and_wait(action_id, exec_id, max_concurrent, None)
.await
.expect("Enqueue should succeed");
@@ -999,12 +1162,18 @@ async fn test_extreme_stress_10k_executions() {
println!("All executions spawned");
// Release all
let ids = execution_ids.lock().await;
let mut active_execution_ids = VecDeque::from(
ids.iter()
.take(max_concurrent as usize)
.map(|id| id.expect("Initial execution id should be recorded"))
.collect::<Vec<_>>(),
);
drop(ids);
let release_start = std::time::Instant::now();
for i in 0i64..num_executions {
-manager
-.notify_completion(action_id)
-.await
-.expect("Notify should succeed");
+release_next_active(&manager, &mut active_execution_ids).await;
if i % 1000 == 0 {
println!("Released: {}", i);

View File

@@ -9,7 +9,7 @@
use attune_common::{
config::Config,
db::Database,
models::enums::ExecutionStatus,
models::enums::{ExecutionStatus, PolicyMethod},
repositories::{
action::{ActionRepository, CreateActionInput},
execution::{CreateExecutionInput, ExecutionRepository},
@@ -94,7 +94,7 @@ async fn create_test_action(pool: &PgPool, pack_id: i64, suffix: &str) -> i64 {
pack: pack_id,
pack_ref: format!("test_pack_{}", suffix),
label: format!("Test Action {}", suffix),
-description: format!("Test action {}", suffix),
+description: Some(format!("Test action {}", suffix)),
entrypoint: "echo test".to_string(),
runtime: None,
runtime_version_constraint: None,
@@ -190,6 +190,8 @@ async fn test_global_rate_limit() {
window_seconds: 60,
}),
concurrency_limit: None,
concurrency_method: PolicyMethod::Enqueue,
concurrency_parameters: Vec::new(),
quotas: None,
};
@@ -242,6 +244,8 @@ async fn test_concurrency_limit() {
let policy = ExecutionPolicy {
rate_limit: None,
concurrency_limit: Some(2),
concurrency_method: PolicyMethod::Enqueue,
concurrency_parameters: Vec::new(),
quotas: None,
};
@@ -300,6 +304,8 @@ async fn test_action_specific_policy() {
window_seconds: 60,
}),
concurrency_limit: None,
concurrency_method: PolicyMethod::Enqueue,
concurrency_parameters: Vec::new(),
quotas: None,
};
enforcer.set_action_policy(action_id, action_policy);
@@ -345,6 +351,8 @@ async fn test_pack_specific_policy() {
let pack_policy = ExecutionPolicy {
rate_limit: None,
concurrency_limit: Some(1),
concurrency_method: PolicyMethod::Enqueue,
concurrency_parameters: Vec::new(),
quotas: None,
};
enforcer.set_pack_policy(pack_id, pack_policy);
@@ -388,6 +396,8 @@ async fn test_policy_priority() {
window_seconds: 60,
}),
concurrency_limit: None,
concurrency_method: PolicyMethod::Enqueue,
concurrency_parameters: Vec::new(),
quotas: None,
};
let mut enforcer = PolicyEnforcer::with_global_policy(pool.clone(), global_policy);
@@ -399,6 +409,8 @@ async fn test_policy_priority() {
window_seconds: 60,
}),
concurrency_limit: None,
concurrency_method: PolicyMethod::Enqueue,
concurrency_parameters: Vec::new(),
quotas: None,
};
enforcer.set_action_policy(action_id, action_policy);

View File

@@ -27,6 +27,37 @@ use tracing::{debug, error, info, warn};
use crate::api_client::ApiClient;
fn existing_command_env(cmd: &Command, key: &str) -> Option<String> {
cmd.as_std()
.get_envs()
.find_map(|(env_key, value)| {
if env_key == key {
value.map(|value| value.to_string_lossy().into_owned())
} else {
None
}
})
.or_else(|| std::env::var(key).ok())
}
fn apply_runtime_env_vars(
cmd: &mut Command,
exec_config: &RuntimeExecutionConfig,
pack_dir: &std::path::Path,
env_dir: Option<&std::path::Path>,
) {
if exec_config.env_vars.is_empty() {
return;
}
let vars = exec_config.build_template_vars_with_env(pack_dir, env_dir);
for (key, env_var_config) in &exec_config.env_vars {
let resolved = env_var_config.resolve(&vars, existing_command_env(cmd, key).as_deref());
debug!("Setting sensor runtime env var: {}={}", key, resolved);
cmd.env(key, resolved);
}
}
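// Worked example (mirrors the unit test at the bottom of this file): a
// PYTHONPATH entry configured with value "{pack_dir}/lib", operation Prepend,
// and separator ":" resolves, for pack_dir = /packs/testpack and an existing
// PYTHONPATH of /existing/pythonpath, to
// "/packs/testpack/lib:/existing/pythonpath".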
/// Sensor manager that coordinates all sensor instances
#[derive(Clone)]
pub struct SensorManager {
@@ -502,20 +533,7 @@ impl SensorManager {
.env("ATTUNE_MQ_EXCHANGE", "attune.events")
.env("ATTUNE_LOG_LEVEL", "info");
-if !exec_config.env_vars.is_empty() {
-let vars = exec_config.build_template_vars_with_env(&pack_dir, env_dir_opt);
-for (key, value_template) in &exec_config.env_vars {
-let resolved = attune_common::models::RuntimeExecutionConfig::resolve_template(
-value_template,
-&vars,
-);
-debug!(
-"Setting sensor runtime env var: {}={} (template: {})",
-key, resolved, value_template
-);
-cmd.env(key, resolved);
-}
-}
+apply_runtime_env_vars(&mut cmd, &exec_config, &pack_dir, env_dir_opt);
let mut child = cmd
.stdin(Stdio::null())
@@ -904,6 +922,10 @@ pub struct SensorStatus {
#[cfg(test)]
mod tests {
use super::*;
use attune_common::models::runtime::{
RuntimeEnvVarConfig, RuntimeEnvVarOperation, RuntimeEnvVarSpec,
};
use std::collections::HashMap;
#[test]
fn test_sensor_status_default() {
@@ -913,4 +935,46 @@ mod tests {
assert_eq!(status.failure_count, 0);
assert!(status.last_poll.is_none());
}
#[test]
fn test_apply_runtime_env_vars_prepends_to_existing_command_env() {
let mut env_vars = HashMap::new();
env_vars.insert(
"PYTHONPATH".to_string(),
RuntimeEnvVarConfig::Spec(RuntimeEnvVarSpec {
value: "{pack_dir}/lib".to_string(),
operation: RuntimeEnvVarOperation::Prepend,
separator: ":".to_string(),
}),
);
let exec_config = RuntimeExecutionConfig {
env_vars,
..RuntimeExecutionConfig::default()
};
let mut cmd = Command::new("python3");
cmd.env("PYTHONPATH", "/existing/pythonpath");
apply_runtime_env_vars(
&mut cmd,
&exec_config,
std::path::Path::new("/packs/testpack"),
None,
);
let resolved = cmd
.as_std()
.get_envs()
.find_map(|(key, value)| {
if key == "PYTHONPATH" {
value.map(|value| value.to_string_lossy().into_owned())
} else {
None
}
})
.expect("PYTHONPATH should be set");
assert_eq!(resolved, "/packs/testpack/lib:/existing/pythonpath");
}
}

View File

@@ -84,6 +84,7 @@ impl ArtifactManager {
// Store stdout
if !stdout.is_empty() {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Artifact filenames are fixed constants under an execution-scoped directory derived from the execution ID.
let stdout_path = exec_dir.join("stdout.log");
let mut file = fs::File::create(&stdout_path)
.await
@@ -117,6 +118,7 @@ impl ArtifactManager {
// Store stderr
if !stderr.is_empty() {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Artifact filenames are fixed constants under an execution-scoped directory derived from the execution ID.
let stderr_path = exec_dir.join("stderr.log");
let mut file = fs::File::create(&stderr_path)
.await
@@ -162,6 +164,7 @@ impl ArtifactManager {
.await
.map_err(|e| Error::Internal(format!("Failed to create execution directory: {}", e)))?;
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Result artifacts are written to a fixed filename inside the execution-scoped directory.
let result_path = exec_dir.join("result.json");
let result_json = serde_json::to_string_pretty(result)?;
@@ -209,6 +212,7 @@ impl ArtifactManager {
.await
.map_err(|e| Error::Internal(format!("Failed to create execution directory: {}", e)))?;
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Custom artifact paths are always rooted under the execution-scoped artifact directory.
let file_path = exec_dir.join(filename);
let mut file = fs::File::create(&file_path)
.await
@@ -246,6 +250,7 @@ impl ArtifactManager {
/// Read an artifact
pub async fn read_artifact(&self, artifact: &Artifact) -> Result<Vec<u8>> {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Artifact reads use paths previously created by the artifact manager inside the configured artifact root.
fs::read(&artifact.path)
.await
.map_err(|e| Error::Internal(format!("Failed to read artifact: {}", e)))

View File

@@ -474,6 +474,7 @@ impl ActionExecutor {
let actions_dir = pack_dir.join("actions");
let actions_dir_exists = actions_dir.exists();
let actions_dir_contents: Vec<String> = if actions_dir_exists {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Diagnostic directory listing is confined to the action pack directory derived from pack_ref.
std::fs::read_dir(&actions_dir)
.map(|entries| {
entries
@@ -543,6 +544,16 @@ impl ActionExecutor {
selected_runtime_version,
max_stdout_bytes: self.max_stdout_bytes,
max_stderr_bytes: self.max_stderr_bytes,
stdout_log_path: Some(
self.artifact_manager
.get_execution_dir(execution.id)
.join("stdout.log"),
),
stderr_log_path: Some(
self.artifact_manager
.get_execution_dir(execution.id)
.join("stderr.log"),
),
parameter_delivery: action.parameter_delivery,
parameter_format: action.parameter_format,
output_format: action.output_format,
@@ -892,6 +903,7 @@ impl ActionExecutor {
// Check if stderr log exists and is non-empty from artifact storage
let stderr_path = exec_dir.join("stderr.log");
if stderr_path.exists() {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Log paths are fixed artifact filenames inside the execution-scoped directory.
if let Ok(contents) = tokio::fs::read_to_string(&stderr_path).await {
if !contents.trim().is_empty() {
result_data["stderr_log"] =
@@ -903,6 +915,7 @@ impl ActionExecutor {
// Check if stdout log exists from artifact storage
let stdout_path = exec_dir.join("stdout.log");
if stdout_path.exists() {
// nosemgrep: rust.actix.path-traversal.tainted-path.tainted-path -- Log paths are fixed artifact filenames inside the execution-scoped directory.
if let Ok(contents) = tokio::fs::read_to_string(&stdout_path).await {
if !contents.is_empty() {
result_data["stdout"] = serde_json::json!(contents);
@@ -990,7 +1003,11 @@ impl ActionExecutor {
..Default::default()
};
-ExecutionRepository::update(&self.pool, execution_id, input).await?;
+let execution = ExecutionRepository::find_by_id(&self.pool, execution_id)
+.await?
+.ok_or_else(|| anyhow::anyhow!("Execution {} not found", execution_id))?;
+ExecutionRepository::update_loaded(&self.pool, &execution, input).await?;
Ok(())
}

View File

@@ -452,7 +452,7 @@ mod tests {
#[test]
fn test_detected_runtimes_json_structure() {
// Test the JSON structure that set_detected_runtimes builds
-let runtimes = vec![
+let runtimes = [
DetectedRuntime {
name: "python".to_string(),
path: "/usr/bin/python3".to_string(),

View File

@@ -200,6 +200,8 @@ mod tests {
selected_runtime_version: None,
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
stdout_log_path: None,
stderr_log_path: None,
parameter_delivery: ParameterDelivery::default(),
parameter_format: ParameterFormat::default(),
output_format: OutputFormat::default(),
@@ -233,6 +235,8 @@ mod tests {
selected_runtime_version: None,
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
stdout_log_path: None,
stderr_log_path: None,
parameter_delivery: ParameterDelivery::default(),
parameter_format: ParameterFormat::default(),
output_format: OutputFormat::default(),

View File

@@ -2,9 +2,10 @@
//!
//! Provides bounded log writers that limit output size to prevent OOM issues.
use std::path::Path;
use std::pin::Pin;
use std::task::{Context, Poll};
-use tokio::io::AsyncWrite;
+use tokio::io::{AsyncWrite, AsyncWriteExt};
const TRUNCATION_NOTICE_STDOUT: &str = "\n\n[OUTPUT TRUNCATED: stdout exceeded size limit]\n";
const TRUNCATION_NOTICE_STDERR: &str = "\n\n[OUTPUT TRUNCATED: stderr exceeded size limit]\n";
@@ -76,6 +77,15 @@ pub struct BoundedLogWriter {
truncation_notice: &'static str,
}
/// A file-backed writer that applies the same truncation policy as `BoundedLogWriter`.
pub struct BoundedLogFileWriter {
file: tokio::fs::File,
max_bytes: usize,
truncated: bool,
data_bytes_written: usize,
truncation_notice: &'static str,
}
impl BoundedLogWriter {
/// Create a new bounded log writer for stdout
pub fn new_stdout(max_bytes: usize) -> Self {
@@ -166,6 +176,76 @@ impl BoundedLogWriter {
}
}
impl BoundedLogFileWriter {
pub async fn new_stdout(path: &Path, max_bytes: usize) -> std::io::Result<Self> {
Self::create(path, max_bytes, TRUNCATION_NOTICE_STDOUT).await
}
pub async fn new_stderr(path: &Path, max_bytes: usize) -> std::io::Result<Self> {
Self::create(path, max_bytes, TRUNCATION_NOTICE_STDERR).await
}
async fn create(
path: &Path,
max_bytes: usize,
truncation_notice: &'static str,
) -> std::io::Result<Self> {
if let Some(parent) = path.parent() {
tokio::fs::create_dir_all(parent).await?;
}
let file = tokio::fs::OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(path)
.await?;
Ok(Self {
file,
max_bytes,
truncated: false,
data_bytes_written: 0,
truncation_notice,
})
}
pub async fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
if self.truncated {
return Ok(());
}
let effective_limit = self.max_bytes.saturating_sub(NOTICE_RESERVE_BYTES);
let remaining_space = effective_limit.saturating_sub(self.data_bytes_written);
if remaining_space == 0 {
self.add_truncation_notice().await?;
return Ok(());
}
let bytes_to_write = std::cmp::min(buf.len(), remaining_space);
if bytes_to_write > 0 {
self.file.write_all(&buf[..bytes_to_write]).await?;
self.data_bytes_written += bytes_to_write;
}
if bytes_to_write < buf.len() {
self.add_truncation_notice().await?;
}
self.file.flush().await
}
async fn add_truncation_notice(&mut self) -> std::io::Result<()> {
if self.truncated {
return Ok(());
}
self.truncated = true;
self.file.write_all(self.truncation_notice.as_bytes()).await
}
}
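// Hedged usage sketch, assuming only the BoundedLogFileWriter API defined
// above: stream a child process's stdout into a bounded, file-backed log.
// The reader loop itself is illustrative, not part of this diff.
async fn capture_stdout_to_file(
    mut stdout: tokio::process::ChildStdout,
    path: &Path,
    max_bytes: usize,
) -> std::io::Result<()> {
    use tokio::io::AsyncReadExt;
    let mut writer = BoundedLogFileWriter::new_stdout(path, max_bytes).await?;
    let mut buf = [0u8; 8192];
    loop {
        let n = stdout.read(&mut buf).await?;
        if n == 0 {
            break; // child closed its stdout
        }
        // Bytes past the limit are dropped after a single truncation notice.
        writer.write_all(&buf[..n]).await?;
    }
    Ok(())
}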
impl AsyncWrite for BoundedLogWriter {
fn poll_write(
mut self: Pin<&mut Self>,

View File

@@ -48,7 +48,7 @@ pub use dependency::{
DependencyError, DependencyManager, DependencyManagerRegistry, DependencyResult,
DependencySpec, EnvironmentInfo,
};
-pub use log_writer::{BoundedLogResult, BoundedLogWriter};
+pub use log_writer::{BoundedLogFileWriter, BoundedLogResult, BoundedLogWriter};
pub use parameter_passing::{ParameterDeliveryConfig, PreparedParameters};
// Re-export parameter types from common
@@ -148,6 +148,12 @@ pub struct ExecutionContext {
/// Maximum stderr size in bytes (for log truncation)
pub max_stderr_bytes: usize,
/// Optional live stdout log path for incremental writes during execution.
pub stdout_log_path: Option<PathBuf>,
/// Optional live stderr log path for incremental writes during execution.
pub stderr_log_path: Option<PathBuf>,
/// How parameters should be delivered to the action
pub parameter_delivery: ParameterDelivery,
@@ -185,6 +191,8 @@ impl ExecutionContext {
selected_runtime_version: None,
max_stdout_bytes: 10 * 1024 * 1024,
max_stderr_bytes: 10 * 1024 * 1024,
stdout_log_path: None,
stderr_log_path: None,
parameter_delivery: ParameterDelivery::default(),
parameter_format: ParameterFormat::default(),
output_format: OutputFormat::default(),

Some files were not shown because too many files have changed in this diff