working on runtime executions

This commit is contained in:
2026-02-16 22:04:20 -06:00
parent f52320f889
commit 904ede04be
99 changed files with 6778 additions and 5929 deletions

View File

@@ -1,87 +1,24 @@
# Optimized Multi-stage Dockerfile for Attune Rust services
# This Dockerfile minimizes layer invalidation by selectively copying only required crates
# Multi-stage Dockerfile for Attune Rust services (api, executor, sensor, notifier)
#
# Key optimizations:
# 1. Copy only Cargo.toml files first to cache dependency downloads
# 2. Build dummy binaries to cache compiled dependencies
# 3. Copy only the specific crate being built (plus common)
# 4. Use BuildKit cache mounts for cargo registry and build artifacts
# Simple and robust: build the entire workspace, then copy the target binary.
# No dummy sources, no selective crate copying, no fragile hacks.
#
# Usage: DOCKER_BUILDKIT=1 docker build --build-arg SERVICE=api -f docker/Dockerfile.optimized -t attune-api .
# Usage:
# DOCKER_BUILDKIT=1 docker build --build-arg SERVICE=api -f docker/Dockerfile.optimized -t attune-api .
# DOCKER_BUILDKIT=1 docker build --build-arg SERVICE=executor -f docker/Dockerfile.optimized -t attune-executor .
# DOCKER_BUILDKIT=1 docker build --build-arg SERVICE=sensor -f docker/Dockerfile.optimized -t attune-sensor .
# DOCKER_BUILDKIT=1 docker build --build-arg SERVICE=notifier -f docker/Dockerfile.optimized -t attune-notifier .
#
# Build time comparison (after common crate changes):
# - Old: ~5 minutes (rebuilds all dependencies)
# - New: ~30 seconds (only recompiles changed code)
#
# Note: This Dockerfile does NOT copy packs into the image.
# Packs are mounted as volumes at runtime from the packs_data volume.
# The init-packs service in docker-compose.yaml handles pack initialization.
# Note: Packs are NOT copied into the image — they are mounted as volumes at runtime.
ARG RUST_VERSION=1.92
ARG DEBIAN_VERSION=bookworm
# ============================================================================
# Stage 1: Planner - Extract dependency information
# ============================================================================
FROM rust:${RUST_VERSION}-${DEBIAN_VERSION} AS planner
# Install build dependencies
RUN apt-get update && apt-get install -y \
pkg-config \
libssl-dev \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /build
# Copy only Cargo.toml and Cargo.lock to understand dependencies
COPY Cargo.toml Cargo.lock ./
# Copy all crate manifests (but not source code)
# This allows cargo to resolve the workspace without needing source
COPY crates/common/Cargo.toml ./crates/common/Cargo.toml
COPY crates/api/Cargo.toml ./crates/api/Cargo.toml
COPY crates/executor/Cargo.toml ./crates/executor/Cargo.toml
COPY crates/sensor/Cargo.toml ./crates/sensor/Cargo.toml
COPY crates/core-timer-sensor/Cargo.toml ./crates/core-timer-sensor/Cargo.toml
COPY crates/worker/Cargo.toml ./crates/worker/Cargo.toml
COPY crates/notifier/Cargo.toml ./crates/notifier/Cargo.toml
COPY crates/cli/Cargo.toml ./crates/cli/Cargo.toml
# Create dummy lib.rs and main.rs files for all crates
# This allows us to build dependencies without the actual source code
RUN mkdir -p crates/common/src && echo "fn main() {}" > crates/common/src/lib.rs
RUN mkdir -p crates/api/src && echo "fn main() {}" > crates/api/src/main.rs
RUN mkdir -p crates/executor/src && echo "fn main() {}" > crates/executor/src/main.rs
RUN mkdir -p crates/executor/benches && echo "fn main() {}" > crates/executor/benches/context_clone.rs
RUN mkdir -p crates/sensor/src && echo "fn main() {}" > crates/sensor/src/main.rs
RUN mkdir -p crates/core-timer-sensor/src && echo "fn main() {}" > crates/core-timer-sensor/src/main.rs
RUN mkdir -p crates/worker/src && echo "fn main() {}" > crates/worker/src/main.rs
RUN mkdir -p crates/notifier/src && echo "fn main() {}" > crates/notifier/src/main.rs
RUN mkdir -p crates/cli/src && echo "fn main() {}" > crates/cli/src/main.rs
# Copy SQLx metadata for compile-time query checking
COPY .sqlx/ ./.sqlx/
# Build argument to specify which service to build
ARG SERVICE=api
# Build dependencies only (with dummy source)
# This layer is only invalidated when Cargo.toml or Cargo.lock changes
# BuildKit cache mounts persist cargo registry and git cache
# - registry/git use sharing=shared (cargo handles concurrent access safely)
# - target uses service-specific cache ID to avoid conflicts between services
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
--mount=type=cache,target=/build/target,id=target-planner-${SERVICE} \
cargo build --release --bin attune-${SERVICE} || true
# ============================================================================
# Stage 2: Builder - Compile the actual service
# Stage 1: Builder - Compile the entire workspace
# ============================================================================
FROM rust:${RUST_VERSION}-${DEBIAN_VERSION} AS builder
# Install build dependencies
RUN apt-get update && apt-get install -y \
pkg-config \
libssl-dev \
@@ -90,10 +27,9 @@ RUN apt-get update && apt-get install -y \
WORKDIR /build
# Copy workspace configuration
# Copy dependency metadata first so `cargo fetch` layer is cached
# when only source code changes (Cargo.toml/Cargo.lock stay the same)
COPY Cargo.toml Cargo.lock ./
# Copy all crate manifests (required for workspace resolution)
COPY crates/common/Cargo.toml ./crates/common/Cargo.toml
COPY crates/api/Cargo.toml ./crates/api/Cargo.toml
COPY crates/executor/Cargo.toml ./crates/executor/Cargo.toml
@@ -103,106 +39,87 @@ COPY crates/worker/Cargo.toml ./crates/worker/Cargo.toml
COPY crates/notifier/Cargo.toml ./crates/notifier/Cargo.toml
COPY crates/cli/Cargo.toml ./crates/cli/Cargo.toml
# Create dummy source files for workspace members that won't be built
# This satisfies workspace resolution without copying full source
RUN mkdir -p crates/api/src && echo "fn main() {}" > crates/api/src/main.rs
RUN mkdir -p crates/executor/src && echo "fn main() {}" > crates/executor/src/main.rs
RUN mkdir -p crates/executor/benches && echo "fn main() {}" > crates/executor/benches/context_clone.rs
RUN mkdir -p crates/sensor/src && echo "fn main() {}" > crates/sensor/src/main.rs
RUN mkdir -p crates/core-timer-sensor/src && echo "fn main() {}" > crates/core-timer-sensor/src/main.rs
RUN mkdir -p crates/worker/src && echo "fn main() {}" > crates/worker/src/main.rs
RUN mkdir -p crates/notifier/src && echo "fn main() {}" > crates/notifier/src/main.rs
RUN mkdir -p crates/cli/src && echo "fn main() {}" > crates/cli/src/main.rs
# Create minimal stub sources so cargo can resolve the workspace and fetch deps.
# These are ONLY used for `cargo fetch` — never compiled.
RUN mkdir -p crates/common/src && echo "" > crates/common/src/lib.rs && \
mkdir -p crates/api/src && echo "fn main(){}" > crates/api/src/main.rs && \
mkdir -p crates/executor/src && echo "fn main(){}" > crates/executor/src/main.rs && \
mkdir -p crates/executor/benches && echo "fn main(){}" > crates/executor/benches/context_clone.rs && \
mkdir -p crates/sensor/src && echo "fn main(){}" > crates/sensor/src/main.rs && \
mkdir -p crates/core-timer-sensor/src && echo "fn main(){}" > crates/core-timer-sensor/src/main.rs && \
mkdir -p crates/worker/src && echo "fn main(){}" > crates/worker/src/main.rs && \
mkdir -p crates/notifier/src && echo "fn main(){}" > crates/notifier/src/main.rs && \
mkdir -p crates/cli/src && echo "fn main(){}" > crates/cli/src/main.rs
# Copy SQLx metadata
# Download all dependencies (cached unless Cargo.toml/Cargo.lock change)
# registry/git use sharing=shared — cargo handles concurrent reads safely
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
cargo fetch
# Now copy the real source code, SQLx metadata, and migrations
COPY .sqlx/ ./.sqlx/
# Copy migrations (required for some services)
COPY migrations/ ./migrations/
COPY crates/ ./crates/
# Copy the common crate (almost all services depend on this)
COPY crates/common/ ./crates/common/
# Build the specified service
# The cargo registry and git cache are pre-populated from the planner stage
# Only the actual compilation happens here
# - registry/git use sharing=shared (concurrent builds of different services are safe)
# - target uses service-specific cache ID (each service compiles different crates)
# Build the entire workspace in release mode.
# All binaries are compiled together, sharing dependency compilation.
# target cache uses sharing=locked so concurrent service builds serialize
# writes to the shared compilation cache instead of corrupting it.
#
# IMPORTANT: ARG SERVICE is declared AFTER this RUN so that changing the
# SERVICE value does not invalidate the cached build layer. The first
# service to build compiles the full workspace; subsequent services get
# a cache hit here and skip straight to the cp below.
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
--mount=type=cache,target=/build/target,sharing=locked \
cargo build --release --lib -p attune-common
cargo build --release --workspace --bins -j 4
# Build argument to specify which service to build
# Extract the requested service binary from the target cache.
# This is the only layer that varies per SERVICE value.
ARG SERVICE=api
# Copy only the source for the service being built
# This is the key optimization: changes to other crates won't invalidate this layer
COPY crates/${SERVICE}/ ./crates/${SERVICE}/
# Build the specified service
# The cargo registry and git cache are pre-populated from the planner stage
# Only the actual compilation happens here
# - registry/git use sharing=shared (concurrent builds of different services are safe)
# - target uses service-specific cache ID (each service compiles different crates)
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
--mount=type=cache,target=/build/target,sharing=shared \
cargo build --release --bin attune-${SERVICE} && \
RUN --mount=type=cache,target=/build/target,sharing=locked \
cp /build/target/release/attune-${SERVICE} /build/attune-service-binary
# ============================================================================
# Stage 3: Runtime - Create minimal runtime image
# Stage 2: Runtime - Minimal image with just the service binary
# ============================================================================
FROM debian:${DEBIAN_VERSION}-slim AS runtime
# Install runtime dependencies
RUN apt-get update && apt-get install -y \
ca-certificates \
libssl3 \
curl \
git \
&& rm -rf /var/lib/apt/lists/*
# Create non-root user and directories
# Note: /opt/attune/packs is mounted as a volume at runtime, not copied in
# /opt/attune/packs is mounted as a volume at runtime, not copied in
RUN useradd -m -u 1000 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune
# Copy the service binary from builder
# Copy the service binary from builder using a fixed path (no variable in COPY source)
# This avoids the circular dependency Docker hits when using ARG in --from paths
COPY --from=builder /build/attune-service-binary /usr/local/bin/attune-service
# Copy configuration file for Docker Compose development
# In production, mount config files as a volume instead of baking them into the image
# Copy configuration and migrations
COPY config.docker.yaml ./config.yaml
# Copy migrations for services that need them
COPY migrations/ ./migrations/
# Note: Packs are NOT copied into the image
# They are mounted as a volume at runtime from the packs_data volume
# The init-packs service populates the packs_data volume from ./packs directory
# Pack binaries (like attune-core-timer-sensor) are also in the mounted volume
# Set ownership (packs will be mounted at runtime)
RUN chown -R attune:attune /opt/attune
# Switch to non-root user
USER attune
# Environment variables (can be overridden at runtime)
ENV RUST_LOG=info
ENV ATTUNE_CONFIG=/opt/attune/config.yaml
# Health check (will be overridden per service in docker-compose)
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
CMD curl -f http://localhost:8080/health || exit 1
# Expose default port (override per service)
EXPOSE 8080
# Run the service
CMD ["/usr/local/bin/attune-service"]

View File

@@ -11,7 +11,6 @@
ARG RUST_VERSION=1.92
ARG DEBIAN_VERSION=bookworm
ARG PYTHON_VERSION=3.11
ARG NODE_VERSION=20
# ============================================================================
@@ -102,29 +101,40 @@ CMD ["/usr/local/bin/attune-worker"]
# Stage 2b: Python Worker (Shell + Python)
# Runtime capabilities: shell, python
# Use case: Python actions and scripts with dependencies
#
# Uses debian-slim + apt python3 (NOT the python: Docker image) so that
# python3 lives at /usr/bin/python3 — the same path as worker-full.
# This avoids broken venv symlinks when multiple workers share the
# runtime_envs volume.
# ============================================================================
FROM python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION} AS worker-python
FROM debian:${DEBIAN_VERSION}-slim AS worker-python
# Install system dependencies
# Install system dependencies including Python
RUN apt-get update && apt-get install -y \
ca-certificates \
libssl3 \
curl \
build-essential \
python3 \
python3-pip \
python3-venv \
procps \
&& rm -rf /var/lib/apt/lists/*
# Create python symlink for convenience
RUN ln -sf /usr/bin/python3 /usr/bin/python
# Install common Python packages
# These are commonly used in automation scripts
RUN pip install --no-cache-dir \
# Use --break-system-packages for Debian 12+ pip-in-system-python restrictions.
# The requirement specifiers MUST be quoted: an unquoted `pkg>=1.0` is parsed by
# the shell as `pkg` plus an output redirection to a file named `=1.0`, so pip
# would install unpinned latest versions and the pins would be silently dropped.
RUN pip3 install --no-cache-dir --break-system-packages \
      'requests>=2.31.0' \
      'pyyaml>=6.0' \
      'jinja2>=3.1.0' \
      'python-dateutil>=2.8.0'
# Create worker user and directories
RUN useradd -m -u 1001 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
RUN useradd -m -u 1000 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune
@@ -161,8 +171,12 @@ CMD ["/usr/local/bin/attune-worker"]
# Stage 2c: Node Worker (Shell + Node.js)
# Runtime capabilities: shell, node
# Use case: JavaScript/TypeScript actions and npm packages
#
# Uses debian-slim + NodeSource apt repo (NOT the node: Docker image) so that
# node lives at /usr/bin/node — the same path as worker-full.
# This avoids path mismatches when multiple workers share volumes.
# ============================================================================
FROM node:${NODE_VERSION}-slim AS worker-node
FROM debian:${DEBIAN_VERSION}-slim AS worker-node
# Install system dependencies
RUN apt-get update && apt-get install -y \
@@ -172,10 +186,14 @@ RUN apt-get update && apt-get install -y \
procps \
&& rm -rf /var/lib/apt/lists/*
# Install Node.js from NodeSource (same method as worker-full)
RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_VERSION}.x | bash - && \
apt-get install -y nodejs && \
rm -rf /var/lib/apt/lists/*
# Create worker user and directories
# Note: Node base image has 'node' user at UID 1000, so we use UID 1001
RUN useradd -m -u 1001 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
RUN useradd -m -u 1000 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune
@@ -227,13 +245,13 @@ RUN apt-get update && apt-get install -y \
procps \
&& rm -rf /var/lib/apt/lists/*
# Install Node.js from NodeSource
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
# Install Node.js from NodeSource (same method and version as worker-node)
RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_VERSION}.x | bash - && \
apt-get install -y nodejs && \
rm -rf /var/lib/apt/lists/*
# Create python symlink for convenience
RUN ln -s /usr/bin/python3 /usr/bin/python
RUN ln -sf /usr/bin/python3 /usr/bin/python
# Install common Python packages
# Use --break-system-packages for Debian 12+ pip-in-system-python restrictions
@@ -244,8 +262,8 @@ RUN pip3 install --no-cache-dir --break-system-packages \
python-dateutil>=2.8.0
# Create worker user and directories
RUN useradd -m -u 1001 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
RUN useradd -m -u 1000 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune

View File

@@ -1,81 +1,32 @@
# Optimized Multi-stage Dockerfile for Attune workers
# This Dockerfile minimizes layer invalidation by selectively copying only required crates
# Multi-stage Dockerfile for Attune worker service
#
# Key optimizations:
# 1. Copy only Cargo.toml files first to cache dependency downloads
# 2. Build dummy binaries to cache compiled dependencies
# 3. Copy only worker and common crates (not all crates)
# 4. Use BuildKit cache mounts for cargo registry and build artifacts
# Simple and robust: build the entire workspace, then copy the worker binary
# into different runtime base images depending on language support needed.
# No dummy source compilation, no selective crate copying, no fragile hacks.
#
# Supports building different worker variants with different runtime capabilities
# Targets:
# worker-base - Shell only (lightweight)
# worker-python - Shell + Python
# worker-node - Shell + Node.js
# worker-full - Shell + Python + Node.js + Native
#
# Usage:
# docker build --target worker-base -t attune-worker:base -f docker/Dockerfile.worker.optimized .
# docker build --target worker-python -t attune-worker:python -f docker/Dockerfile.worker.optimized .
# docker build --target worker-node -t attune-worker:node -f docker/Dockerfile.worker.optimized .
# docker build --target worker-full -t attune-worker:full -f docker/Dockerfile.worker.optimized .
# DOCKER_BUILDKIT=1 docker build --target worker-base -t attune-worker:base -f docker/Dockerfile.worker.optimized .
# DOCKER_BUILDKIT=1 docker build --target worker-python -t attune-worker:python -f docker/Dockerfile.worker.optimized .
# DOCKER_BUILDKIT=1 docker build --target worker-node -t attune-worker:node -f docker/Dockerfile.worker.optimized .
# DOCKER_BUILDKIT=1 docker build --target worker-full -t attune-worker:full -f docker/Dockerfile.worker.optimized .
#
# Note: Packs are NOT copied into the image — they are mounted as volumes at runtime.
ARG RUST_VERSION=1.92
ARG DEBIAN_VERSION=bookworm
ARG PYTHON_VERSION=3.11
ARG NODE_VERSION=20
# ============================================================================
# Stage 1: Planner - Extract dependency information
# ============================================================================
FROM rust:${RUST_VERSION}-${DEBIAN_VERSION} AS planner
# Install build dependencies
RUN apt-get update && apt-get install -y \
pkg-config \
libssl-dev \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /build
# Copy only Cargo.toml and Cargo.lock
COPY Cargo.toml Cargo.lock ./
# Copy all crate manifests (required for workspace resolution)
COPY crates/common/Cargo.toml ./crates/common/Cargo.toml
COPY crates/api/Cargo.toml ./crates/api/Cargo.toml
COPY crates/executor/Cargo.toml ./crates/executor/Cargo.toml
COPY crates/sensor/Cargo.toml ./crates/sensor/Cargo.toml
COPY crates/core-timer-sensor/Cargo.toml ./crates/core-timer-sensor/Cargo.toml
COPY crates/worker/Cargo.toml ./crates/worker/Cargo.toml
COPY crates/notifier/Cargo.toml ./crates/notifier/Cargo.toml
COPY crates/cli/Cargo.toml ./crates/cli/Cargo.toml
# Create dummy source files to satisfy cargo
RUN mkdir -p crates/common/src && echo "fn main() {}" > crates/common/src/lib.rs
RUN mkdir -p crates/api/src && echo "fn main() {}" > crates/api/src/main.rs
RUN mkdir -p crates/executor/src && echo "fn main() {}" > crates/executor/src/main.rs
RUN mkdir -p crates/executor/benches && echo "fn main() {}" > crates/executor/benches/context_clone.rs
RUN mkdir -p crates/sensor/src && echo "fn main() {}" > crates/sensor/src/main.rs
RUN mkdir -p crates/core-timer-sensor/src && echo "fn main() {}" > crates/core-timer-sensor/src/main.rs
RUN mkdir -p crates/worker/src && echo "fn main() {}" > crates/worker/src/main.rs
RUN mkdir -p crates/notifier/src && echo "fn main() {}" > crates/notifier/src/main.rs
RUN mkdir -p crates/cli/src && echo "fn main() {}" > crates/cli/src/main.rs
# Copy SQLx metadata
COPY .sqlx/ ./.sqlx/
# Build dependencies only (with dummy source)
# This layer is cached and only invalidated when dependencies change
# - registry/git use sharing=shared (cargo handles concurrent access safely)
# - target uses private cache for planner stage
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
--mount=type=cache,target=/build/target,id=target-worker-planner \
cargo build --release --bin attune-worker || true
# ============================================================================
# Stage 2: Builder - Compile the worker binary
# Stage 1: Builder - Compile the entire workspace
# ============================================================================
FROM rust:${RUST_VERSION}-${DEBIAN_VERSION} AS builder
# Install build dependencies
RUN apt-get update && apt-get install -y \
pkg-config \
libssl-dev \
@@ -84,10 +35,9 @@ RUN apt-get update && apt-get install -y \
WORKDIR /build
# Copy workspace configuration
# Copy dependency metadata first so `cargo fetch` layer is cached
# when only source code changes (Cargo.toml/Cargo.lock stay the same)
COPY Cargo.toml Cargo.lock ./
# Copy all crate manifests (required for workspace resolution)
COPY crates/common/Cargo.toml ./crates/common/Cargo.toml
COPY crates/api/Cargo.toml ./crates/api/Cargo.toml
COPY crates/executor/Cargo.toml ./crates/executor/Cargo.toml
@@ -97,50 +47,48 @@ COPY crates/worker/Cargo.toml ./crates/worker/Cargo.toml
COPY crates/notifier/Cargo.toml ./crates/notifier/Cargo.toml
COPY crates/cli/Cargo.toml ./crates/cli/Cargo.toml
# Create dummy source files for workspace members that won't be built
# This satisfies workspace resolution without copying full source
RUN mkdir -p crates/api/src && echo "fn main() {}" > crates/api/src/main.rs
RUN mkdir -p crates/executor/src && echo "fn main() {}" > crates/executor/src/main.rs
RUN mkdir -p crates/executor/benches && echo "fn main() {}" > crates/executor/benches/context_clone.rs
RUN mkdir -p crates/sensor/src && echo "fn main() {}" > crates/sensor/src/main.rs
RUN mkdir -p crates/core-timer-sensor/src && echo "fn main() {}" > crates/core-timer-sensor/src/main.rs
RUN mkdir -p crates/notifier/src && echo "fn main() {}" > crates/notifier/src/main.rs
RUN mkdir -p crates/cli/src && echo "fn main() {}" > crates/cli/src/main.rs
# Create minimal stub sources so cargo can resolve the workspace and fetch deps.
# Unlike the old approach, these are ONLY used for `cargo fetch` — never compiled.
RUN mkdir -p crates/common/src && echo "" > crates/common/src/lib.rs && \
mkdir -p crates/api/src && echo "fn main(){}" > crates/api/src/main.rs && \
mkdir -p crates/executor/src && echo "fn main(){}" > crates/executor/src/main.rs && \
mkdir -p crates/executor/benches && echo "fn main(){}" > crates/executor/benches/context_clone.rs && \
mkdir -p crates/sensor/src && echo "fn main(){}" > crates/sensor/src/main.rs && \
mkdir -p crates/core-timer-sensor/src && echo "fn main(){}" > crates/core-timer-sensor/src/main.rs && \
mkdir -p crates/worker/src && echo "fn main(){}" > crates/worker/src/main.rs && \
mkdir -p crates/notifier/src && echo "fn main(){}" > crates/notifier/src/main.rs && \
mkdir -p crates/cli/src && echo "fn main(){}" > crates/cli/src/main.rs
# Copy SQLx metadata
COPY .sqlx/ ./.sqlx/
# Copy migrations (required by common crate)
COPY migrations/ ./migrations/
# Copy ONLY the crates needed for worker
# This is the key optimization: changes to api/executor/sensor/notifier/cli won't invalidate this layer
COPY crates/common/ ./crates/common/
COPY crates/worker/ ./crates/worker/
# Build the worker binary
# Dependencies are already cached from planner stage
# - registry/git use sharing=shared (concurrent builds are safe)
# - target uses dedicated cache for worker builds (all workers share same binary)
# Download all dependencies (cached unless Cargo.toml/Cargo.lock change)
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
--mount=type=cache,target=/build/target,id=target-worker-builder \
cargo build --release --bin attune-worker && \
cargo fetch
# Now copy the real source code, SQLx metadata, and migrations
COPY .sqlx/ ./.sqlx/
COPY migrations/ ./migrations/
COPY crates/ ./crates/
# Build the entire workspace in release mode.
# All binaries are compiled together, sharing dependency compilation.
# target cache uses sharing=locked so concurrent service builds serialize
# writes to the shared compilation cache instead of corrupting it.
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
--mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
--mount=type=cache,target=/build/target,sharing=locked \
cargo build --release --workspace --bins -j 4 && \
cp /build/target/release/attune-worker /build/attune-worker
# Verify the binary was built
RUN ls -lh /build/attune-worker && \
file /build/attune-worker && \
/build/attune-worker --version || echo "Version check skipped"
file /build/attune-worker
# ============================================================================
# Stage 3a: Base Worker (Shell only)
# Stage 2a: Base Worker (Shell only)
# Runtime capabilities: shell
# Use case: Lightweight workers for shell scripts and basic automation
# ============================================================================
FROM debian:${DEBIAN_VERSION}-slim AS worker-base
# Install runtime dependencies
RUN apt-get update && apt-get install -y \
ca-certificates \
libssl3 \
@@ -149,154 +97,38 @@ RUN apt-get update && apt-get install -y \
procps \
&& rm -rf /var/lib/apt/lists/*
# Create worker user and directories
# Note: /opt/attune/packs is mounted as a volume at runtime, not copied in
RUN useradd -m -u 1000 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune
# Copy worker binary from builder
COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker
# Copy configuration template
COPY config.docker.yaml ./config.yaml
# Note: Packs are NOT copied into the image
# They are mounted as a volume at runtime from the packs_data volume
# The init-packs service populates the packs_data volume from ./packs directory
# Switch to non-root user
USER attune
# Environment variables
ENV ATTUNE_WORKER_RUNTIMES="shell"
ENV ATTUNE_WORKER_TYPE="container"
ENV RUST_LOG=info
ENV ATTUNE_CONFIG=/opt/attune/config.yaml
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
CMD pgrep -f attune-worker || exit 1
# Run the worker
CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 3b: Python Worker (Shell + Python)
# Stage 2b: Python Worker (Shell + Python)
# Runtime capabilities: shell, python
# Use case: Python actions and scripts with dependencies
#
# Uses debian-slim + apt python3 (NOT the python: Docker image) so that
# python3 lives at /usr/bin/python3 — the same path as worker-full.
# This avoids broken venv symlinks when multiple workers share the
# runtime_envs volume.
# ============================================================================
FROM python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION} AS worker-python
FROM debian:${DEBIAN_VERSION}-slim AS worker-python
# Install system dependencies
RUN apt-get update && apt-get install -y \
ca-certificates \
libssl3 \
curl \
build-essential \
procps \
&& rm -rf /var/lib/apt/lists/*
# Install common Python packages
# These are commonly used in automation scripts
RUN pip install --no-cache-dir \
requests>=2.31.0 \
pyyaml>=6.0 \
jinja2>=3.1.0 \
python-dateutil>=2.8.0
# Create worker user and directories
# Note: /opt/attune/packs is mounted as a volume at runtime, not copied in
RUN useradd -m -u 1001 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune
# Copy worker binary from builder
COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker
# Copy configuration template
COPY config.docker.yaml ./config.yaml
# Note: Packs are NOT copied into the image
# They are mounted as a volume at runtime from the packs_data volume
# Switch to non-root user
USER attune
# Environment variables
ENV ATTUNE_WORKER_RUNTIMES="shell,python"
ENV ATTUNE_WORKER_TYPE="container"
ENV RUST_LOG=info
ENV ATTUNE_CONFIG=/opt/attune/config.yaml
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
CMD pgrep -f attune-worker || exit 1
# Run the worker
CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 3c: Node Worker (Shell + Node.js)
# Runtime capabilities: shell, node
# Use case: JavaScript/TypeScript actions and npm packages
# ============================================================================
FROM node:${NODE_VERSION}-slim AS worker-node
# Install system dependencies
RUN apt-get update && apt-get install -y \
ca-certificates \
libssl3 \
curl \
procps \
&& rm -rf /var/lib/apt/lists/*
# Create worker user and directories
# Note: Node base image has 'node' user at UID 1000, so we use UID 1001
# Note: /opt/attune/packs is mounted as a volume at runtime, not copied in
RUN useradd -m -u 1001 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune
# Copy worker binary from builder
COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker
# Copy configuration template
COPY config.docker.yaml ./config.yaml
# Note: Packs are NOT copied into the image
# They are mounted as a volume at runtime from the packs_data volume
# Switch to non-root user
USER attune
# Environment variables
ENV ATTUNE_WORKER_RUNTIMES="shell,node"
ENV ATTUNE_WORKER_TYPE="container"
ENV RUST_LOG=info
ENV ATTUNE_CONFIG=/opt/attune/config.yaml
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
CMD pgrep -f attune-worker || exit 1
# Run the worker
CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 3d: Full Worker (All runtimes)
# Runtime capabilities: shell, python, node, native
# Use case: General-purpose automation with multi-language support
# ============================================================================
FROM debian:${DEBIAN_VERSION} AS worker-full
# Install system dependencies including Python and Node.js
RUN apt-get update && apt-get install -y \
ca-certificates \
libssl3 \
@@ -308,15 +140,9 @@ RUN apt-get update && apt-get install -y \
procps \
&& rm -rf /var/lib/apt/lists/*
# Install Node.js from NodeSource
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
apt-get install -y nodejs && \
rm -rf /var/lib/apt/lists/*
# Create python symlink for convenience
RUN ln -s /usr/bin/python3 /usr/bin/python
RUN ln -sf /usr/bin/python3 /usr/bin/python
# Install common Python packages
# Use --break-system-packages for Debian 12+ pip-in-system-python restrictions
RUN pip3 install --no-cache-dir --break-system-packages \
requests>=2.31.0 \
@@ -324,35 +150,118 @@ RUN pip3 install --no-cache-dir --break-system-packages \
jinja2>=3.1.0 \
python-dateutil>=2.8.0
# Create worker user and directories
# Note: /opt/attune/packs is mounted as a volume at runtime, not copied in
RUN useradd -m -u 1001 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs && \
RUN useradd -m -u 1000 attune && \
mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
chown -R attune:attune /opt/attune
WORKDIR /opt/attune
COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker
COPY config.docker.yaml ./config.yaml
USER attune
ENV ATTUNE_WORKER_RUNTIMES="shell,python"
ENV ATTUNE_WORKER_TYPE="container"
ENV RUST_LOG=info
ENV ATTUNE_CONFIG=/opt/attune/config.yaml
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
CMD pgrep -f attune-worker || exit 1
CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 2c: Node Worker (Shell + Node.js)
# Runtime capabilities: shell, node
#
# Uses debian-slim + NodeSource apt repo (NOT the node: Docker image) so that
# node lives at /usr/bin/node — the same path as worker-full.
# This avoids path mismatches when multiple workers share volumes.
# ============================================================================
FROM debian:${DEBIAN_VERSION}-slim AS worker-node

# ARGs declared before the first FROM are NOT visible inside a build stage
# unless redeclared here. Without this line ${NODE_VERSION} expands to an
# empty string and the NodeSource URL below becomes "setup_.x", breaking the
# build. 20 matches the Node major version previously hard-coded.
ARG NODE_VERSION=20

# Base runtime dependencies:
#   ca-certificates — TLS trust store for outbound HTTPS
#   curl            — fetches the NodeSource setup script below
#   libssl3         — OpenSSL runtime the worker binary links against
#   procps          — provides pgrep for the HEALTHCHECK
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    curl \
    libssl3 \
    procps \
    && rm -rf /var/lib/apt/lists/*

# Install Node.js from NodeSource (same method and version as worker-full so
# /usr/bin/node is the identical path across worker images).
# NOTE(review): curl | bash executes an unverified remote script; consider
# pinning a checksum or vendoring the setup script.
RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_VERSION}.x | bash - && \
    apt-get install -y nodejs && \
    rm -rf /var/lib/apt/lists/*

# Non-root runtime user. UID 1000 must match the chown performed by the
# volume init script so the worker can write to the shared volumes.
# /opt/attune/packs is mounted as a volume at runtime, not copied in.
RUN useradd -m -u 1000 attune && \
    mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
    chown -R attune:attune /opt/attune

WORKDIR /opt/attune

# Worker binary from the build stage + configuration template
COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker
COPY config.docker.yaml ./config.yaml

USER attune

# Runtime configuration, grouped into a single layer
ENV ATTUNE_WORKER_RUNTIMES="shell,node" \
    ATTUNE_WORKER_TYPE="container" \
    RUST_LOG=info \
    ATTUNE_CONFIG=/opt/attune/config.yaml

# Liveness probe: cheap process check (pgrep supplied by procps above)
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
    CMD pgrep -f attune-worker || exit 1

CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 2d: Full Worker (All runtimes)
# Runtime capabilities: shell, python, node, native
# ============================================================================
FROM debian:${DEBIAN_VERSION} AS worker-full

# ARGs declared before the first FROM are NOT visible inside a build stage
# unless redeclared here. Without this line ${NODE_VERSION} expands to an
# empty string and the NodeSource URL below becomes "setup_.x", breaking the
# build. Must stay in sync with worker-node so both ship the same Node major.
ARG NODE_VERSION=20

# System dependencies:
#   build-essential              — compiler toolchain for the "native" runtime
#   python3 / python3-pip / venv — the "python" runtime
#   curl                         — fetches the NodeSource setup script
#   procps                       — provides pgrep for the HEALTHCHECK
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    curl \
    libssl3 \
    procps \
    python3 \
    python3-pip \
    python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Install Node.js from NodeSource (same method and version as worker-node so
# /usr/bin/node is the identical path across worker images).
RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_VERSION}.x | bash - && \
    apt-get install -y nodejs && \
    rm -rf /var/lib/apt/lists/*

# Convenience symlink: pack scripts may invoke "python" rather than "python3"
RUN ln -sf /usr/bin/python3 /usr/bin/python

# Common Python packages used by packs.
# The version specifiers MUST be quoted: an unquoted ">" is shell output
# redirection, so "requests>=2.31.0" would install an unpinned "requests"
# and create a stray file named "=2.31.0" instead of applying the constraint.
# --break-system-packages is required for Debian 12+ (PEP 668 managed env).
RUN pip3 install --no-cache-dir --break-system-packages \
    "requests>=2.31.0" \
    "pyyaml>=6.0" \
    "jinja2>=3.1.0" \
    "python-dateutil>=2.8.0"

# Non-root runtime user. UID 1000 must match the chown performed by the
# volume init script so the worker can write packs/logs/runtime_envs on
# the shared volumes.
RUN useradd -m -u 1000 attune && \
    mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs && \
    chown -R attune:attune /opt/attune

WORKDIR /opt/attune

# Worker binary from the build stage
COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker
# Configuration template (packs are NOT copied in; mounted at runtime)
COPY config.docker.yaml ./config.yaml

USER attune

# Runtime configuration, grouped into a single layer
ENV ATTUNE_WORKER_RUNTIMES="shell,python,node,native" \
    ATTUNE_WORKER_TYPE="container" \
    RUST_LOG=info \
    ATTUNE_CONFIG=/opt/attune/config.yaml

# Liveness probe: cheap process check (pgrep supplied by procps above)
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
    CMD pgrep -f attune-worker || exit 1

CMD ["/usr/local/bin/attune-worker"]

View File

@@ -65,8 +65,22 @@ echo -e "${GREEN}✓${NC} Database is ready"
# Create target packs directory if it doesn't exist
echo -e "${YELLOW}${NC} Ensuring packs directory exists..."
mkdir -p "$TARGET_PACKS_DIR"
# Ensure the attune user (uid 1000) can write to the packs directory
# so the API service can install packs at runtime
# NOTE(review): UID 1000 is hard-coded; it must stay in sync with the
# `useradd -m -u 1000 attune` performed in the worker Dockerfile stages.
chown -R 1000:1000 "$TARGET_PACKS_DIR"
echo -e "${GREEN}${NC} Packs directory ready at: $TARGET_PACKS_DIR"
# Initialise runtime environments volume with correct ownership.
# Workers (running as attune uid 1000) need write access to create
# virtualenvs, node_modules, etc. at runtime.
# RUNTIME_ENVS_DIR may be overridden by the environment; the default matches
# the /opt/attune/runtime_envs path created in the worker Dockerfile stages.
RUNTIME_ENVS_DIR="${RUNTIME_ENVS_DIR:-/opt/attune/runtime_envs}"
# Tolerate a missing mount: use the directory if present, otherwise try to
# create it quietly; a failed mkdir (volume not mounted) is non-fatal and
# only skips the chown.
if [ -d "$RUNTIME_ENVS_DIR" ] || mkdir -p "$RUNTIME_ENVS_DIR" 2>/dev/null; then
chown -R 1000:1000 "$RUNTIME_ENVS_DIR"
echo -e "${GREEN}${NC} Runtime environments directory ready at: $RUNTIME_ENVS_DIR"
else
echo -e "${YELLOW}${NC} Runtime environments directory not mounted, skipping"
fi
# Check if source packs directory exists
if [ ! -d "$SOURCE_PACKS_DIR" ]; then
echo -e "${RED}${NC} Source packs directory not found: $SOURCE_PACKS_DIR"
@@ -208,6 +222,10 @@ for pack_dir in "$TARGET_PACKS_DIR"/*; do
done
echo ""
# Ensure ownership is correct after all packs have been copied
# The API service (running as attune uid 1000) needs write access to install new packs
chown -R 1000:1000 "$TARGET_PACKS_DIR"
echo -e "${BLUE}${NC} Pack files are accessible to all services via shared volume"
echo ""