# syntax=docker/dockerfile:1
# attune/docker/Dockerfile.worker.optimized
# Multi-stage Dockerfile for Attune worker service
#
# Simple and robust: build the entire workspace, then copy the worker binary
# into different runtime base images depending on language support needed.
# No dummy source compilation, no selective crate copying, no fragile hacks.
#
# Targets:
# worker-base - Shell only (lightweight)
# worker-python - Shell + Python
# worker-node - Shell + Node.js
# worker-full - Shell + Python + Node.js + Native
#
# Usage:
# DOCKER_BUILDKIT=1 docker build --target worker-base -t attune-worker:base -f docker/Dockerfile.worker.optimized .
# DOCKER_BUILDKIT=1 docker build --target worker-python -t attune-worker:python -f docker/Dockerfile.worker.optimized .
# DOCKER_BUILDKIT=1 docker build --target worker-node -t attune-worker:node -f docker/Dockerfile.worker.optimized .
# DOCKER_BUILDKIT=1 docker build --target worker-full -t attune-worker:full -f docker/Dockerfile.worker.optimized .
#
# Note: Packs are NOT copied into the image — they are mounted as volumes at runtime.
# Pinned toolchain/base-OS versions; override with --build-arg when needed.
# NOTE: ARGs declared before the first FROM are only visible inside FROM
# lines — stages that need NODE_VERSION redeclare it after their FROM.
ARG RUST_VERSION=1.92
ARG DEBIAN_VERSION=bookworm
ARG NODE_VERSION=20
# ============================================================================
# Stage 1: Builder - Compile the entire workspace
# ============================================================================
FROM rust:${RUST_VERSION}-${DEBIAN_VERSION} AS builder

# Build-time system deps for crates linking OpenSSL. --no-install-recommends
# keeps the layer lean; apt lists are removed in the same layer so they never
# persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    libssl-dev \
    pkg-config \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

# Increase rustc stack size to prevent SIGSEGV during release builds
ENV RUST_MIN_STACK=67108864

# Copy dependency metadata first so the `cargo fetch` layer stays cached
# when only source code changes (Cargo.toml/Cargo.lock stay the same).
COPY Cargo.toml Cargo.lock ./
COPY crates/common/Cargo.toml ./crates/common/Cargo.toml
COPY crates/api/Cargo.toml ./crates/api/Cargo.toml
COPY crates/executor/Cargo.toml ./crates/executor/Cargo.toml
COPY crates/sensor/Cargo.toml ./crates/sensor/Cargo.toml
COPY crates/core-timer-sensor/Cargo.toml ./crates/core-timer-sensor/Cargo.toml
COPY crates/worker/Cargo.toml ./crates/worker/Cargo.toml
COPY crates/notifier/Cargo.toml ./crates/notifier/Cargo.toml
COPY crates/cli/Cargo.toml ./crates/cli/Cargo.toml

# Create minimal stub sources so cargo can resolve the workspace manifest for
# `cargo fetch`. They are overwritten by the real sources below and are never
# compiled. The extra paths (benches/, agent_main.rs) must exist because the
# crate manifests declare those targets.
RUN mkdir -p crates/common/src && echo "" > crates/common/src/lib.rs && \
    mkdir -p crates/api/src && echo "fn main(){}" > crates/api/src/main.rs && \
    mkdir -p crates/executor/src && echo "fn main(){}" > crates/executor/src/main.rs && \
    mkdir -p crates/executor/benches && echo "fn main(){}" > crates/executor/benches/context_clone.rs && \
    mkdir -p crates/sensor/src && echo "fn main(){}" > crates/sensor/src/main.rs && \
    mkdir -p crates/core-timer-sensor/src && echo "fn main(){}" > crates/core-timer-sensor/src/main.rs && \
    mkdir -p crates/worker/src && echo "fn main(){}" > crates/worker/src/main.rs && \
    echo "fn main(){}" > crates/worker/src/agent_main.rs && \
    mkdir -p crates/notifier/src && echo "fn main(){}" > crates/notifier/src/main.rs && \
    mkdir -p crates/cli/src && echo "fn main(){}" > crates/cli/src/main.rs

# Download all dependencies (cached unless Cargo.toml/Cargo.lock change).
# Registry/git caches use sharing=shared: fetch is read-mostly and safe to
# share across concurrent builds.
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
    --mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
    cargo fetch

# Now copy the real source code and migrations (replaces the stubs above).
COPY migrations/ ./migrations/
COPY crates/ ./crates/

# Build the entire workspace in release mode; all binaries share dependency
# compilation. The target cache uses sharing=locked so concurrent service
# builds serialize writes instead of corrupting the shared compilation cache.
# -j 4 caps parallelism — presumably to bound peak build memory; confirm
# before changing. The binary is copied OUT of the cache mount because the
# mount's contents are not part of the image layer.
RUN --mount=type=cache,target=/usr/local/cargo/registry,sharing=shared \
    --mount=type=cache,target=/usr/local/cargo/git,sharing=shared \
    --mount=type=cache,target=/build/target,sharing=locked \
    cargo build --release --workspace --bins -j 4 && \
    cp /build/target/release/attune-worker /build/attune-worker

# Sanity check: fail the build early if the binary is missing or malformed.
RUN ls -lh /build/attune-worker && \
    file /build/attune-worker
# ============================================================================
# Stage 2a: Base Worker (Shell only)
# Runtime capabilities: shell
# ============================================================================
FROM debian:${DEBIAN_VERSION}-slim AS worker-base

# Minimal runtime deps: TLS roots + libssl3 for the Rust binary, bash for the
# shell runtime, curl for operator use, procps for the pgrep-based healthcheck.
RUN apt-get update && apt-get install -y --no-install-recommends \
    bash \
    ca-certificates \
    curl \
    libssl3 \
    procps \
    && rm -rf /var/lib/apt/lists/*

# Fixed UID 1000 keeps file ownership consistent on volumes shared between
# the worker variants.
RUN useradd -m -u 1000 attune && \
    mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs /opt/attune/config && \
    chown -R attune:attune /opt/attune

WORKDIR /opt/attune

COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker

# Drop privileges for the runtime process.
USER attune

# Runtime configuration read by the worker at startup.
ENV ATTUNE_WORKER_RUNTIMES="shell" \
    ATTUNE_WORKER_TYPE="container" \
    RUST_LOG=info \
    ATTUNE_CONFIG=/opt/attune/config/config.yaml

# Cheap liveness probe: the worker process must exist.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
    CMD pgrep -f attune-worker || exit 1

CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 2b: Python Worker (Shell + Python)
# Runtime capabilities: shell, python
#
# Uses debian-slim + apt python3 (NOT the python: Docker image) so that
# python3 lives at /usr/bin/python3 — the same path as worker-full.
# This avoids broken venv symlinks when multiple workers share the
# runtime_envs volume.
# ============================================================================
FROM debian:${DEBIAN_VERSION}-slim AS worker-python

RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    curl \
    libssl3 \
    procps \
    python3 \
    python3-pip \
    python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Create python symlink for convenience
RUN ln -sf /usr/bin/python3 /usr/bin/python

# --break-system-packages is required for pip-into-system-python on Debian 12+.
# The version specifiers MUST be quoted: unquoted, the shell treats `>` as
# output redirection and pip silently installs unpinned latest versions.
RUN pip3 install --no-cache-dir --break-system-packages \
    "jinja2>=3.1.0" \
    "python-dateutil>=2.8.0" \
    "pyyaml>=6.0" \
    "requests>=2.31.0"

# Fixed UID 1000 keeps file ownership consistent on shared volumes.
RUN useradd -m -u 1000 attune && \
    mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs /opt/attune/config && \
    chown -R attune:attune /opt/attune

WORKDIR /opt/attune

COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker

USER attune

# Runtime configuration read by the worker at startup.
ENV ATTUNE_WORKER_RUNTIMES="shell,python" \
    ATTUNE_WORKER_TYPE="container" \
    RUST_LOG=info \
    ATTUNE_CONFIG=/opt/attune/config/config.yaml

# Cheap liveness probe: the worker process must exist.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
    CMD pgrep -f attune-worker || exit 1

CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 2c: Node Worker (Shell + Node.js)
# Runtime capabilities: shell, node
#
# Uses debian-slim + NodeSource apt repo (NOT the node: Docker image) so that
# node lives at /usr/bin/node — the same path as worker-full.
# This avoids path mismatches when multiple workers share volumes.
# ============================================================================
FROM debian:${DEBIAN_VERSION}-slim AS worker-node

# Redeclare so the pre-FROM ARG is visible inside this stage.
ARG NODE_VERSION=20

RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    curl \
    libssl3 \
    procps \
    && rm -rf /var/lib/apt/lists/*

# Install Node.js from NodeSource (same method as worker-full).
# Download to a file instead of `curl | bash`: under /bin/sh there is no
# pipefail, so a failed/truncated download piped into bash could otherwise
# succeed silently or execute a partial script. curl -f fails on HTTP errors.
RUN curl -fsSL "https://deb.nodesource.com/setup_${NODE_VERSION}.x" -o /tmp/nodesource_setup.sh && \
    bash /tmp/nodesource_setup.sh && \
    apt-get install -y --no-install-recommends nodejs && \
    rm -f /tmp/nodesource_setup.sh && \
    rm -rf /var/lib/apt/lists/*

# Fixed UID 1000 keeps file ownership consistent on shared volumes.
RUN useradd -m -u 1000 attune && \
    mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs /opt/attune/config && \
    chown -R attune:attune /opt/attune

WORKDIR /opt/attune

COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker

USER attune

# Runtime configuration read by the worker at startup.
ENV ATTUNE_WORKER_RUNTIMES="shell,node" \
    ATTUNE_WORKER_TYPE="container" \
    RUST_LOG=info \
    ATTUNE_CONFIG=/opt/attune/config/config.yaml

# Cheap liveness probe: the worker process must exist.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
    CMD pgrep -f attune-worker || exit 1

CMD ["/usr/local/bin/attune-worker"]
# ============================================================================
# Stage 2d: Full Worker (All runtimes)
# Runtime capabilities: shell, python, node, native
# Uses the full (non-slim) Debian base for native-runtime tooling.
# ============================================================================
FROM debian:${DEBIAN_VERSION} AS worker-full

# Redeclare so the pre-FROM ARG is visible inside this stage.
ARG NODE_VERSION=20

RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    curl \
    libssl3 \
    procps \
    python3 \
    python3-pip \
    python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Install Node.js from NodeSource (same method and version as worker-node).
# Download to a file instead of `curl | bash`: under /bin/sh there is no
# pipefail, so a failed download piped into bash could fail silently.
RUN curl -fsSL "https://deb.nodesource.com/setup_${NODE_VERSION}.x" -o /tmp/nodesource_setup.sh && \
    bash /tmp/nodesource_setup.sh && \
    apt-get install -y --no-install-recommends nodejs && \
    rm -f /tmp/nodesource_setup.sh && \
    rm -rf /var/lib/apt/lists/*

# Create python symlink for convenience (matches worker-python).
RUN ln -sf /usr/bin/python3 /usr/bin/python

# --break-system-packages is required for pip-into-system-python on Debian 12+.
# The version specifiers MUST be quoted: unquoted, the shell treats `>` as
# output redirection and pip silently installs unpinned latest versions.
RUN pip3 install --no-cache-dir --break-system-packages \
    "jinja2>=3.1.0" \
    "python-dateutil>=2.8.0" \
    "pyyaml>=6.0" \
    "requests>=2.31.0"

# Fixed UID 1000 keeps file ownership consistent on shared volumes.
RUN useradd -m -u 1000 attune && \
    mkdir -p /opt/attune/packs /opt/attune/logs /opt/attune/runtime_envs /opt/attune/config && \
    chown -R attune:attune /opt/attune

WORKDIR /opt/attune

COPY --from=builder /build/attune-worker /usr/local/bin/attune-worker

USER attune

# Runtime configuration read by the worker at startup.
ENV ATTUNE_WORKER_RUNTIMES="shell,python,node,native" \
    ATTUNE_WORKER_TYPE="container" \
    RUST_LOG=info \
    ATTUNE_CONFIG=/opt/attune/config/config.yaml

# Cheap liveness probe: the worker process must exist.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
    CMD pgrep -f attune-worker || exit 1

CMD ["/usr/local/bin/attune-worker"]