diff --git a/AGENTS.md b/AGENTS.md
index 42f7253..722bc23 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Disinto — Agent Instructions
 
 ## What this repo is
@@ -39,7 +39,7 @@ disinto/ (code repo)
 │   hooks/ — Claude Code session hooks (on-compact-reinject, on-idle-stop, on-phase-change, on-pretooluse-guard, on-session-end, on-stop-failure)
 │   init/nomad/ — cluster-up.sh, install.sh, vault-init.sh, lib-systemd.sh (Nomad+Vault Step 0 installers, #821-#825); wp-oauth-register.sh (Forgejo OAuth2 app + Vault KV seeder for Woodpecker, S3.3); deploy.sh (dependency-ordered Nomad job deploy + health-wait, S4)
 ├── nomad/ server.hcl, client.hcl (allow_privileged for woodpecker-agent, S3-fix-5), vault.hcl — HCL configs deployed to /etc/nomad.d/ and /etc/vault.d/ by lib/init/nomad/cluster-up.sh
-│   jobs/ — Nomad jobspecs: forgejo.hcl (Vault secrets via template, S2.4); woodpecker-server.hcl + woodpecker-agent.hcl (host-net, docker.sock, Vault KV, S3.1-S3.2); agents.hcl (7 roles, llama, Vault-templated bot tokens, S4.1); vault-runner.hcl (parameterized batch dispatch, S5.3); staging.hcl (Caddy file-server, S5.2); chat.hcl (Claude chat UI, Vault OAuth secrets, S5.2); edge.hcl (Caddy proxy + dispatcher sidecar, S5.1)
+│   jobs/ — Nomad jobspecs: forgejo.hcl (Vault secrets via template, S2.4); woodpecker-server.hcl + woodpecker-agent.hcl (host-net, docker.sock, Vault KV, S3.1-S3.2); agents.hcl (7 roles, llama, Vault-templated bot tokens, S4.1); vault-runner.hcl (parameterized batch dispatch, S5.3)
 ├── projects/ *.toml.example — templates; *.toml — local per-box config (gitignored)
 ├── formulas/ Issue templates (TOML specs for multi-step agent tasks)
 ├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge dispatcher, chat (server.py, entrypoint-chat.sh, Dockerfile, ui/)
diff --git a/architect/AGENTS.md b/architect/AGENTS.md
index b2bd57a..d759433 100644
--- a/architect/AGENTS.md
+++ b/architect/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Architect — Agent Instructions
 
 ## What this agent is
diff --git a/bin/disinto b/bin/disinto
index c18ef0c..e08bdcf 100755
--- a/bin/disinto
+++ b/bin/disinto
@@ -82,7 +82,7 @@ Init options:
   --ci-id      Woodpecker CI repo ID (default: 0 = no CI)
   --forge-url  Forge base URL (default: http://localhost:3000)
   --backend    Orchestration backend: docker (default) | nomad
-  --with       (nomad) Deploy services: forgejo,woodpecker,agents,staging,chat,edge[,...] (S1.3, S3.4, S4.2, S5.2, S5.5)
+  --with       (nomad) Deploy services: forgejo,woodpecker,agents[,...] (S1.3, S3.4, S4.2)
   --empty      (nomad) Bring up cluster only, no jobs (S0.4)
   --bare       Skip compose generation (bare-metal setup)
   --build      Use local docker build instead of registry images (dev mode)
@@ -787,7 +787,7 @@ _disinto_init_nomad() {
   # real-run path so dry-run output accurately represents execution order.
   # Build ordered deploy list: only include services present in with_services
   local DEPLOY_ORDER=""
-  for ordered_svc in forgejo woodpecker-server woodpecker-agent agents staging chat edge; do
+  for ordered_svc in forgejo woodpecker-server woodpecker-agent agents; do
     if echo ",$with_services," | grep -q ",$ordered_svc,"; then
       DEPLOY_ORDER="${DEPLOY_ORDER:+${DEPLOY_ORDER} }${ordered_svc}"
     fi
   done
@@ -801,7 +801,6 @@ _disinto_init_nomad() {
       case "$svc" in
         woodpecker-server|woodpecker-agent) seed_name="woodpecker" ;;
         agents) seed_name="agents" ;;
-        chat) seed_name="chat" ;;
       esac
       local seed_script="${FACTORY_ROOT}/tools/vault-seed-${seed_name}.sh"
       if [ -x "$seed_script" ]; then
@@ -824,29 +823,15 @@ _disinto_init_nomad() {
     echo "[deploy] dry-run complete"
   fi
 
-  # Dry-run vault-runner (unconditionally, not gated by --with)
-  echo ""
-  echo "── Vault-runner dry-run ───────────────────────────────────"
-  local vault_runner_path="${FACTORY_ROOT}/nomad/jobs/vault-runner.hcl"
-  if [ -f "$vault_runner_path" ]; then
-    echo "[deploy] vault-runner: [dry-run] nomad job validate ${vault_runner_path}"
-    echo "[deploy] vault-runner: [dry-run] nomad job run -detach ${vault_runner_path}"
-  else
-    echo "[deploy] vault-runner: jobspec not found, skipping"
-  fi
-
-  # Build custom images dry-run (if agents, chat, or edge services are included)
-  if echo ",$with_services," | grep -qE ",(agents|chat|edge),"; then
+  # Build custom images dry-run (if agents or chat services are included)
+  if echo ",$with_services," | grep -qE ",(agents|chat),"; then
     echo ""
     echo "── Build images dry-run ──────────────────────────────"
     if echo ",$with_services," | grep -q ",agents,"; then
       echo "[build] [dry-run] docker build -t disinto/agents:local -f ${FACTORY_ROOT}/docker/agents/Dockerfile ${FACTORY_ROOT}"
     fi
     if echo ",$with_services," | grep -q ",chat,"; then
-      echo "[build] [dry-run] docker build -t disinto/chat:local -f ${FACTORY_ROOT}/docker/chat/Dockerfile ${FACTORY_ROOT}/docker/chat"
-    fi
-    if echo ",$with_services," | grep -q ",edge,"; then
-      echo "[build] [dry-run] docker build -t disinto/edge:local -f ${FACTORY_ROOT}/docker/edge/Dockerfile ${FACTORY_ROOT}/docker/edge"
+      echo "[build] [dry-run] docker build -t disinto/chat:local -f ${FACTORY_ROOT}/docker/chat/Dockerfile ${FACTORY_ROOT}"
     fi
   fi
   exit 0
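One detail worth calling out in these `--with` hunks: every membership test wraps both the service list and the needle in commas, which makes `grep -q` behave like an exact-token match instead of a substring match. A minimal self-contained sketch of the idiom:

```bash
#!/usr/bin/env bash
# Comma-wrap both the list and the needle so grep matches whole tokens,
# not substrings ("agents" must not match "agents-llama").
has_service() {
  local list="$1" svc="$2"
  echo ",${list}," | grep -q ",${svc},"
}

has_service "forgejo,agents"       "agents" && echo "agents: yes"
has_service "forgejo,agents-llama" "agents" || echo "agents: no substring hit"
```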
@@ -936,10 +921,10 @@ _disinto_init_nomad() {
     echo "[import] no --import-env/--import-sops — skipping; set them or seed kv/disinto/* manually before deploying secret-dependent services"
   fi
 
-  # Build custom images required by Nomad jobs (S4.2, S5.2, S5.5) — before deploy.
+  # Build custom images required by Nomad jobs (S4.2, S5.2) — before deploy.
   # Single-node factory dev box: no multi-node pull needed, no registry auth.
   # Can upgrade to approach B (registry push/pull) later if multi-node.
-  if echo ",$with_services," | grep -qE ",(agents|chat|edge),"; then
+  if echo ",$with_services," | grep -qE ",(agents|chat),"; then
     echo ""
     echo "── Building custom images ─────────────────────────────"
     if echo ",$with_services," | grep -q ",agents,"; then
@@ -950,12 +935,7 @@ _disinto_init_nomad() {
     if echo ",$with_services," | grep -q ",chat,"; then
       local tag="disinto/chat:local"
       echo "── Building $tag ─────────────────────────────"
-      docker build -t "$tag" -f "${FACTORY_ROOT}/docker/chat/Dockerfile" "${FACTORY_ROOT}/docker/chat" 2>&1 | tail -5
-    fi
-    if echo ",$with_services," | grep -q ",edge,"; then
-      local tag="disinto/edge:local"
-      echo "── Building $tag ─────────────────────────────"
-      docker build -t "$tag" -f "${FACTORY_ROOT}/docker/edge/Dockerfile" "${FACTORY_ROOT}/docker/edge" 2>&1 | tail -5
+      docker build -t "$tag" -f "${FACTORY_ROOT}/docker/chat/Dockerfile" "${FACTORY_ROOT}" 2>&1 | tail -5
     fi
   fi
 
@@ -967,9 +947,9 @@ _disinto_init_nomad() {
 
   if [ -n "$with_services" ]; then
     local vault_addr="${VAULT_ADDR:-http://127.0.0.1:8200}"
-    # Build ordered deploy list (S3.4, S4.2, S5.2, S5.5): forgejo → woodpecker-server → woodpecker-agent → agents → staging → chat → edge
+    # Build ordered deploy list (S3.4, S4.2): forgejo → woodpecker-server → woodpecker-agent → agents
    local DEPLOY_ORDER=""
-    for ordered_svc in forgejo woodpecker-server woodpecker-agent agents staging chat edge; do
+    for ordered_svc in forgejo woodpecker-server woodpecker-agent agents; do
      if echo ",$with_services," | grep -q ",$ordered_svc,"; then
        DEPLOY_ORDER="${DEPLOY_ORDER:+${DEPLOY_ORDER} }${ordered_svc}"
      fi
    done
@@ -1002,23 +982,6 @@ _disinto_init_nomad() {
       # Deploy this service
       echo ""
       echo "── Deploying ${svc} ───────────────────────────────────────"
-
-      # Seed host volumes before deployment (if needed)
-      case "$svc" in
-        staging)
-          # Seed site-content host volume (/srv/disinto/docker) with static content
-          # The staging jobspec mounts this volume read-only to /srv/site
-          local site_content_src="${FACTORY_ROOT}/docker/index.html"
-          local site_content_dst="/srv/disinto/docker"
-          if [ -f "$site_content_src" ] && [ -d "$site_content_dst" ]; then
-            if ! cmp -s "$site_content_src" "${site_content_dst}/index.html" 2>/dev/null; then
-              echo "[staging] seeding site-content volume..."
-              cp "$site_content_src" "${site_content_dst}/index.html"
-            fi
-          fi
-          ;;
-      esac
-
       local jobspec_path="${FACTORY_ROOT}/nomad/jobs/${svc}.hcl"
       if [ ! -f "$jobspec_path" ]; then
         echo "Error: jobspec not found: ${jobspec_path}" >&2
@@ -1037,27 +1000,6 @@ _disinto_init_nomad() {
       fi
     done
 
-    # Run vault-runner (unconditionally, not gated by --with) — infrastructure job
-    # vault-runner is always present since it's needed for vault action dispatch
-    echo ""
-    echo "── Running vault-runner ────────────────────────────────────"
-    local vault_runner_path="${FACTORY_ROOT}/nomad/jobs/vault-runner.hcl"
-    if [ -f "$vault_runner_path" ]; then
-      echo "[deploy] vault-runner: running Nomad job (infrastructure)"
-      local -a vault_runner_cmd=("$deploy_sh" "vault-runner")
-      if [ "$(id -u)" -eq 0 ]; then
-        "${vault_runner_cmd[@]}" || exit $?
-      else
-        if ! command -v sudo >/dev/null 2>&1; then
-          echo "Error: deploy.sh must run as root and sudo is not installed" >&2
-          exit 1
-        fi
-        sudo -n -- "${vault_runner_cmd[@]}" || exit $?
-      fi
-    else
-      echo "[deploy] vault-runner: jobspec not found, skipping"
-    fi
-
     # Print final summary
     echo ""
     echo "── Summary ────────────────────────────────────────────"
@@ -1214,25 +1156,14 @@ disinto_init() {
       fi
     fi
 
-    # Auto-include all dependencies when edge is requested (S5.5)
-    if echo ",$with_services," | grep -q ",edge,"; then
-      # Edge depends on all backend services
-      for dep in forgejo woodpecker-server woodpecker-agent agents staging chat; do
-        if ! echo ",$with_services," | grep -q ",${dep},"; then
-          echo "Note: --with edge implies --with ${dep} (edge depends on all backend services)"
-          with_services="${with_services},${dep}"
-        fi
-      done
-    fi
-
     # Validate all service names are known
     local IFS=','
     for _svc in $with_services; do
       _svc=$(echo "$_svc" | xargs)
       case "$_svc" in
-        forgejo|woodpecker-server|woodpecker-agent|agents|staging|chat|edge) ;;
+        forgejo|woodpecker-server|woodpecker-agent|agents) ;;
         *)
-          echo "Error: unknown service '${_svc}' — known: forgejo, woodpecker-server, woodpecker-agent, agents, staging, chat, edge" >&2
+          echo "Error: unknown service '${_svc}' — known: forgejo, woodpecker-server, woodpecker-agent, agents" >&2
           exit 1
           ;;
       esac
diff --git a/dev/AGENTS.md b/dev/AGENTS.md
index ff529af..f51a037 100644
--- a/dev/AGENTS.md
+++ b/dev/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Dev Agent
 
 **Role**: Implement issues autonomously — write code, push branches, address
diff --git a/docker/chat/Dockerfile b/docker/chat/Dockerfile
index c4cb28b..3d89863 100644
--- a/docker/chat/Dockerfile
+++ b/docker/chat/Dockerfile
@@ -1,22 +1,20 @@
 # disinto-chat — minimal HTTP backend for Claude chat UI
 #
-# Small Debian slim base with Python runtime and Node.js.
+# Small Debian slim base with Python runtime.
 # Chosen for simplicity and small image size (~100MB).
 #
 # Image size: ~100MB (well under the 200MB ceiling)
 #
-# Claude CLI is baked into the image — same pattern as the agents container.
+# The claude binary is mounted from the host at runtime via docker-compose,
+# not baked into the image — same pattern as the agents container.
 
 FROM debian:bookworm-slim
 
-# Install Node.js (required for Claude CLI) and Python
+# Install Python (Node.js is no longer needed; the Claude CLI comes from the host mount)
 RUN apt-get update && apt-get install -y --no-install-recommends \
-    nodejs npm python3 \
+    python3 \
     && rm -rf /var/lib/apt/lists/*
 
-# Install Claude Code CLI — chat backend runtime
-RUN npm install -g @anthropic-ai/claude-code@2.1.84
-
 # Non-root user — fixed UID 10001 for sandbox hardening (#706)
 RUN useradd -m -u 10001 -s /bin/bash chat
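For context on the revised Dockerfile comment, the runtime host-mount it describes would look roughly like this; the binary path and command are illustrative, and the real wiring lives in the generated docker-compose.yml:

```bash
# Illustrative only: the generated docker-compose.yml does the real wiring.
# Assumes the host `claude` is the self-contained binary the comment implies,
# and that the image's entrypoint tolerates an explicit command.
CLAUDE_BIN="$(command -v claude)"
docker run --rm \
  -v "${CLAUDE_BIN}:/usr/local/bin/claude:ro" \
  -u 10001 \
  disinto/chat:local \
  claude --version
```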
diff --git a/docker/edge/dispatcher.sh b/docker/edge/dispatcher.sh
index 282342a..a48abf2 100755
--- a/docker/edge/dispatcher.sh
+++ b/docker/edge/dispatcher.sh
@@ -560,168 +560,10 @@ _launch_runner_docker() {
 
 # _launch_runner_nomad ACTION_ID SECRETS_CSV MOUNTS_CSV
 #
-# Dispatches a vault-runner batch job via `nomad job dispatch`.
-# Polls `nomad job status` until terminal state (completed/failed).
-# Reads exit code from allocation and writes .result.json.
-#
-# Usage: _launch_runner_nomad
-# Returns: exit code of the nomad job (0=success, non-zero=failure)
+# Nomad backend stub — will be implemented in migration Step 5.
 _launch_runner_nomad() {
-  local action_id="$1"
-  local secrets_csv="$2"
-  local mounts_csv="$3"
-
-  log "Dispatching vault-runner batch job via Nomad for action: ${action_id}"
-
-  # Dispatch the parameterized batch job
-  # The vault-runner job expects meta: action_id, secrets_csv
-  # Note: mounts_csv is not passed as meta (not declared in vault-runner.hcl)
-  local dispatch_output
-  dispatch_output=$(nomad job dispatch \
-    -detach \
-    -meta action_id="$action_id" \
-    -meta secrets_csv="$secrets_csv" \
-    vault-runner 2>&1) || {
-    log "ERROR: Failed to dispatch vault-runner job for ${action_id}"
-    log "Dispatch output: ${dispatch_output}"
-    write_result "$action_id" 1 "Nomad dispatch failed: ${dispatch_output}"
-    return 1
-  }
-
-  # Extract dispatched job ID from output (format: "vault-runner/dispatch--")
-  local dispatched_job_id
-  dispatched_job_id=$(echo "$dispatch_output" | grep -oP '(?<=Dispatched Job ID = ).+' || true)
-
-  if [ -z "$dispatched_job_id" ]; then
-    log "ERROR: Could not extract dispatched job ID from nomad output"
-    log "Dispatch output: ${dispatch_output}"
-    write_result "$action_id" 1 "Could not extract dispatched job ID from nomad output"
-    return 1
-  fi
-
-  log "Dispatched vault-runner with job ID: ${dispatched_job_id}"
-
-  # Poll job status until terminal state
-  # Batch jobs transition: running -> completed/failed
-  local max_wait=300 # 5 minutes max wait
-  local elapsed=0
-  local poll_interval=5
-  local alloc_id=""
-
-  log "Polling nomad job status for ${dispatched_job_id}..."
-
-  while [ "$elapsed" -lt "$max_wait" ]; do
-    # Get job status with JSON output for the dispatched child job
-    local job_status_json
-    job_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || {
-      log "ERROR: Failed to get job status for ${dispatched_job_id}"
-      write_result "$action_id" 1 "Failed to get job status for ${dispatched_job_id}"
-      return 1
-    }
-
-    # Check job status field (transitions to "dead" on completion)
-    local job_state
-    job_state=$(echo "$job_status_json" | jq -r '.Status // empty' 2>/dev/null) || job_state=""
-
-    # Check allocation state directly
-    alloc_id=$(echo "$job_status_json" | jq -r '.Allocations[0]?.ID // empty' 2>/dev/null) || alloc_id=""
-
-    if [ -n "$alloc_id" ]; then
-      local alloc_state
-      alloc_state=$(nomad alloc status -short "$alloc_id" 2>/dev/null || true)
-
-      case "$alloc_state" in
-        *completed*|*success*|*dead*)
-          log "Allocation ${alloc_id} reached terminal state: ${alloc_state}"
-          break
-          ;;
-        *running*|*pending*|*starting*)
-          log "Allocation ${alloc_id} still running (state: ${alloc_state})..."
-          ;;
-        *failed*|*crashed*)
-          log "Allocation ${alloc_id} failed (state: ${alloc_state})"
-          break
-          ;;
-      esac
-    fi
-
-    # Also check job-level state
-    case "$job_state" in
-      dead)
-        log "Job ${dispatched_job_id} reached terminal state: ${job_state}"
-        break
-        ;;
-      failed)
-        log "Job ${dispatched_job_id} failed"
-        break
-        ;;
-    esac
-
-    sleep "$poll_interval"
-    elapsed=$((elapsed + poll_interval))
-  done
-
-  if [ "$elapsed" -ge "$max_wait" ]; then
-    log "ERROR: Timeout waiting for vault-runner job to complete"
-    write_result "$action_id" 1 "Timeout waiting for nomad job to complete"
-    return 1
-  fi
-
-  # Get final job status and exit code
-  local final_status_json
-  final_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || {
-    log "ERROR: Failed to get final job status"
-    write_result "$action_id" 1 "Failed to get final job status"
-    return 1
-  }
-
-  # Get allocation exit code
-  local exit_code=0
-  local logs=""
-
-  if [ -n "$alloc_id" ]; then
-    # Get allocation logs
-    logs=$(nomad alloc logs -short "$alloc_id" 2>/dev/null || true)
-
-    # Try to get exit code from alloc status JSON
-    # Nomad alloc status -json has .TaskStates[""].Events[].ExitCode
-    local alloc_exit_code
-    alloc_exit_code=$(nomad alloc status -json "$alloc_id" 2>/dev/null | jq -r '.TaskStates["runner"].Events[-1].ExitCode // empty' 2>/dev/null) || alloc_exit_code=""
-
-    if [ -n "$alloc_exit_code" ] && [ "$alloc_exit_code" != "null" ]; then
-      exit_code="$alloc_exit_code"
-    fi
-  fi
-
-  # If we couldn't get exit code from alloc, check job state as fallback
-  # Note: "dead" = terminal state for batch jobs (includes successful completion)
-  # Only "failed" indicates actual failure
-  if [ "$exit_code" -eq 0 ]; then
-    local final_state
-    final_state=$(echo "$final_status_json" | jq -r '.Status // empty' 2>/dev/null) || final_state=""
-
-    case "$final_state" in
-      failed)
-        exit_code=1
-        ;;
-    esac
-  fi
-
-  # Truncate logs if too long
-  if [ ${#logs} -gt 1000 ]; then
-    logs="${logs: -1000}"
-  fi
-
-  # Write result file
-  write_result "$action_id" "$exit_code" "$logs"
-
-  if [ "$exit_code" -eq 0 ]; then
-    log "Vault-runner job completed successfully for action: ${action_id}"
-  else
-    log "Vault-runner job failed for action: ${action_id} (exit code: ${exit_code})"
-  fi
-
-  return "$exit_code"
+  echo "nomad backend not yet implemented" >&2
+  return 1
 }
 
 # Launch runner for the given action (backend-agnostic orchestrator)
@@ -1209,8 +1051,11 @@ main() {
 
   # Validate backend selection at startup
   case "$DISPATCHER_BACKEND" in
-    docker|nomad)
-      log "Using ${DISPATCHER_BACKEND} backend for vault-runner dispatch"
+    docker) ;;
+    nomad)
+      log "ERROR: nomad backend not yet implemented"
+      echo "nomad backend not yet implemented" >&2
+      exit 1
       ;;
     *)
      log "ERROR: unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND}"
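Taken together, the two dispatcher hunks give a fail-fast shape: the startup gate refuses a `nomad` backend outright, and even a bypassed gate lands on a stub that returns 1. A minimal standalone sketch of that gate (the `docker` default here is an assumption; the dispatcher's real default may differ):

```bash
#!/usr/bin/env bash
# Reject unsupported backends at startup instead of at first dispatch.
DISPATCHER_BACKEND="${DISPATCHER_BACKEND:-docker}"
case "$DISPATCHER_BACKEND" in
  docker) ;;  # the only implemented backend after this change
  nomad)
    echo "nomad backend not yet implemented" >&2
    exit 1
    ;;
  *)
    echo "unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND}" >&2
    exit 1
    ;;
esac
```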
diff --git a/docker/edge/entrypoint-edge.sh b/docker/edge/entrypoint-edge.sh
index 6db96b7..1b5f94f 100755
--- a/docker/edge/entrypoint-edge.sh
+++ b/docker/edge/entrypoint-edge.sh
@@ -234,13 +234,6 @@ fi
   rm -f "$_fetch_log"
 done) &
 
-# Nomad template renders Caddyfile to /local/Caddyfile via service discovery;
-# copy it into the expected location if present (compose uses the mounted path).
-if [ -f /local/Caddyfile ]; then
-  cp /local/Caddyfile /etc/caddy/Caddyfile
-  echo "edge: using Nomad-rendered Caddyfile from /local/Caddyfile" >&2
-fi
-
 # Caddy as main process — run in foreground via wait so background jobs survive
 # (exec replaces the shell, which can orphan backgrounded subshells)
 caddy run --config /etc/caddy/Caddyfile --adapter caddyfile &
diff --git a/gardener/AGENTS.md b/gardener/AGENTS.md
index fdfae86..cdf829b 100644
--- a/gardener/AGENTS.md
+++ b/gardener/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Gardener Agent
 
 **Role**: Backlog grooming — detect duplicate issues, missing acceptance
diff --git a/gardener/pending-actions.json b/gardener/pending-actions.json
index 724b2ee..fe51488 100644
--- a/gardener/pending-actions.json
+++ b/gardener/pending-actions.json
@@ -1,12 +1 @@
-[
-  {
-    "action": "edit_body",
-    "issue": 996,
-    "body": "Flagged by AI reviewer in PR #993.\n\n## Problem\n\nThe consul-template with/else/end pattern using aggressive whitespace trimming (e.g. `{{- with secret ... -}}` / `{{- else -}}` / `{{- end }}` then immediately `{{- with`) strips all newlines between consecutive single-variable env blocks at parse time. This would render the secrets env file as one concatenated line (`GITHUB_TOKEN=valCODEBERG_TOKEN=val...`), which Nomad's `env = true` cannot parse correctly.\n\n## Why not blocked\n\nagents.hcl has been runtime-tested (S4-fix-6 and S4-fix-7 made observable runtime fixes). If the env file were broken, all bot tokens would be absent — a loud, observable failure. This suggests consul-template may handle whitespace trimming differently from raw Go text/template. Needs runtime verification.\n\n## Verification\n\nDeploy either job and inspect the rendered secrets file:\n```\nnomad alloc exec cat /secrets/bots.env\n```\nConfirm each KEY=VALUE pair is on its own line.\n\n---\n*Auto-created from AI review*\n\n## Affected files\n- `nomad/jobs/agents.hcl` — bots.env template (lines 147-189)\n- `nomad/jobs/vault-runner.hcl` — runner.env template (PR #993)\n\n## Acceptance criteria\n- [ ] Deploy `agents` or `vault-runner` job on factory host\n- [ ] Inspect rendered secrets file: `nomad alloc exec cat /secrets/bots.env`\n- [ ] Confirm each KEY=VALUE pair is on its own line (not concatenated)\n- [ ] If broken: fix whitespace trimming to preserve newlines between blocks; if fine, close as not-a-bug"
-  },
-  {
-    "action": "add_label",
-    "issue": 996,
-    "label": "backlog"
-  }
-]
+[]
diff --git a/lib/AGENTS.md b/lib/AGENTS.md
index 146648a..9c69784 100644
--- a/lib/AGENTS.md
+++ b/lib/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Shared Helpers (`lib/`)
 
 All agents source `lib/env.sh` as their first action. Additional helpers are
@@ -30,9 +30,9 @@ sourced as needed.
 | `lib/git-creds.sh` | Shared git credential helper configuration. `configure_git_creds([HOME_DIR] [RUN_AS_CMD])` — writes a static credential helper script and configures git globally to use password-based HTTP auth (Forgejo 11.x rejects API tokens for `git push`, #361). **Retry on cold boot (#741)**: resolves bot username from `FORGE_TOKEN` with 5 retries (exponential backoff 1-5s); fails loudly and returns 1 if Forgejo is unreachable — never falls back to a wrong hardcoded default (exports `BOT_USER` on success). `repair_baked_cred_urls([--as RUN_AS_CMD] DIR ...)` — rewrites any git remote URLs that have credentials baked in to use clean URLs instead; uses `safe.directory` bypass for root-owned repos (#671). Requires `FORGE_PASS`, `FORGE_URL`, `FORGE_TOKEN`. | entrypoints (agents, edge) |
 | `lib/ops-setup.sh` | `setup_ops_repo()` — creates ops repo on Forgejo if it doesn't exist, configures bot collaborators, clones/initializes ops repo locally, seeds directory structure (vault, knowledge, evidence, sprints). Evidence subdirectories seeded: engagement/, red-team/, holdout/, evolution/, user-test/. Also seeds sprints/ for architect output. Exports `_ACTUAL_OPS_SLUG`. `migrate_ops_repo(ops_root, [primary_branch])` — idempotent migration helper that seeds missing directories and .gitkeep files on existing ops repos (pre-#407 deployments). | bin/disinto (init) |
 | `lib/ci-setup.sh` | `_install_cron_impl()` — installs crontab entries for bare-metal deployments (compose mode uses polling loop instead). `_create_forgejo_oauth_app()` — generic helper to create an OAuth2 app on Forgejo (shared by Woodpecker and chat). `_create_woodpecker_oauth_impl()` — creates Woodpecker OAuth2 app (thin wrapper). `_create_chat_oauth_impl()` — creates disinto-chat OAuth2 app, writes `CHAT_OAUTH_CLIENT_ID`/`CHAT_OAUTH_CLIENT_SECRET` to `.env` (#708). `_generate_woodpecker_token_impl()` — auto-generates WOODPECKER_TOKEN via OAuth2 flow. `_activate_woodpecker_repo_impl()` — activates repo in Woodpecker. All gated by `_load_ci_context()` which validates required env vars. | bin/disinto (init) |
-| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; `CLAUDE_BIN_DIR` volume mount removed from agents/llama services — only `reproduce` and `edge` still use the host-mounted CLI (#992); adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); cost-cap env vars `CHAT_MAX_REQUESTS_PER_HOUR`, `CHAT_MAX_REQUESTS_PER_DAY`, `CHAT_MAX_TOKENS_PER_DAY` (#711); subdomain fallback comment for `EDGE_TUNNEL_FQDN_*` vars (#713); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) |
+| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); cost-cap env vars `CHAT_MAX_REQUESTS_PER_HOUR`, `CHAT_MAX_REQUESTS_PER_DAY`, `CHAT_MAX_TOKENS_PER_DAY` (#711); subdomain fallback comment for `EDGE_TUNNEL_FQDN_*` vars (#713); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) |
 | `lib/sprint-filer.sh` | Post-merge sub-issue filer for sprint PRs. Invoked by the `.woodpecker/ops-filer.yml` pipeline after a sprint PR merges to ops repo `main`. Parses ` ... ` blocks from sprint PR bodies to extract sub-issue definitions, creates them on the project repo using `FORGE_FILER_TOKEN` (narrow-scope `filer-bot` identity with `issues:write` only), adds `in-progress` label to the parent vision issue, and handles vision lifecycle closure when all sub-issues are closed. Uses `filer_api_all()` for paginated fetches. Idempotent: uses `` markers to skip already-filed issues. Requires `FORGE_FILER_TOKEN`, `FORGE_API`, `FORGE_API_BASE`, `FORGE_OPS_REPO`. | `.woodpecker/ops-filer.yml` (CI pipeline on ops repo) |
 | `lib/hire-agent.sh` | `disinto_hire_an_agent()` — user creation, `.profile` repo setup, formula copying, branch protection, and state marker creation for hiring a new agent. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`, `PROJECT_NAME`. Extracted from `bin/disinto`. | bin/disinto (hire) |
 | `lib/release.sh` | `disinto_release()` — vault TOML creation, branch setup on ops repo, PR creation, and auto-merge request for a versioned release. `_assert_release_globals()` validates required env vars. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_OPS_REPO`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. Extracted from `bin/disinto`. | bin/disinto (release) |
-| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_get_or_empty(PATH)` — GET /v1/PATH; 200→raw body, 404→empty, else structured error + return 1 (used by sync scripts to distinguish "absent, create" from hard failure without tripping errexit, #881). `hvault_ensure_kv_v2(MOUNT, [LOG_PREFIX])` — idempotent KV v2 mount assertion: enables mount if absent, fails loudly if present as wrong type/version. Extracted from all `vault-seed-*.sh` scripts to eliminate dup-detector violations. Respects `DRY_RUN=1`. `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. `_hvault_seed_key(PATH, KEY, [GENERATOR])` — seed one KV key if absent; reads existing data and merges to preserve sibling keys (KV v2 replaces atomically); returns 0=created, 1=unchanged, 2=API error (#992). All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | `tools/vault-apply-policies.sh`, `tools/vault-apply-roles.sh`, `lib/init/nomad/vault-nomad-auth.sh`, `tools/vault-seed-*.sh` |
-| `lib/init/nomad/` | Nomad+Vault installer scripts. `cluster-up.sh` — idempotent Step-0 orchestrator that runs all steps in order (installs packages, writes HCL, enables systemd units, unseals Vault); uses `poll_until_healthy()` helper for deduped readiness polling; `HOST_VOLUME_DIRS` array now includes `/srv/disinto/docker` (for staging file-server, S5.2, #989, #992). `install.sh` — installs pinned Nomad+Vault apt packages. `vault-init.sh` — initializes Vault (unseal keys → `/etc/vault.d/`), creates dev-persisted unseal unit. `lib-systemd.sh` — shared systemd unit helpers. `systemd-nomad.sh`, `systemd-vault.sh` — write and enable service units. `vault-nomad-auth.sh` — Step-2 script that enables Vault's JWT auth at path `jwt-nomad`, writes the JWKS/algs config pointing at Nomad's workload-identity signer, delegates role sync to `tools/vault-apply-roles.sh`, installs `/etc/nomad.d/server.hcl`, and SIGHUPs `nomad.service` if the file changed (#881). `wp-oauth-register.sh` — S3.3 script that creates the Woodpecker OAuth2 app in Forgejo and stores `forgejo_client`/`forgejo_secret` in Vault KV v2 at `kv/disinto/shared/woodpecker`; idempotent (skips if app or secrets already present); called by `bin/disinto --with woodpecker`. `deploy.sh` — S4 dependency-ordered Nomad job deploy + health-wait; takes a list of jobspec basenames, submits each to Nomad and polls until healthy before proceeding to the next; supports `--dry-run` and per-job timeout overrides via `JOB_READY_TIMEOUT_`; invoked by `bin/disinto --with ` and `cluster-up.sh`; deploy order now covers staging, chat, edge (S5.5, #992). Idempotent: each step checks current state before acting. Sourced and called by `cluster-up.sh`; not sourced by agents. | `bin/disinto init --backend=nomad` |
+| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_get_or_empty(PATH)` — GET /v1/PATH; 200→raw body, 404→empty, else structured error + return 1 (used by sync scripts to distinguish "absent, create" from hard failure without tripping errexit, #881). `hvault_ensure_kv_v2(MOUNT, [LOG_PREFIX])` — idempotent KV v2 mount assertion: enables mount if absent, fails loudly if present as wrong type/version. Extracted from all `vault-seed-*.sh` scripts to eliminate dup-detector violations. Respects `DRY_RUN=1`. `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | `tools/vault-apply-policies.sh`, `tools/vault-apply-roles.sh`, `lib/init/nomad/vault-nomad-auth.sh`, `tools/vault-seed-*.sh` |
+| `lib/init/nomad/` | Nomad+Vault installer scripts. `cluster-up.sh` — idempotent Step-0 orchestrator that runs all steps in order (installs packages, writes HCL, enables systemd units, unseals Vault); uses `poll_until_healthy()` helper for deduped readiness polling. `install.sh` — installs pinned Nomad+Vault apt packages. `vault-init.sh` — initializes Vault (unseal keys → `/etc/vault.d/`), creates dev-persisted unseal unit. `lib-systemd.sh` — shared systemd unit helpers. `systemd-nomad.sh`, `systemd-vault.sh` — write and enable service units. `vault-nomad-auth.sh` — Step-2 script that enables Vault's JWT auth at path `jwt-nomad`, writes the JWKS/algs config pointing at Nomad's workload-identity signer, delegates role sync to `tools/vault-apply-roles.sh`, installs `/etc/nomad.d/server.hcl`, and SIGHUPs `nomad.service` if the file changed (#881). `wp-oauth-register.sh` — S3.3 script that creates the Woodpecker OAuth2 app in Forgejo and stores `forgejo_client`/`forgejo_secret` in Vault KV v2 at `kv/disinto/shared/woodpecker`; idempotent (skips if app or secrets already present); called by `bin/disinto --with woodpecker`. `deploy.sh` — S4 dependency-ordered Nomad job deploy + health-wait; takes a list of jobspec basenames, submits each to Nomad and polls until healthy before proceeding to the next; supports `--dry-run` and per-job timeout overrides via `JOB_READY_TIMEOUT_`; invoked by `bin/disinto --with ` and `cluster-up.sh`. Idempotent: each step checks current state before acting. Sourced and called by `cluster-up.sh`; not sourced by agents. | `bin/disinto init --backend=nomad` |
diff --git a/lib/hvault.sh b/lib/hvault.sh
index d283330..b0d1635 100644
--- a/lib/hvault.sh
+++ b/lib/hvault.sh
@@ -405,36 +405,3 @@ hvault_token_lookup() {
     return 1
   }
 }
-
-# _hvault_seed_key — Seed a single KV key if it doesn't exist.
-# Reads existing data and merges to preserve sibling keys (KV v2 replaces
-# .data atomically). Returns 0=created, 1=unchanged, 2=API error.
-# Args:
-#   path: KV v2 logical path (e.g. "disinto/shared/chat")
-#   key: key name within the path (e.g. "chat_oauth_client_id")
-#   generator: shell command that outputs a random value (default: openssl rand -hex 32)
-# Usage:
-#   _hvault_seed_key "disinto/shared/chat" "chat_oauth_client_id"
-#   rc=$?
-#   # 0=created, 1=unchanged
-_hvault_seed_key() {
-  local path="$1" key="$2" generator="${3:-openssl rand -hex 32}"
-  local existing
-  existing=$(hvault_kv_get "$path" "$key" 2>/dev/null) || true
-  if [ -n "$existing" ]; then
-    return 1 # unchanged
-  fi
-
-  local value
-  value=$(eval "$generator")
-
-  # Read existing data to preserve sibling keys (KV v2 replaces atomically)
-  local kv_api="${VAULT_KV_MOUNT}/data/${path}"
-  local raw existing_data payload
-  raw="$(hvault_get_or_empty "$kv_api")" || return 2
-  existing_data="{}"
-  [ -n "$raw" ] && existing_data="$(printf '%s' "$raw" | jq '.data.data // {}')"
-  payload="$(printf '%s' "$existing_data" \
-    | jq --arg k "$key" --arg v "$value" '{data: (. + {($k): $v})}')"
-  _hvault_request POST "$kv_api" "$payload" >/dev/null
-  return 0 # created
-}
diff --git a/lib/init/nomad/cluster-up.sh b/lib/init/nomad/cluster-up.sh
index 488d2df..4e39d88 100755
--- a/lib/init/nomad/cluster-up.sh
+++ b/lib/init/nomad/cluster-up.sh
@@ -66,7 +66,6 @@ HOST_VOLUME_DIRS=(
   "/srv/disinto/agent-data"
   "/srv/disinto/project-repos"
   "/srv/disinto/caddy-data"
-  "/srv/disinto/docker"
   "/srv/disinto/chat-history"
   "/srv/disinto/ops-repo"
 )
diff --git a/nomad/AGENTS.md b/nomad/AGENTS.md
index 6fda250..31d21bb 100644
--- a/nomad/AGENTS.md
+++ b/nomad/AGENTS.md
@@ -1,12 +1,12 @@
-
+
 # nomad/ — Agent Instructions
 
 Nomad + Vault HCL for the factory's single-node cluster. These files are
 the source of truth that `lib/init/nomad/cluster-up.sh` copies onto a
 factory box under `/etc/nomad.d/` and `/etc/vault.d/` at init time.
 
-This directory covers the **Nomad+Vault migration (Steps 0–5)** —
-see issues #821–#992 for the step breakdown.
+This directory covers the **Nomad+Vault migration (Steps 0–4)** —
+see issues #821–#962 for the step breakdown.
 
 ## What lives here
 
@@ -19,9 +19,6 @@
 | `jobs/woodpecker-server.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI server; host networking, Vault KV for `WOODPECKER_AGENT_SECRET` + Forgejo OAuth creds (S3.1) |
 | `jobs/woodpecker-agent.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI agent; host networking, `docker.sock` mount, Vault KV for `WOODPECKER_AGENT_SECRET`; `WOODPECKER_SERVER` uses `${attr.unique.network.ip-address}:9000` (Nomad interpolation) — port binds to LXC alloc IP, not localhost (S3.2, S3-fix-6, #964) |
 | `jobs/agents.hcl` | submitted via `lib/init/nomad/deploy.sh` | All 7 agent roles (dev, review, gardener, planner, predictor, supervisor, architect) + llama variant; Vault-templated bot tokens via `service-agents` policy; `force_pull = false` — image is built locally by `bin/disinto --with agents`, no registry (S4.1, S4-fix-2, S4-fix-5, #955, #972, #978) |
-| `jobs/staging.hcl` | submitted via `lib/init/nomad/deploy.sh` | Caddy file-server mounting `docker/` as `/srv/site:ro`; no Vault integration; internal-only via edge proxy (S5.2, #989) |
-| `jobs/chat.hcl` | submitted via `lib/init/nomad/deploy.sh` | Claude chat UI; custom `disinto/chat:local` image; sandbox hardening (cap_drop ALL, tmpfs, pids_limit 128); Vault-templated OAuth secrets via `service-chat` policy (S5.2, #989) |
-| `jobs/edge.hcl` | submitted via `lib/init/nomad/deploy.sh` | Caddy reverse proxy + dispatcher sidecar; routes /forge, /woodpecker, /staging, /chat; uses `disinto/edge:local` image built by `bin/disinto --with edge`; Vault-templated ops-repo creds via `service-dispatcher` policy (S5.1, #988) |
 
 Nomad auto-merges every `*.hcl` under `-config=/etc/nomad.d/`, so the
 split between `server.hcl` and `client.hcl` is for readability, not
@@ -36,6 +33,8 @@ convention, KV path summary, and JWT-auth role bindings (S2.1/S2.3).
 
 ## Not yet implemented
 
+- **Additional jobspecs** (caddy) — Woodpecker (S3.1-S3.2) and agents (S4.1) are now deployed;
+  caddy lands in a later step.
 - **TLS, ACLs, gossip encryption** — deliberately absent for now; land
   alongside multi-node support.
diff --git a/nomad/client.hcl b/nomad/client.hcl
index d173ed5..d3ba74b 100644
--- a/nomad/client.hcl
+++ b/nomad/client.hcl
@@ -49,18 +49,18 @@ client {
     read_only = false
   }
 
-  # staging static content (docker/ directory with images, HTML, etc.)
-  host_volume "site-content" {
-    path      = "/srv/disinto/docker"
-    read_only = true
-  }
-
   # disinto chat transcripts + attachments.
   host_volume "chat-history" {
     path      = "/srv/disinto/chat-history"
     read_only = false
   }
 
+  # staging static content (docker/ directory with images, HTML, etc.)
+  host_volume "site-content" {
+    path      = "/srv/disinto/docker"
+    read_only = true
+  }
+
   # ops repo clone (vault actions, sprint artifacts, knowledge).
   host_volume "ops-repo" {
     path = "/srv/disinto/ops-repo"
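A property worth keeping in mind around the client.hcl hunk just above: a Nomad job can only mount host volumes that `client.hcl` declares, so moves and removals like the `site-content` change are easy to leave half-done. A rough consistency check, assuming the repo layout used throughout this diff and deliberately loose grep patterns:

```bash
#!/usr/bin/env bash
# For every named volume a jobspec requests, confirm a matching host_volume
# stanza exists in nomad/client.hcl (Nomad refuses placement otherwise).
for vol in $(grep -ho 'source[[:space:]]*=[[:space:]]*"[^"]*"' nomad/jobs/*.hcl \
             | sed 's/.*"\(.*\)"/\1/' | sort -u); do
  if grep -q "host_volume \"${vol}\"" nomad/client.hcl; then
    echo "ok:      ${vol}"
  else
    echo "MISSING: ${vol}"
  fi
done
```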
host_volume "ops-repo" { path = "/srv/disinto/ops-repo" diff --git a/nomad/jobs/agents.hcl b/nomad/jobs/agents.hcl index 92d377e..7ecc564 100644 --- a/nomad/jobs/agents.hcl +++ b/nomad/jobs/agents.hcl @@ -152,44 +152,37 @@ FORGE_PASS={{ .Data.data.pass }} FORGE_TOKEN=seed-me FORGE_PASS=seed-me {{- end }} - -{{ with secret "kv/data/disinto/bots/review" -}} +{{- with secret "kv/data/disinto/bots/review" -}} FORGE_REVIEW_TOKEN={{ .Data.data.token }} {{- else -}} FORGE_REVIEW_TOKEN=seed-me {{- end }} - -{{ with secret "kv/data/disinto/bots/gardener" -}} +{{- with secret "kv/data/disinto/bots/gardener" -}} FORGE_GARDENER_TOKEN={{ .Data.data.token }} {{- else -}} FORGE_GARDENER_TOKEN=seed-me {{- end }} - -{{ with secret "kv/data/disinto/bots/architect" -}} +{{- with secret "kv/data/disinto/bots/architect" -}} FORGE_ARCHITECT_TOKEN={{ .Data.data.token }} {{- else -}} FORGE_ARCHITECT_TOKEN=seed-me {{- end }} - -{{ with secret "kv/data/disinto/bots/planner" -}} +{{- with secret "kv/data/disinto/bots/planner" -}} FORGE_PLANNER_TOKEN={{ .Data.data.token }} {{- else -}} FORGE_PLANNER_TOKEN=seed-me {{- end }} - -{{ with secret "kv/data/disinto/bots/predictor" -}} +{{- with secret "kv/data/disinto/bots/predictor" -}} FORGE_PREDICTOR_TOKEN={{ .Data.data.token }} {{- else -}} FORGE_PREDICTOR_TOKEN=seed-me {{- end }} - -{{ with secret "kv/data/disinto/bots/supervisor" -}} +{{- with secret "kv/data/disinto/bots/supervisor" -}} FORGE_SUPERVISOR_TOKEN={{ .Data.data.token }} {{- else -}} FORGE_SUPERVISOR_TOKEN=seed-me {{- end }} - -{{ with secret "kv/data/disinto/bots/vault" -}} +{{- with secret "kv/data/disinto/bots/vault" -}} FORGE_VAULT_TOKEN={{ .Data.data.token }} {{- else -}} FORGE_VAULT_TOKEN=seed-me diff --git a/nomad/jobs/chat.hcl b/nomad/jobs/chat.hcl index ad18cec..8fa5c93 100644 --- a/nomad/jobs/chat.hcl +++ b/nomad/jobs/chat.hcl @@ -12,7 +12,6 @@ # - Read-only root filesystem (enforced via entrypoint) # - tmpfs /tmp:size=64m for runtime temp files # - cap_drop ALL (no Linux capabilities) -# - pids_limit 128 (prevent fork bombs) # - mem_limit 512m (matches compose sandbox hardening) # # Vault integration: @@ -89,22 +88,11 @@ job "chat" { config { image = "disinto/chat:local" force_pull = false - # Sandbox hardening (#706): cap_drop ALL, pids_limit 128, tmpfs /tmp + # Sandbox hardening (#706): cap_drop ALL (no Linux capabilities) + # tmpfs /tmp for runtime files (64MB) # ReadonlyRootfs enforced via entrypoint script (fails if running as root) cap_drop = ["ALL"] - pids_limit = 128 - mount { - type = "tmpfs" - target = "/tmp" - readonly = false - tmpfs_options { - size = 67108864 # 64MB in bytes - } - } - # Security options for sandbox hardening - # apparmor=unconfined needed for Claude CLI ptrace access - # no-new-privileges prevents privilege escalation - security_opt = ["apparmor=unconfined", "no-new-privileges"] + tmpfs = ["/tmp:size=64m"] } # ── Volume mounts ────────────────────────────────────────────────────── @@ -119,9 +107,9 @@ job "chat" { # CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, FORWARD_AUTH_SECRET # rendered from kv/disinto/shared/chat via template stanza. 
diff --git a/nomad/jobs/chat.hcl b/nomad/jobs/chat.hcl
index ad18cec..8fa5c93 100644
--- a/nomad/jobs/chat.hcl
+++ b/nomad/jobs/chat.hcl
@@ -12,7 +12,6 @@
 #   - Read-only root filesystem (enforced via entrypoint)
 #   - tmpfs /tmp:size=64m for runtime temp files
 #   - cap_drop ALL (no Linux capabilities)
-#   - pids_limit 128 (prevent fork bombs)
 #   - mem_limit 512m (matches compose sandbox hardening)
 #
 # Vault integration:
@@ -89,22 +88,15 @@ job "chat" {
       config {
         image      = "disinto/chat:local"
         force_pull = false
-        # Sandbox hardening (#706): cap_drop ALL, pids_limit 128, tmpfs /tmp
+        # Sandbox hardening (#706): cap_drop ALL (no Linux capabilities)
+        # tmpfs /tmp for runtime files (64MB)
         # ReadonlyRootfs enforced via entrypoint script (fails if running as root)
         cap_drop   = ["ALL"]
-        pids_limit = 128
-        mount {
-          type     = "tmpfs"
-          target   = "/tmp"
-          readonly = false
-          tmpfs_options {
-            size = 67108864 # 64MB in bytes
-          }
-        }
+        tmpfs      = ["/tmp:size=64m"]
         # Security options for sandbox hardening
         # apparmor=unconfined needed for Claude CLI ptrace access
         # no-new-privileges prevents privilege escalation
         security_opt = ["apparmor=unconfined", "no-new-privileges"]
       }
 
       # ── Volume mounts ──────────────────────────────────────────────────────
@@ -119,9 +111,9 @@
       # CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, FORWARD_AUTH_SECRET
       # rendered from kv/disinto/shared/chat via template stanza.
       env {
-        FORGE_URL                  = "http://forgejo:3000"
-        CHAT_MAX_REQUESTS_PER_HOUR = "60"
-        CHAT_MAX_REQUESTS_PER_DAY  = "1000"
+        FORGE_URL                   = "http://forgejo:3000"
+        CHAT_MAX_REQUESTS_PER_HOUR  = "60"
+        CHAT_MAX_REQUESTS_PER_DAY   = "1000"
       }
 
       # ── Vault-templated secrets (S5.2, issue #989) ─────────────────────────
@@ -147,11 +139,19 @@ EOT
       }
 
      # ── Sandbox hardening (S5.2, #706) ────────────────────────────────────
-      # Memory = 512MB (matches docker-compose sandbox hardening)
+      # Matches docker-compose sandbox hardening:
+      #   - ReadonlyRootfs=true (read-only root filesystem)
+      #   - CapDrop=ALL (no Linux capabilities)
+      #   - PidsLimit=128 (prevent fork bombs)
+      #   - Memory=512m (536870912 bytes)
+      #   - SecurityOpt=no-new-privileges
+      #
+      # Note: Nomad's docker driver takes security_opt and tmpfs in the task's
+      # config block (kept there above); cpu/memory limits live in resources.
       resources {
         cpu    = 200
         memory = 512
       }
     }
   }
 }
diff --git a/nomad/jobs/edge.hcl b/nomad/jobs/edge.hcl
deleted file mode 100644
index 779b53b..0000000
--- a/nomad/jobs/edge.hcl
+++ /dev/null
@@ -1,245 +0,0 @@
-# =============================================================================
-# nomad/jobs/edge.hcl — Edge proxy (Caddy + dispatcher sidecar) (Nomad service job)
-#
-# Part of the Nomad+Vault migration (S5.1, issue #988). Caddy reverse proxy
-# routes traffic to Forgejo, Woodpecker, staging, and chat services. The
-# dispatcher sidecar polls disinto-ops for vault actions and dispatches them
-# via Nomad batch jobs.
-#
-# Host_volume contract:
-#   This job mounts caddy-data from nomad/client.hcl. Path
-#   /srv/disinto/caddy-data is created by lib/init/nomad/cluster-up.sh before
-#   any job references it. Keep the `source = "caddy-data"` below in sync
-#   with the host_volume stanza in client.hcl.
-#
-# Build step (S5.1):
-#   docker/edge/Dockerfile is custom (adds bash, jq, curl, git, docker-cli,
-#   python3, openssh-client, autossh to caddy:latest). Build as
-#   disinto/edge:local using the same pattern as disinto/agents:local.
-#   Command: docker build -t disinto/edge:local -f docker/edge/Dockerfile docker/edge
-#
-# Not the runtime yet: docker-compose.yml is still the factory's live stack
-# until cutover. This file exists so CI can validate it and S5.2 can wire
-# `disinto init --backend=nomad --with edge` to `nomad job run` it.
-# =============================================================================
-
-job "edge" {
-  type        = "service"
-  datacenters = ["dc1"]
-
-  group "edge" {
-    count = 1
-
-    # ── Vault workload identity for dispatcher (S5.1, issue #988) ──────────
-    # Service role for dispatcher task to fetch vault actions from KV v2.
-    # Role defined in vault/roles.yaml, policy in vault/policies/dispatcher.hcl.
-    vault {
-      role = "service-dispatcher"
-    }
-
-    # ── Network ports (S5.1, issue #988) ──────────────────────────────────
-    # Caddy listens on :80 and :443. Expose both on the host.
-    network {
-      port "http" {
-        static = 80
-        to     = 80
-      }
-
-      port "https" {
-        static = 443
-        to     = 443
-      }
-    }
-
-    # ── Host-volume mounts (S5.1, issue #988) ─────────────────────────────
-    # caddy-data: ACME certificates, Caddy config state.
-    volume "caddy-data" {
-      type      = "host"
-      source    = "caddy-data"
-      read_only = false
-    }
-
-    # ops-repo: disinto-ops clone for vault actions polling.
-    volume "ops-repo" {
-      type      = "host"
-      source    = "ops-repo"
-      read_only = false
-    }
-
-    # ── Conservative restart policy ───────────────────────────────────────
-    # Caddy should be stable; dispatcher may restart on errors.
-    restart {
-      attempts = 3
-      interval = "5m"
-      delay    = "15s"
-      mode     = "delay"
-    }
-
-    # ── Service registration ───────────────────────────────────────────────
-    # Caddy is an HTTP reverse proxy — health check on port 80.
-    service {
-      name     = "edge"
-      port     = "http"
-      provider = "nomad"
-
-      check {
-        type     = "http"
-        path     = "/"
-        interval = "10s"
-        timeout  = "3s"
-      }
-    }
-
-    # ── Caddy task (S5.1, issue #988) ─────────────────────────────────────
-    task "caddy" {
-      driver = "docker"
-
-      config {
-        # Use pre-built disinto/edge:local image (custom Dockerfile adds
-        # bash, jq, curl, git, docker-cli, python3, openssh-client, autossh).
-        image      = "disinto/edge:local"
-        force_pull = false
-        ports      = ["http", "https"]
-
-        # apparmor=unconfined matches docker-compose — needed for autossh
-        # in the entrypoint script.
-        security_opt = ["apparmor=unconfined"]
-      }
-
-      # Mount caddy-data volume for ACME state and config directory.
-      # Caddyfile is mounted at /etc/caddy/Caddyfile by entrypoint-edge.sh.
-      volume_mount {
-        volume      = "caddy-data"
-        destination = "/data"
-        read_only   = false
-      }
-
-      # ── Caddyfile via Nomad service discovery (S5-fix-7, issue #1018) ────
-      # Renders staging upstream from Nomad service registration instead of
-      # hardcoded staging:80. Caddy picks up /local/Caddyfile via entrypoint.
-      template {
-        destination = "local/Caddyfile"
-        change_mode = "restart"
-        data        = <
diff --git a/planner/AGENTS.md b/planner/AGENTS.md
--- a/planner/AGENTS.md
+++ b/planner/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Planner Agent
 
 **Role**: Strategic planning using a Prerequisite Tree (Theory of Constraints),
diff --git a/predictor/AGENTS.md b/predictor/AGENTS.md
index ba54a05..f72e844 100644
--- a/predictor/AGENTS.md
+++ b/predictor/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Predictor Agent
 
 **Role**: Abstract adversary (the "goblin"). Runs a 2-step formula
diff --git a/review/AGENTS.md b/review/AGENTS.md
index 19fc4c7..7317dcf 100644
--- a/review/AGENTS.md
+++ b/review/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Review Agent
 
 **Role**: AI-powered PR review — post structured findings and formal
diff --git a/supervisor/AGENTS.md b/supervisor/AGENTS.md
index 7ca3d7f..4fc6fdf 100644
--- a/supervisor/AGENTS.md
+++ b/supervisor/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # Supervisor Agent
 
 **Role**: Health monitoring and auto-remediation, executed as a formula-driven
diff --git a/tests/disinto-init-nomad.bats b/tests/disinto-init-nomad.bats
index 8c8b9a4..085bec2 100644
--- a/tests/disinto-init-nomad.bats
+++ b/tests/disinto-init-nomad.bats
@@ -215,7 +215,7 @@ setup_file() {
   run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with unknown-service --dry-run
   [ "$status" -ne 0 ]
   [[ "$output" == *"unknown service"* ]]
-  [[ "$output" == *"known: forgejo, woodpecker-server, woodpecker-agent, agents, staging, chat, edge"* ]]
+  [[ "$output" == *"known: forgejo, woodpecker-server, woodpecker-agent, agents"* ]]
 }
 
 # S3.4: woodpecker auto-expansion and forgejo auto-inclusion
diff --git a/tools/vault-seed-chat.sh b/tools/vault-seed-chat.sh
deleted file mode 100755
index 08e3837..0000000
--- a/tools/vault-seed-chat.sh
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env bash
-# =============================================================================
-# tools/vault-seed-chat.sh — Idempotent seed for kv/disinto/shared/chat
-#
-# Part of the Nomad+Vault migration (S5.2, issue #989). Populates the KV v2
-# path that nomad/jobs/chat.hcl reads from, so a clean-install factory
-# (no old-stack secrets to import) still has per-key values for
-# CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, and FORWARD_AUTH_SECRET.
-#
-# Companion to tools/vault-import.sh (S2.2) — when that import runs against
-# a box with an existing stack, it overwrites these seeded values with the
-# real ones. Order doesn't matter: whichever runs last wins, and both
-# scripts are idempotent in the sense that re-running never rotates an
-# existing non-empty key.
-#
-# Uses _hvault_seed_key (lib/hvault.sh) for each key — the helper reads
-# existing data and merges to preserve sibling keys (KV v2 replaces .data
-# atomically).
-#
-# Preconditions:
-#   - Vault reachable + unsealed at $VAULT_ADDR.
-#   - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable.
-#   - The `kv/` mount is enabled as KV v2.
-#
-# Requires: VAULT_ADDR, VAULT_TOKEN, curl, jq, openssl
-#
-# Usage:
-#   tools/vault-seed-chat.sh
-#   tools/vault-seed-chat.sh --dry-run
-#
-# Exit codes:
-#   0  success (seed applied, or already applied)
-#   1  precondition / API / mount-mismatch failure
-# =============================================================================
-set -euo pipefail
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
-
-# shellcheck source=../lib/hvault.sh
-source "${REPO_ROOT}/lib/hvault.sh"
-
-KV_MOUNT="kv"
-KV_LOGICAL_PATH="disinto/shared/chat"
-
-# Keys to seed — array-driven loop (structurally distinct from forgejo's
-# sequential if-blocks and agents' role loop).
-SEED_KEYS=(chat_oauth_client_id chat_oauth_client_secret forward_auth_secret)
-
-LOG_TAG="[vault-seed-chat]"
-log() { printf '%s %s\n' "$LOG_TAG" "$*"; }
-die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; }
-
-# ── Flag parsing — [[ ]] guard + case: shape distinct from forgejo
-# (arity:value case), woodpecker (for-loop), agents (while/shift). ──────────
-DRY_RUN=0
-if [[ $# -gt 0 ]]; then
-  case "$1" in
-    --dry-run) DRY_RUN=1 ;;
-    -h|--help)
-      printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")"
-      printf 'Seed kv/disinto/shared/chat with random OAuth client\n'
-      printf 'credentials and forward auth secret if missing.\n'
-      printf 'Idempotent: existing non-empty values are preserved.\n\n'
-      printf '  --dry-run   Show what would be seeded without writing.\n'
-      exit 0
-      ;;
-    *) die "invalid argument: ${1} (try --help)" ;;
-  esac
-fi
-
-# ── Preconditions — inline check-or-die (shape distinct from agents' array
-# loop and forgejo's continuation-line style) ───────────────────────────────
-command -v curl >/dev/null 2>&1 || die "curl not found"
-command -v jq >/dev/null 2>&1 || die "jq not found"
-command -v openssl >/dev/null 2>&1 || die "openssl not found"
-[ -n "${VAULT_ADDR:-}" ] || die "VAULT_ADDR unset — export VAULT_ADDR=http://127.0.0.1:8200"
-hvault_token_lookup >/dev/null || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
-
-# ── Step 1/2: ensure kv/ mount exists and is KV v2 ──────────────────────────
-log "── Step 1/2: ensure ${KV_MOUNT}/ is KV v2 ──"
-export DRY_RUN
-hvault_ensure_kv_v2 "$KV_MOUNT" "${LOG_TAG}" \
-  || die "KV mount check failed"
-
-# ── Step 2/2: seed missing keys via _hvault_seed_key helper ─────────────────
-log "── Step 2/2: seed ${KV_LOGICAL_PATH} ──"
-
-generated=()
-for key in "${SEED_KEYS[@]}"; do
-  if [ "$DRY_RUN" -eq 1 ]; then
-    # Check existence without writing
-    existing=$(hvault_kv_get "$KV_LOGICAL_PATH" "$key" 2>/dev/null) || true
-    if [ -z "$existing" ]; then
-      generated+=("$key")
-      log "[dry-run] ${key} would be generated"
-    else
-      log "[dry-run] ${key} unchanged"
-    fi
-  else
-    rc=0
-    _hvault_seed_key "$KV_LOGICAL_PATH" "$key" || rc=$?
-    case "$rc" in
-      0) generated+=("$key"); log "${key} generated" ;;
-      1) log "${key} unchanged" ;;
-      *) die "API error seeding ${key} (rc=${rc})" ;;
-    esac
-  fi
-done
-
-if [ "${#generated[@]}" -eq 0 ]; then
-  log "all keys present — no-op"
-else
-  log "done — ${#generated[@]} key(s) seeded at kv/${KV_LOGICAL_PATH}"
-fi
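The merge-before-write dance that `_hvault_seed_key` performed, and that this deleted script leaned on, exists because a KV v2 write replaces the entire `.data` map. A standalone sketch of the same pattern against Vault's KV v2 HTTP API, with an illustrative path and key:

```bash
#!/usr/bin/env bash
# Seed one key without clobbering siblings: read the current data map,
# merge the new key in, write the merged map back. Uses VAULT_ADDR and
# VAULT_TOKEN from the environment; path and key are illustrative.
set -euo pipefail
path="disinto/shared/chat"
key="forward_auth_secret"

# 404 responses carry an {"errors": [...]} body, so .data.data // {} still
# yields an empty object on an absent path.
existing=$(curl -s -H "X-Vault-Token: ${VAULT_TOKEN}" \
  "${VAULT_ADDR}/v1/kv/data/${path}" | jq '.data.data // {}')

payload=$(jq -n --argjson d "$existing" \
  --arg k "$key" --arg v "$(openssl rand -hex 32)" \
  '{data: ($d + {($k): $v})}')

curl -s -H "X-Vault-Token: ${VAULT_TOKEN}" -X POST -d "$payload" \
  "${VAULT_ADDR}/v1/kv/data/${path}" >/dev/null
```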
diff --git a/vault/policies/AGENTS.md b/vault/policies/AGENTS.md
index 0a67acb..9b80a1d 100644
--- a/vault/policies/AGENTS.md
+++ b/vault/policies/AGENTS.md
@@ -1,4 +1,4 @@
-
+
 # vault/policies/ — Agent Instructions
 
 HashiCorp Vault ACL policies for the disinto factory. One `.hcl` file per
@@ -31,8 +31,6 @@ KV v2). Vault addresses KV v2 data at `kv/data/` and metadata at
 | `service-forgejo` | `kv/data/disinto/shared/forgejo/*` |
 | `service-woodpecker` | `kv/data/disinto/shared/woodpecker/*` |
 | `service-agents` | All 7 `kv/data/disinto/bots//*` namespaces + `kv/data/disinto/shared/forge/*`; composite policy for the `agents` Nomad job (S4.1) |
-| `service-chat` | `kv/data/disinto/shared/chat/*`; read-only OAuth client config + forward-auth secret for the chat Nomad job (S5.2, #989) |
-| `service-dispatcher` | `kv/data/disinto/runner/*` (list+read) + `kv/data/disinto/shared/ops-repo/*` (read); used by edge dispatcher sidecar (S5.1, #988) |
 | `bot-` (dev, review, gardener, architect, planner, predictor, supervisor, vault, dev-qwen) | `kv/data/disinto/bots//*` + `kv/data/disinto/shared/forge/*` |
 | `runner-` (GITHUB\_TOKEN, CODEBERG\_TOKEN, CLAWHUB\_TOKEN, DEPLOY\_KEY, NPM\_TOKEN, DOCKER\_HUB\_TOKEN) | `kv/data/disinto/runner/` (exactly one) |
 | `dispatcher` | `kv/data/disinto/runner/*` + `kv/data/disinto/shared/ops-repo/*` |
diff --git a/vault/policies/service-dispatcher.hcl b/vault/policies/service-dispatcher.hcl
deleted file mode 100644
index bdc7ddb..0000000
--- a/vault/policies/service-dispatcher.hcl
+++ /dev/null
@@ -1,29 +0,0 @@
-# vault/policies/service-dispatcher.hcl
-#
-# Edge dispatcher policy: needs to enumerate the runner secret namespace
-# (to check secret presence before dispatching) and read the shared
-# ops-repo credentials (token + clone URL) it uses to fetch action TOMLs.
-#
-# Scope:
-#   - kv/disinto/runner/* — read all per-secret values + list keys
-#   - kv/disinto/shared/ops-repo/* — read the ops-repo creds bundle
-#
-# The actual ephemeral runner container created per dispatch gets the
-# narrow runner- policies, NOT this one. This policy stays bound
-# to the long-running dispatcher only.
-
-path "kv/data/disinto/runner/*" {
-  capabilities = ["read"]
-}
-
-path "kv/metadata/disinto/runner/*" {
-  capabilities = ["list", "read"]
-}
-
-path "kv/data/disinto/shared/ops-repo" {
-  capabilities = ["read"]
-}
-
-path "kv/metadata/disinto/shared/ops-repo" {
-  capabilities = ["list", "read"]
-}
diff --git a/vault/roles.yaml b/vault/roles.yaml
index c058a30..1e01be8 100644
--- a/vault/roles.yaml
+++ b/vault/roles.yaml
@@ -70,8 +70,6 @@ roles:
     namespace: default
     job_id: agents
 
-  # ── Chat UI (nomad/jobs/chat.hcl — S5.2) ─────────────────────────────────
-  # Claude chat UI service with OAuth secrets. Uses vault/policies/service-chat.hcl.
   - name: service-chat
     policy: service-chat
     namespace: default
@@ -128,10 +126,10 @@ roles:
     job_id: bot-vault
 
   # ── Edge dispatcher ──────────────────────────────────────────────────────
-  - name: service-dispatcher
-    policy: service-dispatcher
+  - name: dispatcher
+    policy: dispatcher
     namespace: default
-    job_id: edge
+    job_id: dispatcher
 
# ── Per-secret runner roles ────────────────────────────────────────────────
# vault-runner (Step 5) composes runner- policies onto each