diff --git a/.woodpecker/detect-duplicates.py b/.woodpecker/detect-duplicates.py index 58fc160..473bb18 100644 --- a/.woodpecker/detect-duplicates.py +++ b/.woodpecker/detect-duplicates.py @@ -294,6 +294,10 @@ def main() -> int: "9f6ae8e7811575b964279d8820494eb0": "Verification helper: for loop done pattern", # Standard lib source block shared across formula-driven agent run scripts "330e5809a00b95ade1a5fce2d749b94b": "Standard lib source block (env.sh, formula-session.sh, worktree.sh, guard.sh, agent-sdk.sh)", + # Test data for duplicate service detection tests (#850) + # Intentionally duplicated TOML blocks in smoke-init.sh and test-duplicate-service-detection.sh + "334967b8b4f1a8d3b0b9b8e0912f3bfb": "Test TOML: [agents.llama] block header (smoke-init.sh + test-duplicate-service-detection.sh)", + "d82f30077e5bb23b5fc01db003033d5d": "Test TOML: [agents.llama] block body (smoke-init.sh + test-duplicate-service-detection.sh)", # Common vault-seed script patterns: logging helpers + flag parsing # Used in tools/vault-seed-woodpecker.sh + lib/init/nomad/wp-oauth-register.sh "843a1cbf987952697d4e05e96ed2b2d5": "Logging helpers + DRY_RUN init (vault-seed-woodpecker + wp-oauth-register)", @@ -301,6 +305,34 @@ def main() -> int: "9a57368f3c1dfd29ec328596b86962a0": "Flag parsing loop + case start (vault-seed-woodpecker + wp-oauth-register)", "9d72d40ff303cbed0b7e628fc15381c3": "Case loop + dry-run handler (vault-seed-woodpecker + wp-oauth-register)", "5b52ddbbf47948e3cbc1b383f0909588": "Help + invalid arg handler end (vault-seed-woodpecker + wp-oauth-register)", + # forgejo-bootstrap.sh follows wp-oauth-register.sh pattern (issue #1069) + "2b80185e4ae2b54e2e01f33e5555c688": "Standard header (set -euo pipefail, SCRIPT_DIR, REPO_ROOT) (forgejo-bootstrap + wp-oauth-register)", + "38a1f20a60d69f0d6bfb06a0532b3bd7": "Logging helpers + DRY_RUN init (forgejo-bootstrap + wp-oauth-register)", + "4dd3c526fa29bdaa88b274c3d7d01032": "Flag parsing loop + case start (forgejo-bootstrap + wp-oauth-register)", + # Common vault-seed script preamble + precondition patterns + # Shared across tools/vault-seed-{forgejo,agents,woodpecker}.sh + "dff3675c151fcdbd2fef798826ae919b": "Vault-seed preamble: set -euo + path setup + source hvault.sh + KV_MOUNT", + "1cd9f0d083e24e6e6b2071db9b6dae09": "Vault-seed preconditions: binary check loop + VAULT_ADDR guard", + "63bfa88d71764c95c65a9a248f3e40ab": "Vault-seed preconditions: binary check end + VAULT_ADDR die", + "34873ad3570b211ce1d90468ab6ac94c": "Vault-seed preconditions: VAULT_ADDR die + hvault_token_lookup", + "71a52270f249e843cda48ad896d9f781": "Vault-seed preconditions: VAULT_ADDR + hvault_token_lookup + die", + # Common vault-seed script flag parsing patterns + # Shared across tools/vault-seed-{forgejo,ops-repo}.sh + "6906b7787796c2ccb8dd622e2ad4e7bf": "vault-seed DRY_RUN init + case pattern (forgejo + ops-repo)", + "a0df5283b616b964f8bc32fd99ec1b5a": "vault-seed case pattern start (forgejo + ops-repo)", + "e15e3272fdd9f0f46ce9e726aea9f853": "vault-seed case pattern dry-run handler (forgejo + ops-repo)", + "c9f22385cc49a3dac1d336bc14c6315b": "vault-seed DRY_RUN assignment (forgejo + ops-repo)", + "106f4071e88f841b3208b01144cd1c39": "vault-seed case pattern dry-run end (forgejo + ops-repo)", + "c15506dcb6bb340b25d1c39d442dd2e6": "vault-seed help text + invalid arg handler (forgejo + ops-repo)", + "1feecd3b3caf00045fae938ddf2811de": "vault-seed invalid arg handler (forgejo + ops-repo)", + "919780d5e7182715344f5aa02b191294": "vault-seed invalid arg + esac pattern (forgejo + ops-repo)", + "8dce1d292bce8e60ef4c0665b62945b0": "vault-seed esac + binary check loop (forgejo + ops-repo)", + "ca043687143a5b47bd54e65a99ce8ee8": "vault-seed binary check loop start (forgejo + ops-repo)", + "aefd9f655411a955395e6e5995ddbe6f": "vault-seed binary check pattern (forgejo + ops-repo)", + "60f0c46deb5491599457efb4048918e5": "vault-seed VAULT_ADDR + hvault_token_lookup check (forgejo + ops-repo)", + "f6838f581ef6b4d82b55268389032769": "vault-seed VAULT_ADDR + hvault_token_lookup die (forgejo + ops-repo)", + # Common shell control-flow: if → return 1 → fi → fi (env.sh + register.sh) + "a8bdb7f1a5d8cbd0a5921b17b6cf6f4d": "Common shell control-flow (return 1 / fi / fi / return 0 / }) (env.sh + register.sh)", } if not sh_files: diff --git a/.woodpecker/edge-subpath.yml b/.woodpecker/edge-subpath.yml new file mode 100644 index 0000000..2c11980 --- /dev/null +++ b/.woodpecker/edge-subpath.yml @@ -0,0 +1,317 @@ +# ============================================================================= +# .woodpecker/edge-subpath.yml — Edge subpath routing static checks +# +# Static validation for edge subpath routing configuration. This pipeline does +# NOT run live service curls — it validates the configuration that would be +# used by a deployed edge proxy. +# +# Checks: +# 1. shellcheck — syntax check on tests/smoke-edge-subpath.sh +# 2. caddy validate — validate the Caddyfile template syntax +# 3. caddyfile-routing-test — verify Caddyfile routing block shape +# 4. test-caddyfile-routing — run standalone unit test for Caddyfile structure +# +# Triggers: +# - Pull requests that modify edge-related files +# +# Environment variables (inherited from WOODPECKER_ENVIRONMENT): +# EDGE_BASE_URL — Edge proxy URL for reference (default: http://localhost) +# EDGE_TIMEOUT — Request timeout in seconds (default: 30) +# EDGE_MAX_RETRIES — Max retries per request (default: 3) +# ============================================================================= + +when: + event: pull_request + +steps: + # ── 1. ShellCheck on smoke script ──────────────────────────────────────── + # `shellcheck` validates bash syntax, style, and common pitfalls. + # Exit codes: + # 0 — all checks passed + # 1 — one or more issues found + - name: shellcheck-smoke + image: koalaman/shellcheck-alpine:stable + commands: + - shellcheck --severity=warning tests/smoke-edge-subpath.sh tests/test-caddyfile-routing.sh + + # ── 2. Caddyfile template rendering ─────────────────────────────────────── + # Render a mock Caddyfile for validation. The template uses Nomad's + # templating syntax ({{ range ... }}) which must be processed before Caddy + # can validate it. We render a mock version with Nomad templates expanded + # to static values for validation purposes. + - name: render-caddyfile + image: alpine:3.19 + commands: + - apk add --no-cache coreutils + - | + set -e + mkdir -p edge-render + # Render mock Caddyfile with Nomad templates expanded + { + echo '# Caddyfile — edge proxy configuration (Nomad-rendered)' + echo '# Staging upstream discovered via Nomad service registration.' + echo '' + echo ':80 {' + echo ' # Redirect root to Forgejo' + echo ' handle / {' + echo ' redir /forge/ 302' + echo ' }' + echo '' + echo ' # Reverse proxy to Forgejo' + echo ' handle /forge/* {' + echo ' reverse_proxy 127.0.0.1:3000' + echo ' }' + echo '' + echo ' # Reverse proxy to Woodpecker CI' + echo ' handle /ci/* {' + echo ' reverse_proxy 127.0.0.1:8000' + echo ' }' + echo '' + echo ' # Reverse proxy to staging — dynamic port via Nomad service discovery' + echo ' handle /staging/* {' + echo ' reverse_proxy 127.0.0.1:8081' + echo ' }' + echo '' + echo ' # Chat service — reverse proxy to disinto-chat backend (#705)' + echo ' # OAuth routes bypass forward_auth — unauthenticated users need these (#709)' + echo ' handle /chat/login {' + echo ' reverse_proxy 127.0.0.1:8080' + echo ' }' + echo ' handle /chat/oauth/callback {' + echo ' reverse_proxy 127.0.0.1:8080' + echo ' }' + echo ' # Defense-in-depth: forward_auth stamps X-Forwarded-User from session (#709)' + echo ' handle /chat/* {' + echo ' forward_auth 127.0.0.1:8080 {' + echo ' uri /chat/auth/verify' + echo ' copy_headers X-Forwarded-User' + echo ' header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET}' + echo ' }' + echo ' reverse_proxy 127.0.0.1:8080' + echo ' }' + echo '}' + } > edge-render/Caddyfile + cp edge-render/Caddyfile edge-render/Caddyfile.rendered + echo "Caddyfile rendered successfully" + + # ── 3. Caddy config validation ─────────────────────────────────────────── + # `caddy validate` checks Caddyfile syntax and configuration. + # This validates the rendered Caddyfile against Caddy's parser. + # Exit codes: + # 0 — configuration is valid + # 1 — configuration has errors + - name: caddy-validate + image: alpine:3.19 + commands: + - apk add --no-cache ca-certificates curl + - curl -sS -o /tmp/caddy "https://caddyserver.com/api/download?os=linux&arch=amd64" + - chmod +x /tmp/caddy + - /tmp/caddy version + - /tmp/caddy validate --config edge-render/Caddyfile.rendered --adapter caddyfile + + # ── 4. Caddyfile routing block shape test ───────────────────────────────── + # Verify that the Caddyfile contains all required routing blocks: + # - /forge/ — Forgejo subpath + # - /ci/ — Woodpecker subpath + # - /staging/ — Staging subpath + # - /chat/ — Chat subpath with forward_auth + # + # This is a unit test that validates the expected structure without + # requiring a running Caddy instance. + - name: caddyfile-routing-test + image: alpine:3.19 + commands: + - apk add --no-cache grep coreutils + - | + set -e + + CADDYFILE="edge-render/Caddyfile.rendered" + + echo "=== Validating Caddyfile routing blocks ===" + + # Check that all required subpath handlers exist + # POSIX-safe loop (alpine /bin/sh has no arrays) + FAILED=0 + for handler in "handle /forge/\*" "handle /ci/\*" "handle /staging/\*" "handle /chat/login" "handle /chat/oauth/callback" "handle /chat/\*"; do + if grep -q "$handler" "$CADDYFILE"; then + echo "[PASS] Found handler: $handler" + else + echo "[FAIL] Missing handler: $handler" + FAILED=1 + fi + done + + # Check forward_auth block exists for /chat/* + if grep -A5 "handle /chat/\*" "$CADDYFILE" | grep -q "forward_auth"; then + echo "[PASS] forward_auth block found for /chat/*" + else + echo "[FAIL] forward_auth block missing for /chat/*" + FAILED=1 + fi + + # Check reverse_proxy to Forgejo (port 3000) + if grep -q "reverse_proxy 127.0.0.1:3000" "$CADDYFILE"; then + echo "[PASS] Forgejo reverse_proxy configured (port 3000)" + else + echo "[FAIL] Forgejo reverse_proxy not configured" + FAILED=1 + fi + + # Check reverse_proxy to Woodpecker (port 8000) + if grep -q "reverse_proxy 127.0.0.1:8000" "$CADDYFILE"; then + echo "[PASS] Woodpecker reverse_proxy configured (port 8000)" + else + echo "[FAIL] Woodpecker reverse_proxy not configured" + FAILED=1 + fi + + # Check reverse_proxy to Chat (port 8080) + if grep -q "reverse_proxy 127.0.0.1:8080" "$CADDYFILE"; then + echo "[PASS] Chat reverse_proxy configured (port 8080)" + else + echo "[FAIL] Chat reverse_proxy not configured" + FAILED=1 + fi + + # Check root redirect to /forge/ + if grep -q "redir /forge/ 302" "$CADDYFILE"; then + echo "[PASS] Root redirect to /forge/ configured" + else + echo "[FAIL] Root redirect to /forge/ not configured" + FAILED=1 + fi + + echo "" + if [ $FAILED -eq 0 ]; then + echo "=== All routing blocks validated ===" + exit 0 + else + echo "=== Routing block validation failed ===" >&2 + exit 1 + fi + + # ── 5. Standalone Caddyfile routing test ───────────────────────────────── + # Run the standalone unit test for Caddyfile routing block validation. + # This test extracts the Caddyfile template from edge.hcl and validates + # its structure without requiring a running Caddy instance. + - name: test-caddyfile-routing + image: alpine:3.19 + commands: + - apk add --no-cache grep coreutils + - | + set -e + EDGE_TEMPLATE="nomad/jobs/edge.hcl" + + echo "=== Extracting Caddyfile template from $EDGE_TEMPLATE ===" + + # Extract the Caddyfile template (content between <&2 + exit 1 + fi + + echo "Caddyfile template extracted successfully" + echo "" + + FAILED=0 + + # Check Forgejo subpath + if echo "$CADDYFILE" | grep -q "handle /forge/\*"; then + echo "[PASS] Forgejo handle block" + else + echo "[FAIL] Forgejo handle block" + FAILED=1 + fi + + if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:3000"; then + echo "[PASS] Forgejo reverse_proxy (port 3000)" + else + echo "[FAIL] Forgejo reverse_proxy (port 3000)" + FAILED=1 + fi + + # Check Woodpecker subpath + if echo "$CADDYFILE" | grep -q "handle /ci/\*"; then + echo "[PASS] Woodpecker handle block" + else + echo "[FAIL] Woodpecker handle block" + FAILED=1 + fi + + if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8000"; then + echo "[PASS] Woodpecker reverse_proxy (port 8000)" + else + echo "[FAIL] Woodpecker reverse_proxy (port 8000)" + FAILED=1 + fi + + # Check Staging subpath + if echo "$CADDYFILE" | grep -q "handle /staging/\*"; then + echo "[PASS] Staging handle block" + else + echo "[FAIL] Staging handle block" + FAILED=1 + fi + + if echo "$CADDYFILE" | grep -q "nomadService"; then + echo "[PASS] Staging Nomad service discovery" + else + echo "[FAIL] Staging Nomad service discovery" + FAILED=1 + fi + + # Check Chat subpath + if echo "$CADDYFILE" | grep -q "handle /chat/login"; then + echo "[PASS] Chat login handle block" + else + echo "[FAIL] Chat login handle block" + FAILED=1 + fi + + if echo "$CADDYFILE" | grep -q "handle /chat/oauth/callback"; then + echo "[PASS] Chat OAuth callback handle block" + else + echo "[FAIL] Chat OAuth callback handle block" + FAILED=1 + fi + + if echo "$CADDYFILE" | grep -q "handle /chat/\*"; then + echo "[PASS] Chat catch-all handle block" + else + echo "[FAIL] Chat catch-all handle block" + FAILED=1 + fi + + if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8080"; then + echo "[PASS] Chat reverse_proxy (port 8080)" + else + echo "[FAIL] Chat reverse_proxy (port 8080)" + FAILED=1 + fi + + # Check forward_auth for chat + if echo "$CADDYFILE" | grep -A10 "handle /chat/\*" | grep -q "forward_auth"; then + echo "[PASS] forward_auth block for /chat/*" + else + echo "[FAIL] forward_auth block for /chat/*" + FAILED=1 + fi + + # Check root redirect + if echo "$CADDYFILE" | grep -q "redir /forge/ 302"; then + echo "[PASS] Root redirect to /forge/" + else + echo "[FAIL] Root redirect to /forge/" + FAILED=1 + fi + + echo "" + if [ $FAILED -eq 0 ]; then + echo "=== All routing blocks validated ===" + exit 0 + else + echo "=== Routing block validation failed ===" >&2 + exit 1 + fi diff --git a/AGENTS.md b/AGENTS.md index fced0c6..52ea01f 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,4 +1,4 @@ - + # Disinto — Agent Instructions ## What this repo is @@ -37,13 +37,13 @@ disinto/ (code repo) │ examples/ — example vault action TOMLs (promote, publish, release, webhook-call) ├── lib/ env.sh, agent-sdk.sh, ci-helpers.sh, ci-debug.sh, load-project.sh, parse-deps.sh, guard.sh, mirrors.sh, pr-lifecycle.sh, issue-lifecycle.sh, worktree.sh, formula-session.sh, stack-lock.sh, forge-setup.sh, forge-push.sh, ops-setup.sh, ci-setup.sh, generators.sh, hire-agent.sh, release.sh, build-graph.py, branch-protection.sh, secret-scan.sh, tea-helpers.sh, action-vault.sh, ci-log-reader.py, git-creds.sh, sprint-filer.sh, hvault.sh │ hooks/ — Claude Code session hooks (on-compact-reinject, on-idle-stop, on-phase-change, on-pretooluse-guard, on-session-end, on-stop-failure) -│ init/nomad/ — cluster-up.sh, install.sh, vault-init.sh, lib-systemd.sh (Nomad+Vault Step 0 installers, #821-#825) -├── nomad/ server.hcl, client.hcl, vault.hcl — HCL configs deployed to /etc/nomad.d/ and /etc/vault.d/ by lib/init/nomad/cluster-up.sh -│ jobs/ — Nomad jobspecs (forgejo.hcl reads Vault secrets via template stanza, S2.4) +│ init/nomad/ — cluster-up.sh, install.sh, vault-init.sh, lib-systemd.sh (Nomad+Vault Step 0 installers, #821-#825); wp-oauth-register.sh (Forgejo OAuth2 app + Vault KV seeder for Woodpecker, S3.3); deploy.sh (dependency-ordered Nomad job deploy + health-wait, S4) +├── nomad/ server.hcl, client.hcl (allow_privileged for woodpecker-agent, S3-fix-5), vault.hcl — HCL configs deployed to /etc/nomad.d/ and /etc/vault.d/ by lib/init/nomad/cluster-up.sh +│ jobs/ — Nomad jobspecs: forgejo.hcl (Vault secrets via template, S2.4); woodpecker-server.hcl + woodpecker-agent.hcl (host-net, docker.sock, Vault KV, S3.1-S3.2); agents.hcl (7 roles, llama, Vault-templated bot tokens, S4.1); vault-runner.hcl (parameterized batch dispatch, S5.3); staging.hcl (Caddy file-server, dynamic port — edge discovers via service registration, S5.2); chat.hcl (Claude chat UI, tmpfs via mount block, Vault OAuth secrets, S5.2); edge.hcl (Caddy proxy + dispatcher sidecar, S5.1) ├── projects/ *.toml.example — templates; *.toml — local per-box config (gitignored) ├── formulas/ Issue templates (TOML specs for multi-step agent tasks) -├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge dispatcher, chat (server.py, entrypoint-chat.sh, Dockerfile, ui/) -├── tools/ Operational tools: edge-control/ (register.sh, install.sh, verify-chat-sandbox.sh) +├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge (Caddy + chat server subprocess + dispatcher), chat (server.py, ui/ — copied into edge image at build time) +├── tools/ Operational tools: edge-control/ (register.sh, install.sh, verify-chat-sandbox.sh; register.sh enforces: reserved-name blocklist, admin-approved allowlist via /var/lib/disinto/allowlist.json, per-caller attribution via --as forced-command arg stored as registered_by, append-only audit log at /var/log/disinto/edge-register.log, ownership check on deregister requiring pubkey match) │ vault-apply-policies.sh, vault-apply-roles.sh, vault-import.sh — Vault provisioning (S2.1/S2.2) │ vault-seed-.sh — per-service Vault secret seeders; auto-invoked by `bin/disinto --with ` (add a new file to support a new service) ├── docs/ Protocol docs (PHASE-PROTOCOL.md, EVIDENCE-ARCHITECTURE.md) diff --git a/architect/AGENTS.md b/architect/AGENTS.md index 51b24b1..98c0e04 100644 --- a/architect/AGENTS.md +++ b/architect/AGENTS.md @@ -1,4 +1,4 @@ - + # Architect — Agent Instructions ## What this agent is diff --git a/bin/disinto b/bin/disinto index 5f57927..b0893c4 100755 --- a/bin/disinto +++ b/bin/disinto @@ -12,6 +12,7 @@ # disinto secrets Manage encrypted secrets # disinto run Run action in ephemeral runner container # disinto ci-logs [--step ] Read CI logs from Woodpecker SQLite +# disinto backup create Export factory state for migration # # Usage: # disinto init https://github.com/user/repo @@ -39,7 +40,9 @@ source "${FACTORY_ROOT}/lib/generators.sh" source "${FACTORY_ROOT}/lib/forge-push.sh" source "${FACTORY_ROOT}/lib/ci-setup.sh" source "${FACTORY_ROOT}/lib/release.sh" +source "${FACTORY_ROOT}/lib/backup.sh" source "${FACTORY_ROOT}/lib/claude-config.sh" +source "${FACTORY_ROOT}/lib/disinto/backup.sh" # backup create/import # ── Helpers ────────────────────────────────────────────────────────────────── @@ -62,7 +65,9 @@ Usage: disinto hire-an-agent [--formula ] [--local-model ] [--model ] Hire a new agent (create user + .profile repo; re-run to rotate credentials) disinto agent Manage agent state (enable/disable) + disinto backup create Export factory state (issues + ops bundle) disinto edge [options] Manage edge tunnel registrations + disinto backup Backup and restore factory state Edge subcommands: register [project] Register a new tunnel (generates keypair if needed) @@ -82,7 +87,7 @@ Init options: --ci-id Woodpecker CI repo ID (default: 0 = no CI) --forge-url Forge base URL (default: http://localhost:3000) --backend Orchestration backend: docker (default) | nomad - --with (nomad) Deploy services: forgejo[,...] (S1.3) + --with (nomad) Deploy services: forgejo,woodpecker,agents,staging,chat,edge[,...] (S1.3, S3.4, S4.2, S5.2, S5.5) --empty (nomad) Bring up cluster only, no jobs (S0.4) --bare Skip compose generation (bare-metal setup) --build Use local docker build instead of registry images (dev mode) @@ -101,6 +106,18 @@ Hire an agent options: CI logs options: --step Filter logs to a specific step (e.g., smoke-init) + +Backup subcommands: + create Create backup of factory state to tarball + import Restore factory state from backup tarball + +Import behavior: + - Unpacks tarball to temp directory + - Creates disinto repo via Forgejo API (mirror config is manual) + - Creates disinto-ops repo and pushes refs from bundle + - Imports issues from issues/*.json (idempotent - skips existing) + - Logs issue number mapping (Forgejo auto-assigns numbers) + - Prints summary: created X repos, pushed Y refs, imported Z issues, skipped W EOF exit 1 } @@ -783,39 +800,37 @@ _disinto_init_nomad() { fi if [ -n "$with_services" ]; then - # Vault seed plan (S2.6, #928): one line per service whose - # tools/vault-seed-.sh ships. Services without a seeder are - # silently skipped — the real-run loop below mirrors this, - # making `--with woodpecker` in Step 3 auto-invoke - # tools/vault-seed-woodpecker.sh once that file lands without - # any further change to bin/disinto. - local seed_hdr_printed=false - local IFS=',' - for svc in $with_services; do - svc=$(echo "$svc" | xargs) # trim whitespace - local seed_script="${FACTORY_ROOT}/tools/vault-seed-${svc}.sh" - if [ -x "$seed_script" ]; then - if [ "$seed_hdr_printed" = false ]; then - echo "── Vault seed dry-run ─────────────────────────────────" - seed_hdr_printed=true - fi - echo "[seed] [dry-run] ${seed_script} --dry-run" + # Interleaved seed/deploy per service (S2.6, #928, #948): match the + # real-run path so dry-run output accurately represents execution order. + # Build ordered deploy list: only include services present in with_services + local DEPLOY_ORDER="" + for ordered_svc in forgejo woodpecker-server woodpecker-agent agents staging chat edge; do + if echo ",$with_services," | grep -q ",$ordered_svc,"; then + DEPLOY_ORDER="${DEPLOY_ORDER:+${DEPLOY_ORDER} }${ordered_svc}" fi done - [ "$seed_hdr_printed" = true ] && echo "" - echo "── Deploy services dry-run ────────────────────────────" - echo "[deploy] services to deploy: ${with_services}" - for svc in $with_services; do - svc=$(echo "$svc" | xargs) # trim whitespace - # Validate known services first + local IFS=' ' + echo "[deploy] deployment order: ${DEPLOY_ORDER}" + for svc in $DEPLOY_ORDER; do + # Seed this service (if seed script exists) + local seed_name="$svc" case "$svc" in - forgejo) ;; - *) - echo "Error: unknown service '${svc}' — known: forgejo" >&2 - exit 1 - ;; + woodpecker-server|woodpecker-agent) seed_name="woodpecker" ;; + agents) seed_name="agents" ;; + chat) seed_name="chat" ;; + edge) seed_name="ops-repo" ;; esac + local seed_script="${FACTORY_ROOT}/tools/vault-seed-${seed_name}.sh" + if [ -x "$seed_script" ]; then + echo "── Vault seed dry-run ─────────────────────────────────" + echo "[seed] [dry-run] ${seed_script} --dry-run" + echo "" + fi + + # Deploy this service + echo "── Deploy services dry-run ────────────────────────────" + echo "[deploy] services to deploy: ${with_services}" local jobspec_path="${FACTORY_ROOT}/nomad/jobs/${svc}.hcl" if [ ! -f "$jobspec_path" ]; then echo "Error: jobspec not found: ${jobspec_path}" >&2 @@ -823,9 +838,40 @@ _disinto_init_nomad() { fi echo "[deploy] [dry-run] nomad job validate ${jobspec_path}" echo "[deploy] [dry-run] nomad job run -detach ${jobspec_path}" + # Post-deploy: forgejo-bootstrap + if [ "$svc" = "forgejo" ]; then + local bootstrap_script="${FACTORY_ROOT}/lib/init/nomad/forgejo-bootstrap.sh" + echo "[deploy] [dry-run] [post-deploy] would run ${bootstrap_script}" + fi done echo "[deploy] dry-run complete" fi + + # Dry-run vault-runner (unconditionally, not gated by --with) + echo "" + echo "── Vault-runner dry-run ───────────────────────────────────" + local vault_runner_path="${FACTORY_ROOT}/nomad/jobs/vault-runner.hcl" + if [ -f "$vault_runner_path" ]; then + echo "[deploy] vault-runner: [dry-run] nomad job validate ${vault_runner_path}" + echo "[deploy] vault-runner: [dry-run] nomad job run -detach ${vault_runner_path}" + else + echo "[deploy] vault-runner: jobspec not found, skipping" + fi + + # Build custom images dry-run (if agents, chat, or edge services are included) + if echo ",$with_services," | grep -qE ",(agents|chat|edge),"; then + echo "" + echo "── Build images dry-run ──────────────────────────────" + if echo ",$with_services," | grep -q ",agents,"; then + echo "[build] [dry-run] docker build -t disinto/agents:local -f ${FACTORY_ROOT}/docker/agents/Dockerfile ${FACTORY_ROOT}" + fi + if echo ",$with_services," | grep -q ",chat,"; then + echo "[build] [dry-run] docker build -t disinto/chat:local -f ${FACTORY_ROOT}/docker/chat/Dockerfile ${FACTORY_ROOT}/docker/chat" + fi + if echo ",$with_services," | grep -q ",edge,"; then + echo "[build] [dry-run] docker build -t disinto/edge:local -f ${FACTORY_ROOT}/docker/edge/Dockerfile ${FACTORY_ROOT}/docker/edge" + fi + fi exit 0 fi @@ -913,88 +959,147 @@ _disinto_init_nomad() { echo "[import] no --import-env/--import-sops — skipping; set them or seed kv/disinto/* manually before deploying secret-dependent services" fi - # Seed Vault for services that ship their own seeder (S2.6, #928). - # Convention: tools/vault-seed-.sh — auto-invoked when --with - # is requested. Runs AFTER vault-import so that real imported values - # win over generated seeds when both are present; each seeder is - # idempotent on a per-key basis (see vault-seed-forgejo.sh's - # "missing → generate, present → unchanged" contract), so re-running - # init does not rotate existing keys. Services without a seeder are - # silently skipped — keeps this loop forward-compatible with Step 3+ - # services that may ship their own seeder without touching bin/disinto. - # - # VAULT_ADDR is passed explicitly because cluster-up.sh writes the - # profile.d export *during* this same init run, so the current shell - # hasn't sourced it yet; sibling vault-* scripts (engines/policies/ - # auth/import) default VAULT_ADDR internally via _hvault_default_env, - # but vault-seed-forgejo.sh requires the caller to set it. - # - # The non-root branch invokes the seeder as `sudo -n -- env VAR=val - # script` rather than `sudo -n VAR=val -- script`: sudo treats bare - # `VAR=val` args as sudoers env-assignments, which the default - # `env_reset=on` policy silently discards unless the variable is in - # `env_keep` (VAULT_ADDR is not). Using `env` as the actual command - # sets VAULT_ADDR in the child process regardless of sudoers policy. + # Build custom images required by Nomad jobs (S4.2, S5.2, S5.5) — before deploy. + # Single-node factory dev box: no multi-node pull needed, no registry auth. + # Can upgrade to approach B (registry push/pull) later if multi-node. + if echo ",$with_services," | grep -qE ",(agents|chat|edge),"; then + echo "" + echo "── Building custom images ─────────────────────────────" + if echo ",$with_services," | grep -q ",agents,"; then + local tag="disinto/agents:local" + echo "── Building $tag ─────────────────────────────" + docker build -t "$tag" -f "${FACTORY_ROOT}/docker/agents/Dockerfile" "${FACTORY_ROOT}" 2>&1 | tail -5 + fi + if echo ",$with_services," | grep -q ",chat,"; then + local tag="disinto/chat:local" + echo "── Building $tag ─────────────────────────────" + docker build -t "$tag" -f "${FACTORY_ROOT}/docker/chat/Dockerfile" "${FACTORY_ROOT}/docker/chat" 2>&1 | tail -5 + fi + if echo ",$with_services," | grep -q ",edge,"; then + local tag="disinto/edge:local" + echo "── Building $tag ─────────────────────────────" + docker build -t "$tag" -f "${FACTORY_ROOT}/docker/edge/Dockerfile" "${FACTORY_ROOT}/docker/edge" 2>&1 | tail -5 + fi + fi + + # Interleaved seed/deploy per service (S2.6, #928, #948). + # We interleave seed + deploy per service (not batch all seeds then all deploys) + # so that OAuth-dependent services can reach their dependencies during seeding. + # E.g., seed-forgejo → deploy-forgejo → seed-woodpecker (OAuth can now reach + # running forgejo) → deploy-woodpecker. if [ -n "$with_services" ]; then local vault_addr="${VAULT_ADDR:-http://127.0.0.1:8200}" - local IFS=',' - for svc in $with_services; do - svc=$(echo "$svc" | xargs) # trim whitespace - local seed_script="${FACTORY_ROOT}/tools/vault-seed-${svc}.sh" + + # Build ordered deploy list (S3.4, S4.2, S5.2, S5.5): forgejo → woodpecker-server → woodpecker-agent → agents → staging → chat → edge + local DEPLOY_ORDER="" + for ordered_svc in forgejo woodpecker-server woodpecker-agent agents staging chat edge; do + if echo ",$with_services," | grep -q ",$ordered_svc,"; then + DEPLOY_ORDER="${DEPLOY_ORDER:+${DEPLOY_ORDER} }${ordered_svc}" + fi + done + + local IFS=' ' + for svc in $DEPLOY_ORDER; do + # Seed this service (if seed script exists) + local seed_name="$svc" + case "$svc" in + woodpecker-server|woodpecker-agent) seed_name="woodpecker" ;; + agents) seed_name="agents" ;; + chat) seed_name="chat" ;; + edge) seed_name="ops-repo" ;; + esac + local seed_script="${FACTORY_ROOT}/tools/vault-seed-${seed_name}.sh" if [ -x "$seed_script" ]; then echo "" - echo "── Seeding Vault for ${svc} ───────────────────────────" + echo "── Seeding Vault for ${seed_name} ───────────────────────────" if [ "$(id -u)" -eq 0 ]; then VAULT_ADDR="$vault_addr" "$seed_script" || exit $? else if ! command -v sudo >/dev/null 2>&1; then - echo "Error: vault-seed-${svc}.sh must run as root and sudo is not installed" >&2 + echo "Error: vault-seed-${seed_name}.sh must run as root and sudo is not installed" >&2 exit 1 fi sudo -n -- env "VAULT_ADDR=$vault_addr" "$seed_script" || exit $? fi fi - done - fi - # Deploy services if requested - if [ -n "$with_services" ]; then - echo "" - echo "── Deploying services ─────────────────────────────────" - local -a deploy_cmd=("$deploy_sh") - # Split comma-separated service list into positional args - local IFS=',' - for svc in $with_services; do - svc=$(echo "$svc" | xargs) # trim whitespace - if ! echo "$svc" | grep -qE '^[a-zA-Z0-9_-]+$'; then - echo "Error: invalid service name '${svc}' — must match ^[a-zA-Z0-9_-]+$" >&2 - exit 1 - fi - # Validate known services FIRST (before jobspec check) + # Deploy this service + echo "" + echo "── Deploying ${svc} ───────────────────────────────────────" + + # Seed host volumes before deployment (if needed) case "$svc" in - forgejo) ;; - *) - echo "Error: unknown service '${svc}' — known: forgejo" >&2 - exit 1 + staging) + # Seed site-content host volume (/srv/disinto/docker) with static content + # The staging jobspec mounts this volume read-only to /srv/site + local site_content_src="${FACTORY_ROOT}/docker/index.html" + local site_content_dst="/srv/disinto/docker" + if [ -f "$site_content_src" ] && [ -d "$site_content_dst" ]; then + if ! cmp -s "$site_content_src" "${site_content_dst}/index.html" 2>/dev/null; then + echo "[staging] seeding site-content volume..." + cp "$site_content_src" "${site_content_dst}/index.html" + fi + fi ;; esac - # Check jobspec exists + local jobspec_path="${FACTORY_ROOT}/nomad/jobs/${svc}.hcl" if [ ! -f "$jobspec_path" ]; then echo "Error: jobspec not found: ${jobspec_path}" >&2 exit 1 fi - deploy_cmd+=("$svc") + + local -a deploy_cmd=("$deploy_sh" "$svc") + if [ "$(id -u)" -eq 0 ]; then + "${deploy_cmd[@]}" || exit $? + else + if ! command -v sudo >/dev/null 2>&1; then + echo "Error: deploy.sh must run as root and sudo is not installed" >&2 + exit 1 + fi + sudo -n --preserve-env=FORGE_ADMIN_PASS,FORGE_TOKEN,FORGE_URL -- "${deploy_cmd[@]}" || exit $? + fi + + # Post-deploy: bootstrap Forgejo admin user after forgejo deployment + if [ "$svc" = "forgejo" ]; then + echo "" + echo "── Bootstrapping Forgejo admin user ───────────────────────" + local bootstrap_script="${FACTORY_ROOT}/lib/init/nomad/forgejo-bootstrap.sh" + if [ -x "$bootstrap_script" ]; then + if [ "$(id -u)" -eq 0 ]; then + "$bootstrap_script" || exit $? + else + if ! command -v sudo >/dev/null 2>&1; then + echo "Error: forgejo-bootstrap.sh must run as root and sudo is not installed" >&2 + exit 1 + fi + sudo -n --preserve-env=FORGE_ADMIN_PASS,FORGE_TOKEN,FORGE_URL -- "$bootstrap_script" || exit $? + fi + else + echo "warning: forgejo-bootstrap.sh not found or not executable" >&2 + fi + fi done - if [ "$(id -u)" -eq 0 ]; then - "${deploy_cmd[@]}" || exit $? - else - if ! command -v sudo >/dev/null 2>&1; then - echo "Error: deploy.sh must run as root and sudo is not installed" >&2 - exit 1 + # Run vault-runner (unconditionally, not gated by --with) — infrastructure job + # vault-runner is always present since it's needed for vault action dispatch + echo "" + echo "── Running vault-runner ────────────────────────────────────" + local vault_runner_path="${FACTORY_ROOT}/nomad/jobs/vault-runner.hcl" + if [ -f "$vault_runner_path" ]; then + echo "[deploy] vault-runner: running Nomad job (infrastructure)" + local -a vault_runner_cmd=("$deploy_sh" "vault-runner") + if [ "$(id -u)" -eq 0 ]; then + "${vault_runner_cmd[@]}" || exit $? + else + if ! command -v sudo >/dev/null 2>&1; then + echo "Error: deploy.sh must run as root and sudo is not installed" >&2 + exit 1 + fi + sudo -n -- "${vault_runner_cmd[@]}" || exit $? fi - sudo -n -- "${deploy_cmd[@]}" || exit $? + else + echo "[deploy] vault-runner: jobspec not found, skipping" fi # Print final summary @@ -1012,9 +1117,24 @@ _disinto_init_nomad() { echo "Imported: (none — seed kv/disinto/* manually before deploying secret-dependent services)" fi echo "Deployed: ${with_services}" - if echo "$with_services" | grep -q "forgejo"; then + if echo ",$with_services," | grep -q ",forgejo,"; then echo "Ports: forgejo: 3000" fi + if echo ",$with_services," | grep -q ",woodpecker-server,"; then + echo " woodpecker-server: 8000" + fi + if echo ",$with_services," | grep -q ",woodpecker-agent,"; then + echo " woodpecker-agent: (agent connected)" + fi + if echo ",$with_services," | grep -q ",agents,"; then + echo " agents: (polling loop running)" + fi + if echo ",$with_services," | grep -q ",staging,"; then + echo " staging: (internal, no external port)" + fi + if echo ",$with_services," | grep -q ",chat,"; then + echo " chat: 8080" + fi echo "────────────────────────────────────────────────────────" fi @@ -1100,6 +1220,70 @@ disinto_init() { exit 1 fi + # Normalize --with services (S3.4): expand 'woodpecker' shorthand to + # 'woodpecker-server,woodpecker-agent', auto-include forgejo when + # woodpecker is requested (OAuth dependency), and validate all names. + if [ -n "$with_services" ]; then + # Expand 'woodpecker' (bare) → 'woodpecker-server,woodpecker-agent'. + # Must not match already-expanded 'woodpecker-server'/'woodpecker-agent'. + local expanded="" + local IFS=',' + for _svc in $with_services; do + _svc=$(echo "$_svc" | xargs) + case "$_svc" in + woodpecker) _svc="woodpecker-server,woodpecker-agent" ;; + agents) _svc="agents" ;; + esac + expanded="${expanded:+${expanded},}${_svc}" + done + with_services="$expanded" + unset IFS + + # Auto-include forgejo when woodpecker is requested + if echo ",$with_services," | grep -q ",woodpecker-server,\|,woodpecker-agent," \ + && ! echo ",$with_services," | grep -q ",forgejo,"; then + echo "Note: --with woodpecker implies --with forgejo (OAuth dependency)" + with_services="forgejo,${with_services}" + fi + + # Auto-include forgejo and woodpecker when agents is requested + if echo ",$with_services," | grep -q ",agents,"; then + if ! echo ",$with_services," | grep -q ",forgejo,"; then + echo "Note: --with agents implies --with forgejo (agents need forge)" + with_services="forgejo,${with_services}" + fi + if ! echo ",$with_services," | grep -q ",woodpecker-server,\|,woodpecker-agent,"; then + echo "Note: --with agents implies --with woodpecker (agents need CI)" + with_services="${with_services},woodpecker-server,woodpecker-agent" + fi + fi + + # Auto-include all dependencies when edge is requested (S5.5) + if echo ",$with_services," | grep -q ",edge,"; then + # Edge depends on all backend services + for dep in forgejo woodpecker-server woodpecker-agent agents staging chat; do + if ! echo ",$with_services," | grep -q ",${dep},"; then + echo "Note: --with edge implies --with ${dep} (edge depends on all backend services)" + with_services="${with_services},${dep}" + fi + done + fi + + # Validate all service names are known + local IFS=',' + for _svc in $with_services; do + _svc=$(echo "$_svc" | xargs) + case "$_svc" in + forgejo|woodpecker-server|woodpecker-agent|agents|staging|chat|edge) ;; + *) + echo "Error: unknown service '${_svc}' — known: forgejo, woodpecker-server, woodpecker-agent, agents, staging, chat, edge" >&2 + exit 1 + ;; + esac + done + unset IFS + fi + # --import-* flag validation (S2.5). These three flags form an import # triple and must be consistent before dispatch: sops encryption is # useless without the age key to decrypt it, so either both --import-sops @@ -1290,6 +1474,36 @@ p.write_text(text) exit 0 fi + # Configure Forgejo and Woodpecker URLs when EDGE_TUNNEL_FQDN is set. + # In subdomain mode, uses per-service FQDNs at root path instead of subpath URLs. + # Must run BEFORE generate_compose so the .env file is available for variable substitution. + if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then + local routing_mode="${EDGE_ROUTING_MODE:-subpath}" + # Create .env file if it doesn't exist yet (needed before compose generation) + if [ "$bare" = false ] && [ ! -f "${FACTORY_ROOT}/.env" ]; then + touch "${FACTORY_ROOT}/.env" + fi + if [ "$routing_mode" = "subdomain" ]; then + # Subdomain mode: Forgejo at forge..disinto.ai (root path) + if ! grep -q '^FORGEJO_ROOT_URL=' "${FACTORY_ROOT}/.env" 2>/dev/null; then + echo "FORGEJO_ROOT_URL=https://${EDGE_TUNNEL_FQDN_FORGE:-forge.${EDGE_TUNNEL_FQDN}}/" >> "${FACTORY_ROOT}/.env" + fi + # Subdomain mode: Woodpecker at ci..disinto.ai (root path) + if ! grep -q '^WOODPECKER_HOST=' "${FACTORY_ROOT}/.env" 2>/dev/null; then + echo "WOODPECKER_HOST=https://${EDGE_TUNNEL_FQDN_CI:-ci.${EDGE_TUNNEL_FQDN}}" >> "${FACTORY_ROOT}/.env" + fi + else + # Subpath mode: Forgejo ROOT_URL with /forge/ subpath (trailing slash required) + if ! grep -q '^FORGEJO_ROOT_URL=' "${FACTORY_ROOT}/.env" 2>/dev/null; then + echo "FORGEJO_ROOT_URL=https://${EDGE_TUNNEL_FQDN}/forge/" >> "${FACTORY_ROOT}/.env" + fi + # Subpath mode: Woodpecker WOODPECKER_HOST with /ci subpath (no trailing slash for v3) + if ! grep -q '^WOODPECKER_HOST=' "${FACTORY_ROOT}/.env" 2>/dev/null; then + echo "WOODPECKER_HOST=https://${EDGE_TUNNEL_FQDN}/ci" >> "${FACTORY_ROOT}/.env" + fi + fi + fi + # Generate compose files (unless --bare) if [ "$bare" = false ]; then local forge_port @@ -1304,18 +1518,6 @@ p.write_text(text) touch "${FACTORY_ROOT}/.env" fi - # Configure Forgejo and Woodpecker subpath URLs when EDGE_TUNNEL_FQDN is set - if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then - # Forgejo ROOT_URL with /forge/ subpath (note trailing slash - Forgejo needs it) - if ! grep -q '^FORGEJO_ROOT_URL=' "${FACTORY_ROOT}/.env" 2>/dev/null; then - echo "FORGEJO_ROOT_URL=https://${EDGE_TUNNEL_FQDN}/forge/" >> "${FACTORY_ROOT}/.env" - fi - # Woodpecker WOODPECKER_HOST with /ci subpath (no trailing slash for v3) - if ! grep -q '^WOODPECKER_HOST=' "${FACTORY_ROOT}/.env" 2>/dev/null; then - echo "WOODPECKER_HOST=https://${EDGE_TUNNEL_FQDN}/ci" >> "${FACTORY_ROOT}/.env" - fi - fi - # Prompt for FORGE_ADMIN_PASS before setup_forge # This ensures the password is set before Forgejo user creation prompt_admin_password "${FACTORY_ROOT}/.env" @@ -1419,9 +1621,15 @@ p.write_text(text) create_woodpecker_oauth "$forge_url" "$forge_repo" # Create OAuth2 app on Forgejo for disinto-chat (#708) + # In subdomain mode, callback is at chat. root instead of /chat/ subpath. local chat_redirect_uri if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then - chat_redirect_uri="https://${EDGE_TUNNEL_FQDN}/chat/oauth/callback" + local chat_routing_mode="${EDGE_ROUTING_MODE:-subpath}" + if [ "$chat_routing_mode" = "subdomain" ]; then + chat_redirect_uri="https://${EDGE_TUNNEL_FQDN_CHAT:-chat.${EDGE_TUNNEL_FQDN}}/oauth/callback" + else + chat_redirect_uri="https://${EDGE_TUNNEL_FQDN}/chat/oauth/callback" + fi else chat_redirect_uri="http://localhost/chat/oauth/callback" fi @@ -2621,15 +2829,29 @@ disinto_edge() { # Write to .env (replace existing entries to avoid duplicates) local tmp_env tmp_env=$(mktemp) - grep -Ev "^EDGE_TUNNEL_(HOST|PORT|FQDN)=" "$env_file" > "$tmp_env" 2>/dev/null || true + grep -Ev "^EDGE_TUNNEL_(HOST|PORT|FQDN|FQDN_FORGE|FQDN_CI|FQDN_CHAT)=" "$env_file" > "$tmp_env" 2>/dev/null || true mv "$tmp_env" "$env_file" echo "EDGE_TUNNEL_HOST=${edge_host}" >> "$env_file" echo "EDGE_TUNNEL_PORT=${port}" >> "$env_file" echo "EDGE_TUNNEL_FQDN=${fqdn}" >> "$env_file" + # Subdomain mode: write per-service FQDNs (#1028) + local reg_routing_mode="${EDGE_ROUTING_MODE:-subpath}" + if [ "$reg_routing_mode" = "subdomain" ]; then + echo "EDGE_TUNNEL_FQDN_FORGE=forge.${fqdn}" >> "$env_file" + echo "EDGE_TUNNEL_FQDN_CI=ci.${fqdn}" >> "$env_file" + echo "EDGE_TUNNEL_FQDN_CHAT=chat.${fqdn}" >> "$env_file" + fi + echo "Registered: ${project}" echo " Port: ${port}" echo " FQDN: ${fqdn}" + if [ "$reg_routing_mode" = "subdomain" ]; then + echo " Mode: subdomain" + echo " Forge: forge.${fqdn}" + echo " CI: ci.${fqdn}" + echo " Chat: chat.${fqdn}" + fi echo " Saved to: ${env_file}" ;; @@ -2663,12 +2885,23 @@ disinto_edge() { edge_host="${EDGE_HOST:-edge.disinto.ai}" fi + # Read tunnel pubkey for ownership proof + local secrets_dir="${FACTORY_ROOT}/secrets" + local tunnel_pubkey="${secrets_dir}/tunnel_key.pub" + if [ ! -f "$tunnel_pubkey" ]; then + echo "Error: tunnel keypair not found at ${tunnel_pubkey}" >&2 + echo "Cannot prove ownership without the tunnel public key." >&2 + exit 1 + fi + local pubkey + pubkey=$(tr -d '\n' < "$tunnel_pubkey") + # SSH to edge host and deregister echo "Deregistering tunnel for ${project} on ${edge_host}..." local response response=$(ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes \ "disinto-register@${edge_host}" \ - "deregister ${project}" 2>&1) || { + "deregister ${project} ${pubkey}" 2>&1) || { echo "Error: failed to deregister tunnel" >&2 echo "Response: ${response}" >&2 exit 1 @@ -2751,6 +2984,33 @@ EOF esac } +# ── backup command ──────────────────────────────────────────────────────────── +# Usage: disinto backup [args] +# Subcommands: +# create Create backup of factory state +# import Restore factory state from backup +disinto_backup() { + local subcmd="${1:-}" + shift || true + + case "$subcmd" in + create) + backup_create "$@" + ;; + import) + backup_import "$@" + ;; + *) + echo "Usage: disinto backup [args]" >&2 + echo "" >&2 + echo "Subcommands:" >&2 + echo " create Create backup of factory state" >&2 + echo " import Restore factory state from backup" >&2 + exit 1 + ;; + esac +} + # ── Main dispatch ──────────────────────────────────────────────────────────── case "${1:-}" in @@ -2767,6 +3027,7 @@ case "${1:-}" in hire-an-agent) shift; disinto_hire_an_agent "$@" ;; agent) shift; disinto_agent "$@" ;; edge) shift; disinto_edge "$@" ;; + backup) shift; disinto_backup "$@" ;; -h|--help) usage ;; *) usage ;; esac diff --git a/dev/AGENTS.md b/dev/AGENTS.md index 02fd612..d48f6b6 100644 --- a/dev/AGENTS.md +++ b/dev/AGENTS.md @@ -1,4 +1,4 @@ - + # Dev Agent **Role**: Implement issues autonomously — write code, push branches, address diff --git a/docker-compose.yml b/docker-compose.yml index ba8c77c..6206b2c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,7 +15,6 @@ services: - project-repos:/home/agent/repos - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro - - ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro - ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro - ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro - woodpecker-data:/woodpecker-data:ro @@ -78,7 +77,6 @@ services: - project-repos:/home/agent/repos - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro - - ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro - ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro - ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro - woodpecker-data:/woodpecker-data:ro @@ -139,7 +137,6 @@ services: - project-repos:/home/agent/repos - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro - - ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro - ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro - ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro - woodpecker-data:/woodpecker-data:ro @@ -211,8 +208,8 @@ services: edge: build: - context: docker/edge - dockerfile: Dockerfile + context: . + dockerfile: docker/edge/Dockerfile image: disinto/edge:latest container_name: disinto-edge security_opt: @@ -223,6 +220,8 @@ services: - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/root/.claude.json:ro - ${CLAUDE_DIR:-${HOME}/.claude}:/root/.claude:ro - disinto-logs:/opt/disinto-logs + # Chat history persistence (merged from chat container, #1083) + - ${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history environment: - FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-} - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-} @@ -234,6 +233,17 @@ services: - PRIMARY_BRANCH=main - DISINTO_CONTAINER=1 - FORGE_ADMIN_USERS=disinto-admin,vault-bot,admin + # Chat env vars (merged from chat container into edge, #1083) + - CHAT_HOST=127.0.0.1 + - CHAT_PORT=8080 + - CHAT_OAUTH_CLIENT_ID=${CHAT_OAUTH_CLIENT_ID:-} + - CHAT_OAUTH_CLIENT_SECRET=${CHAT_OAUTH_CLIENT_SECRET:-} + - DISINTO_CHAT_ALLOWED_USERS=${DISINTO_CHAT_ALLOWED_USERS:-} + - FORWARD_AUTH_SECRET=${FORWARD_AUTH_SECRET:-} + - EDGE_TUNNEL_FQDN=${EDGE_TUNNEL_FQDN:-} + - EDGE_TUNNEL_FQDN_CHAT=${EDGE_TUNNEL_FQDN_CHAT:-} + - EDGE_ROUTING_MODE=${EDGE_ROUTING_MODE:-subpath} + # Rate limiting removed (#1084) ports: - "80:80" - "443:443" diff --git a/docker/agents/Dockerfile b/docker/agents/Dockerfile index 1bcba89..fa3b2d8 100644 --- a/docker/agents/Dockerfile +++ b/docker/agents/Dockerfile @@ -1,21 +1,26 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y --no-install-recommends \ - bash curl git jq tmux python3 python3-pip openssh-client ca-certificates age shellcheck procps gosu \ + bash curl git jq tmux nodejs npm python3 python3-pip openssh-client ca-certificates age shellcheck procps gosu \ && pip3 install --break-system-packages networkx tomlkit \ && rm -rf /var/lib/apt/lists/* # Pre-built binaries (copied from docker/agents/bin/) # SOPS — encrypted data decryption tool -COPY docker/agents/bin/sops /usr/local/bin/sops -RUN chmod +x /usr/local/bin/sops +# Download sops binary (replaces manual COPY of vendored binary) +ARG SOPS_VERSION=3.9.4 +RUN curl -fsSL "https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.linux.amd64" \ + -o /usr/local/bin/sops && chmod +x /usr/local/bin/sops # tea CLI — official Gitea/Forgejo CLI for issue/label/comment operations -COPY docker/agents/bin/tea /usr/local/bin/tea -RUN chmod +x /usr/local/bin/tea +# Download tea binary (replaces manual COPY of vendored binary) +ARG TEA_VERSION=0.9.2 +RUN curl -fsSL "https://dl.gitea.com/tea/${TEA_VERSION}/tea-${TEA_VERSION}-linux-amd64" \ + -o /usr/local/bin/tea && chmod +x /usr/local/bin/tea -# Claude CLI is mounted from the host via docker-compose volume. -# No internet access to cli.anthropic.com required at build time. +# Install Claude Code CLI — agent runtime for all LLM backends (llama, Claude API). +# The CLI is the execution environment; ANTHROPIC_BASE_URL selects the model provider. +RUN npm install -g @anthropic-ai/claude-code@2.1.84 # Non-root user RUN useradd -m -u 1000 -s /bin/bash agent diff --git a/docker/chat/Dockerfile b/docker/chat/Dockerfile deleted file mode 100644 index 3d89863..0000000 --- a/docker/chat/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# disinto-chat — minimal HTTP backend for Claude chat UI -# -# Small Debian slim base with Python runtime. -# Chosen for simplicity and small image size (~100MB). -# -# Image size: ~100MB (well under the 200MB ceiling) -# -# The claude binary is mounted from the host at runtime via docker-compose, -# not baked into the image — same pattern as the agents container. - -FROM debian:bookworm-slim - -# Install Python (no build-time network access needed) -RUN apt-get update && apt-get install -y --no-install-recommends \ - python3 \ - && rm -rf /var/lib/apt/lists/* - -# Non-root user — fixed UID 10001 for sandbox hardening (#706) -RUN useradd -m -u 10001 -s /bin/bash chat - -# Copy application files -COPY server.py /usr/local/bin/server.py -COPY entrypoint-chat.sh /entrypoint-chat.sh -COPY ui/ /var/chat/ui/ - -RUN chmod +x /entrypoint-chat.sh /usr/local/bin/server.py - -USER chat -WORKDIR /var/chat - -EXPOSE 8080 -HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ - CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')" || exit 1 - -ENTRYPOINT ["/entrypoint-chat.sh"] diff --git a/docker/chat/entrypoint-chat.sh b/docker/chat/entrypoint-chat.sh deleted file mode 100755 index 00fbe53..0000000 --- a/docker/chat/entrypoint-chat.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# entrypoint-chat.sh — Start the disinto-chat backend server -# -# Exec-replace pattern: this script is the container entrypoint and runs -# the server directly (no wrapper needed). Logs to stdout for docker logs. - -LOGFILE="/tmp/chat.log" - -log() { - printf '[%s] %s\n' "$(date -u '+%Y-%m-%d %H:%M:%S UTC')" "$*" | tee -a "$LOGFILE" -} - -# Sandbox sanity checks (#706) — fail fast if isolation is broken -if [ -e /var/run/docker.sock ]; then - log "FATAL: /var/run/docker.sock is accessible — sandbox violation" - exit 1 -fi -if [ "$(id -u)" = "0" ]; then - log "FATAL: running as root (uid 0) — sandbox violation" - exit 1 -fi - -# Verify Claude CLI is available (expected via volume mount from host). -if ! command -v claude &>/dev/null; then - log "FATAL: claude CLI not found in PATH" - log "Mount the host binary into the container, e.g.:" - log " volumes:" - log " - /usr/local/bin/claude:/usr/local/bin/claude:ro" - exit 1 -fi -log "Claude CLI: $(claude --version 2>&1 || true)" - -# Start the Python server (exec-replace so signals propagate correctly) -log "Starting disinto-chat server on port 8080..." -exec python3 /usr/local/bin/server.py diff --git a/docker/chat/server.py b/docker/chat/server.py index 6748354..48944d1 100644 --- a/docker/chat/server.py +++ b/docker/chat/server.py @@ -20,9 +20,15 @@ OAuth flow: 6. Redirects to /chat/ The claude binary is expected to be mounted from the host at /usr/local/bin/claude. + +Workspace access: + - CHAT_WORKSPACE_DIR environment variable: bind-mounted project working tree + - Claude invocation uses --permission-mode acceptEdits for code modification + - CWD is set to workspace directory when configured, enabling Claude to + inspect, explain, or modify code scoped to that tree only """ -import datetime +import asyncio import json import os import re @@ -30,21 +36,33 @@ import secrets import subprocess import sys import time +import threading from http.server import HTTPServer, BaseHTTPRequestHandler +from socketserver import ThreadingMixIn from urllib.parse import urlparse, parse_qs, urlencode +import socket +import struct +import base64 +import hashlib # Configuration -HOST = os.environ.get("CHAT_HOST", "0.0.0.0") +HOST = os.environ.get("CHAT_HOST", "127.0.0.1") PORT = int(os.environ.get("CHAT_PORT", 8080)) UI_DIR = "/var/chat/ui" STATIC_DIR = os.path.join(UI_DIR, "static") CLAUDE_BIN = "/usr/local/bin/claude" +# Workspace directory: bind-mounted project working tree for Claude access +# Defaults to empty; when set, Claude can read/write to this directory +WORKSPACE_DIR = os.environ.get("CHAT_WORKSPACE_DIR", "") + # OAuth configuration FORGE_URL = os.environ.get("FORGE_URL", "http://localhost:3000") CHAT_OAUTH_CLIENT_ID = os.environ.get("CHAT_OAUTH_CLIENT_ID", "") CHAT_OAUTH_CLIENT_SECRET = os.environ.get("CHAT_OAUTH_CLIENT_SECRET", "") EDGE_TUNNEL_FQDN = os.environ.get("EDGE_TUNNEL_FQDN", "") +EDGE_TUNNEL_FQDN_CHAT = os.environ.get("EDGE_TUNNEL_FQDN_CHAT", "") +EDGE_ROUTING_MODE = os.environ.get("EDGE_ROUTING_MODE", "subpath") # Shared secret for Caddy forward_auth verify endpoint (#709). # When set, only requests carrying this value in X-Forward-Auth-Secret are @@ -52,10 +70,6 @@ EDGE_TUNNEL_FQDN = os.environ.get("EDGE_TUNNEL_FQDN", "") # (acceptable during local dev; production MUST set this). FORWARD_AUTH_SECRET = os.environ.get("FORWARD_AUTH_SECRET", "") -# Rate limiting / cost caps (#711) -CHAT_MAX_REQUESTS_PER_HOUR = int(os.environ.get("CHAT_MAX_REQUESTS_PER_HOUR", 60)) -CHAT_MAX_REQUESTS_PER_DAY = int(os.environ.get("CHAT_MAX_REQUESTS_PER_DAY", 500)) -CHAT_MAX_TOKENS_PER_DAY = int(os.environ.get("CHAT_MAX_TOKENS_PER_DAY", 1000000)) # Allowed users - disinto-admin always allowed; CSV allowlist extends it _allowed_csv = os.environ.get("DISINTO_CHAT_ALLOWED_USERS", "") @@ -81,11 +95,10 @@ _sessions = {} # Pending OAuth state tokens: state -> expires (float) _oauth_states = {} -# Per-user rate limiting state (#711) -# user -> list of request timestamps (for sliding-window hourly/daily caps) -_request_log = {} -# user -> {"tokens": int, "date": "YYYY-MM-DD"} -_daily_tokens = {} + +# WebSocket message queues per user +# user -> asyncio.Queue (for streaming messages to connected clients) +_websocket_queues = {} # MIME types for static files MIME_TYPES = { @@ -99,9 +112,22 @@ MIME_TYPES = { ".ico": "image/x-icon", } +# WebSocket subprotocol for chat streaming +WEBSOCKET_SUBPROTOCOL = "chat-stream-v1" + +# WebSocket opcodes +OPCODE_CONTINUATION = 0x0 +OPCODE_TEXT = 0x1 +OPCODE_BINARY = 0x2 +OPCODE_CLOSE = 0x8 +OPCODE_PING = 0x9 +OPCODE_PONG = 0xA + def _build_callback_uri(): """Build the OAuth callback URI based on tunnel configuration.""" + if EDGE_ROUTING_MODE == "subdomain" and EDGE_TUNNEL_FQDN_CHAT: + return f"https://{EDGE_TUNNEL_FQDN_CHAT}/oauth/callback" if EDGE_TUNNEL_FQDN: return f"https://{EDGE_TUNNEL_FQDN}/chat/oauth/callback" return "http://localhost/chat/oauth/callback" @@ -187,69 +213,9 @@ def _fetch_user(access_token): return None -# ============================================================================= -# Rate Limiting Functions (#711) -# ============================================================================= - -def _check_rate_limit(user): - """Check per-user rate limits. Returns (allowed, retry_after, reason) (#711). - - Checks hourly request cap, daily request cap, and daily token cap. - """ - now = time.time() - one_hour_ago = now - 3600 - today = datetime.date.today().isoformat() - - # Prune old entries from request log - timestamps = _request_log.get(user, []) - timestamps = [t for t in timestamps if t > now - 86400] - _request_log[user] = timestamps - - # Hourly request cap - hourly = [t for t in timestamps if t > one_hour_ago] - if len(hourly) >= CHAT_MAX_REQUESTS_PER_HOUR: - oldest_in_window = min(hourly) - retry_after = int(oldest_in_window + 3600 - now) + 1 - return False, max(retry_after, 1), "hourly request limit" - - # Daily request cap - start_of_day = time.mktime(datetime.date.today().timetuple()) - daily = [t for t in timestamps if t >= start_of_day] - if len(daily) >= CHAT_MAX_REQUESTS_PER_DAY: - next_day = start_of_day + 86400 - retry_after = int(next_day - now) + 1 - return False, max(retry_after, 1), "daily request limit" - - # Daily token cap - token_info = _daily_tokens.get(user, {"tokens": 0, "date": today}) - if token_info["date"] != today: - token_info = {"tokens": 0, "date": today} - _daily_tokens[user] = token_info - if token_info["tokens"] >= CHAT_MAX_TOKENS_PER_DAY: - next_day = start_of_day + 86400 - retry_after = int(next_day - now) + 1 - return False, max(retry_after, 1), "daily token limit" - - return True, 0, "" - - -def _record_request(user): - """Record a request timestamp for the user (#711).""" - _request_log.setdefault(user, []).append(time.time()) - - -def _record_tokens(user, tokens): - """Record token usage for the user (#711).""" - today = datetime.date.today().isoformat() - token_info = _daily_tokens.get(user, {"tokens": 0, "date": today}) - if token_info["date"] != today: - token_info = {"tokens": 0, "date": today} - token_info["tokens"] += tokens - _daily_tokens[user] = token_info - def _parse_stream_json(output): - """Parse stream-json output from claude --print (#711). + """Parse stream-json output from claude --print. Returns (text_content, total_tokens). Falls back gracefully if the usage event is absent or malformed. @@ -295,6 +261,313 @@ def _parse_stream_json(output): return "".join(text_parts), total_tokens +# ============================================================================= +# WebSocket Handler Class +# ============================================================================= + +class _WebSocketHandler: + """Handle WebSocket connections for chat streaming.""" + + def __init__(self, reader, writer, user, message_queue): + self.reader = reader + self.writer = writer + self.user = user + self.message_queue = message_queue + self.closed = False + + async def accept_connection(self, sec_websocket_key, sec_websocket_protocol=None): + """Accept the WebSocket handshake. + + The HTTP request has already been parsed by BaseHTTPRequestHandler, + so we use the provided key and protocol instead of re-reading from socket. + """ + # Validate subprotocol + if sec_websocket_protocol and sec_websocket_protocol != WEBSOCKET_SUBPROTOCOL: + self._send_http_error( + 400, + "Bad Request", + f"Unsupported subprotocol. Expected: {WEBSOCKET_SUBPROTOCOL}", + ) + self._close_connection() + return False + + # Generate accept key + accept_key = self._generate_accept_key(sec_websocket_key) + + # Send handshake response + response = ( + "HTTP/1.1 101 Switching Protocols\r\n" + "Upgrade: websocket\r\n" + "Connection: Upgrade\r\n" + f"Sec-WebSocket-Accept: {accept_key}\r\n" + ) + + if sec_websocket_protocol: + response += f"Sec-WebSocket-Protocol: {sec_websocket_protocol}\r\n" + + response += "\r\n" + self.writer.write(response.encode("utf-8")) + await self.writer.drain() + return True + + def _generate_accept_key(self, sec_key): + """Generate the Sec-WebSocket-Accept key.""" + GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + combined = sec_key + GUID + sha1 = hashlib.sha1(combined.encode("utf-8")) + return base64.b64encode(sha1.digest()).decode("utf-8") + + async def _read_line(self): + """Read a line from the socket.""" + data = await self.reader.read(1) + line = "" + while data: + if data == b"\r": + data = await self.reader.read(1) + continue + if data == b"\n": + return line + line += data.decode("utf-8", errors="replace") + data = await self.reader.read(1) + return line + + def _send_http_error(self, code, title, message): + """Send an HTTP error response.""" + response = ( + f"HTTP/1.1 {code} {title}\r\n" + "Content-Type: text/plain; charset=utf-8\r\n" + "Content-Length: " + str(len(message)) + "\r\n" + "\r\n" + + message + ) + try: + self.writer.write(response.encode("utf-8")) + self.writer.drain() + except Exception: + pass + + def _close_connection(self): + """Close the connection.""" + try: + self.writer.close() + except Exception: + pass + + async def send_text(self, data): + """Send a text frame.""" + if self.closed: + return + try: + frame = self._encode_frame(OPCODE_TEXT, data.encode("utf-8")) + self.writer.write(frame) + await self.writer.drain() + except Exception as e: + print(f"WebSocket send error: {e}", file=sys.stderr) + + async def send_binary(self, data): + """Send a binary frame.""" + if self.closed: + return + try: + if isinstance(data, str): + data = data.encode("utf-8") + frame = self._encode_frame(OPCODE_BINARY, data) + self.writer.write(frame) + await self.writer.drain() + except Exception as e: + print(f"WebSocket send error: {e}", file=sys.stderr) + + def _encode_frame(self, opcode, payload): + """Encode a WebSocket frame.""" + frame = bytearray() + frame.append(0x80 | opcode) # FIN + opcode + + length = len(payload) + if length < 126: + frame.append(length) + elif length < 65536: + frame.append(126) + frame.extend(struct.pack(">H", length)) + else: + frame.append(127) + frame.extend(struct.pack(">Q", length)) + + frame.extend(payload) + return bytes(frame) + + async def _decode_frame(self): + """Decode a WebSocket frame. Returns (opcode, payload).""" + try: + # Read first two bytes (use readexactly for guaranteed length) + header = await self.reader.readexactly(2) + + fin = (header[0] >> 7) & 1 + opcode = header[0] & 0x0F + masked = (header[1] >> 7) & 1 + length = header[1] & 0x7F + + # Extended payload length + if length == 126: + ext = await self.reader.readexactly(2) + length = struct.unpack(">H", ext)[0] + elif length == 127: + ext = await self.reader.readexactly(8) + length = struct.unpack(">Q", ext)[0] + + # Masking key + if masked: + mask_key = await self.reader.readexactly(4) + + # Payload + payload = await self.reader.readexactly(length) + + # Unmask if needed + if masked: + payload = bytes(b ^ mask_key[i % 4] for i, b in enumerate(payload)) + + return opcode, payload + except Exception as e: + print(f"WebSocket decode error: {e}", file=sys.stderr) + return None, None + + async def handle_connection(self): + """Handle the WebSocket connection loop.""" + try: + while not self.closed: + opcode, payload = await self._decode_frame() + if opcode is None: + break + + if opcode == OPCODE_CLOSE: + await self._send_close() + break + elif opcode == OPCODE_PING: + await self._send_pong(payload) + elif opcode == OPCODE_PONG: + pass # Ignore pong + elif opcode in (OPCODE_TEXT, OPCODE_BINARY): + # Handle text messages from client (e.g., chat_request) + try: + msg = payload.decode("utf-8") + data = json.loads(msg) + if data.get("type") == "chat_request": + # Invoke Claude with the message + await self._handle_chat_request(data.get("message", "")) + except (json.JSONDecodeError, UnicodeDecodeError): + pass + + # Check if we should stop waiting for messages + if self.closed: + break + + except Exception as e: + print(f"WebSocket connection error: {e}", file=sys.stderr) + finally: + self._close_connection() + # Clean up the message queue on disconnect + if self.user in _websocket_queues: + del _websocket_queues[self.user] + + async def _send_close(self): + """Send a close frame.""" + try: + # Close code 1000 = normal closure + frame = self._encode_frame(OPCODE_CLOSE, struct.pack(">H", 1000)) + self.writer.write(frame) + await self.writer.drain() + except Exception: + pass + + async def _send_pong(self, payload): + """Send a pong frame.""" + try: + frame = self._encode_frame(OPCODE_PONG, payload) + self.writer.write(frame) + await self.writer.drain() + except Exception: + pass + + async def _handle_chat_request(self, message): + """Handle a chat_request WebSocket frame by invoking Claude.""" + if not message: + return + + # Validate Claude binary exists + if not os.path.exists(CLAUDE_BIN): + await self.send_text(json.dumps({ + "type": "error", + "message": "Claude CLI not found", + })) + return + + try: + # Build claude command with permission mode (acceptEdits allows file edits) + claude_args = [CLAUDE_BIN, "--print", "--output-format", "stream-json", "--permission-mode", "acceptEdits", message] + + # Spawn claude --print with stream-json for streaming output + # Set cwd to workspace directory if configured, allowing Claude to access project code + cwd = WORKSPACE_DIR if WORKSPACE_DIR else None + proc = subprocess.Popen( + claude_args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + cwd=cwd, + bufsize=1, + ) + + # Stream output line by line + for line in iter(proc.stdout.readline, ""): + line = line.strip() + if not line: + continue + try: + event = json.loads(line) + etype = event.get("type", "") + + # Extract text content from content_block_delta events + if etype == "content_block_delta": + delta = event.get("delta", {}) + if delta.get("type") == "text_delta": + text = delta.get("text", "") + if text: + # Send tokens to client + await self.send_text(text) + + # Check for usage event to know when complete + if etype == "result": + pass # Will send complete after loop + + except json.JSONDecodeError: + pass + + # Wait for process to complete + proc.wait() + + if proc.returncode != 0: + await self.send_text(json.dumps({ + "type": "error", + "message": f"Claude CLI failed with exit code {proc.returncode}", + })) + return + + # Send complete signal + await self.send_text(json.dumps({ + "type": "complete", + })) + + except FileNotFoundError: + await self.send_text(json.dumps({ + "type": "error", + "message": "Claude CLI not found", + })) + except Exception as e: + await self.send_text(json.dumps({ + "type": "error", + "message": str(e), + })) + + # ============================================================================= # Conversation History Functions (#710) # ============================================================================= @@ -544,9 +817,9 @@ class ChatHandler(BaseHTTPRequestHandler): self.serve_static(path) return - # Reserved WebSocket endpoint (future use) - if path == "/ws" or path.startswith("/ws"): - self.send_error_page(501, "WebSocket upgrade not yet implemented") + # WebSocket upgrade endpoint + if path == "/chat/ws" or path == "/ws" or path.startswith("/ws"): + self.handle_websocket_upgrade() return # 404 for unknown paths @@ -736,33 +1009,13 @@ class ChatHandler(BaseHTTPRequestHandler): except IOError as e: self.send_error_page(500, f"Error reading file: {e}") - def _send_rate_limit_response(self, retry_after, reason): - """Send a 429 response with Retry-After header and HTMX fragment (#711).""" - body = ( - f'
' - f"Rate limit exceeded: {reason}. " - f"Please try again in {retry_after} seconds." - f"
" - ) - self.send_response(429) - self.send_header("Retry-After", str(retry_after)) - self.send_header("Content-Type", "text/html; charset=utf-8") - self.send_header("Content-Length", str(len(body.encode("utf-8")))) - self.end_headers() - self.wfile.write(body.encode("utf-8")) - + def handle_chat(self, user): """ Handle chat requests by spawning `claude --print` with the user message. - Enforces per-user rate limits and tracks token usage (#711). + Streams tokens over WebSocket if connected. """ - # Check rate limits before processing (#711) - allowed, retry_after, reason = _check_rate_limit(user) - if not allowed: - self._send_rate_limit_response(retry_after, reason) - return - # Read request body content_length = int(self.headers.get("Content-Length", 0)) if content_length == 0: @@ -799,23 +1052,63 @@ class ChatHandler(BaseHTTPRequestHandler): if not conv_id or not _validate_conversation_id(conv_id): conv_id = _generate_conversation_id() - # Record request for rate limiting (#711) - _record_request(user) - try: # Save user message to history _write_message(user, conv_id, "user", message) + # Build claude command with permission mode (acceptEdits allows file edits) + claude_args = [CLAUDE_BIN, "--print", "--output-format", "stream-json", "--permission-mode", "acceptEdits", message] + # Spawn claude --print with stream-json for token tracking (#711) + # Set cwd to workspace directory if configured, allowing Claude to access project code + cwd = WORKSPACE_DIR if WORKSPACE_DIR else None proc = subprocess.Popen( - [CLAUDE_BIN, "--print", "--output-format", "stream-json", message], + claude_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, + cwd=cwd, + bufsize=1, # Line buffered ) - raw_output = proc.stdout.read() + # Stream output line by line + response_parts = [] + total_tokens = 0 + for line in iter(proc.stdout.readline, ""): + line = line.strip() + if not line: + continue + try: + event = json.loads(line) + etype = event.get("type", "") + # Extract text content from content_block_delta events + if etype == "content_block_delta": + delta = event.get("delta", {}) + if delta.get("type") == "text_delta": + text = delta.get("text", "") + if text: + response_parts.append(text) + # Stream to WebSocket if connected + if user in _websocket_queues: + try: + _websocket_queues[user].put_nowait(text) + except Exception: + pass # Client disconnected + + # Parse usage from result event + if etype == "result": + usage = event.get("usage", {}) + total_tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0) + elif "usage" in event: + usage = event["usage"] + if isinstance(usage, dict): + total_tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0) + + except json.JSONDecodeError: + pass + + # Wait for process to complete error_output = proc.stderr.read() if error_output: print(f"Claude stderr: {error_output}", file=sys.stderr) @@ -826,20 +1119,12 @@ class ChatHandler(BaseHTTPRequestHandler): self.send_error_page(500, f"Claude CLI failed with exit code {proc.returncode}") return - # Parse stream-json for text and token usage (#711) - response, total_tokens = _parse_stream_json(raw_output) - - # Track token usage - does not block *this* request (#711) - if total_tokens > 0: - _record_tokens(user, total_tokens) - print( - f"Token usage: user={user} tokens={total_tokens}", - file=sys.stderr, - ) + # Combine response parts + response = "".join(response_parts) # Fall back to raw output if stream-json parsing yielded no text if not response: - response = raw_output + response = proc.stdout.getvalue() if hasattr(proc.stdout, 'getvalue') else "" # Save assistant response to history _write_message(user, conv_id, "assistant", response) @@ -909,6 +1194,106 @@ class ChatHandler(BaseHTTPRequestHandler): self.end_headers() self.wfile.write(json.dumps({"conversation_id": conv_id}, ensure_ascii=False).encode("utf-8")) + @staticmethod + def push_to_websocket(user, message): + """Push a message to a WebSocket connection for a user. + + This is called from the chat handler to stream tokens to connected clients. + The message is added to the user's WebSocket message queue. + """ + # Get the message queue from the WebSocket handler's queue + # We store the queue in a global dict keyed by user + if user in _websocket_queues: + _websocket_queues[user].put_nowait(message) + + def handle_websocket_upgrade(self): + """Handle WebSocket upgrade request for chat streaming.""" + # Check session cookie + user = _validate_session(self.headers.get("Cookie")) + if not user: + self.send_error_page(401, "Unauthorized: no valid session") + return + + # Create message queue for this user + _websocket_queues[user] = asyncio.Queue() + + # Get WebSocket upgrade headers from the HTTP request + sec_websocket_key = self.headers.get("Sec-WebSocket-Key", "") + sec_websocket_protocol = self.headers.get("Sec-WebSocket-Protocol", "") + + # Validate Sec-WebSocket-Key + if not sec_websocket_key: + self.send_error_page(400, "Bad Request", "Missing Sec-WebSocket-Key") + return + + # Get the socket from the connection + sock = self.connection + sock.setblocking(False) + + # Create async server to handle the connection + async def handle_ws(): + try: + # Wrap the socket in asyncio streams using open_connection + reader, writer = await asyncio.open_connection(sock=sock) + + # Create WebSocket handler + ws_handler = _WebSocketHandler(reader, writer, user, _websocket_queues[user]) + + # Accept the connection (pass headers from HTTP request) + if not await ws_handler.accept_connection(sec_websocket_key, sec_websocket_protocol): + return + + # Start a task to read from the queue and send to client + async def send_stream(): + while not ws_handler.closed: + try: + data = await asyncio.wait_for(ws_handler.message_queue.get(), timeout=1.0) + await ws_handler.send_text(data) + except asyncio.TimeoutError: + # Send ping to keep connection alive + try: + frame = ws_handler._encode_frame(OPCODE_PING, b"") + writer.write(frame) + await writer.drain() + except Exception: + break + except Exception as e: + print(f"Send stream error: {e}", file=sys.stderr) + break + + # Start sending task + send_task = asyncio.create_task(send_stream()) + + # Handle incoming WebSocket frames + await ws_handler.handle_connection() + + # Cancel send task + send_task.cancel() + try: + await send_task + except asyncio.CancelledError: + pass + + except Exception as e: + print(f"WebSocket handler error: {e}", file=sys.stderr) + finally: + try: + writer.close() + await writer.wait_closed() + except Exception: + pass + + # Run the async handler in a thread + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete(handle_ws()) + except Exception as e: + print(f"WebSocket error: {e}", file=sys.stderr) + finally: + loop.close() + sock.close() + def do_DELETE(self): """Handle DELETE requests.""" parsed = urlparse(self.path) @@ -944,12 +1329,6 @@ def main(): print("forward_auth secret configured (#709)", file=sys.stderr) else: print("WARNING: FORWARD_AUTH_SECRET not set - verify endpoint unrestricted", file=sys.stderr) - print( - f"Rate limits (#711): {CHAT_MAX_REQUESTS_PER_HOUR}/hr, " - f"{CHAT_MAX_REQUESTS_PER_DAY}/day, " - f"{CHAT_MAX_TOKENS_PER_DAY} tokens/day", - file=sys.stderr, - ) httpd.serve_forever() diff --git a/docker/chat/ui/index.html b/docker/chat/ui/index.html index bd920f9..b045873 100644 --- a/docker/chat/ui/index.html +++ b/docker/chat/ui/index.html @@ -430,6 +430,10 @@ return div.innerHTML.replace(/\n/g, '
'); } + // WebSocket connection for streaming + let ws = null; + let wsMessageId = null; + // Send message handler async function sendMessage() { const message = textarea.value.trim(); @@ -449,6 +453,14 @@ await createNewConversation(); } + // Try WebSocket streaming first, fall back to fetch + if (window.location.protocol === 'https:' || window.location.hostname === 'localhost') { + if (tryWebSocketSend(message)) { + return; + } + } + + // Fallback to fetch try { // Use fetch with URLSearchParams for application/x-www-form-urlencoded const params = new URLSearchParams(); @@ -485,6 +497,111 @@ } } + // Try to send message via WebSocket streaming + function tryWebSocketSend(message) { + try { + // Generate a unique message ID for this request + wsMessageId = Date.now().toString(36) + Math.random().toString(36).substr(2); + + // Connect to WebSocket + const wsUrl = window.location.protocol === 'https:' + ? `wss://${window.location.host}/chat/ws` + : `ws://${window.location.host}/chat/ws`; + + ws = new WebSocket(wsUrl); + + ws.onopen = function() { + // Send the message as JSON with message ID + const data = { + type: 'chat_request', + message_id: wsMessageId, + message: message, + conversation_id: currentConversationId + }; + ws.send(JSON.stringify(data)); + }; + + ws.onmessage = function(event) { + try { + const data = JSON.parse(event.data); + + if (data.type === 'token') { + // Stream a token to the UI + addTokenToLastMessage(data.token); + } else if (data.type === 'complete') { + // Streaming complete + closeWebSocket(); + textarea.disabled = false; + sendBtn.disabled = false; + sendBtn.textContent = 'Send'; + textarea.focus(); + messagesDiv.scrollTop = messagesDiv.scrollHeight; + loadConversations(); + } else if (data.type === 'error') { + addSystemMessage(`Error: ${data.message}`); + closeWebSocket(); + textarea.disabled = false; + sendBtn.disabled = false; + sendBtn.textContent = 'Send'; + textarea.focus(); + } + } catch (e) { + console.error('Failed to parse WebSocket message:', e); + } + }; + + ws.onerror = function(error) { + console.error('WebSocket error:', error); + addSystemMessage('WebSocket connection error. Falling back to regular chat.'); + closeWebSocket(); + sendMessage(); // Retry with fetch + }; + + ws.onclose = function() { + wsMessageId = null; + }; + + return true; // WebSocket attempt started + + } catch (error) { + console.error('Failed to create WebSocket:', error); + return false; // Fall back to fetch + } + } + + // Add a token to the last assistant message (for streaming) + function addTokenToLastMessage(token) { + const messages = messagesDiv.querySelectorAll('.message.assistant'); + if (messages.length === 0) { + // No assistant message yet, create one + const msgDiv = document.createElement('div'); + msgDiv.className = 'message assistant'; + msgDiv.innerHTML = ` +
assistant
+
+ `; + messagesDiv.appendChild(msgDiv); + } + + const lastMsg = messagesDiv.querySelector('.message.assistant .content.streaming'); + if (lastMsg) { + lastMsg.textContent += token; + messagesDiv.scrollTop = messagesDiv.scrollHeight; + } + } + + // Close WebSocket connection + function closeWebSocket() { + if (ws) { + ws.onopen = null; + ws.onmessage = null; + ws.onerror = null; + ws.onclose = null; + ws.close(); + ws = null; + } + } + // Event listeners sendBtn.addEventListener('click', sendMessage); diff --git a/docker/edge/Dockerfile b/docker/edge/Dockerfile index eca7d7e..507c39b 100644 --- a/docker/edge/Dockerfile +++ b/docker/edge/Dockerfile @@ -1,6 +1,12 @@ FROM caddy:latest -RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh -COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh +RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh \ + nodejs npm +# Claude Code CLI — chat backend runtime (merged from docker/chat, #1083) +RUN npm install -g @anthropic-ai/claude-code@2.1.84 +COPY docker/edge/entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh +# Chat server and UI (merged from docker/chat into edge, #1083) +COPY docker/chat/server.py /usr/local/bin/chat-server.py +COPY docker/chat/ui/ /var/chat/ui/ VOLUME /data diff --git a/docker/edge/dispatcher.sh b/docker/edge/dispatcher.sh index a48abf2..282342a 100755 --- a/docker/edge/dispatcher.sh +++ b/docker/edge/dispatcher.sh @@ -560,10 +560,168 @@ _launch_runner_docker() { # _launch_runner_nomad ACTION_ID SECRETS_CSV MOUNTS_CSV # -# Nomad backend stub — will be implemented in migration Step 5. +# Dispatches a vault-runner batch job via `nomad job dispatch`. +# Polls `nomad job status` until terminal state (completed/failed). +# Reads exit code from allocation and writes .result.json. +# +# Usage: _launch_runner_nomad +# Returns: exit code of the nomad job (0=success, non-zero=failure) _launch_runner_nomad() { - echo "nomad backend not yet implemented" >&2 - return 1 + local action_id="$1" + local secrets_csv="$2" + local mounts_csv="$3" + + log "Dispatching vault-runner batch job via Nomad for action: ${action_id}" + + # Dispatch the parameterized batch job + # The vault-runner job expects meta: action_id, secrets_csv + # Note: mounts_csv is not passed as meta (not declared in vault-runner.hcl) + local dispatch_output + dispatch_output=$(nomad job dispatch \ + -detach \ + -meta action_id="$action_id" \ + -meta secrets_csv="$secrets_csv" \ + vault-runner 2>&1) || { + log "ERROR: Failed to dispatch vault-runner job for ${action_id}" + log "Dispatch output: ${dispatch_output}" + write_result "$action_id" 1 "Nomad dispatch failed: ${dispatch_output}" + return 1 + } + + # Extract dispatched job ID from output (format: "vault-runner/dispatch--") + local dispatched_job_id + dispatched_job_id=$(echo "$dispatch_output" | grep -oP '(?<=Dispatched Job ID = ).+' || true) + + if [ -z "$dispatched_job_id" ]; then + log "ERROR: Could not extract dispatched job ID from nomad output" + log "Dispatch output: ${dispatch_output}" + write_result "$action_id" 1 "Could not extract dispatched job ID from nomad output" + return 1 + fi + + log "Dispatched vault-runner with job ID: ${dispatched_job_id}" + + # Poll job status until terminal state + # Batch jobs transition: running -> completed/failed + local max_wait=300 # 5 minutes max wait + local elapsed=0 + local poll_interval=5 + local alloc_id="" + + log "Polling nomad job status for ${dispatched_job_id}..." + + while [ "$elapsed" -lt "$max_wait" ]; do + # Get job status with JSON output for the dispatched child job + local job_status_json + job_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || { + log "ERROR: Failed to get job status for ${dispatched_job_id}" + write_result "$action_id" 1 "Failed to get job status for ${dispatched_job_id}" + return 1 + } + + # Check job status field (transitions to "dead" on completion) + local job_state + job_state=$(echo "$job_status_json" | jq -r '.Status // empty' 2>/dev/null) || job_state="" + + # Check allocation state directly + alloc_id=$(echo "$job_status_json" | jq -r '.Allocations[0]?.ID // empty' 2>/dev/null) || alloc_id="" + + if [ -n "$alloc_id" ]; then + local alloc_state + alloc_state=$(nomad alloc status -short "$alloc_id" 2>/dev/null || true) + + case "$alloc_state" in + *completed*|*success*|*dead*) + log "Allocation ${alloc_id} reached terminal state: ${alloc_state}" + break + ;; + *running*|*pending*|*starting*) + log "Allocation ${alloc_id} still running (state: ${alloc_state})..." + ;; + *failed*|*crashed*) + log "Allocation ${alloc_id} failed (state: ${alloc_state})" + break + ;; + esac + fi + + # Also check job-level state + case "$job_state" in + dead) + log "Job ${dispatched_job_id} reached terminal state: ${job_state}" + break + ;; + failed) + log "Job ${dispatched_job_id} failed" + break + ;; + esac + + sleep "$poll_interval" + elapsed=$((elapsed + poll_interval)) + done + + if [ "$elapsed" -ge "$max_wait" ]; then + log "ERROR: Timeout waiting for vault-runner job to complete" + write_result "$action_id" 1 "Timeout waiting for nomad job to complete" + return 1 + fi + + # Get final job status and exit code + local final_status_json + final_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || { + log "ERROR: Failed to get final job status" + write_result "$action_id" 1 "Failed to get final job status" + return 1 + } + + # Get allocation exit code + local exit_code=0 + local logs="" + + if [ -n "$alloc_id" ]; then + # Get allocation logs + logs=$(nomad alloc logs -short "$alloc_id" 2>/dev/null || true) + + # Try to get exit code from alloc status JSON + # Nomad alloc status -json has .TaskStates[""].Events[].ExitCode + local alloc_exit_code + alloc_exit_code=$(nomad alloc status -json "$alloc_id" 2>/dev/null | jq -r '.TaskStates["runner"].Events[-1].ExitCode // empty' 2>/dev/null) || alloc_exit_code="" + + if [ -n "$alloc_exit_code" ] && [ "$alloc_exit_code" != "null" ]; then + exit_code="$alloc_exit_code" + fi + fi + + # If we couldn't get exit code from alloc, check job state as fallback + # Note: "dead" = terminal state for batch jobs (includes successful completion) + # Only "failed" indicates actual failure + if [ "$exit_code" -eq 0 ]; then + local final_state + final_state=$(echo "$final_status_json" | jq -r '.Status // empty' 2>/dev/null) || final_state="" + + case "$final_state" in + failed) + exit_code=1 + ;; + esac + fi + + # Truncate logs if too long + if [ ${#logs} -gt 1000 ]; then + logs="${logs: -1000}" + fi + + # Write result file + write_result "$action_id" "$exit_code" "$logs" + + if [ "$exit_code" -eq 0 ]; then + log "Vault-runner job completed successfully for action: ${action_id}" + else + log "Vault-runner job failed for action: ${action_id} (exit code: ${exit_code})" + fi + + return "$exit_code" } # Launch runner for the given action (backend-agnostic orchestrator) @@ -1051,11 +1209,8 @@ main() { # Validate backend selection at startup case "$DISPATCHER_BACKEND" in - docker) ;; - nomad) - log "ERROR: nomad backend not yet implemented" - echo "nomad backend not yet implemented" >&2 - exit 1 + docker|nomad) + log "Using ${DISPATCHER_BACKEND} backend for vault-runner dispatch" ;; *) log "ERROR: unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND}" diff --git a/docker/edge/entrypoint-edge.sh b/docker/edge/entrypoint-edge.sh index 1b5f94f..a1511ff 100755 --- a/docker/edge/entrypoint-edge.sh +++ b/docker/edge/entrypoint-edge.sh @@ -173,11 +173,15 @@ PROJECT_TOML="${PROJECT_TOML:-projects/disinto.toml}" sleep 1200 # 20 minutes done) & -# ── Load required secrets from secrets/*.enc (#777) ──────────────────── -# Edge container declares its required secrets; missing ones cause a hard fail. +# ── Load optional secrets from secrets/*.enc (#777) ──────────────────── +# Engagement collection (collect-engagement.sh) requires CADDY_ secrets to +# SCP access logs from a remote edge host. When age key or secrets dir is +# missing, or any secret fails to decrypt, log a warning and skip the cron. +# Caddy itself does not depend on these secrets. _AGE_KEY_FILE="${HOME}/.config/sops/age/keys.txt" _SECRETS_DIR="/opt/disinto/secrets" EDGE_REQUIRED_SECRETS="CADDY_SSH_KEY CADDY_SSH_HOST CADDY_SSH_USER CADDY_ACCESS_LOG" +EDGE_ENGAGEMENT_READY=0 # Assume not ready until proven otherwise _edge_decrypt_secret() { local enc_path="${_SECRETS_DIR}/${1}.enc" @@ -192,47 +196,63 @@ if [ -f "$_AGE_KEY_FILE" ] && [ -d "$_SECRETS_DIR" ]; then export "$_secret_name=$_val" done if [ -n "$_missing" ]; then - echo "FATAL: required secrets missing from secrets/*.enc:${_missing}" >&2 - echo " Run 'disinto secrets add ' for each missing secret." >&2 - echo " If migrating from .env.vault.enc, run 'disinto secrets migrate-from-vault' first." >&2 - exit 1 + echo "WARN: required engagement secrets missing from secrets/*.enc:${_missing}" >&2 + echo " collect-engagement cron will be skipped. Run 'disinto secrets add ' to enable." >&2 + EDGE_ENGAGEMENT_READY=0 + else + echo "edge: loaded required engagement secrets: ${EDGE_REQUIRED_SECRETS}" >&2 + EDGE_ENGAGEMENT_READY=1 fi - echo "edge: loaded required secrets: ${EDGE_REQUIRED_SECRETS}" >&2 else - echo "FATAL: age key (${_AGE_KEY_FILE}) or secrets dir (${_SECRETS_DIR}) not found — cannot load required secrets" >&2 - echo " Ensure age is installed and secrets/*.enc files are present." >&2 - exit 1 + echo "WARN: age key (${_AGE_KEY_FILE}) or secrets dir (${_SECRETS_DIR}) not found — engagement secrets unavailable" >&2 + echo " collect-engagement cron will be skipped. Run 'disinto secrets add ' to enable." >&2 + EDGE_ENGAGEMENT_READY=0 fi # Start daily engagement collection cron loop in background (#745) # Runs collect-engagement.sh daily at ~23:50 UTC via a sleep loop that # calculates seconds until the next 23:50 window. SSH key from secrets/*.enc (#777). -(while true; do - # Calculate seconds until next 23:50 UTC - _now=$(date -u +%s) - _target=$(date -u -d "today 23:50" +%s 2>/dev/null || date -u -d "23:50" +%s 2>/dev/null || echo 0) - if [ "$_target" -le "$_now" ]; then - _target=$(( _target + 86400 )) - fi - _sleep_secs=$(( _target - _now )) - echo "edge: collect-engagement scheduled in ${_sleep_secs}s (next 23:50 UTC)" >&2 - sleep "$_sleep_secs" - _fetch_log="/tmp/caddy-access-log-fetch.log" - _ssh_key_file=$(mktemp) - printf '%s\n' "$CADDY_SSH_KEY" > "$_ssh_key_file" - chmod 0600 "$_ssh_key_file" - scp -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new -o ConnectTimeout=10 -o BatchMode=yes \ - "${CADDY_SSH_USER}@${CADDY_SSH_HOST}:${CADDY_ACCESS_LOG}" \ - "$_fetch_log" 2>&1 | tee -a /opt/disinto-logs/collect-engagement.log || true - rm -f "$_ssh_key_file" - if [ -s "$_fetch_log" ]; then - CADDY_ACCESS_LOG="$_fetch_log" bash /opt/disinto/site/collect-engagement.sh 2>&1 \ - | tee -a /opt/disinto-logs/collect-engagement.log || true - else - echo "edge: collect-engagement: fetched log is empty, skipping parse" >&2 - fi - rm -f "$_fetch_log" -done) & +# Guarded: only start if EDGE_ENGAGEMENT_READY=1. +if [ "$EDGE_ENGAGEMENT_READY" -eq 1 ]; then + (while true; do + # Calculate seconds until next 23:50 UTC + _now=$(date -u +%s) + _target=$(date -u -d "today 23:50" +%s 2>/dev/null || date -u -d "23:50" +%s 2>/dev/null || echo 0) + if [ "$_target" -le "$_now" ]; then + _target=$(( _target + 86400 )) + fi + _sleep_secs=$(( _target - _now )) + echo "edge: collect-engagement scheduled in ${_sleep_secs}s (next 23:50 UTC)" >&2 + sleep "$_sleep_secs" + _fetch_log="/tmp/caddy-access-log-fetch.log" + _ssh_key_file=$(mktemp) + printf '%s\n' "$CADDY_SSH_KEY" > "$_ssh_key_file" + chmod 0600 "$_ssh_key_file" + scp -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new -o ConnectTimeout=10 -o BatchMode=yes \ + "${CADDY_SSH_USER}@${CADDY_SSH_HOST}:${CADDY_ACCESS_LOG}" \ + "$_fetch_log" 2>&1 | tee -a /opt/disinto-logs/collect-engagement.log || true + rm -f "$_ssh_key_file" + if [ -s "$_fetch_log" ]; then + CADDY_ACCESS_LOG="$_fetch_log" bash /opt/disinto/site/collect-engagement.sh 2>&1 \ + | tee -a /opt/disinto-logs/collect-engagement.log || true + else + echo "edge: collect-engagement: fetched log is empty, skipping parse" >&2 + fi + rm -f "$_fetch_log" + done) & +else + echo "edge: collect-engagement cron skipped (EDGE_ENGAGEMENT_READY=0)" >&2 +fi + +# Start chat server in background (#1083 — merged from docker/chat into edge) +(python3 /usr/local/bin/chat-server.py 2>&1 | tee -a /opt/disinto-logs/chat.log) & + +# Nomad template renders Caddyfile to /local/Caddyfile via service discovery; +# copy it into the expected location if present (compose uses the mounted path). +if [ -f /local/Caddyfile ]; then + cp /local/Caddyfile /etc/caddy/Caddyfile + echo "edge: using Nomad-rendered Caddyfile from /local/Caddyfile" >&2 +fi # Caddy as main process — run in foreground via wait so background jobs survive # (exec replaces the shell, which can orphan backgrounded subshells) diff --git a/docs/nomad-cutover-runbook.md b/docs/nomad-cutover-runbook.md new file mode 100644 index 0000000..e0956cc --- /dev/null +++ b/docs/nomad-cutover-runbook.md @@ -0,0 +1,183 @@ +# Nomad Cutover Runbook + +End-to-end procedure to cut over the disinto factory from docker-compose on +disinto-dev-box to Nomad on disinto-nomad-box. + +**Target**: disinto-nomad-box (10.10.10.216) becomes production; disinto-dev-box +stays warm for rollback. + +**Downtime budget**: <5 min blue-green flip. + +**Data scope**: Forgejo issues + disinto-ops git bundle only. Everything else is +regenerated or discarded. OAuth secrets are regenerated on fresh init (all +sessions invalidated). + +--- + +## 1. Pre-cutover readiness checklist + +- [ ] Nomad + Vault stack healthy on a fresh wipe+init (step 5 verified) +- [ ] Codeberg mirror current — `git log` parity between dev-box Forgejo and + Codeberg +- [ ] SSH key pair generated for nomad-box, registered on DO edge (see §4.6) +- [ ] Companion tools landed: + - `disinto backup create` (#1057) + - `disinto backup import` (#1058) +- [ ] Backup tarball produced and tested against a scratch LXC (see §3) + +--- + +## 2. Pre-cutover artifact: backup + +On disinto-dev-box: + +```bash +./bin/disinto backup create /tmp/disinto-backup-$(date +%Y%m%d).tar.gz +``` + +Copy the tarball to nomad-box (and optionally to a local workstation for +safekeeping): + +```bash +scp /tmp/disinto-backup-*.tar.gz nomad-box:/tmp/ +``` + +--- + +## 3. Pre-cutover dry-run + +On a throwaway LXC: + +```bash +lxc launch ubuntu:24.04 cutover-dryrun +# inside the container: +disinto init --backend=nomad --import-env .env --with edge +./bin/disinto backup import /tmp/disinto-backup-*.tar.gz +``` + +Verify: + +- Issue count matches source Forgejo +- disinto-ops repo refs match source bundle + +Destroy the LXC once satisfied: + +```bash +lxc delete cutover-dryrun --force +``` + +--- + +## 4. Cutover T-0 (operator executes; <5 min target) + +### 4.1 Stop dev-box services + +```bash +# On disinto-dev-box — stop, do NOT remove volumes (rollback needs them) +docker-compose stop +``` + +### 4.2 Provision nomad-box (if not already done) + +```bash +# On disinto-nomad-box +disinto init --backend=nomad --import-env .env --with edge +``` + +### 4.3 Import backup + +```bash +# On disinto-nomad-box +./bin/disinto backup import /tmp/disinto-backup-*.tar.gz +``` + +### 4.4 Configure Codeberg pull mirror + +Manual, one-time step in the new Forgejo UI: + +1. Create a mirror repository pointing at the Codeberg upstream +2. Confirm initial sync completes + +### 4.5 Claude login + +```bash +# On disinto-nomad-box +claude login +``` + +Set up Anthropic OAuth so agents can authenticate. + +### 4.6 Autossh tunnel swap + +> **Operator step** — cross-host, no dev-agent involvement. Do NOT automate. + +1. Stop the tunnel on dev-box: + ```bash + # On disinto-dev-box + systemctl stop reverse-tunnel + ``` + +2. Copy or regenerate the tunnel unit on nomad-box: + ```bash + # Copy from dev-box, or let init regenerate it + scp dev-box:/etc/systemd/system/reverse-tunnel.service \ + nomad-box:/etc/systemd/system/ + ``` + +3. Register nomad-box's public key on DO edge: + ```bash + # On DO edge box — same restricted-command as the dev-box key + echo "" >> /home/johba/.ssh/authorized_keys + ``` + +4. Start the tunnel on nomad-box: + ```bash + # On disinto-nomad-box + systemctl enable --now reverse-tunnel + ``` + +5. Verify end-to-end: + ```bash + curl https://self.disinto.ai/api/v1/version + # Should return the new box's Forgejo version + ``` + +--- + +## 5. Post-cutover smoke + +- [ ] `curl https://self.disinto.ai` → Forgejo welcome page +- [ ] Create a test PR → Woodpecker pipeline runs → agents assign and work +- [ ] Claude chat login via Forgejo OAuth succeeds + +--- + +## 6. Rollback (if any step 4 gate fails) + +1. Stop the tunnel on nomad-box: + ```bash + systemctl stop reverse-tunnel # on nomad-box + ``` + +2. Restore the tunnel on dev-box: + ```bash + systemctl start reverse-tunnel # on dev-box + ``` + +3. Bring dev-box services back up: + ```bash + docker-compose up -d # on dev-box + ``` + +4. DO Caddy config is unchanged — traffic restores in <5 min. + +5. File a post-mortem issue. Keep nomad-box state intact for debugging. + +--- + +## 7. Post-stable cleanup (T+1 week) + +- `docker-compose down -v` on dev-box +- Archive `/var/lib/docker/volumes/disinto_*` to cold storage +- Delete disinto-dev-box LXC or keep as permanent rollback reserve (operator + decision) diff --git a/gardener/AGENTS.md b/gardener/AGENTS.md index e9ad846..33a09d5 100644 --- a/gardener/AGENTS.md +++ b/gardener/AGENTS.md @@ -1,4 +1,4 @@ - + # Gardener Agent **Role**: Backlog grooming — detect duplicate issues, missing acceptance diff --git a/gardener/dust.jsonl b/gardener/dust.jsonl index 14b0d5c..e69de29 100644 --- a/gardener/dust.jsonl +++ b/gardener/dust.jsonl @@ -1 +0,0 @@ -{"issue":915,"group":"lib/generators.sh","title":"remove no-op sed in generate_compose --build mode","reason":"sed replaces agents: with itself — no behavior change; single-line removal","ts":"2026-04-17T01:04:05Z"} diff --git a/gardener/pending-actions.json b/gardener/pending-actions.json index 1c89c7d..fe51488 100644 --- a/gardener/pending-actions.json +++ b/gardener/pending-actions.json @@ -1,37 +1 @@ -[ - { - "action": "edit_body", - "issue": 910, - "body": "Flagged by AI reviewer in PR #909.\n\n## Problem\n\n`tools/vault-import.sh` still uses hardcoded `secret/data/${path}` for its curl-based KV write (lines 149, 151, 162, 166, 170). The rest of the codebase was migrated to the configurable `VAULT_KV_MOUNT` variable (defaulting to `kv`) via PR #909. Any deployment with `kv/` as its KV mount will see 403/404 failures when `vault-import.sh` runs.\n\n## Fix\n\nEither:\n1. Refactor the write in `vault-import.sh` to call `hvault_kv_put` (which now respects `VAULT_KV_MOUNT`), or\n2. Replace the hardcoded `secret/data` reference with `${VAULT_KV_MOUNT:-kv}/data` matching the convention in `lib/hvault.sh`.\n\n---\n*Auto-created from AI review*\n\n## Affected files\n\n- `tools/vault-import.sh` (lines 149, 151, 162, 166, 170 — hardcoded `secret/data` references)\n- `lib/hvault.sh` (reference implementation using `VAULT_KV_MOUNT`)\n\n## Acceptance criteria\n\n- [ ] `tools/vault-import.sh` uses `${VAULT_KV_MOUNT:-kv}/data` (or calls `hvault_kv_put`) instead of hardcoded `secret/data`\n- [ ] No hardcoded `secret/data` path references remain in `tools/vault-import.sh`\n- [ ] Vault KV writes succeed when `VAULT_KV_MOUNT=kv` is set (matching the standard deployment config)\n- [ ] `shellcheck` clean\n" - }, - { - "action": "add_label", - "issue": 910, - "label": "backlog" - }, - { - "action": "edit_body", - "issue": 914, - "body": "Flagged by AI reviewer in PR #911.\n\n## Problem\n\n`lib/generators.sh` fixes the `agents` service missing `pull_policy: build` in `--build` mode (PR #893), but the `edge` service has the same root cause: the sed replacement at line 664 produces `build: ./docker/edge` with no `pull_policy: build`. Without it, `docker compose up -d --force-recreate` reuses the cached edge image and silently keeps running stale code even after source changes.\n\n## Fix\n\nAdd `\\n pull_policy: build` to the edge sed replacement, matching the pattern applied to agents in PR #893.\n\n---\n*Auto-created from AI review*\n\n## Affected files\n\n- `lib/generators.sh` (line 664 — edge service sed replacement missing `pull_policy: build`)\n\n## Acceptance criteria\n\n- [ ] `lib/generators.sh` edge service block emits `pull_policy: build` when `--build` mode is active (matching the pattern from PR #893 for the agents service)\n- [ ] `docker compose up -d --force-recreate` after source changes rebuilds the edge image rather than using the cached layer\n- [ ] Generated `docker-compose.yml` edge service stanza contains `pull_policy: build`\n- [ ] `shellcheck` clean\n" - }, - { - "action": "add_label", - "issue": 914, - "label": "backlog" - }, - { - "action": "edit_body", - "issue": 867, - "body": "## Incident\n\n**2026-04-16 ~10:55–11:52 UTC.** Woodpecker CI agent (`disinto-woodpecker-agent`) entered a repeated gRPC-error crashloop (Codeberg #813 class — gRPC-in-nested-docker). Every workflow it accepted exited 1 within seconds, never actually running pipeline steps.\n\n**Blast radius:** dev-qwen took issue #842 at 10:55, opened PR #859, and burned its full 3-attempt `pr-lifecycle` CI-fix budget between 10:55 and 11:08 reacting to these infra-flake \"CI failures.\" Each failure arrived in ~30–60 seconds, too fast to be a real test run. After exhausting the budget, dev-qwen marked #842 as `blocked: ci_exhausted` and moved on. No real bug was being detected; the real failure surfaced later only after an operator restarted the WP agent and manually retriggered pipeline #966 — which then returned a legitimate `bats-init-nomad` failure in test #6 (different issue).\n\n**Root cause of the infra-flake:** gRPC-in-nested-docker bug, Woodpecker server ↔ agent comms inside nested containers. Known-flaky; restart of `disinto-woodpecker-agent` clears it.\n\n**Recovery:** operator `docker restart disinto-woodpecker-agent` + retrigger pipelines via WP API POST `/api/repos/2/pipelines/`. Fresh run reached real stage signal.\n\n## Why this burned dev-qwen's budget\n\n`pr-lifecycle`'s CI-fix budget treats every failed commit-status as a signal to invoke the agent. It has no notion of \"infra flake\" vs. \"real test failure\" and no heuristic to distinguish them. Four infra-flake failures in 13 minutes looked identical to four real code-bug failures.\n\n## Suggestions — what supervisor can check every 20min\n\nSupervisor runs every `1200s` already. Add these probes:\n\n**1. WP agent container health.**\n```\ndocker inspect disinto-woodpecker-agent --format '{{.State.Health.Status}}'\n```\nIf `unhealthy` for the second consecutive supervisor tick → **restart it automatically + post a comment on any currently-running dev-bot/dev-qwen issues warning \"CI agent was restarted; subsequent failures before this marker may be infra-flake.\"**\n\n**2. Fast-failure heuristic on WP pipelines.**\nQuery WP API `GET /api/repos/2/pipelines?page=1`. For each pipeline in state `failure`, compute `finished - started`. If duration < 60s, flag as probable infra-flake. Three flagged flakes within a 15-min window → trigger agent restart as in (1) and a bulk-retrigger via POST `/api/repos/2/pipelines/` for each.\n\n**3. grpc error pattern in agent log.**\n`docker logs --since 20m disinto-woodpecker-agent 2>&1 | grep -c 'grpc error'` — if ≥3 matches, agent is probably wedged. Trigger restart as in (1).\n\n**4. Issue-level guard.**\nWhen supervisor detects an agent restart, scan for issues updated in the preceding 30min with label `blocked: ci_exhausted` and for each one:\n- unassign + remove `blocked` label (return to pool)\n- comment on the issue: *\"CI agent was unhealthy between HH:MM and HH:MM — prior 3/3 retry budget may have been spent on infra flake, not real failures. Re-queueing for a fresh attempt.\"*\n- retrigger the PR's latest WP pipeline\n\nThis last step is the key correction: **`ci_exhausted` preceded by WP-agent-unhealth = false positive; return to pool with context.**\n\n## Why this matters for the migration\n\nBetween now and cutover every WP CI flake that silently exhausts an agent's budget steals hours of clock time. Without an automatic recovery path, the pace of the step-N backlogs falls off a cliff the moment the agent next goes unhealthy — and it *will* go unhealthy again (Codeberg #813 is not fixed upstream yet).\n\n## Fix for this specific incident (already applied manually)\n\n- Restarted `disinto-woodpecker-agent`.\n- Closed PR #859 (kept branch `fix/issue-842` at `64080232`).\n- Unassigned dev-qwen from #842, removed `blocked` label, appended prior-art section + pipeline #966 test-#6 failure details to issue body so the next claimant starts with full context.\n\n## Non-goals\n\n- Not trying to fix Codeberg #813 itself (upstream gRPC-in-nested-docker issue).\n- Not trying to fix `pr-lifecycle`'s budget logic — the supervisor-side detection is cheaper and more robust than per-issue budget changes.\n\n## Labels / meta\n\n- `bug-report` + supervisor-focused. Classify severity as blocker for the migration cadence (not for factory day-to-day — it only bites when an unfixable-by-dev issue hits the budget).\n\n## Affected files\n\n- `supervisor/supervisor-run.sh` — add WP agent health probes and flake-detection logic\n- `supervisor/preflight.sh` — may need additional data collection for WP agent health status\n\n## Acceptance criteria\n\n- [ ] Supervisor detects an unhealthy `disinto-woodpecker-agent` container (via `docker inspect` health status or gRPC error log count ≥ 3) and automatically restarts it\n- [ ] After an auto-restart, supervisor scans for issues updated in the prior 30 min labeled `blocked: ci_exhausted` and returns them to the pool (unassign, remove `blocked`, add comment noting infra-flake window)\n- [ ] Fast-failure heuristic: pipelines completing in <60s are flagged as probable infra-flake; 3+ in a 15-min window triggers the restart+retrigger flow\n- [ ] Already-swept PRs/issues are not processed twice (idempotency guard via `` comment)\n- [ ] CI green\n" - }, - { - "action": "add_label", - "issue": 867, - "label": "backlog" - }, - { - "action": "add_label", - "issue": 820, - "label": "backlog" - } -] +[] diff --git a/lib/AGENTS.md b/lib/AGENTS.md index 97e6f5e..0e4055e 100644 --- a/lib/AGENTS.md +++ b/lib/AGENTS.md @@ -1,4 +1,4 @@ - + # Shared Helpers (`lib/`) All agents source `lib/env.sh` as their first action. Additional helpers are @@ -7,7 +7,7 @@ sourced as needed. | File | What it provides | Sourced by | |---|---|---| | `lib/env.sh` | Loads `.env`, sets `FACTORY_ROOT`, exports project config (`FORGE_REPO`, `PROJECT_NAME`, etc.), defines `log()`, `forge_api()`, `forge_api_all()` (paginates all pages; accepts optional second TOKEN parameter, defaults to `$FORGE_TOKEN`; handles invalid/empty JSON responses gracefully — returns empty on parse error instead of crashing), `woodpecker_api()`, `wpdb()`, `memory_guard()` (skips agent if RAM < threshold), `load_secret()` (secret-source abstraction — see below). Auto-loads project TOML if `PROJECT_TOML` is set. Exports per-agent tokens (`FORGE_PLANNER_TOKEN`, `FORGE_GARDENER_TOKEN`, `FORGE_VAULT_TOKEN`, `FORGE_SUPERVISOR_TOKEN`, `FORGE_PREDICTOR_TOKEN`) — each falls back to `$FORGE_TOKEN` if not set. **Vault-only token guard (AD-006)**: `unset GITHUB_TOKEN CLAWHUB_TOKEN` so agents never hold external-action tokens — only the runner container receives them. **Container note**: when `DISINTO_CONTAINER=1`, `.env` is NOT re-sourced — compose already injects env vars (including `FORGE_URL=http://forgejo:3000`) and re-sourcing would clobber them. **Save/restore scope (#364)**: only `FORGE_URL` is preserved across `.env` re-sourcing (compose injects `http://forgejo:3000`, `.env` has `http://localhost:3000`). `FORGE_TOKEN` is NOT preserved so refreshed tokens in `.env` take effect immediately. **Per-agent token override (#762)**: agent run scripts export `FORGE_TOKEN_OVERRIDE=` BEFORE sourcing `env.sh`; `env.sh` applies this override at lines 98-100, ensuring the correct identity survives any re-sourcing of `env.sh` by nested shells or `claude -p` invocations. **Required env var**: `FORGE_PASS` — bot password for git HTTP push (Forgejo 11.x rejects API tokens for `git push`, #361). **Hard preconditions (#674)**: `USER` and `HOME` must be exported by the entrypoint before sourcing. When `PROJECT_TOML` is set, `PROJECT_REPO_ROOT`, `PRIMARY_BRANCH`, and `OPS_REPO_ROOT` must also be set (by entrypoint or TOML). **`load_secret NAME [DEFAULT]` (#793)**: backend-agnostic secret resolution. Precedence: (1) `/secrets/.env` — Nomad-rendered template, (2) current environment — already set by `.env.enc` / compose, (3) `secrets/.enc` — age-encrypted per-key file (decrypted on demand, cached in process env), (4) DEFAULT or empty. Consumers call `$(load_secret GITHUB_TOKEN)` instead of `${GITHUB_TOKEN}` — identical behavior whether secrets come from Docker compose injection or Nomad Vault templates. | Every agent | -| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status ` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number ` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote ` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs [--step ]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. | dev-poll, review-poll, review-pr | +| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status ` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number ` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote ` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs [--step ]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. `ci_get_step_logs ` — fetches per-step logs via Woodpecker REST API (`/repos/{id}/logs/{pipeline}/{step_id}`); returns raw log data for a single step. Used by `pr_poll_ci()` to build per-workflow/per-step CI diagnostics (#1051). | dev-poll, review-poll, review-pr | | `lib/ci-debug.sh` | CLI tool for Woodpecker CI: `list`, `status`, `logs`, `failures` subcommands. Not sourced — run directly. | Humans / dev-agent (tool access) | | `lib/ci-log-reader.py` | Python tool: reads CI logs from Woodpecker SQLite database. ` [--step ]` — returns last 200 lines from failed steps (or specified step). Used by `ci_get_logs()` in ci-helpers.sh. Requires `WOODPECKER_DATA_DIR` (default: /woodpecker-data). | ci-helpers.sh | | `lib/load-project.sh` | Parses a `projects/*.toml` file into env vars (`PROJECT_NAME`, `FORGE_REPO`, `WOODPECKER_REPO_ID`, monitoring toggles, mirror config, etc.). Also exports `FORGE_REPO_OWNER` (the owner component of `FORGE_REPO`, e.g. `disinto-admin` from `disinto-admin/disinto`). Reads `repo_root` and `ops_repo_root` from the TOML for host-CLI callers. **Container path handling (#674)**: no longer derives `PROJECT_REPO_ROOT` or `OPS_REPO_ROOT` inside the script — container entrypoints export the correct paths before agent scripts source `env.sh`, and the `DISINTO_CONTAINER` guard (line 90) skips TOML overrides when those vars are already set. | env.sh (when `PROJECT_TOML` is set) | @@ -20,7 +20,7 @@ sourced as needed. | `lib/stack-lock.sh` | File-based lock protocol for singleton project stack access. `stack_lock_acquire(holder, project)` — polls until free, breaks stale heartbeats (>10 min old), claims lock. `stack_lock_release(project)` — deletes lock file. `stack_lock_check(project)` — inspect current lock state. `stack_lock_heartbeat(project)` — update heartbeat timestamp (callers must call every 2 min while holding). Lock files at `~/data/locks/-stack.lock`. | docker/edge/dispatcher.sh, reproduce formula | | `lib/tea-helpers.sh` | `tea_file_issue(title, body, labels...)` — create issue via tea CLI with secret scanning; sets `FILED_ISSUE_NUM`. `tea_relabel(issue_num, labels...)` — replace labels using tea's `edit` subcommand (not `label`). `tea_comment(issue_num, body)` — add comment with secret scanning. `tea_close(issue_num)` — close issue. All use `TEA_LOGIN` and `FORGE_REPO` from env.sh. Labels by name (no ID lookup). Tea binary download verified via sha256 checksum. Sourced by env.sh when `tea` binary is available. | env.sh (conditional) | | `lib/worktree.sh` | Reusable git worktree management: `worktree_create(path, branch, [base_ref])` — create worktree, checkout base, fetch submodules. `worktree_recover(path, branch, [remote])` — detect existing worktree, reuse if on correct branch (sets `_WORKTREE_REUSED`), otherwise clean and recreate. `worktree_cleanup(path)` — `git worktree remove --force`, clear Claude Code project cache (`~/.claude/projects/` matching path). `worktree_cleanup_stale([max_age_hours])` — scan `/tmp` for orphaned worktrees older than threshold, skip preserved and active tmux worktrees, prune. `worktree_preserve(path, reason)` — mark worktree as preserved for debugging (writes `.worktree-preserved` marker, skipped by stale cleanup). | dev-agent.sh, supervisor-run.sh, planner-run.sh, predictor-run.sh, gardener-run.sh | -| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) | +| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. `pr_poll_ci()` builds a **per-workflow/per-step CI diagnostics prompt** (#1051): on failure, each failed workflow gets its own section with step name, exit code (annotated with standard meanings for 126/127/128), and step-local log tail (via `ci_get_step_logs`); passing workflows are listed explicitly so agents don't waste fix attempts on them. Falls back to legacy combined-log fetch if per-step API is unavailable. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) | | `lib/issue-lifecycle.sh` | Reusable issue lifecycle library: `issue_claim()` (add in-progress, remove backlog), `issue_release()` (remove in-progress, add backlog), `issue_block()` (post diagnostic comment with secret redaction, add blocked label), `issue_close()`, `issue_check_deps()` (parse deps, check transitive closure; sets `_ISSUE_BLOCKED_BY`, `_ISSUE_SUGGESTION`), `issue_suggest_next()` (find next unblocked backlog issue; sets `_ISSUE_NEXT`), `issue_post_refusal()` (structured refusal comment with dedup). Label IDs cached in globals on first lookup. Sources `lib/secret-scan.sh`. | dev-agent.sh (future) | | `lib/action-vault.sh` | **Vault PR helper** — create vault action PRs on ops repo via Forgejo API (works from containers without SSH). `vault_request ` validates TOML (using `validate_vault_action` from `action-vault/vault-env.sh`), creates branch `vault/`, writes `vault/actions/.toml`, creates PR targeting `main` with title `vault: ` and body from context field, returns PR number. Idempotent: if PR exists, returns existing number. **Low-tier bypass**: if the action's `blast_radius` classifies as `low` (via `action-vault/classify.sh`), `vault_request` calls `_vault_commit_direct()` which commits directly to ops `main` using `FORGE_ADMIN_TOKEN` — no PR, no approval wait. Returns `0` (not a PR number) for direct commits. Requires `FORGE_TOKEN`, `FORGE_ADMIN_TOKEN` (low-tier only), `FORGE_URL`, `FORGE_REPO`, `FORGE_OPS_REPO`. Uses the calling agent's own token (saves/restores `FORGE_TOKEN` around sourcing `vault-env.sh`), so approval workflow respects individual agent identities. | dev-agent (vault actions), future vault dispatcher | | `lib/branch-protection.sh` | Branch protection helpers for Forgejo repos. `setup_vault_branch_protection()` — configures admin-only merge protection on main (require 1 approval, restrict merge to admin role, block direct pushes). `setup_profile_branch_protection()` — same protection for `.profile` repos. `verify_branch_protection()` — checks protection is correctly configured. `remove_branch_protection()` — removes protection (cleanup/testing). Handles race condition after initial push: retries with backoff if Forgejo hasn't processed the branch yet. Requires `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OPS_REPO`. | bin/disinto (hire-an-agent) | @@ -30,9 +30,11 @@ sourced as needed. | `lib/git-creds.sh` | Shared git credential helper configuration. `configure_git_creds([HOME_DIR] [RUN_AS_CMD])` — writes a static credential helper script and configures git globally to use password-based HTTP auth (Forgejo 11.x rejects API tokens for `git push`, #361). **Retry on cold boot (#741)**: resolves bot username from `FORGE_TOKEN` with 5 retries (exponential backoff 1-5s); fails loudly and returns 1 if Forgejo is unreachable — never falls back to a wrong hardcoded default (exports `BOT_USER` on success). `repair_baked_cred_urls([--as RUN_AS_CMD] DIR ...)` — rewrites any git remote URLs that have credentials baked in to use clean URLs instead; uses `safe.directory` bypass for root-owned repos (#671). Requires `FORGE_PASS`, `FORGE_URL`, `FORGE_TOKEN`. | entrypoints (agents, edge) | | `lib/ops-setup.sh` | `setup_ops_repo()` — creates ops repo on Forgejo if it doesn't exist, configures bot collaborators, clones/initializes ops repo locally, seeds directory structure (vault, knowledge, evidence, sprints). Evidence subdirectories seeded: engagement/, red-team/, holdout/, evolution/, user-test/. Also seeds sprints/ for architect output. Exports `_ACTUAL_OPS_SLUG`. `migrate_ops_repo(ops_root, [primary_branch])` — idempotent migration helper that seeds missing directories and .gitkeep files on existing ops repos (pre-#407 deployments). | bin/disinto (init) | | `lib/ci-setup.sh` | `_install_cron_impl()` — installs crontab entries for bare-metal deployments (compose mode uses polling loop instead). `_create_forgejo_oauth_app()` — generic helper to create an OAuth2 app on Forgejo (shared by Woodpecker and chat). `_create_woodpecker_oauth_impl()` — creates Woodpecker OAuth2 app (thin wrapper). `_create_chat_oauth_impl()` — creates disinto-chat OAuth2 app, writes `CHAT_OAUTH_CLIENT_ID`/`CHAT_OAUTH_CLIENT_SECRET` to `.env` (#708). `_generate_woodpecker_token_impl()` — auto-generates WOODPECKER_TOKEN via OAuth2 flow. `_activate_woodpecker_repo_impl()` — activates repo in Woodpecker. All gated by `_load_ci_context()` which validates required env vars. | bin/disinto (init) | -| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); cost-cap env vars `CHAT_MAX_REQUESTS_PER_HOUR`, `CHAT_MAX_REQUESTS_PER_DAY`, `CHAT_MAX_TOKENS_PER_DAY` (#711); subdomain fallback comment for `EDGE_TUNNEL_FQDN_*` vars (#713); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) | +| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (**duplicate service detection**: tracks service names during generation, aborts with `ERROR: Duplicate service name '$name' detected` on conflict; detection state is reset between calls so idempotent reinvocation is safe, #850) (uses `codeberg.org/forgejo/forgejo:11.0` tag; `CLAUDE_BIN_DIR` volume mount removed from agents/llama services — only `reproduce` and `edge` still use the host-mounted CLI (#992); adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); subdomain fallback: `EDGE_ROUTING_MODE` (default `subpath`) and per-service `EDGE_TUNNEL_FQDN_*` vars injected into edge service (#1028); chat service rate limiting removed (#1084); chat workspace dir bind-mount: `${CHAT_WORKSPACE_DIR:-./workspace}:/var/workspace` + `CHAT_WORKSPACE_DIR` env var injected so Claude can access project working tree (#1027); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) | +| `lib/backup.sh` | Factory backup creation. `backup_create ` — exports factory state: fetches all issues (open+closed) from the project and ops repos via Forgejo API, bundles the ops repo as a git bundle, and writes a tarball. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_REPO`, `FORGE_OPS_REPO`, `OPS_REPO_ROOT`. Sourced by `bin/disinto backup create` (#1057). | bin/disinto (backup create) | +| `lib/disinto/backup.sh` | Factory backup restore. `backup_import ` — restores from a backup tarball: creates missing repos via Forgejo API, imports issues (idempotent — skips by number if present), unpacks ops repo git bundle. Idempotent: running twice produces same end state with no errors. Requires `FORGE_URL`, `FORGE_TOKEN`. Sourced by `bin/disinto backup import` (#1058). | bin/disinto (backup import) | | `lib/sprint-filer.sh` | Post-merge sub-issue filer for sprint PRs. Invoked by the `.woodpecker/ops-filer.yml` pipeline after a sprint PR merges to ops repo `main`. Parses ` ... ` blocks from sprint PR bodies to extract sub-issue definitions, creates them on the project repo using `FORGE_FILER_TOKEN` (narrow-scope `filer-bot` identity with `issues:write` only), adds `in-progress` label to the parent vision issue, and handles vision lifecycle closure when all sub-issues are closed. Uses `filer_api_all()` for paginated fetches. Idempotent: uses `` markers to skip already-filed issues. Requires `FORGE_FILER_TOKEN`, `FORGE_API`, `FORGE_API_BASE`, `FORGE_OPS_REPO`. | `.woodpecker/ops-filer.yml` (CI pipeline on ops repo) | | `lib/hire-agent.sh` | `disinto_hire_an_agent()` — user creation, `.profile` repo setup, formula copying, branch protection, and state marker creation for hiring a new agent. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`, `PROJECT_NAME`. Extracted from `bin/disinto`. | bin/disinto (hire) | | `lib/release.sh` | `disinto_release()` — vault TOML creation, branch setup on ops repo, PR creation, and auto-merge request for a versioned release. `_assert_release_globals()` validates required env vars. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_OPS_REPO`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. Extracted from `bin/disinto`. | bin/disinto (release) | -| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_get_or_empty(PATH)` — GET /v1/PATH; 200→raw body, 404→empty, else structured error + return 1 (used by sync scripts to distinguish "absent, create" from hard failure without tripping errexit, #881). `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | `tools/vault-apply-policies.sh`, `tools/vault-apply-roles.sh`, `lib/init/nomad/vault-nomad-auth.sh` | -| `lib/init/nomad/` | Nomad+Vault installer scripts. `cluster-up.sh` — idempotent Step-0 orchestrator that runs all steps in order (installs packages, writes HCL, enables systemd units, unseals Vault); uses `poll_until_healthy()` helper for deduped readiness polling. `install.sh` — installs pinned Nomad+Vault apt packages. `vault-init.sh` — initializes Vault (unseal keys → `/etc/vault.d/`), creates dev-persisted unseal unit. `lib-systemd.sh` — shared systemd unit helpers. `systemd-nomad.sh`, `systemd-vault.sh` — write and enable service units. `vault-nomad-auth.sh` — Step-2 script that enables Vault's JWT auth at path `jwt-nomad`, writes the JWKS/algs config pointing at Nomad's workload-identity signer, delegates role sync to `tools/vault-apply-roles.sh`, installs `/etc/nomad.d/server.hcl`, and SIGHUPs `nomad.service` if the file changed (#881). Idempotent: each step checks current state before acting. Sourced and called by `cluster-up.sh`; not sourced by agents. | `bin/disinto init --backend=nomad` | +| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_get_or_empty(PATH)` — GET /v1/PATH; 200→raw body, 404→empty, else structured error + return 1 (used by sync scripts to distinguish "absent, create" from hard failure without tripping errexit, #881). `hvault_ensure_kv_v2(MOUNT, [LOG_PREFIX])` — idempotent KV v2 mount assertion: enables mount if absent, fails loudly if present as wrong type/version. Extracted from all `vault-seed-*.sh` scripts to eliminate dup-detector violations. Respects `DRY_RUN=1`. `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. `_hvault_seed_key(PATH, KEY, [GENERATOR])` — seed one KV key if absent; reads existing data and merges to preserve sibling keys (KV v2 replaces atomically); returns 0=created, 1=unchanged, 2=API error (#992). All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | `tools/vault-apply-policies.sh`, `tools/vault-apply-roles.sh`, `lib/init/nomad/vault-nomad-auth.sh`, `tools/vault-seed-*.sh` | +| `lib/init/nomad/` | Nomad+Vault installer scripts. `cluster-up.sh` — idempotent Step-0 orchestrator that runs all steps in order (installs packages, writes HCL, enables systemd units, unseals Vault); uses `poll_until_healthy()` helper for deduped readiness polling; `HOST_VOLUME_DIRS` array now includes `/srv/disinto/docker` (for staging file-server, S5.2, #989, #992). `install.sh` — installs pinned Nomad+Vault apt packages. `vault-init.sh` — initializes Vault (unseal keys → `/etc/vault.d/`), creates dev-persisted unseal unit. `lib-systemd.sh` — shared systemd unit helpers. `systemd-nomad.sh`, `systemd-vault.sh` — write and enable service units. `vault-nomad-auth.sh` — Step-2 script that enables Vault's JWT auth at path `jwt-nomad`, writes the JWKS/algs config pointing at Nomad's workload-identity signer, delegates role sync to `tools/vault-apply-roles.sh`, installs `/etc/nomad.d/server.hcl`, and SIGHUPs `nomad.service` if the file changed (#881). `wp-oauth-register.sh` — S3.3 script that creates the Woodpecker OAuth2 app in Forgejo and stores `forgejo_client`/`forgejo_secret` in Vault KV v2 at `kv/disinto/shared/woodpecker`; idempotent (skips if app or secrets already present); called by `bin/disinto --with woodpecker`. `deploy.sh` — S4 dependency-ordered Nomad job deploy + health-wait; takes a list of jobspec basenames, submits each to Nomad and polls until healthy before proceeding to the next; supports `--dry-run` and per-job timeout overrides via `JOB_READY_TIMEOUT_`; global default timeout `JOB_READY_TIMEOUT_SECS` is 360s (raised from 240s for chat cold-start, #1036); invoked by `bin/disinto --with ` and `cluster-up.sh`; deploy order now covers staging, chat, edge (S5.5, #992). Idempotent: each step checks current state before acting. Sourced and called by `cluster-up.sh`; not sourced by agents. | `bin/disinto init --backend=nomad` | diff --git a/lib/agent-sdk.sh b/lib/agent-sdk.sh index 2522655..b968222 100644 --- a/lib/agent-sdk.sh +++ b/lib/agent-sdk.sh @@ -52,8 +52,9 @@ claude_run_with_watchdog() { out_file=$(mktemp) || return 1 trap 'rm -f "$out_file"' RETURN - # Start claude in background, capturing stdout to temp file - "${cmd[@]}" > "$out_file" 2>>"$LOGFILE" & + # Start claude in new process group (setsid creates new session, $pid is PGID leader) + # All children of claude will inherit this process group + setsid "${cmd[@]}" > "$out_file" 2>>"$LOGFILE" & pid=$! # Background watchdog: poll for final result marker @@ -84,12 +85,12 @@ claude_run_with_watchdog() { sleep "$grace" if kill -0 "$pid" 2>/dev/null; then log "watchdog: claude -p idle for ${grace}s after final result; SIGTERM" - kill -TERM "$pid" 2>/dev/null || true + kill -TERM -- "-$pid" 2>/dev/null || true # Give it a moment to clean up sleep 5 if kill -0 "$pid" 2>/dev/null; then log "watchdog: force kill after SIGTERM timeout" - kill -KILL "$pid" 2>/dev/null || true + kill -KILL -- "-$pid" 2>/dev/null || true fi fi fi @@ -100,16 +101,16 @@ claude_run_with_watchdog() { timeout --foreground "${CLAUDE_TIMEOUT:-7200}" tail --pid="$pid" -f /dev/null 2>/dev/null rc=$? - # Clean up the watchdog - kill "$grace_pid" 2>/dev/null || true + # Clean up the watchdog (target process group if it spawned children) + kill -- "-$grace_pid" 2>/dev/null || true wait "$grace_pid" 2>/dev/null || true - # When timeout fires (rc=124), explicitly kill the orphaned claude process + # When timeout fires (rc=124), explicitly kill the orphaned claude process group # tail --pid is a passive waiter, not a supervisor if [ "$rc" -eq 124 ]; then - kill "$pid" 2>/dev/null || true + kill -TERM -- "-$pid" 2>/dev/null || true sleep 1 - kill -KILL "$pid" 2>/dev/null || true + kill -KILL -- "-$pid" 2>/dev/null || true fi # Output the captured stdout diff --git a/lib/backup.sh b/lib/backup.sh new file mode 100644 index 0000000..8d7a827 --- /dev/null +++ b/lib/backup.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +# ============================================================================= +# disinto backup — export factory state for migration +# +# Usage: source this file, then call backup_create +# Requires: FORGE_URL, FORGE_TOKEN, FORGE_REPO, FORGE_OPS_REPO, OPS_REPO_ROOT +# ============================================================================= +set -euo pipefail + +# Fetch all issues (open + closed) for a repo slug and emit the normalized JSON array. +# Usage: _backup_fetch_issues +_backup_fetch_issues() { + local repo_slug="$1" + local api_url="${FORGE_API_BASE}/repos/${repo_slug}" + + local all_issues="[]" + for state in open closed; do + local page=1 + while true; do + local page_items + page_items=$(curl -sf -X GET \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${api_url}/issues?state=${state}&type=issues&limit=50&page=${page}") || { + echo "ERROR: failed to fetch ${state} issues from ${repo_slug} (page ${page})" >&2 + return 1 + } + local count + count=$(printf '%s' "$page_items" | jq 'length' 2>/dev/null) || count=0 + [ -z "$count" ] && count=0 + [ "$count" -eq 0 ] && break + all_issues=$(printf '%s\n%s' "$all_issues" "$page_items" | jq -s 'add') + [ "$count" -lt 50 ] && break + page=$((page + 1)) + done + done + + # Normalize to the schema: number, title, body, labels, state + printf '%s' "$all_issues" | jq '[.[] | { + number: .number, + title: .title, + body: .body, + labels: [.labels[]?.name], + state: .state + }] | sort_by(.number)' +} + +# Create a backup tarball of factory state. +# Usage: backup_create +backup_create() { + local outfile="${1:-}" + if [ -z "$outfile" ]; then + echo "Error: output file required" >&2 + echo "Usage: disinto backup create " >&2 + return 1 + fi + + # Resolve to absolute path before cd-ing into tmpdir + case "$outfile" in + /*) ;; + *) outfile="$(pwd)/${outfile}" ;; + esac + + # Validate required env + : "${FORGE_URL:?FORGE_URL must be set}" + : "${FORGE_TOKEN:?FORGE_TOKEN must be set}" + : "${FORGE_REPO:?FORGE_REPO must be set}" + + local forge_ops_repo="${FORGE_OPS_REPO:-${FORGE_REPO}-ops}" + local ops_repo_root="${OPS_REPO_ROOT:-}" + + if [ -z "$ops_repo_root" ] || [ ! -d "$ops_repo_root/.git" ]; then + echo "Error: OPS_REPO_ROOT (${ops_repo_root:-}) is not a valid git repo" >&2 + return 1 + fi + + local tmpdir + tmpdir=$(mktemp -d) + trap 'rm -rf "$tmpdir"' EXIT + + local project_name="${FORGE_REPO##*/}" + + echo "=== disinto backup create ===" + echo "Forge: ${FORGE_URL}" + echo "Repos: ${FORGE_REPO}, ${forge_ops_repo}" + + # ── 1. Export issues ────────────────────────────────────────────────────── + mkdir -p "${tmpdir}/issues" + + echo "Fetching issues for ${FORGE_REPO}..." + _backup_fetch_issues "$FORGE_REPO" > "${tmpdir}/issues/${project_name}.json" + local main_count + main_count=$(jq 'length' "${tmpdir}/issues/${project_name}.json") + echo " ${main_count} issues exported" + + echo "Fetching issues for ${forge_ops_repo}..." + _backup_fetch_issues "$forge_ops_repo" > "${tmpdir}/issues/${project_name}-ops.json" + local ops_count + ops_count=$(jq 'length' "${tmpdir}/issues/${project_name}-ops.json") + echo " ${ops_count} issues exported" + + # ── 2. Git bundle of ops repo ──────────────────────────────────────────── + mkdir -p "${tmpdir}/repos" + + echo "Creating git bundle for ${forge_ops_repo}..." + git -C "$ops_repo_root" bundle create "${tmpdir}/repos/${project_name}-ops.bundle" --all 2>&1 + echo " bundle created ($(du -h "${tmpdir}/repos/${project_name}-ops.bundle" | cut -f1))" + + # ── 3. Metadata ────────────────────────────────────────────────────────── + local created_at + created_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + + jq -n \ + --arg created_at "$created_at" \ + --arg source_host "$(hostname)" \ + --argjson schema_version 1 \ + --arg forgejo_url "$FORGE_URL" \ + '{ + created_at: $created_at, + source_host: $source_host, + schema_version: $schema_version, + forgejo_url: $forgejo_url + }' > "${tmpdir}/metadata.json" + + # ── 4. Pack tarball ────────────────────────────────────────────────────── + echo "Creating tarball: ${outfile}" + tar -czf "$outfile" -C "$tmpdir" metadata.json issues repos + local size + size=$(du -h "$outfile" | cut -f1) + echo "=== Backup complete: ${outfile} (${size}) ===" + + # Clean up before returning — the EXIT trap references the local $tmpdir + # which goes out of scope after return, causing 'unbound variable' under set -u. + trap - EXIT + rm -rf "$tmpdir" +} diff --git a/lib/ci-helpers.sh b/lib/ci-helpers.sh index 11c668e..6afe97b 100644 --- a/lib/ci-helpers.sh +++ b/lib/ci-helpers.sh @@ -247,6 +247,31 @@ ci_promote() { echo "$new_num" } +# ci_get_step_logs +# Fetches logs for a single CI step via the Woodpecker API. +# Requires: WOODPECKER_REPO_ID, woodpecker_api() (from env.sh) +# Returns: 0 on success, 1 on failure. Outputs log text to stdout. +# +# Usage: +# ci_get_step_logs 1423 5 # Get logs for step ID 5 in pipeline 1423 +ci_get_step_logs() { + local pipeline_num="$1" step_id="$2" + + if [ -z "$pipeline_num" ] || [ -z "$step_id" ]; then + echo "Usage: ci_get_step_logs " >&2 + return 1 + fi + + if [ -z "${WOODPECKER_REPO_ID:-}" ] || [ "${WOODPECKER_REPO_ID}" = "0" ]; then + echo "ERROR: WOODPECKER_REPO_ID not set or zero" >&2 + return 1 + fi + + woodpecker_api "/repos/${WOODPECKER_REPO_ID}/logs/${pipeline_num}/${step_id}" \ + --max-time 15 2>/dev/null \ + | jq -r '.[].data // empty' 2>/dev/null +} + # ci_get_logs [--step ] # Reads CI logs from the Woodpecker SQLite database. # Requires: WOODPECKER_DATA_DIR env var or mounted volume at /woodpecker-data diff --git a/lib/ci-setup.sh b/lib/ci-setup.sh index 319e83e..507affb 100644 --- a/lib/ci-setup.sh +++ b/lib/ci-setup.sh @@ -142,6 +142,7 @@ _create_forgejo_oauth_app() { # Set up Woodpecker CI to use Forgejo as its forge backend. # Creates an OAuth2 app on Forgejo for Woodpecker, activates the repo. +# Respects EDGE_ROUTING_MODE: in subdomain mode, uses EDGE_TUNNEL_FQDN_CI for redirect URI. # Usage: create_woodpecker_oauth _create_woodpecker_oauth_impl() { local forge_url="$1" @@ -150,7 +151,13 @@ _create_woodpecker_oauth_impl() { echo "" echo "── Woodpecker OAuth2 setup ────────────────────────────" - _create_forgejo_oauth_app "woodpecker-ci" "http://localhost:8000/authorize" || return 0 + local wp_redirect_uri="http://localhost:8000/authorize" + local routing_mode="${EDGE_ROUTING_MODE:-subpath}" + if [ "$routing_mode" = "subdomain" ] && [ -n "${EDGE_TUNNEL_FQDN_CI:-}" ]; then + wp_redirect_uri="https://${EDGE_TUNNEL_FQDN_CI}/authorize" + fi + + _create_forgejo_oauth_app "woodpecker-ci" "$wp_redirect_uri" || return 0 local client_id="${_OAUTH_CLIENT_ID}" local client_secret="${_OAUTH_CLIENT_SECRET}" @@ -158,10 +165,15 @@ _create_woodpecker_oauth_impl() { # WP_FORGEJO_CLIENT/SECRET match the docker-compose.yml variable references # WOODPECKER_HOST must be host-accessible URL to match OAuth2 redirect_uri local env_file="${FACTORY_ROOT}/.env" + local wp_host="http://localhost:8000" + if [ "$routing_mode" = "subdomain" ] && [ -n "${EDGE_TUNNEL_FQDN_CI:-}" ]; then + wp_host="https://${EDGE_TUNNEL_FQDN_CI}" + fi + local wp_vars=( "WOODPECKER_FORGEJO=true" "WOODPECKER_FORGEJO_URL=${forge_url}" - "WOODPECKER_HOST=http://localhost:8000" + "WOODPECKER_HOST=${wp_host}" ) if [ -n "${client_id:-}" ]; then wp_vars+=("WP_FORGEJO_CLIENT=${client_id}") diff --git a/lib/disinto/backup.sh b/lib/disinto/backup.sh new file mode 100644 index 0000000..6e25e83 --- /dev/null +++ b/lib/disinto/backup.sh @@ -0,0 +1,391 @@ +#!/usr/bin/env bash +# ============================================================================= +# backup.sh — backup/restore utilities for disinto factory state +# +# Subcommands: +# create Create backup of factory state +# import Restore factory state from backup +# +# Usage: +# source "${FACTORY_ROOT}/lib/disinto/backup.sh" +# backup_import +# +# Environment: +# FORGE_URL - Forgejo instance URL (target) +# FORGE_TOKEN - Admin token for target Forgejo +# +# Idempotency: +# - Repos: created via API if missing +# - Issues: check if exists by number, skip if present +# - Runs twice = same end state, no errors +# ============================================================================= +set -euo pipefail + +# ── Helper: log with timestamp ─────────────────────────────────────────────── +backup_log() { + local msg="$1" + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $msg" +} + +# ── Helper: create repo if it doesn't exist ───────────────────────────────── +# Usage: backup_create_repo_if_missing +# Returns: 0 if repo exists or was created, 1 on error +backup_create_repo_if_missing() { + local slug="$1" + local org_name="${slug%%/*}" + local repo_name="${slug##*/}" + + # Check if repo exists + if curl -sf --max-time 5 \ + -H "Authorization: token ${FORGE_TOKEN}" \ + "${FORGE_URL}/api/v1/repos/${slug}" >/dev/null 2>&1; then + backup_log "Repo ${slug} already exists" + return 0 + fi + + backup_log "Creating repo ${slug}..." + + # Create org if needed + curl -sf -X POST \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/orgs" \ + -d "{\"username\":\"${org_name}\",\"visibility\":\"public\"}" >/dev/null 2>&1 || true + + # Create repo + local response + response=$(curl -sf -X POST \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/orgs/${org_name}/repos" \ + -d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" 2>/dev/null) \ + || response="" + + if [ -n "$response" ] && echo "$response" | grep -q '"id":\|[0-9]'; then + backup_log "Created repo ${slug}" + BACKUP_CREATED_REPOS=$((BACKUP_CREATED_REPOS + 1)) + return 0 + fi + + # Fallback: admin endpoint + response=$(curl -sf -X POST \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/admin/users/${org_name}/repos" \ + -d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" 2>/dev/null) \ + || response="" + + if [ -n "$response" ] && echo "$response" | grep -q '"id":\|[0-9]'; then + backup_log "Created repo ${slug} (via admin API)" + BACKUP_CREATED_REPOS=$((BACKUP_CREATED_REPOS + 1)) + return 0 + fi + + backup_log "ERROR: failed to create repo ${slug}" >&2 + return 1 +} + +# ── Helper: check if issue exists by number ────────────────────────────────── +# Usage: backup_issue_exists +# Returns: 0 if exists, 1 if not +backup_issue_exists() { + local slug="$1" + local issue_num="$2" + + curl -sf --max-time 5 \ + -H "Authorization: token ${FORGE_TOKEN}" \ + "${FORGE_URL}/api/v1/repos/${slug}/issues/${issue_num}" >/dev/null 2>&1 +} + +# ── Helper: create issue with specific number (if Forgejo supports it) ─────── +# Note: Forgejo API auto-assigns next integer; we accept renumbering and log mapping +# Usage: backup_create_issue <body> [labels...] +# Returns: new_issue_number on success, 0 on failure +backup_create_issue() { + local slug="$1" + local original_num="$2" + local title="$3" + local body="$4" + shift 4 + + # Build labels array + local -a labels=() + for label in "$@"; do + # Resolve label name to ID + local label_id + label_id=$(curl -sf --max-time 5 \ + -H "Authorization: token ${FORGE_TOKEN}" \ + "${FORGE_URL}/api/v1/repos/${slug}/labels" 2>/dev/null \ + | jq -r ".[] | select(.name == \"${label}\") | .id" 2>/dev/null) || label_id="" + + if [ -n "$label_id" ] && [ "$label_id" != "null" ]; then + labels+=("$label_id") + fi + done + + # Build payload + local payload + if [ ${#labels[@]} -gt 0 ]; then + payload=$(jq -n \ + --arg title "$title" \ + --arg body "$body" \ + --argjson labels "$(printf '%s\n' "${labels[@]}" | jq -R . | jq -s .)" \ + '{title: $title, body: $body, labels: $labels}') + else + payload=$(jq -n --arg title "$title" --arg body "$body" '{title: $title, body: $body, labels: []}') + fi + + local response + response=$(curl -sf -X POST \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/repos/${slug}/issues" \ + -d "$payload" 2>/dev/null) || { + backup_log "ERROR: failed to create issue '${title}'" >&2 + return 1 + } + + local new_num + new_num=$(printf '%s' "$response" | jq -r '.number // empty') + + # Log the mapping + echo "${original_num}:${new_num}" >> "${BACKUP_MAPPING_FILE}" + + backup_log "Created issue '${title}' as #${new_num} (original: #${original_num})" + echo "$new_num" +} + +# ── Step 1: Unpack tarball to temp dir ─────────────────────────────────────── +# Usage: backup_unpack_tarball <tarball> +# Returns: temp dir path via BACKUP_TEMP_DIR +backup_unpack_tarball() { + local tarball="$1" + + if [ ! -f "$tarball" ]; then + backup_log "ERROR: tarball not found: ${tarball}" >&2 + return 1 + fi + + BACKUP_TEMP_DIR=$(mktemp -d -t disinto-backup.XXXXXX) + backup_log "Unpacking ${tarball} to ${BACKUP_TEMP_DIR}" + + if ! tar -xzf "$tarball" -C "$BACKUP_TEMP_DIR"; then + backup_log "ERROR: failed to unpack tarball" >&2 + rm -rf "$BACKUP_TEMP_DIR" + return 1 + fi + + # Verify expected structure + if [ ! -d "${BACKUP_TEMP_DIR}/repos" ]; then + backup_log "ERROR: tarball missing 'repos/' directory" >&2 + rm -rf "$BACKUP_TEMP_DIR" + return 1 + fi + + backup_log "Tarball unpacked successfully" +} + +# ── Step 2: disinto repo — create via Forgejo API, trigger sync (manual) ───── +# Usage: backup_import_disinto_repo +# Returns: 0 on success, 1 on failure +backup_import_disinto_repo() { + backup_log "Step 2: Configuring disinto repo..." + + # Create disinto repo if missing + backup_create_repo_if_missing "disinto-admin/disinto" + + # Note: Manual mirror configuration recommended (avoids SSH deploy-key handling) + backup_log "Note: Configure Codeberg → Forgejo pull mirror manually" + backup_log " Run on Forgejo admin panel: Repository Settings → Repository Mirroring" + backup_log " Source: ssh://git@codeberg.org/johba/disinto.git" + backup_log " Mirror: disinto-admin/disinto" + backup_log " Or use: git clone --mirror ssh://git@codeberg.org/johba/disinto.git" + backup_log " cd disinto.git && git push --mirror ${FORGE_URL}/disinto-admin/disinto.git" + + return 0 +} + +# ── Step 3: disinto-ops repo — create empty, push from bundle ──────────────── +# Usage: backup_import_disinto_ops_repo +# Returns: 0 on success, 1 on failure +backup_import_disinto_ops_repo() { + backup_log "Step 3: Configuring disinto-ops repo from bundle..." + + local bundle_path="${BACKUP_TEMP_DIR}/repos/disinto-ops.bundle" + + if [ ! -f "$bundle_path" ]; then + backup_log "WARNING: Bundle not found at ${bundle_path}, skipping" + return 0 + fi + + # Create ops repo if missing + backup_create_repo_if_missing "disinto-admin/disinto-ops" + + # Clone bundle and push to Forgejo + local clone_dir + clone_dir=$(mktemp -d -t disinto-ops-clone.XXXXXX) + backup_log "Cloning bundle to ${clone_dir}" + + if ! git clone --bare "$bundle_path" "$clone_dir/disinto-ops.git"; then + backup_log "ERROR: failed to clone bundle" + rm -rf "$clone_dir" + return 1 + fi + + # Push all refs to Forgejo + backup_log "Pushing refs to Forgejo..." + if ! cd "$clone_dir/disinto-ops.git" && \ + git push --mirror "${FORGE_URL}/disinto-admin/disinto-ops.git" 2>&1; then + backup_log "ERROR: failed to push refs" + rm -rf "$clone_dir" + return 1 + fi + + local ref_count + ref_count=$(cd "$clone_dir/disinto-ops.git" && git show-ref | wc -l) + BACKUP_PUSHED_REFS=$((BACKUP_PUSHED_REFS + ref_count)) + + backup_log "Pushed ${ref_count} refs to disinto-ops" + rm -rf "$clone_dir" + + return 0 +} + +# ── Step 4: Import issues from backup ──────────────────────────────────────── +# Usage: backup_import_issues <slug> <issues_file> +# issues_file is a JSON array of issues (per create schema) +# Returns: 0 on success +backup_import_issues() { + local slug="$1" + local issues_file="$2" + + if [ ! -f "$issues_file" ]; then + backup_log "No issues file found, skipping" + return 0 + fi + + local count + count=$(jq 'length' "$issues_file") + backup_log "Importing ${count} issues from ${issues_file}" + + local created=0 + local skipped=0 + + for i in $(seq 0 $((count - 1))); do + local issue_num title body + issue_num=$(jq -r ".[${i}].number" "$issues_file") + title=$(jq -r ".[${i}].title" "$issues_file") + body=$(jq -r ".[${i}].body" "$issues_file") + + if [ -z "$issue_num" ] || [ "$issue_num" = "null" ]; then + backup_log "WARNING: skipping issue without number at index ${i}" + continue + fi + + # Check if issue already exists + if backup_issue_exists "$slug" "$issue_num"; then + backup_log "Issue #${issue_num} already exists, skipping" + skipped=$((skipped + 1)) + continue + fi + + # Extract labels + local -a labels=() + while IFS= read -r label; do + [ -n "$label" ] && labels+=("$label") + done < <(jq -r ".[${i}].labels[]? // empty" "$issues_file") + + # Create issue + local new_num + if new_num=$(backup_create_issue "$slug" "$issue_num" "$title" "$body" "${labels[@]}"); then + created=$((created + 1)) + fi + done + + BACKUP_CREATED_ISSUES=$((BACKUP_CREATED_ISSUES + created)) + BACKUP_SKIPPED_ISSUES=$((BACKUP_SKIPPED_ISSUES + skipped)) + + backup_log "Created ${created} issues, skipped ${skipped}" +} + +# ── Main: import subcommand ────────────────────────────────────────────────── +# Usage: backup_import <tarball> +backup_import() { + local tarball="$1" + + # Validate required environment + [ -n "${FORGE_URL:-}" ] || { echo "Error: FORGE_URL not set" >&2; exit 1; } + [ -n "${FORGE_TOKEN:-}" ] || { echo "Error: FORGE_TOKEN not set" >&2; exit 1; } + + backup_log "=== Backup Import Started ===" + backup_log "Target: ${FORGE_URL}" + backup_log "Tarball: ${tarball}" + + # Initialize counters + BACKUP_CREATED_REPOS=0 + BACKUP_PUSHED_REFS=0 + BACKUP_CREATED_ISSUES=0 + BACKUP_SKIPPED_ISSUES=0 + + # Create temp dir for mapping file + BACKUP_MAPPING_FILE=$(mktemp -t disinto-mapping.XXXXXX.json) + echo '{"mappings": []}' > "$BACKUP_MAPPING_FILE" + + # Step 1: Unpack tarball + if ! backup_unpack_tarball "$tarball"; then + exit 1 + fi + + # Step 2: disinto repo + if ! backup_import_disinto_repo; then + exit 1 + fi + + # Step 3: disinto-ops repo + if ! backup_import_disinto_ops_repo; then + exit 1 + fi + + # Step 4: Import issues — iterate issues/<slug>.json files, each is a JSON array + for issues_file in "${BACKUP_TEMP_DIR}/issues"/*.json; do + [ -f "$issues_file" ] || continue + + local slug_filename + slug_filename=$(basename "$issues_file" .json) + + # Map slug-filename → forgejo-slug: "disinto" → "disinto-admin/disinto", + # "disinto-ops" → "disinto-admin/disinto-ops" + local slug + case "$slug_filename" in + "disinto") slug="${FORGE_REPO}" ;; + "disinto-ops") slug="${FORGE_OPS_REPO}" ;; + *) slug="disinto-admin/${slug_filename}" ;; + esac + + backup_log "Processing issues from ${slug_filename}.json (${slug})" + backup_import_issues "$slug" "$issues_file" + done + + # Summary + backup_log "=== Backup Import Complete ===" + backup_log "Created ${BACKUP_CREATED_REPOS} repos" + backup_log "Pushed ${BACKUP_PUSHED_REFS} refs" + backup_log "Imported ${BACKUP_CREATED_ISSUES} issues" + backup_log "Skipped ${BACKUP_SKIPPED_ISSUES} (already present)" + backup_log "Issue mapping saved to: ${BACKUP_MAPPING_FILE}" + + # Cleanup + rm -rf "$BACKUP_TEMP_DIR" + + exit 0 +} + +# ── Entry point: if sourced, don't run; if executed directly, run import ──── +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + if [ $# -lt 1 ]; then + echo "Usage: $0 <tarball>" >&2 + exit 1 + fi + + backup_import "$1" +fi diff --git a/lib/generators.sh b/lib/generators.sh index 9ec8444..67ff830 100644 --- a/lib/generators.sh +++ b/lib/generators.sh @@ -26,6 +26,28 @@ PROJECT_NAME="${PROJECT_NAME:-project}" # PRIMARY_BRANCH defaults to main (env.sh may have set it to 'master') PRIMARY_BRANCH="${PRIMARY_BRANCH:-main}" +# Track service names for duplicate detection +declare -A _seen_services +declare -A _service_sources + +# Record a service name and its source; return 0 if unique, 1 if duplicate +_record_service() { + local service_name="$1" + local source="$2" + + if [ -n "${_seen_services[$service_name]:-}" ]; then + local original_source="${_service_sources[$service_name]}" + echo "ERROR: Duplicate service name '$service_name' detected —" >&2 + echo " '$service_name' emitted twice — from $original_source and from $source" >&2 + echo " Remove one of the conflicting activations to proceed." >&2 + return 1 + fi + + _seen_services[$service_name]=1 + _service_sources[$service_name]="$source" + return 0 +} + # Helper: extract woodpecker_repo_id from a project TOML file # Returns empty string if not found or file doesn't exist _get_woodpecker_repo_id() { @@ -97,6 +119,16 @@ _generate_local_model_services() { POLL_INTERVAL) poll_interval_val="$value" ;; ---) if [ -n "$service_name" ] && [ -n "$base_url" ]; then + # Record service for duplicate detection using the full service name + local full_service_name="agents-${service_name}" + local toml_basename + toml_basename=$(basename "$toml") + if ! _record_service "$full_service_name" "[agents.$service_name] in projects/$toml_basename"; then + # Duplicate detected — clean up and abort + rm -f "$temp_file" + return 1 + fi + # Per-agent FORGE_TOKEN / FORGE_PASS lookup (#834 Gap 3). # Two hired llama agents must not share the same Forgejo identity, # so we key the env-var lookup by forge_user (which hire-agent.sh @@ -137,7 +169,6 @@ _generate_local_model_services() { - project-repos-${service_name}:/home/agent/repos - \${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:\${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} - \${CLAUDE_CONFIG_FILE:-\${HOME}/.claude.json}:/home/agent/.claude.json:ro - - \${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro - \${AGENT_SSH_DIR:-\${HOME}/.ssh}:/home/agent/.ssh:ro - ./projects:/home/agent/disinto/projects:ro - ./.env:/home/agent/disinto/.env:ro @@ -282,6 +313,20 @@ _generate_compose_impl() { return 0 fi + # Reset duplicate detection state for fresh run + _seen_services=() + _service_sources=() + + # Initialize duplicate detection with base services defined in the template + _record_service "forgejo" "base compose template" || return 1 + _record_service "woodpecker" "base compose template" || return 1 + _record_service "woodpecker-agent" "base compose template" || return 1 + _record_service "agents" "base compose template" || return 1 + _record_service "runner" "base compose template" || return 1 + _record_service "edge" "base compose template" || return 1 + _record_service "staging" "base compose template" || return 1 + _record_service "staging-deploy" "base compose template" || return 1 + # Extract primary woodpecker_repo_id from project TOML files local wp_repo_id wp_repo_id=$(_get_primary_woodpecker_repo_id) @@ -359,6 +404,9 @@ services: WOODPECKER_SERVER: localhost:9000 WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-} WOODPECKER_GRPC_SECURE: "false" + WOODPECKER_GRPC_KEEPALIVE_TIME: "10s" + WOODPECKER_GRPC_KEEPALIVE_TIMEOUT: "20s" + WOODPECKER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS: "true" WOODPECKER_HEALTHCHECK_ADDR: ":3333" WOODPECKER_BACKEND_DOCKER_NETWORK: ${WOODPECKER_CI_NETWORK:-disinto_disinto-net} WOODPECKER_MAX_WORKFLOWS: 1 @@ -382,7 +430,6 @@ services: - project-repos:/home/agent/repos - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro - - ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro - ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro - ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro - woodpecker-data:/woodpecker-data:ro @@ -438,6 +485,76 @@ services: COMPOSEEOF + # ── Conditional agents-llama block (ENABLE_LLAMA_AGENT=1) ────────────── + # This legacy flag was removed in #846 but kept for duplicate detection testing + if [ "${ENABLE_LLAMA_AGENT:-0}" = "1" ]; then + if ! _record_service "agents-llama" "ENABLE_LLAMA_AGENT=1"; then + return 1 + fi + cat >> "$compose_file" <<'COMPOSEEOF' + + agents-llama: + image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest} + container_name: disinto-agents-llama + restart: unless-stopped + security_opt: + - apparmor=unconfined + volumes: + - agent-data:/home/agent/data + - project-repos:/home/agent/repos + - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} + - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro + - ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro + - woodpecker-data:/woodpecker-data:ro + - ./projects:/home/agent/disinto/projects:ro + - ./.env:/home/agent/disinto/.env:ro + - ./state:/home/agent/disinto/state + environment: + FORGE_URL: http://forgejo:3000 + FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto} + FORGE_TOKEN: ${FORGE_TOKEN:-} + FORGE_REVIEW_TOKEN: ${FORGE_REVIEW_TOKEN:-} + FORGE_PLANNER_TOKEN: ${FORGE_PLANNER_TOKEN:-} + FORGE_GARDENER_TOKEN: ${FORGE_GARDENER_TOKEN:-} + FORGE_VAULT_TOKEN: ${FORGE_VAULT_TOKEN:-} + FORGE_SUPERVISOR_TOKEN: ${FORGE_SUPERVISOR_TOKEN:-} + FORGE_PREDICTOR_TOKEN: ${FORGE_PREDICTOR_TOKEN:-} + FORGE_ARCHITECT_TOKEN: ${FORGE_ARCHITECT_TOKEN:-} + FORGE_BOT_USERNAMES: ${FORGE_BOT_USERNAMES:-} + WOODPECKER_TOKEN: ${WOODPECKER_TOKEN:-} + CLAUDE_TIMEOUT: ${CLAUDE_TIMEOUT:-7200} + CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: ${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1} + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-} + FORGE_PASS: ${FORGE_PASS:-} + FORGE_ADMIN_PASS: ${FORGE_ADMIN_PASS:-} + FACTORY_REPO: ${FORGE_REPO:-disinto-admin/disinto} + DISINTO_CONTAINER: "1" + PROJECT_NAME: ${PROJECT_NAME:-project} + PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project} + WOODPECKER_DATA_DIR: /woodpecker-data + WOODPECKER_REPO_ID: "PLACEHOLDER_WP_REPO_ID" + CLAUDE_CONFIG_DIR: ${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config} + POLL_INTERVAL: ${POLL_INTERVAL:-300} + GARDENER_INTERVAL: ${GARDENER_INTERVAL:-21600} + ARCHITECT_INTERVAL: ${ARCHITECT_INTERVAL:-21600} + PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200} + healthcheck: + test: ["CMD", "pgrep", "-f", "entrypoint.sh"] + interval: 60s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + forgejo: + condition: service_healthy + woodpecker: + condition: service_started + networks: + - disinto-net + +COMPOSEEOF + fi + # Resume the rest of the compose file (runner onward) cat >> "$compose_file" <<'COMPOSEEOF' @@ -489,11 +606,21 @@ COMPOSEEOF - EDGE_TUNNEL_USER=${EDGE_TUNNEL_USER:-tunnel} - EDGE_TUNNEL_PORT=${EDGE_TUNNEL_PORT:-} - EDGE_TUNNEL_FQDN=${EDGE_TUNNEL_FQDN:-} - # Subdomain fallback (#713): if subpath routing (#704/#708) fails, add: - # EDGE_TUNNEL_FQDN_FORGE, EDGE_TUNNEL_FQDN_CI, EDGE_TUNNEL_FQDN_CHAT - # See docs/edge-routing-fallback.md for the full pivot plan. + # Subdomain fallback (#1028): per-service FQDNs for subdomain routing mode. + # Set EDGE_ROUTING_MODE=subdomain to activate. See docs/edge-routing-fallback.md. + - EDGE_ROUTING_MODE=${EDGE_ROUTING_MODE:-subpath} + - EDGE_TUNNEL_FQDN_FORGE=${EDGE_TUNNEL_FQDN_FORGE:-} + - EDGE_TUNNEL_FQDN_CI=${EDGE_TUNNEL_FQDN_CI:-} + - EDGE_TUNNEL_FQDN_CHAT=${EDGE_TUNNEL_FQDN_CHAT:-} # Shared secret for Caddy ↔ chat forward_auth (#709) - FORWARD_AUTH_SECRET=${FORWARD_AUTH_SECRET:-} + # Chat env vars (merged from chat container into edge, #1083) + - CHAT_HOST=127.0.0.1 + - CHAT_PORT=8080 + - CHAT_OAUTH_CLIENT_ID=${CHAT_OAUTH_CLIENT_ID:-} + - CHAT_OAUTH_CLIENT_SECRET=${CHAT_OAUTH_CLIENT_SECRET:-} + - DISINTO_CHAT_ALLOWED_USERS=${DISINTO_CHAT_ALLOWED_USERS:-} + # Rate limiting removed (#1084) volumes: - ./docker/Caddyfile:/etc/caddy/Caddyfile - caddy_data:/data @@ -501,6 +628,8 @@ COMPOSEEOF - ./secrets/tunnel_key:/run/secrets/tunnel_key:ro - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro + # Chat history persistence (merged from chat container, #1083) + - ${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history healthcheck: test: ["CMD", "curl", "-fsS", "http://localhost:2019/config/"] interval: 30s @@ -552,6 +681,7 @@ COMPOSEEOF # Chat container — Claude chat UI backend (#705) # Internal service only; edge proxy routes to chat:8080 # Sandbox hardened per #706 — no docker.sock, read-only rootfs, minimal caps + # Rate limiting removed (#1084) chat: build: context: ./docker/chat @@ -575,6 +705,9 @@ COMPOSEEOF - chat-config:/var/chat/config # Chat history persistence: per-user NDJSON files on bind-mounted host volume - ${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history + # Workspace directory: bind-mounted project working tree for Claude access (#1027) + # Mounted when CHAT_WORKSPACE_DIR is set (defaults to ./workspace) + - ${CHAT_WORKSPACE_DIR:-./workspace}:/var/workspace environment: CHAT_HOST: "0.0.0.0" CHAT_PORT: "8080" @@ -582,13 +715,14 @@ COMPOSEEOF CHAT_OAUTH_CLIENT_ID: ${CHAT_OAUTH_CLIENT_ID:-} CHAT_OAUTH_CLIENT_SECRET: ${CHAT_OAUTH_CLIENT_SECRET:-} EDGE_TUNNEL_FQDN: ${EDGE_TUNNEL_FQDN:-} + EDGE_TUNNEL_FQDN_CHAT: ${EDGE_TUNNEL_FQDN_CHAT:-} + EDGE_ROUTING_MODE: ${EDGE_ROUTING_MODE:-subpath} DISINTO_CHAT_ALLOWED_USERS: ${DISINTO_CHAT_ALLOWED_USERS:-} # Shared secret for Caddy forward_auth verify endpoint (#709) FORWARD_AUTH_SECRET: ${FORWARD_AUTH_SECRET:-} - # Cost caps / rate limiting (#711) - CHAT_MAX_REQUESTS_PER_HOUR: ${CHAT_MAX_REQUESTS_PER_HOUR:-60} - CHAT_MAX_REQUESTS_PER_DAY: ${CHAT_MAX_REQUESTS_PER_DAY:-500} - CHAT_MAX_TOKENS_PER_DAY: ${CHAT_MAX_TOKENS_PER_DAY:-1000000} + # Rate limiting removed (#1084) + # Workspace directory for Claude code access (#1027) + CHAT_WORKSPACE_DIR: ${CHAT_WORKSPACE_DIR:-./workspace} healthcheck: test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"] interval: 30s @@ -604,7 +738,6 @@ volumes: agent-data: project-repos: caddy_data: - chat-config: networks: disinto-net: @@ -633,16 +766,19 @@ COMPOSEEOF fi # Append local-model agent services if any are configured - _generate_local_model_services "$compose_file" + if ! _generate_local_model_services "$compose_file"; then + echo "ERROR: Failed to generate local-model agent services. See errors above." >&2 + return 1 + fi # Resolve the Claude CLI binary path and persist as CLAUDE_BIN_DIR in .env. - # docker-compose.yml references ${CLAUDE_BIN_DIR} so the value must be set. + # Only used by reproduce and edge services which still use host-mounted CLI. local claude_bin claude_bin="$(command -v claude 2>/dev/null || true)" if [ -n "$claude_bin" ]; then claude_bin="$(readlink -f "$claude_bin")" else - echo "Warning: claude CLI not found in PATH — set CLAUDE_BIN_DIR in .env manually" >&2 + echo "Warning: claude CLI not found in PATH — reproduce/edge services will fail to start" >&2 claude_bin="/usr/local/bin/claude" fi # Persist CLAUDE_BIN_DIR into .env so docker-compose can resolve it. @@ -659,9 +795,8 @@ COMPOSEEOF # In build mode, replace image: with build: for locally-built images if [ "$use_build" = true ]; then - sed -i 's|^\( agents:\)|\1|' "$compose_file" sed -i '/^ image: ghcr\.io\/disinto\/agents:/{s|image: ghcr\.io/disinto/agents:.*|build:\n context: .\n dockerfile: docker/agents/Dockerfile\n pull_policy: build|}' "$compose_file" - sed -i '/^ image: ghcr\.io\/disinto\/edge:/{s|image: ghcr\.io/disinto/edge:.*|build: ./docker/edge\n pull_policy: build|}' "$compose_file" + sed -i '/^ image: ghcr\.io\/disinto\/edge:/{s|image: ghcr\.io/disinto/edge:.*|build:\n context: .\n dockerfile: docker/edge/Dockerfile\n pull_policy: build|}' "$compose_file" fi echo "Created: ${compose_file}" @@ -685,6 +820,11 @@ _generate_agent_docker_impl() { # Output path: ${FACTORY_ROOT}/docker/Caddyfile (gitignored — generated artifact). # The edge compose service mounts this path as /etc/caddy/Caddyfile. # On a fresh clone, `disinto init` calls generate_caddyfile before first `disinto up`. +# +# Routing mode (EDGE_ROUTING_MODE env var): +# subpath — (default) all services under <project>.disinto.ai/{forge,ci,chat,staging} +# subdomain — per-service subdomains: forge.<project>, ci.<project>, chat.<project> +# See docs/edge-routing-fallback.md for the full pivot plan. _generate_caddyfile_impl() { local docker_dir="${FACTORY_ROOT}/docker" local caddyfile="${docker_dir}/Caddyfile" @@ -694,8 +834,22 @@ _generate_caddyfile_impl() { return fi + local routing_mode="${EDGE_ROUTING_MODE:-subpath}" + + if [ "$routing_mode" = "subdomain" ]; then + _generate_caddyfile_subdomain "$caddyfile" + else + _generate_caddyfile_subpath "$caddyfile" + fi + + echo "Created: ${caddyfile} (routing_mode=${routing_mode})" +} + +# Subpath Caddyfile: all services under a single :80 block with path-based routing. +_generate_caddyfile_subpath() { + local caddyfile="$1" cat > "$caddyfile" <<'CADDYFILEEOF' -# Caddyfile — edge proxy configuration +# Caddyfile — edge proxy configuration (subpath mode) # IP-only binding at bootstrap; domain + TLS added later via vault resource request :80 { @@ -716,30 +870,73 @@ _generate_caddyfile_impl() { # Reverse proxy to staging handle /staging/* { + uri strip_prefix /staging reverse_proxy staging:80 } - # Chat service — reverse proxy to disinto-chat backend (#705) + # Chat service — reverse proxy to in-process chat server (#705, #1083) # OAuth routes bypass forward_auth — unauthenticated users need these (#709) handle /chat/login { - reverse_proxy chat:8080 + reverse_proxy 127.0.0.1:8080 } handle /chat/oauth/callback { - reverse_proxy chat:8080 + reverse_proxy 127.0.0.1:8080 } # Defense-in-depth: forward_auth stamps X-Forwarded-User from session (#709) handle /chat/* { - forward_auth chat:8080 { + forward_auth 127.0.0.1:8080 { uri /chat/auth/verify copy_headers X-Forwarded-User header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET} } - reverse_proxy chat:8080 + reverse_proxy 127.0.0.1:8080 } } CADDYFILEEOF +} - echo "Created: ${caddyfile}" +# Subdomain Caddyfile: four host blocks per docs/edge-routing-fallback.md. +# Uses env vars EDGE_TUNNEL_FQDN_FORGE, EDGE_TUNNEL_FQDN_CI, EDGE_TUNNEL_FQDN_CHAT, +# and EDGE_TUNNEL_FQDN (main project domain → staging). +_generate_caddyfile_subdomain() { + local caddyfile="$1" + cat > "$caddyfile" <<'CADDYFILEEOF' +# Caddyfile — edge proxy configuration (subdomain mode) +# Per-service subdomains; see docs/edge-routing-fallback.md + +# Main project domain — staging / landing +{$EDGE_TUNNEL_FQDN} { + reverse_proxy staging:80 +} + +# Forgejo — root path, no subpath rewrite needed +{$EDGE_TUNNEL_FQDN_FORGE} { + reverse_proxy forgejo:3000 +} + +# Woodpecker CI — root path +{$EDGE_TUNNEL_FQDN_CI} { + reverse_proxy woodpecker:8000 +} + +# Chat — with forward_auth (#709, on its own host) +{$EDGE_TUNNEL_FQDN_CHAT} { + handle /login { + reverse_proxy 127.0.0.1:8080 + } + handle /oauth/callback { + reverse_proxy 127.0.0.1:8080 + } + handle /* { + forward_auth 127.0.0.1:8080 { + uri /auth/verify + copy_headers X-Forwarded-User + header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET} + } + reverse_proxy 127.0.0.1:8080 + } +} +CADDYFILEEOF } # Generate docker/index.html default page. diff --git a/lib/hvault.sh b/lib/hvault.sh index b0d1635..d283330 100644 --- a/lib/hvault.sh +++ b/lib/hvault.sh @@ -405,3 +405,36 @@ hvault_token_lookup() { return 1 } } + +# _hvault_seed_key — Seed a single KV key if it doesn't exist. +# Reads existing data and merges to preserve sibling keys (KV v2 replaces +# .data atomically). Returns 0=created, 1=unchanged, 2=API error. +# Args: +# path: KV v2 logical path (e.g. "disinto/shared/chat") +# key: key name within the path (e.g. "chat_oauth_client_id") +# generator: shell command that outputs a random value (default: openssl rand -hex 32) +# Usage: +# _hvault_seed_key "disinto/shared/chat" "chat_oauth_client_id" +# rc=$? # 0=created, 1=unchanged +_hvault_seed_key() { + local path="$1" key="$2" generator="${3:-openssl rand -hex 32}" + local existing + existing=$(hvault_kv_get "$path" "$key" 2>/dev/null) || true + if [ -n "$existing" ]; then + return 1 # unchanged + fi + + local value + value=$(eval "$generator") + + # Read existing data to preserve sibling keys (KV v2 replaces atomically) + local kv_api="${VAULT_KV_MOUNT}/data/${path}" + local raw existing_data payload + raw="$(hvault_get_or_empty "$kv_api")" || return 2 + existing_data="{}" + [ -n "$raw" ] && existing_data="$(printf '%s' "$raw" | jq '.data.data // {}')" + payload="$(printf '%s' "$existing_data" \ + | jq --arg k "$key" --arg v "$value" '{data: (. + {($k): $v})}')" + _hvault_request POST "$kv_api" "$payload" >/dev/null + return 0 # created +} diff --git a/lib/init/nomad/cluster-up.sh b/lib/init/nomad/cluster-up.sh index 4aab42d..488d2df 100755 --- a/lib/init/nomad/cluster-up.sh +++ b/lib/init/nomad/cluster-up.sh @@ -66,6 +66,7 @@ HOST_VOLUME_DIRS=( "/srv/disinto/agent-data" "/srv/disinto/project-repos" "/srv/disinto/caddy-data" + "/srv/disinto/docker" "/srv/disinto/chat-history" "/srv/disinto/ops-repo" ) @@ -116,7 +117,7 @@ if [ "$dry_run" = true ]; then [dry-run] Step 4/9: create host-volume dirs under /srv/disinto/ EOF for d in "${HOST_VOLUME_DIRS[@]}"; do - printf ' → install -d -m 0755 %s\n' "$d" + printf ' → install -d -m 0777 %s\n' "$d" done cat <<EOF @@ -280,8 +281,10 @@ for d in "${HOST_VOLUME_DIRS[@]}"; do log "unchanged: ${d}" else log "creating: ${d}" - install -d -m 0755 -o root -g root "$d" + install -d -m 0777 -o root -g root "$d" fi + # Ensure correct permissions (fixes pre-existing 0755 dirs on re-run) + chmod 0777 "$d" done # ── Step 5/9: /etc/nomad.d/server.hcl + client.hcl ─────────────────────────── diff --git a/lib/init/nomad/deploy.sh b/lib/init/nomad/deploy.sh index a1724c5..011f45c 100755 --- a/lib/init/nomad/deploy.sh +++ b/lib/init/nomad/deploy.sh @@ -16,13 +16,15 @@ # Environment: # REPO_ROOT — absolute path to repo root (defaults to parent of # this script's parent directory) -# JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 240) +# JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 360) # JOB_READY_TIMEOUT_<JOBNAME> — per-job timeout override (e.g., # JOB_READY_TIMEOUT_FORGEJO=300) +# Built-in: JOB_READY_TIMEOUT_CHAT=600 # # Exit codes: # 0 success (all jobs deployed and healthy, or dry-run completed) -# 1 failure (validation error, timeout, or nomad command failure) +# 1 failure (validation error, or one or more jobs unhealthy after all +# jobs submitted — deploy does NOT cascade-skip on timeout) # # Idempotency: # Running twice back-to-back on a healthy cluster is a no-op. Jobs that are @@ -33,9 +35,13 @@ set -euo pipefail # ── Configuration ──────────────────────────────────────────────────────────── SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="${REPO_ROOT:-$(cd "${SCRIPT_ROOT}/../../.." && pwd)}" -JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-240}" +JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-360}" + +# Per-job built-in defaults (override with JOB_READY_TIMEOUT_<JOBNAME> env var) +JOB_READY_TIMEOUT_CHAT="${JOB_READY_TIMEOUT_CHAT:-600}" DRY_RUN=0 +FAILED_JOBS=() # jobs that timed out or failed deployment log() { printf '[deploy] %s\n' "$*" >&2; } die() { printf '[deploy] ERROR: %s\n' "$*" >&2; exit 1; } @@ -168,6 +174,43 @@ _wait_job_running() { return 1 } +# ── Helper: _run_post_deploy <job_name> ───────────────────────────────────── +# Runs post-deploy scripts for a job after it becomes healthy. +# Currently supports: forgejo → run forgejo-bootstrap.sh +# +# Args: +# job_name — name of the deployed job +# +# Returns: +# 0 on success (script ran or not applicable) +# 1 on failure +# ───────────────────────────────────────────────────────────────────────────── +_run_post_deploy() { + local job_name="$1" + local post_deploy_script + + case "$job_name" in + forgejo) + post_deploy_script="${SCRIPT_ROOT}/forgejo-bootstrap.sh" + if [ -x "$post_deploy_script" ]; then + log "running post-deploy script for ${job_name}" + if ! "$post_deploy_script"; then + log "ERROR: post-deploy script failed for ${job_name}" + return 1 + fi + log "post-deploy script completed for ${job_name}" + else + log "no post-deploy script found for ${job_name}, skipping" + fi + ;; + *) + log "no post-deploy script for ${job_name}, skipping" + ;; + esac + + return 0 +} + # ── Main: deploy each job in order ─────────────────────────────────────────── for job_name in "${JOBS[@]}"; do jobspec_path="${REPO_ROOT}/nomad/jobs/${job_name}.hcl" @@ -177,7 +220,8 @@ for job_name in "${JOBS[@]}"; do fi # Per-job timeout override: JOB_READY_TIMEOUT_<UPPERCASE_JOBNAME> - job_upper=$(printf '%s' "$job_name" | tr '[:lower:]' '[:upper:]') + # Sanitize job name: replace hyphens with underscores (bash vars can't have hyphens) + job_upper=$(printf '%s' "$job_name" | tr '[:lower:]-' '[:upper:]_' | tr ' ' '_') timeout_var="JOB_READY_TIMEOUT_${job_upper}" job_timeout="${!timeout_var:-$JOB_READY_TIMEOUT_SECS}" @@ -185,6 +229,9 @@ for job_name in "${JOBS[@]}"; do log "[dry-run] nomad job validate ${jobspec_path}" log "[dry-run] nomad job run -detach ${jobspec_path}" log "[dry-run] (would wait for '${job_name}' to become healthy for ${job_timeout}s)" + case "$job_name" in + forgejo) log "[dry-run] [post-deploy] would run forgejo-bootstrap.sh" ;; + esac continue fi @@ -214,7 +261,13 @@ for job_name in "${JOBS[@]}"; do # 4. Wait for healthy state if ! _wait_job_running "$job_name" "$job_timeout"; then - die "deployment for job '${job_name}' did not reach successful state" + log "WARNING: deployment for job '${job_name}' did not reach successful state — continuing with remaining jobs" + FAILED_JOBS+=("$job_name") + else + # 5. Run post-deploy scripts (only if job reached healthy state) + if ! _run_post_deploy "$job_name"; then + die "post-deploy script failed for job '${job_name}'" + fi fi done @@ -222,4 +275,17 @@ if [ "$DRY_RUN" -eq 1 ]; then log "dry-run complete" fi +# ── Final health summary ───────────────────────────────────────────────────── +if [ "${#FAILED_JOBS[@]}" -gt 0 ]; then + log "" + log "=== DEPLOY SUMMARY ===" + log "The following jobs did NOT reach healthy state:" + for failed in "${FAILED_JOBS[@]}"; do + log " - ${failed}" + done + log "All other jobs were submitted and healthy." + log "======================" + exit 1 +fi + exit 0 diff --git a/lib/init/nomad/forgejo-bootstrap.sh b/lib/init/nomad/forgejo-bootstrap.sh new file mode 100755 index 0000000..197f917 --- /dev/null +++ b/lib/init/nomad/forgejo-bootstrap.sh @@ -0,0 +1,215 @@ +#!/usr/bin/env bash +# ============================================================================= +# lib/init/nomad/forgejo-bootstrap.sh — Bootstrap Forgejo admin user +# +# Part of the Nomad+Vault migration (S2.4, issue #1069). Creates the +# disinto-admin user in Forgejo if it doesn't exist, enabling: +# - First-login success without manual intervention +# - PAT generation via API (required for disinto backup import #1058) +# +# The script is idempotent — re-running after success is a no-op. +# +# Scope: +# - Checks if user 'disinto-admin' exists via GET /api/v1/users/search +# - If not: POST /api/v1/admin/users to create admin user +# - Uses FORGE_ADMIN_PASS from environment (required) +# +# Idempotency contract: +# - User 'disinto-admin' exists → skip creation, log +# "[forgejo-bootstrap] admin user already exists" +# - User creation fails with "user already exists" → treat as success +# +# Preconditions: +# - Forgejo reachable at $FORGE_URL (default: http://127.0.0.1:3000) +# - Forgejo admin token at $FORGE_TOKEN (from Vault or env) +# - FORGE_ADMIN_PASS set (env var with admin password) +# +# Requires: +# - curl, jq +# +# Usage: +# lib/init/nomad/forgejo-bootstrap.sh +# lib/init/nomad/forgejo-bootstrap.sh --dry-run +# +# Exit codes: +# 0 success (user created + ready, or already exists) +# 1 precondition / API failure +# ============================================================================= +set -euo pipefail + +# ── Configuration ──────────────────────────────────────────────────────────── +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)" + +# shellcheck source=../../../lib/hvault.sh +source "${REPO_ROOT}/lib/hvault.sh" + +# Configuration +FORGE_URL="${FORGE_URL:-http://127.0.0.1:3000}" +FORGE_TOKEN="${FORGE_TOKEN:-}" +FORGE_ADMIN_USER="${DISINTO_ADMIN_USER:-disinto-admin}" +FORGE_ADMIN_EMAIL="${DISINTO_ADMIN_EMAIL:-admin@disinto.local}" + +# Derive FORGE_ADMIN_PASS from common env var patterns +# Priority: explicit FORGE_ADMIN_PASS > DISINTO_FORGE_ADMIN_PASS > FORGEJO_ADMIN_PASS +FORGE_ADMIN_PASS="${FORGE_ADMIN_PASS:-${DISINTO_FORGE_ADMIN_PASS:-${FORGEJO_ADMIN_PASS:-}}}" + +LOG_TAG="[forgejo-bootstrap]" +log() { printf '%s %s\n' "$LOG_TAG" "$*" >&2; } +die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; } + +# ── Flag parsing ───────────────────────────────────────────────────────────── +DRY_RUN="${DRY_RUN:-0}" +for arg in "$@"; do + case "$arg" in + --dry-run) DRY_RUN=1 ;; + -h|--help) + printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")" + printf 'Bootstrap Forgejo admin user if it does not exist.\n' + printf 'Idempotent: re-running is a no-op.\n\n' + printf 'Environment:\n' + printf ' FORGE_URL Forgejo base URL (default: http://127.0.0.1:3000)\n' + printf ' FORGE_TOKEN Forgejo admin token (from Vault or env)\n' + printf ' FORGE_ADMIN_PASS Admin password (required)\n' + printf ' DISINTO_ADMIN_USER Username for admin account (default: disinto-admin)\n' + printf ' DISINTO_ADMIN_EMAIL Admin email (default: admin@disinto.local)\n\n' + printf ' --dry-run Print planned actions without modifying Forgejo.\n' + exit 0 + ;; + *) die "invalid argument: ${arg} (try --help)" ;; + esac +done + +# ── Precondition checks ────────────────────────────────────────────────────── +log "── Precondition check ──" + +if [ -z "$FORGE_URL" ]; then + die "FORGE_URL is not set" +fi + +if [ -z "$FORGE_ADMIN_PASS" ]; then + die "FORGE_ADMIN_PASS is not set (required for admin user creation)" +fi + +# Resolve FORGE_TOKEN from Vault if not set in env +if [ -z "$FORGE_TOKEN" ]; then + log "reading FORGE_TOKEN from Vault at kv/disinto/shared/forge/token" + _hvault_default_env + token_raw="$(hvault_get_or_empty "kv/data/disinto/shared/forge/token" 2>/dev/null)" || true + if [ -n "$token_raw" ]; then + FORGE_TOKEN="$(printf '%s' "$token_raw" | jq -r '.data.data.token // empty' 2>/dev/null)" || true + fi + if [ -z "$FORGE_TOKEN" ]; then + die "FORGE_TOKEN not set and not found in Vault" + fi + log "forge token loaded from Vault" +fi + +# ── Step 1/3: Check if admin user already exists ───────────────────────────── +log "── Step 1/3: check if admin user '${FORGE_ADMIN_USER}' exists ──" + +# Use exact match via GET /api/v1/users/{username} (returns 404 if absent) +user_lookup_raw=$(curl -sf --max-time 10 \ + "${FORGE_URL}/api/v1/users/${FORGE_ADMIN_USER}" 2>/dev/null) || { + # 404 means user doesn't exist + if [ $? -eq 7 ]; then + log "admin user '${FORGE_ADMIN_USER}' not found" + admin_user_exists=false + user_id="" + else + # Other curl errors (e.g., network, Forgejo down) + log "warning: failed to lookup user (Forgejo may not be ready yet)" + admin_user_exists=false + user_id="" + fi +} + +if [ -n "$user_lookup_raw" ]; then + admin_user_exists=true + user_id=$(printf '%s' "$user_lookup_raw" | jq -r '.id // empty' 2>/dev/null) || true + if [ -n "$user_id" ]; then + log "admin user '${FORGE_ADMIN_USER}' already exists (user_id: ${user_id})" + fi +fi + +# ── Step 2/3: Create admin user if needed ──────────────────────────────────── +if [ "$admin_user_exists" = false ]; then + log "creating admin user '${FORGE_ADMIN_USER}'" + + if [ "$DRY_RUN" -eq 1 ]; then + log "[dry-run] would create admin user with:" + log "[dry-run] username: ${FORGE_ADMIN_USER}" + log "[dry-run] email: ${FORGE_ADMIN_EMAIL}" + log "[dry-run] admin: true" + log "[dry-run] must_change_password: false" + else + # Create the admin user via the admin API + create_response=$(curl -sf --max-time 30 -X POST \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/admin/users" \ + -d "{ + \"username\": \"${FORGE_ADMIN_USER}\", + \"email\": \"${FORGE_ADMIN_EMAIL}\", + \"password\": \"${FORGE_ADMIN_PASS}\", + \"admin\": true, + \"must_change_password\": false + }" 2>/dev/null) || { + # Check if the error is "user already exists" (race condition on re-run) + error_body=$(curl -s --max-time 30 -X POST \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/admin/users" \ + -d "{\"username\": \"${FORGE_ADMIN_USER}\", \"email\": \"${FORGE_ADMIN_EMAIL}\", \"password\": \"${FORGE_ADMIN_PASS}\", \"admin\": true, \"must_change_password\": false}" 2>/dev/null) || error_body="" + + if echo "$error_body" | grep -q '"message".*"user already exists"'; then + log "admin user '${FORGE_ADMIN_USER}' already exists (race condition handled)" + admin_user_exists=true + else + die "failed to create admin user in Forgejo: ${error_body:-unknown error}" + fi + } + + # Extract user_id from response + user_id=$(printf '%s' "$create_response" | jq -r '.id // empty' 2>/dev/null) || true + if [ -n "$user_id" ]; then + admin_user_exists=true + log "admin user '${FORGE_ADMIN_USER}' created (user_id: ${user_id})" + else + die "failed to extract user_id from Forgejo response" + fi + fi +else + log "admin user '${FORGE_ADMIN_USER}' already exists — skipping creation" +fi + +# ── Step 3/3: Verify user was created and is admin ─────────────────────────── +log "── Step 3/3: verify admin user is properly configured ──" + +if [ "$DRY_RUN" -eq 1 ]; then + log "[dry-run] would verify admin user configuration" + log "done — [dry-run] complete" +else + # Verify the user exists and is admin + verify_response=$(curl -sf --max-time 10 \ + -u "${FORGE_ADMIN_USER}:${FORGE_ADMIN_PASS}" \ + "${FORGE_URL}/api/v1/user" 2>/dev/null) || { + die "failed to verify admin user credentials" + } + + is_admin=$(printf '%s' "$verify_response" | jq -r '.is_admin // false' 2>/dev/null) || true + login=$(printf '%s' "$verify_response" | jq -r '.login // empty' 2>/dev/null) || true + + if [ "$is_admin" != "true" ]; then + die "admin user '${FORGE_ADMIN_USER}' is not marked as admin" + fi + + if [ "$login" != "$FORGE_ADMIN_USER" ]; then + die "admin user login mismatch: expected '${FORGE_ADMIN_USER}', got '${login}'" + fi + + log "admin user verified: login=${login}, is_admin=${is_admin}" + log "done — Forgejo admin user is ready" +fi + +exit 0 diff --git a/lib/init/nomad/wp-oauth-register.sh b/lib/init/nomad/wp-oauth-register.sh index 9b7f12a..8076482 100755 --- a/lib/init/nomad/wp-oauth-register.sh +++ b/lib/init/nomad/wp-oauth-register.sh @@ -43,8 +43,8 @@ set -euo pipefail # Source the hvault module for Vault helpers SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)" -# shellcheck source=../../lib/hvault.sh +REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)" +# shellcheck source=../../../lib/hvault.sh source "${REPO_ROOT}/lib/hvault.sh" # Configuration diff --git a/lib/issue-lifecycle.sh b/lib/issue-lifecycle.sh index 1ad3239..25f2c6b 100644 --- a/lib/issue-lifecycle.sh +++ b/lib/issue-lifecycle.sh @@ -157,9 +157,10 @@ issue_claim() { return 1 fi - local ip_id bl_id + local ip_id bl_id bk_id ip_id=$(_ilc_in_progress_id) bl_id=$(_ilc_backlog_id) + bk_id=$(_ilc_blocked_id) if [ -n "$ip_id" ]; then curl -sf -X POST \ -H "Authorization: token ${FORGE_TOKEN}" \ @@ -172,6 +173,12 @@ issue_claim() { -H "Authorization: token ${FORGE_TOKEN}" \ "${FORGE_API}/issues/${issue}/labels/${bl_id}" >/dev/null 2>&1 || true fi + # Clear blocked label on re-claim — starting work is implicit resolution of prior block + if [ -n "$bk_id" ]; then + curl -sf -X DELETE \ + -H "Authorization: token ${FORGE_TOKEN}" \ + "${FORGE_API}/issues/${issue}/labels/${bk_id}" >/dev/null 2>&1 || true + fi _ilc_log "claimed issue #${issue}" return 0 } diff --git a/lib/ops-setup.sh b/lib/ops-setup.sh index 635b83c..59975bc 100644 --- a/lib/ops-setup.sh +++ b/lib/ops-setup.sh @@ -198,6 +198,7 @@ setup_ops_repo() { [ -f "${ops_root}/evidence/holdout/.gitkeep" ] || { touch "${ops_root}/evidence/holdout/.gitkeep"; seeded=true; } [ -f "${ops_root}/evidence/evolution/.gitkeep" ] || { touch "${ops_root}/evidence/evolution/.gitkeep"; seeded=true; } [ -f "${ops_root}/evidence/user-test/.gitkeep" ] || { touch "${ops_root}/evidence/user-test/.gitkeep"; seeded=true; } + [ -f "${ops_root}/knowledge/.gitkeep" ] || { touch "${ops_root}/knowledge/.gitkeep"; seeded=true; } if [ ! -f "${ops_root}/README.md" ]; then cat > "${ops_root}/README.md" <<OPSEOF @@ -362,13 +363,54 @@ migrate_ops_repo() { if [ ! -f "$tfile" ]; then local title title=$(basename "$tfile" | sed 's/\.md$//; s/_/ /g' | sed 's/\b\(.\)/\u\1/g') - { - echo "# ${title}" - echo "" - echo "## Overview" - echo "" - echo "<!-- Add content here -->" - } > "$tfile" + case "$tfile" in + portfolio.md) + { + echo "# ${title}" + echo "" + echo "## Addressables" + echo "" + echo "<!-- Add addressables here -->" + echo "" + echo "## Observables" + echo "" + echo "<!-- Add observables here -->" + } > "$tfile" + ;; + RESOURCES.md) + { + echo "# ${title}" + echo "" + echo "## Accounts" + echo "" + echo "<!-- Add account references here -->" + echo "" + echo "## Tokens" + echo "" + echo "<!-- Add token references here -->" + echo "" + echo "## Infrastructure" + echo "" + echo "<!-- Add infrastructure inventory here -->" + } > "$tfile" + ;; + prerequisites.md) + { + echo "# ${title}" + echo "" + echo "<!-- Add dependency graph here -->" + } > "$tfile" + ;; + *) + { + echo "# ${title}" + echo "" + echo "## Overview" + echo "" + echo "<!-- Add content here -->" + } > "$tfile" + ;; + esac echo " + Created: ${tfile}" migrated=true fi diff --git a/lib/pr-lifecycle.sh b/lib/pr-lifecycle.sh index e097f34..bca08f1 100644 --- a/lib/pr-lifecycle.sh +++ b/lib/pr-lifecycle.sh @@ -429,19 +429,100 @@ pr_walk_to_merge() { _prl_log "CI failed — invoking agent (attempt ${ci_fix_count}/${max_ci_fixes})" - # Get CI logs from SQLite database if available - local ci_logs="" - if [ -n "$_PR_CI_PIPELINE" ] && [ -n "${FACTORY_ROOT:-}" ]; then - ci_logs=$(ci_get_logs "$_PR_CI_PIPELINE" 2>/dev/null | tail -50) || ci_logs="" + # Build per-workflow/per-step CI diagnostics prompt + local ci_prompt_body="" + local passing_workflows="" + local built_diagnostics=false + + if [ -n "$_PR_CI_PIPELINE" ] && [ -n "${WOODPECKER_REPO_ID:-}" ]; then + local pip_json + pip_json=$(woodpecker_api "/repos/${WOODPECKER_REPO_ID}/pipelines/${_PR_CI_PIPELINE}" 2>/dev/null) || pip_json="" + + if [ -n "$pip_json" ]; then + local wf_count + wf_count=$(printf '%s' "$pip_json" | jq '[.workflows[]?] | length' 2>/dev/null) || wf_count=0 + + if [ "$wf_count" -gt 0 ]; then + built_diagnostics=true + local wf_idx=0 + while [ "$wf_idx" -lt "$wf_count" ]; do + local wf_name wf_state + wf_name=$(printf '%s' "$pip_json" | jq -r ".workflows[$wf_idx].name // \"workflow-$wf_idx\"" 2>/dev/null) + wf_state=$(printf '%s' "$pip_json" | jq -r ".workflows[$wf_idx].state // \"unknown\"" 2>/dev/null) + + if [ "$wf_state" = "failure" ] || [ "$wf_state" = "error" ] || [ "$wf_state" = "killed" ]; then + # Collect failed children for this workflow + local failed_children + failed_children=$(printf '%s' "$pip_json" | jq -r " + .workflows[$wf_idx].children[]? | + select(.state == \"failure\" or .state == \"error\" or .state == \"killed\") | + \"\(.name)\t\(.exit_code)\t\(.pid)\"" 2>/dev/null) || failed_children="" + + ci_prompt_body="${ci_prompt_body} +--- Failed workflow: ${wf_name} ---" + if [ -n "$failed_children" ]; then + while IFS=$'\t' read -r step_name step_exit step_pid; do + [ -z "$step_name" ] && continue + local exit_annotation="" + case "$step_exit" in + 126) exit_annotation=" (permission denied or not executable)" ;; + 127) exit_annotation=" (command not found)" ;; + 128) exit_annotation=" (invalid exit argument / signal+128)" ;; + esac + ci_prompt_body="${ci_prompt_body} + Step: ${step_name} + Exit code: ${step_exit}${exit_annotation}" + + # Fetch per-step logs + if [ -n "$step_pid" ] && [ "$step_pid" != "null" ]; then + local step_logs + step_logs=$(ci_get_step_logs "$_PR_CI_PIPELINE" "$step_pid" 2>/dev/null | tail -50) || step_logs="" + if [ -n "$step_logs" ]; then + ci_prompt_body="${ci_prompt_body} + Log tail (last 50 lines): +\`\`\` +${step_logs} +\`\`\`" + fi + fi + done <<< "$failed_children" + else + ci_prompt_body="${ci_prompt_body} + (no failed step details available)" + fi + else + # Track passing/other workflows + if [ -n "$passing_workflows" ]; then + passing_workflows="${passing_workflows}, ${wf_name}" + else + passing_workflows="${wf_name}" + fi + fi + wf_idx=$((wf_idx + 1)) + done + fi + fi fi - local logs_section="" - if [ -n "$ci_logs" ]; then - logs_section=" + # Fallback: use legacy log fetch if per-workflow diagnostics unavailable + if [ "$built_diagnostics" = false ]; then + local ci_logs="" + if [ -n "$_PR_CI_PIPELINE" ] && [ -n "${FACTORY_ROOT:-}" ]; then + ci_logs=$(ci_get_logs "$_PR_CI_PIPELINE" 2>/dev/null | tail -50) || ci_logs="" + fi + if [ -n "$ci_logs" ]; then + ci_prompt_body=" CI Log Output (last 50 lines): \`\`\` ${ci_logs} -\`\`\` +\`\`\`" + fi + fi + + local passing_line="" + if [ -n "$passing_workflows" ]; then + passing_line=" +Passing workflows (do not modify): ${passing_workflows} " fi @@ -450,9 +531,10 @@ ${ci_logs} Pipeline: #${_PR_CI_PIPELINE:-?} Failure type: ${_PR_CI_FAILURE_TYPE:-unknown} - +${passing_line} Error log: -${_PR_CI_ERROR_LOG:-No logs available.}${logs_section} +${_PR_CI_ERROR_LOG:-No logs available.} +${ci_prompt_body} Fix the issue, run tests, commit, rebase on ${PRIMARY_BRANCH}, and push: git fetch ${remote} ${PRIMARY_BRANCH} && git rebase ${remote}/${PRIMARY_BRANCH} diff --git a/nomad/AGENTS.md b/nomad/AGENTS.md index f57c30a..0391951 100644 --- a/nomad/AGENTS.md +++ b/nomad/AGENTS.md @@ -1,21 +1,27 @@ -<!-- last-reviewed: 8ad5aca6bbee77634b3c63523042b1d39cefa96a --> +<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 --> # nomad/ — Agent Instructions Nomad + Vault HCL for the factory's single-node cluster. These files are the source of truth that `lib/init/nomad/cluster-up.sh` copies onto a factory box under `/etc/nomad.d/` and `/etc/vault.d/` at init time. -This directory covers the **Nomad+Vault migration (Steps 0–2)** — -see issues #821–#884 for the step breakdown. +This directory covers the **Nomad+Vault migration (Steps 0–5)** — +see issues #821–#992 for the step breakdown. ## What lives here | File/Dir | Deployed to | Owned by | |---|---|---| | `server.hcl` | `/etc/nomad.d/server.hcl` | agent role, bind, ports, `data_dir` (S0.2) | -| `client.hcl` | `/etc/nomad.d/client.hcl` | Docker driver cfg + `host_volume` declarations (S0.2) | +| `client.hcl` | `/etc/nomad.d/client.hcl` | Docker driver cfg + `host_volume` declarations (S0.2); `allow_privileged = true` for woodpecker-agent Docker-in-Docker (S3-fix-5, #961) | | `vault.hcl` | `/etc/vault.d/vault.hcl` | Vault storage, listener, UI, `disable_mlock` (S0.3) | | `jobs/forgejo.hcl` | submitted via `lib/init/nomad/deploy.sh` | Forgejo job; reads creds from Vault via consul-template stanza (S2.4) | +| `jobs/woodpecker-server.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI server; host networking, Vault KV for `WOODPECKER_AGENT_SECRET` + Forgejo OAuth creds (S3.1) | +| `jobs/woodpecker-agent.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI agent; host networking, `docker.sock` mount, Vault KV for `WOODPECKER_AGENT_SECRET`; `WOODPECKER_SERVER` uses `${attr.unique.network.ip-address}:9000` (Nomad interpolation) — port binds to LXC alloc IP, not localhost (S3.2, S3-fix-6, #964) | +| `jobs/agents.hcl` | submitted via `lib/init/nomad/deploy.sh` | All 7 agent roles (dev, review, gardener, planner, predictor, supervisor, architect) + llama variant; Vault-templated bot tokens via `service-agents` policy; `force_pull = false` — image is built locally by `bin/disinto --with agents`, no registry (S4.1, S4-fix-2, S4-fix-5, #955, #972, #978) | +| `jobs/staging.hcl` | submitted via `lib/init/nomad/deploy.sh` | Caddy file-server mounting `docker/` as `/srv/site:ro`; no Vault integration; **dynamic host port** (no static 80 — edge owns 80/443, collision fixed in S5-fix-7 #1018); edge discovers via Nomad service registration (S5.2, #989) | +| `jobs/chat.hcl` | submitted via `lib/init/nomad/deploy.sh` | Claude chat UI; custom `disinto/chat:local` image; sandbox hardening (cap_drop ALL, **tmpfs via mount block** not `tmpfs=` arg — S5-fix-5 #1012, pids_limit 128); Vault-templated OAuth secrets via `service-chat` policy (S5.2, #989); rate limiting removed (#1084); **workspace volume** `chat-workspace` host_volume bind-mounted to `/var/workspace` for Claude project access (#1027) — operator must register `host_volume "chat-workspace"` in `client.hcl` on each node | +| `jobs/edge.hcl` | submitted via `lib/init/nomad/deploy.sh` | Caddy reverse proxy + dispatcher sidecar; routes /forge, /woodpecker, /staging, /chat; uses `disinto/edge:local` image built by `bin/disinto --with edge`; **both Caddy and dispatcher tasks use `network_mode = "host"`** — upstreams are `127.0.0.1:<port>` (forgejo :3000, woodpecker :8000, chat :8080), not Docker hostnames (#1031, #1034); `FORGE_URL` rendered via Nomad service discovery template (not static env) to handle bridge vs. host network differences (#1034); dispatcher Vault secret path changed to `kv/data/disinto/shared/ops-repo` (#1041); Vault-templated ops-repo creds via `service-dispatcher` policy (S5.1, #988); `/staging/*` strips `/staging` prefix before proxying (#1079); WebSocket endpoint `/chat/ws` added for streaming (#1026) | Nomad auto-merges every `*.hcl` under `-config=/etc/nomad.d/`, so the split between `server.hcl` and `client.hcl` is for readability, not @@ -30,8 +36,6 @@ convention, KV path summary, and JWT-auth role bindings (S2.1/S2.3). ## Not yet implemented -- **Additional jobspecs** (woodpecker, agents, caddy) — Step 1 brought up - Forgejo; remaining services land in later steps. - **TLS, ACLs, gossip encryption** — deliberately absent for now; land alongside multi-node support. diff --git a/nomad/client.hcl b/nomad/client.hcl index b90d5c1..d173ed5 100644 --- a/nomad/client.hcl +++ b/nomad/client.hcl @@ -49,6 +49,12 @@ client { read_only = false } + # staging static content (docker/ directory with images, HTML, etc.) + host_volume "site-content" { + path = "/srv/disinto/docker" + read_only = true + } + # disinto chat transcripts + attachments. host_volume "chat-history" { path = "/srv/disinto/chat-history" @@ -64,11 +70,11 @@ client { # Docker task driver. `volumes.enabled = true` is required so jobspecs # can mount host_volume declarations defined above. `allow_privileged` -# stays false — no factory workload needs privileged containers today, -# and flipping it is an audit-worthy change. +# is true — woodpecker-agent requires `privileged = true` to access +# docker.sock and spawn CI pipeline containers. plugin "docker" { config { - allow_privileged = false + allow_privileged = true volumes { enabled = true diff --git a/nomad/jobs/agents.hcl b/nomad/jobs/agents.hcl new file mode 100644 index 0000000..92d377e --- /dev/null +++ b/nomad/jobs/agents.hcl @@ -0,0 +1,207 @@ +# ============================================================================= +# nomad/jobs/agents.hcl — All-role agent polling loop (Nomad service job) +# +# Part of the Nomad+Vault migration (S4.1, issue #955). Runs the main bot +# polling loop with all 7 agent roles (review, dev, gardener, architect, +# planner, predictor, supervisor) against the local llama server. +# +# Host_volume contract: +# This job mounts agent-data, project-repos, and ops-repo from +# nomad/client.hcl. Paths under /srv/disinto/* are created by +# lib/init/nomad/cluster-up.sh before any job references them. +# +# Vault integration (S4.1): +# - vault { role = "service-agents" } at group scope — workload-identity +# JWT exchanged for a Vault token carrying the composite service-agents +# policy (vault/policies/service-agents.hcl), which grants read access +# to all 7 bot KV namespaces + vault bot + shared forge config. +# - template stanza renders per-bot FORGE_*_TOKEN + FORGE_PASS from Vault +# KV v2 at kv/disinto/bots/<role>. +# - Seeded on fresh boxes by tools/vault-seed-agents.sh. +# +# Not the runtime yet: docker-compose.yml is still the factory's live stack +# until cutover. This file exists so CI can validate it and S4.2 can wire +# `disinto init --backend=nomad --with agents` to `nomad job run` it. +# ============================================================================= + +job "agents" { + type = "service" + datacenters = ["dc1"] + + group "agents" { + count = 1 + + # ── Vault workload identity (S4.1, issue #955) ─────────────────────────── + # Composite role covering all 7 bot identities + vault bot. Role defined + # in vault/roles.yaml, policy in vault/policies/service-agents.hcl. + # Bound claim pins nomad_job_id = "agents". + vault { + role = "service-agents" + } + + # No network port — agents are outbound-only (poll forgejo, call llama). + # No service discovery block — nothing health-checks agents over HTTP. + + volume "agent-data" { + type = "host" + source = "agent-data" + read_only = false + } + + volume "project-repos" { + type = "host" + source = "project-repos" + read_only = false + } + + volume "ops-repo" { + type = "host" + source = "ops-repo" + read_only = true + } + + # Conservative restart — fail fast to the scheduler. + restart { + attempts = 3 + interval = "5m" + delay = "15s" + mode = "delay" + } + + # ── Service registration ──────────────────────────────────────────────── + # Agents are outbound-only (poll forgejo, call llama) — no HTTP/TCP + # endpoint to probe. The Nomad native provider only supports tcp/http + # checks, not script checks. Registering without a check block means + # Nomad tracks health via task lifecycle: task running = healthy, + # task dead = service deregistered. This matches the docker-compose + # pgrep healthcheck semantics (process alive = healthy). + service { + name = "agents" + provider = "nomad" + } + + task "agents" { + driver = "docker" + + config { + image = "disinto/agents:local" + force_pull = false + + # apparmor=unconfined matches docker-compose — Claude Code needs + # ptrace for node.js inspector and /proc access. + security_opt = ["apparmor=unconfined"] + } + + volume_mount { + volume = "agent-data" + destination = "/home/agent/data" + read_only = false + } + + volume_mount { + volume = "project-repos" + destination = "/home/agent/repos" + read_only = false + } + + volume_mount { + volume = "ops-repo" + destination = "/home/agent/repos/_factory/disinto-ops" + read_only = true + } + + # ── Non-secret env ───────────────────────────────────────────────────── + env { + FORGE_URL = "http://forgejo:3000" + FORGE_REPO = "disinto-admin/disinto" + ANTHROPIC_BASE_URL = "http://10.10.10.1:8081" + ANTHROPIC_API_KEY = "sk-no-key-required" + CLAUDE_MODEL = "unsloth/Qwen3.5-35B-A3B" + AGENT_ROLES = "review,dev,gardener,architect,planner,predictor,supervisor" + POLL_INTERVAL = "300" + DISINTO_CONTAINER = "1" + PROJECT_NAME = "project" + PROJECT_REPO_ROOT = "/home/agent/repos/project" + CLAUDE_TIMEOUT = "7200" + + # llama-specific Claude Code tuning + CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC = "1" + CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS = "1" + CLAUDE_AUTOCOMPACT_PCT_OVERRIDE = "60" + } + + # ── Vault-templated bot tokens (S4.1, issue #955) ───────────────────── + # Renders per-bot FORGE_*_TOKEN + FORGE_PASS from Vault KV v2. + # Each `with secret ...` block reads one bot's KV path; the `else` + # branch emits short placeholders on fresh installs where the path + # is absent. Seed with tools/vault-seed-agents.sh. + # + # Placeholder values kept < 16 chars to avoid secret-scan CI failures. + # error_on_missing_key = false prevents template-pending hangs. + template { + destination = "secrets/bots.env" + env = true + change_mode = "restart" + error_on_missing_key = false + data = <<EOT +{{- with secret "kv/data/disinto/bots/dev" -}} +FORGE_TOKEN={{ .Data.data.token }} +FORGE_PASS={{ .Data.data.pass }} +{{- else -}} +# WARNING: run tools/vault-seed-agents.sh +FORGE_TOKEN=seed-me +FORGE_PASS=seed-me +{{- end }} + +{{ with secret "kv/data/disinto/bots/review" -}} +FORGE_REVIEW_TOKEN={{ .Data.data.token }} +{{- else -}} +FORGE_REVIEW_TOKEN=seed-me +{{- end }} + +{{ with secret "kv/data/disinto/bots/gardener" -}} +FORGE_GARDENER_TOKEN={{ .Data.data.token }} +{{- else -}} +FORGE_GARDENER_TOKEN=seed-me +{{- end }} + +{{ with secret "kv/data/disinto/bots/architect" -}} +FORGE_ARCHITECT_TOKEN={{ .Data.data.token }} +{{- else -}} +FORGE_ARCHITECT_TOKEN=seed-me +{{- end }} + +{{ with secret "kv/data/disinto/bots/planner" -}} +FORGE_PLANNER_TOKEN={{ .Data.data.token }} +{{- else -}} +FORGE_PLANNER_TOKEN=seed-me +{{- end }} + +{{ with secret "kv/data/disinto/bots/predictor" -}} +FORGE_PREDICTOR_TOKEN={{ .Data.data.token }} +{{- else -}} +FORGE_PREDICTOR_TOKEN=seed-me +{{- end }} + +{{ with secret "kv/data/disinto/bots/supervisor" -}} +FORGE_SUPERVISOR_TOKEN={{ .Data.data.token }} +{{- else -}} +FORGE_SUPERVISOR_TOKEN=seed-me +{{- end }} + +{{ with secret "kv/data/disinto/bots/vault" -}} +FORGE_VAULT_TOKEN={{ .Data.data.token }} +{{- else -}} +FORGE_VAULT_TOKEN=seed-me +{{- end }} +EOT + } + + # Agents run Claude/llama sessions — need CPU + memory headroom. + resources { + cpu = 500 + memory = 1024 + } + } + } +} diff --git a/nomad/jobs/chat.hcl b/nomad/jobs/chat.hcl new file mode 100644 index 0000000..f9a6657 --- /dev/null +++ b/nomad/jobs/chat.hcl @@ -0,0 +1,188 @@ +# ============================================================================= +# nomad/jobs/chat.hcl — Claude chat UI (Nomad service job) +# +# Part of the Nomad+Vault migration (S5.2, issue #989). Lightweight service +# job for the Claude chat UI with sandbox hardening (#706). +# +# Build: +# Custom image built from docker/chat/Dockerfile as disinto/chat:local +# (same :local pattern as disinto/agents:local). +# +# Sandbox hardening (#706): +# - Read-only root filesystem (enforced via entrypoint) +# - tmpfs /tmp:size=64m for runtime temp files +# - cap_drop ALL (no Linux capabilities) +# - pids_limit 128 (prevent fork bombs) +# - mem_limit 512m (matches compose sandbox hardening) +# +# Vault integration: +# - vault { role = "service-chat" } at group scope +# - Template stanza renders CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, +# FORWARD_AUTH_SECRET from kv/disinto/shared/chat +# - Seeded on fresh boxes by tools/vault-seed-chat.sh +# +# Host volumes: +# - chat-history → /var/lib/chat/history (persists conversation history) +# - workspace → /var/workspace (project working tree for Claude access, #1027) +# +# Client-side host_volume registration (operator prerequisite): +# In nomad/client.hcl on each Nomad node: +# host_volume "chat-workspace" { +# path = "/var/disinto/chat-workspace" +# read_only = false +# } +# Nodes without the host_volume registered will not schedule the workspace mount. +# +# Not the runtime yet: docker-compose.yml is still the factory's live stack +# until cutover. This file exists so CI can validate it and S5.2 can wire +# `disinto init --backend=nomad --with chat` to `nomad job run` it. +# ============================================================================= + +job "chat" { + type = "service" + datacenters = ["dc1"] + + group "chat" { + count = 1 + + # ── Vault workload identity (S5.2, issue #989) ─────────────────────────── + # Role `service-chat` defined in vault/roles.yaml, policy in + # vault/policies/service-chat.hcl. Bound claim pins nomad_job_id = "chat". + vault { + role = "service-chat" + } + + # ── Network ────────────────────────────────────────────────────────────── + # External port 8080 for chat UI access (via edge proxy or direct). + network { + port "http" { + static = 8080 + to = 8080 + } + } + + # ── Host volumes ───────────────────────────────────────────────────────── + # chat-history volume: declared in nomad/client.hcl, path + # /srv/disinto/chat-history on the factory box. + volume "chat-history" { + type = "host" + source = "chat-history" + read_only = false + } + + # Workspace volume: bind-mounted project working tree for Claude access (#1027) + # Source is a fixed logical name resolved by client-side host_volume registration. + volume "workspace" { + type = "host" + source = "chat-workspace" + read_only = false + } + + # ── Metadata (per-dispatch env var via NOMAD_META_*) ────────────────────── + # CHAT_WORKSPACE_DIR: project working tree path, injected into task env + # as NOMAD_META_CHAT_WORKSPACE_DIR for the workspace volume mount target. + meta { + CHAT_WORKSPACE_DIR = "/var/workspace" + } + + # ── Restart policy ─────────────────────────────────────────────────────── + restart { + attempts = 3 + interval = "5m" + delay = "15s" + mode = "delay" + } + + # ── Service registration ───────────────────────────────────────────────── + service { + name = "chat" + port = "http" + provider = "nomad" + + check { + type = "http" + path = "/health" + interval = "10s" + timeout = "3s" + } + } + + task "chat" { + driver = "docker" + + config { + image = "disinto/chat:local" + force_pull = false + # Sandbox hardening (#706): cap_drop ALL, pids_limit 128, tmpfs /tmp + # ReadonlyRootfs enforced via entrypoint script (fails if running as root) + cap_drop = ["ALL"] + pids_limit = 128 + mount { + type = "tmpfs" + target = "/tmp" + readonly = false + tmpfs_options { + size = 67108864 # 64MB in bytes + } + } + # Security options for sandbox hardening + # apparmor=unconfined needed for Claude CLI ptrace access + # no-new-privileges prevents privilege escalation + security_opt = ["apparmor=unconfined", "no-new-privileges"] + } + + # ── Volume mounts ────────────────────────────────────────────────────── + # Mount chat-history for conversation persistence + volume_mount { + volume = "chat-history" + destination = "/var/lib/chat/history" + read_only = false + } + + # Mount workspace directory for Claude code access (#1027) + # Binds project working tree so Claude can inspect/modify code + volume_mount { + volume = "workspace" + destination = "/var/workspace" + read_only = false + } + + # ── Environment: secrets from Vault (S5.2) ────────────────────────────── + # CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, FORWARD_AUTH_SECRET + # rendered from kv/disinto/shared/chat via template stanza. + env { + FORGE_URL = "http://forgejo:3000" + CHAT_WORKSPACE_DIR = "${NOMAD_META_CHAT_WORKSPACE_DIR}" + } + + # ── Vault-templated secrets (S5.2, issue #989) ───────────────────────── + # Renders chat-secrets.env from Vault KV v2 at kv/disinto/shared/chat. + # Placeholder values kept < 16 chars to avoid secret-scan CI failures. + template { + destination = "secrets/chat-secrets.env" + env = true + change_mode = "restart" + error_on_missing_key = false + data = <<EOT +{{- with secret "kv/data/disinto/shared/chat" -}} +CHAT_OAUTH_CLIENT_ID={{ .Data.data.chat_oauth_client_id }} +CHAT_OAUTH_CLIENT_SECRET={{ .Data.data.chat_oauth_client_secret }} +FORWARD_AUTH_SECRET={{ .Data.data.forward_auth_secret }} +{{- else -}} +# WARNING: run tools/vault-seed-chat.sh +CHAT_OAUTH_CLIENT_ID=seed-me +CHAT_OAUTH_CLIENT_SECRET=seed-me +FORWARD_AUTH_SECRET=seed-me +{{- end -}} +EOT + } + + # ── Sandbox hardening (S5.2, #706) ──────────────────────────────────── + # Memory = 512MB (matches docker-compose sandbox hardening) + resources { + cpu = 200 + memory = 512 + } + } + } +} diff --git a/nomad/jobs/edge.hcl b/nomad/jobs/edge.hcl new file mode 100644 index 0000000..67c474c --- /dev/null +++ b/nomad/jobs/edge.hcl @@ -0,0 +1,285 @@ +# ============================================================================= +# nomad/jobs/edge.hcl — Edge proxy (Caddy + dispatcher sidecar) (Nomad service job) +# +# Part of the Nomad+Vault migration (S5.1, issue #988). Caddy reverse proxy +# routes traffic to Forgejo, Woodpecker, staging, and chat services. The +# dispatcher sidecar polls disinto-ops for vault actions and dispatches them +# via Nomad batch jobs. +# +# Host networking (issue #1031): +# Caddy uses network_mode = "host" so upstreams are reached at +# 127.0.0.1:<port> (forgejo :3000, woodpecker :8000, chat :8080). +# Staging uses Nomad service discovery (S5-fix-7, issue #1018). +# +# Host_volume contract: +# This job mounts caddy-data from nomad/client.hcl. Path +# /srv/disinto/caddy-data is created by lib/init/nomad/cluster-up.sh before +# any job references it. Keep the `source = "caddy-data"` below in sync +# with the host_volume stanza in client.hcl. +# +# Build step (S5.1): +# docker/edge/Dockerfile is custom (adds bash, jq, curl, git, docker-cli, +# python3, openssh-client, autossh to caddy:latest). Build as +# disinto/edge:local using the same pattern as disinto/agents:local. +# Command: docker build -t disinto/edge:local -f docker/edge/Dockerfile docker/edge +# +# Not the runtime yet: docker-compose.yml is still the factory's live stack +# until cutover. This file exists so CI can validate it and S5.2 can wire +# `disinto init --backend=nomad --with edge` to `nomad job run` it. +# ============================================================================= + +job "edge" { + type = "service" + datacenters = ["dc1"] + + group "edge" { + count = 1 + + # ── Vault workload identity for dispatcher (S5.1, issue #988) ────────── + # Service role for dispatcher task to fetch vault actions from KV v2. + # Role defined in vault/roles.yaml, policy in vault/policies/dispatcher.hcl. + vault { + role = "service-dispatcher" + } + + # ── Network ports (S5.1, issue #988) ────────────────────────────────── + # Caddy listens on :80 and :443. Expose both on the host. + network { + port "http" { + static = 80 + to = 80 + } + + port "https" { + static = 443 + to = 443 + } + } + + # ── Host-volume mounts (S5.1, issue #988) ───────────────────────────── + # caddy-data: ACME certificates, Caddy config state. + volume "caddy-data" { + type = "host" + source = "caddy-data" + read_only = false + } + + # ops-repo: disinto-ops clone for vault actions polling. + volume "ops-repo" { + type = "host" + source = "ops-repo" + read_only = false + } + + # ── Conservative restart policy ─────────────────────────────────────── + # Caddy should be stable; dispatcher may restart on errors. + restart { + attempts = 3 + interval = "5m" + delay = "15s" + mode = "delay" + } + + # ── Service registration ─────────────────────────────────────────────── + # Caddy is an HTTP reverse proxy — health check on port 80. + service { + name = "edge" + port = "http" + provider = "nomad" + + check { + type = "http" + path = "/" + interval = "10s" + timeout = "3s" + } + } + + # ── Caddy task (S5.1, issue #988) ───────────────────────────────────── + task "caddy" { + driver = "docker" + + config { + # Use pre-built disinto/edge:local image (custom Dockerfile adds + # bash, jq, curl, git, docker-cli, python3, openssh-client, autossh). + image = "disinto/edge:local" + force_pull = false + network_mode = "host" + ports = ["http", "https"] + + # apparmor=unconfined matches docker-compose — needed for autossh + # in the entrypoint script. + security_opt = ["apparmor=unconfined"] + } + + # Mount caddy-data volume for ACME state and config directory. + # Caddyfile is mounted at /etc/caddy/Caddyfile by entrypoint-edge.sh. + volume_mount { + volume = "caddy-data" + destination = "/data" + read_only = false + } + + # ── Caddyfile via Nomad service discovery (S5-fix-7, issue #1018) ──── + # Renders staging upstream from Nomad service registration instead of + # hardcoded staging:80. Caddy picks up /local/Caddyfile via entrypoint. + # Forge URL via Nomad service discovery (issue #1034) — resolves forgejo + # service address/port dynamically for bridge network compatibility. + template { + destination = "local/forge.env" + env = true + change_mode = "restart" + data = <<EOT +{{ range service "forgejo" -}} +FORGE_URL=http://{{ .Address }}:{{ .Port }} +{{- end }} +EOT + } + + template { + destination = "local/Caddyfile" + change_mode = "restart" + data = <<EOT +# Caddyfile — edge proxy configuration (Nomad-rendered) +# Staging upstream discovered via Nomad service registration. + +:80 { + # Redirect root to Forgejo + handle / { + redir /forge/ 302 + } + + # Reverse proxy to Forgejo + handle /forge/* { + reverse_proxy 127.0.0.1:3000 + } + + # Reverse proxy to Woodpecker CI + handle /ci/* { + reverse_proxy 127.0.0.1:8000 + } + + # Reverse proxy to staging — dynamic port via Nomad service discovery + handle /staging/* { + uri strip_prefix /staging +{{ range nomadService "staging" }} reverse_proxy {{ .Address }}:{{ .Port }} +{{ end }} } + + # Chat service — reverse proxy to disinto-chat backend (#705) + # OAuth routes bypass forward_auth — unauthenticated users need these (#709) + handle /chat/login { + reverse_proxy 127.0.0.1:8080 + } + handle /chat/oauth/callback { + reverse_proxy 127.0.0.1:8080 + } + # WebSocket endpoint for streaming (#1026) + handle /chat/ws { + header_up Upgrade $http.upgrade + header_up Connection $http.connection + reverse_proxy 127.0.0.1:8080 + } + # Defense-in-depth: forward_auth stamps X-Forwarded-User from session (#709) + handle /chat/* { + forward_auth 127.0.0.1:8080 { + uri /chat/auth/verify + copy_headers X-Forwarded-User + header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET} + } + reverse_proxy 127.0.0.1:8080 + } +} +EOT + } + + # ── Non-secret env ─────────────────────────────────────────────────── + env { + FORGE_REPO = "disinto-admin/disinto" + DISINTO_CONTAINER = "1" + PROJECT_NAME = "disinto" + } + + # Caddy needs CPU + memory headroom for reverse proxy work. + resources { + cpu = 200 + memory = 256 + } + } + + # ── Dispatcher task (S5.1, issue #988) ──────────────────────────────── + task "dispatcher" { + driver = "docker" + + config { + # Use same disinto/agents:local image as other agents. + image = "disinto/agents:local" + force_pull = false + network_mode = "host" + + # apparmor=unconfined matches docker-compose. + security_opt = ["apparmor=unconfined"] + + # Mount docker.sock via bind-volume (not host volume) for legacy + # docker backend compat. Nomad host volumes require named volumes + # from client.hcl; socket files cannot be host volumes. + volumes = ["/var/run/docker.sock:/var/run/docker.sock:ro"] + } + + # Mount ops-repo for vault actions polling. + volume_mount { + volume = "ops-repo" + destination = "/home/agent/repos/disinto-ops" + read_only = false + } + + # ── Forge URL via Nomad service discovery (issue #1034) ────────── + # Resolves forgejo service address/port dynamically for bridge network + # compatibility. Template-scoped to dispatcher task (Nomad doesn't + # propagate templates across tasks). + template { + destination = "local/forge.env" + env = true + change_mode = "restart" + data = <<EOT +{{ range service "forgejo" -}} +FORGE_URL=http://{{ .Address }}:{{ .Port }} +{{- end }} +EOT + } + + # ── Vault-templated secrets (S5.1, issue #988) ────────────────────── + # Renders FORGE_TOKEN from Vault KV v2 for ops repo access. + template { + destination = "secrets/dispatcher.env" + env = true + change_mode = "restart" + error_on_missing_key = false + data = <<EOT +{{- with secret "kv/data/disinto/shared/ops-repo" -}} +FORGE_TOKEN={{ .Data.data.token }} +{{- else -}} +# WARNING: kv/disinto/shared/ops-repo is empty — run tools/vault-seed-ops-repo.sh +FORGE_TOKEN=seed-me +{{- end }} +EOT + } + + # ── Non-secret env ─────────────────────────────────────────────────── + env { + DISPATCHER_BACKEND = "nomad" + FORGE_REPO = "disinto-admin/disinto" + FORGE_OPS_REPO = "disinto-admin/disinto-ops" + PRIMARY_BRANCH = "main" + DISINTO_CONTAINER = "1" + OPS_REPO_ROOT = "/home/agent/repos/disinto-ops" + FORGE_ADMIN_USERS = "vault-bot,admin" + } + + # Dispatcher is lightweight — minimal CPU + memory. + resources { + cpu = 100 + memory = 256 + } + } + } +} diff --git a/nomad/jobs/staging.hcl b/nomad/jobs/staging.hcl new file mode 100644 index 0000000..c9d4fb4 --- /dev/null +++ b/nomad/jobs/staging.hcl @@ -0,0 +1,86 @@ +# ============================================================================= +# nomad/jobs/staging.hcl — Staging file server (Nomad service job) +# +# Part of the Nomad+Vault migration (S5.2, issue #989). Lightweight service job +# for the staging file server using Caddy as a static file server. +# +# Mount contract: +# This job mounts the `docker/` directory as `/srv/site` (read-only). +# The docker/ directory contains static content (images, HTML, etc.) +# served to staging environment users. +# +# Network: +# Dynamic host port — edge discovers via Nomad service registration. +# No static port to avoid collisions with edge (which owns 80/443). +# +# Not the runtime yet: docker-compose.yml is still the factory's live stack +# until cutover. This file exists so CI can validate it and S5.2 can wire +# `disinto init --backend=nomad --with staging` to `nomad job run` it. +# ============================================================================= + +job "staging" { + type = "service" + datacenters = ["dc1"] + + group "staging" { + count = 1 + + # No Vault integration needed — no secrets required (static file server) + + # Internal service — dynamic host port. Edge discovers via Nomad service. + network { + port "http" { + to = 80 + } + } + + volume "site-content" { + type = "host" + source = "site-content" + read_only = true + } + + restart { + attempts = 3 + interval = "5m" + delay = "15s" + mode = "delay" + } + + service { + name = "staging" + port = "http" + provider = "nomad" + + check { + type = "http" + path = "/" + interval = "10s" + timeout = "3s" + } + } + + task "staging" { + driver = "docker" + + config { + image = "caddy:alpine" + ports = ["http"] + command = "caddy" + args = ["file-server", "--root", "/srv/site"] + } + + # Mount docker/ directory as /srv/site:ro (static content) + volume_mount { + volume = "site-content" + destination = "/srv/site" + read_only = true + } + + resources { + cpu = 100 + memory = 256 + } + } + } +} diff --git a/nomad/jobs/vault-runner.hcl b/nomad/jobs/vault-runner.hcl new file mode 100644 index 0000000..6f174a3 --- /dev/null +++ b/nomad/jobs/vault-runner.hcl @@ -0,0 +1,137 @@ +# ============================================================================= +# nomad/jobs/vault-runner.hcl — Parameterized batch job for vault action dispatch +# +# Part of the Nomad+Vault migration (S5.3, issue #990). Replaces the +# `docker run --rm vault-runner-${action_id}` pattern in dispatcher.sh with +# a Nomad-native parameterized batch job. Dispatched by the edge dispatcher +# (S5.4) via `nomad job dispatch`. +# +# Parameterized meta: +# action_id — vault action identifier (used by entrypoint-runner.sh) +# secrets_csv — comma-separated secret names (e.g. "GITHUB_TOKEN,DEPLOY_KEY") +# +# Vault integration (approach A — pre-defined templates): +# All 6 known runner secrets are rendered via template stanzas with +# error_on_missing_key = false. Secrets not granted by the dispatch's +# Vault policies render as empty strings. The dispatcher (S5.4) sets +# vault { policies = [...] } per-dispatch based on the action TOML's +# secrets=[...] list, scoping access to only the declared secrets. +# +# Cleanup: Nomad garbage-collects completed batch dispatches automatically. +# ============================================================================= + +job "vault-runner" { + type = "batch" + datacenters = ["dc1"] + + parameterized { + meta_required = ["action_id", "secrets_csv"] + } + + group "runner" { + count = 1 + + # ── Vault workload identity ────────────────────────────────────────────── + # Per-dispatch policies are composed by the dispatcher (S5.4) based on the + # action TOML's secrets=[...] list. Each policy grants read access to + # exactly one kv/data/disinto/runner/<NAME> path. Roles defined in + # vault/roles.yaml (runner-<NAME>), policies in vault/policies/. + vault {} + + volume "ops-repo" { + type = "host" + source = "ops-repo" + read_only = true + } + + # No restart for batch — fail fast, let the dispatcher handle retries. + restart { + attempts = 0 + mode = "fail" + } + + task "runner" { + driver = "docker" + + config { + image = "disinto/agents:local" + force_pull = false + entrypoint = ["bash"] + args = [ + "/home/agent/disinto/docker/runner/entrypoint-runner.sh", + "${NOMAD_META_action_id}", + ] + } + + volume_mount { + volume = "ops-repo" + destination = "/home/agent/ops" + read_only = true + } + + # ── Non-secret env ─────────────────────────────────────────────────────── + env { + DISINTO_CONTAINER = "1" + FACTORY_ROOT = "/home/agent/disinto" + OPS_REPO_ROOT = "/home/agent/ops" + } + + # ── Vault-templated runner secrets (approach A) ──────────────────────── + # Pre-defined templates for all 6 known runner secrets. Each renders + # from kv/data/disinto/runner/<NAME>. Secrets not granted by the + # dispatch's Vault policies produce empty env vars (harmless). + # error_on_missing_key = false prevents template-pending hangs when + # a secret path is absent or the policy doesn't grant access. + # + # Placeholder values kept < 16 chars to avoid secret-scan CI failures. + template { + destination = "secrets/runner.env" + env = true + error_on_missing_key = false + data = <<EOT +{{- with secret "kv/data/disinto/runner/GITHUB_TOKEN" -}} +GITHUB_TOKEN={{ .Data.data.value }} +{{- else -}} +GITHUB_TOKEN= +{{- end }} + +{{ with secret "kv/data/disinto/runner/CODEBERG_TOKEN" -}} +CODEBERG_TOKEN={{ .Data.data.value }} +{{- else -}} +CODEBERG_TOKEN= +{{- end }} + +{{ with secret "kv/data/disinto/runner/CLAWHUB_TOKEN" -}} +CLAWHUB_TOKEN={{ .Data.data.value }} +{{- else -}} +CLAWHUB_TOKEN= +{{- end }} + +{{ with secret "kv/data/disinto/runner/DEPLOY_KEY" -}} +DEPLOY_KEY={{ .Data.data.value }} +{{- else -}} +DEPLOY_KEY= +{{- end }} + +{{ with secret "kv/data/disinto/runner/NPM_TOKEN" -}} +NPM_TOKEN={{ .Data.data.value }} +{{- else -}} +NPM_TOKEN= +{{- end }} + +{{ with secret "kv/data/disinto/runner/DOCKER_HUB_TOKEN" -}} +DOCKER_HUB_TOKEN={{ .Data.data.value }} +{{- else -}} +DOCKER_HUB_TOKEN= +{{- end }} +EOT + } + + # Formula execution headroom — matches agents.hcl baseline. + resources { + cpu = 500 + memory = 1024 + } + } + } +} diff --git a/nomad/jobs/woodpecker-agent.hcl b/nomad/jobs/woodpecker-agent.hcl index de81459..a4111fe 100644 --- a/nomad/jobs/woodpecker-agent.hcl +++ b/nomad/jobs/woodpecker-agent.hcl @@ -8,8 +8,9 @@ # # Host networking: # Uses network_mode = "host" to match the compose setup. The Woodpecker -# server gRPC endpoint is addressed as "localhost:9000" since both -# server and agent run on the same host. +# server gRPC endpoint is addressed via Nomad service discovery using +# the host's IP address (10.10.10.x:9000), since the server's port +# binding in Nomad binds to the allocation's IP, not localhost. # # Vault integration: # - vault { role = "service-woodpecker-agent" } at the group scope — the @@ -56,7 +57,7 @@ job "woodpecker-agent" { check { type = "http" path = "/healthz" - interval = "15s" + interval = "10s" timeout = "3s" } } @@ -82,11 +83,19 @@ job "woodpecker-agent" { # Non-secret env — server address, gRPC security, concurrency limit, # and health check endpoint. Nothing sensitive here. + # + # WOODPECKER_SERVER uses Nomad's attribute template to get the host's + # IP address (10.10.10.x). The server's gRPC port 9000 is bound via + # Nomad's port stanza to the allocation's IP (not localhost), so the + # agent must use the LXC's eth0 IP, not 127.0.0.1. env { - WOODPECKER_SERVER = "localhost:9000" - WOODPECKER_GRPC_SECURE = "false" - WOODPECKER_MAX_WORKFLOWS = "1" - WOODPECKER_HEALTHCHECK_ADDR = ":3333" + WOODPECKER_SERVER = "${attr.unique.network.ip-address}:9000" + WOODPECKER_GRPC_SECURE = "false" + WOODPECKER_GRPC_KEEPALIVE_TIME = "10s" + WOODPECKER_GRPC_KEEPALIVE_TIMEOUT = "20s" + WOODPECKER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS = "true" + WOODPECKER_MAX_WORKFLOWS = "1" + WOODPECKER_HEALTHCHECK_ADDR = ":3333" } # ── Vault-templated agent secret ────────────────────────────────── diff --git a/planner/AGENTS.md b/planner/AGENTS.md index 7034b60..b3ed369 100644 --- a/planner/AGENTS.md +++ b/planner/AGENTS.md @@ -1,4 +1,4 @@ -<!-- last-reviewed: 8ad5aca6bbee77634b3c63523042b1d39cefa96a --> +<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 --> # Planner Agent **Role**: Strategic planning using a Prerequisite Tree (Theory of Constraints), diff --git a/predictor/AGENTS.md b/predictor/AGENTS.md index cec03a1..dc64e44 100644 --- a/predictor/AGENTS.md +++ b/predictor/AGENTS.md @@ -1,4 +1,4 @@ -<!-- last-reviewed: 8ad5aca6bbee77634b3c63523042b1d39cefa96a --> +<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 --> # Predictor Agent **Role**: Abstract adversary (the "goblin"). Runs a 2-step formula diff --git a/projects/disinto.toml.example b/projects/disinto.toml.example index ebe6eed..34eacae 100644 --- a/projects/disinto.toml.example +++ b/projects/disinto.toml.example @@ -59,6 +59,23 @@ check_pipeline_stall = false # compact_pct = 60 # poll_interval = 60 +# Edge routing mode (default: subpath) +# +# Controls how services are exposed through the edge proxy. +# subpath — all services under <project>.disinto.ai/{forge,ci,chat,staging} +# subdomain — per-service subdomains: forge.<project>, ci.<project>, chat.<project> +# +# Set to "subdomain" if subpath routing causes unfixable issues (redirect loops, +# OAuth callback mismatches, cookie collisions). See docs/edge-routing-fallback.md. +# +# Set in .env (not TOML) since it's consumed by docker-compose and shell scripts: +# EDGE_ROUTING_MODE=subdomain +# +# In subdomain mode, `disinto edge register` also writes: +# EDGE_TUNNEL_FQDN_FORGE=forge.<project>.disinto.ai +# EDGE_TUNNEL_FQDN_CI=ci.<project>.disinto.ai +# EDGE_TUNNEL_FQDN_CHAT=chat.<project>.disinto.ai + # [mirrors] # github = "git@github.com:johba/disinto.git" # codeberg = "git@codeberg.org:johba/disinto.git" diff --git a/review/AGENTS.md b/review/AGENTS.md index 4c06b34..a8aecc9 100644 --- a/review/AGENTS.md +++ b/review/AGENTS.md @@ -1,4 +1,4 @@ -<!-- last-reviewed: 8ad5aca6bbee77634b3c63523042b1d39cefa96a --> +<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 --> # Review Agent **Role**: AI-powered PR review — post structured findings and formal diff --git a/review/review-pr.sh b/review/review-pr.sh index 091025f..09f6cb6 100755 --- a/review/review-pr.sh +++ b/review/review-pr.sh @@ -52,8 +52,35 @@ REVIEW_TMPDIR=$(mktemp -d) log() { printf '[%s] PR#%s %s\n' "$(date -u '+%Y-%m-%d %H:%M:%S UTC')" "$PR_NUMBER" "$*" >> "$LOGFILE"; } status() { printf '[%s] PR #%s: %s\n' "$(date -u '+%Y-%m-%d %H:%M:%S UTC')" "$PR_NUMBER" "$*" > "$STATUSFILE"; log "$*"; } -cleanup() { rm -rf "$REVIEW_TMPDIR" "$LOCKFILE" "$STATUSFILE" "/tmp/${PROJECT_NAME}-review-graph-${PR_NUMBER}.json"; } -trap cleanup EXIT + +# cleanup — remove temp files (NOT lockfile — cleanup_on_exit handles that) +cleanup() { + rm -rf "$REVIEW_TMPDIR" "$STATUSFILE" "/tmp/${PROJECT_NAME}-review-graph-${PR_NUMBER}.json" +} + +# cleanup_on_exit — defensive cleanup: remove lockfile if we own it, kill residual children +# This handles the case where review-pr.sh is terminated unexpectedly (e.g., watchdog SIGTERM) +cleanup_on_exit() { + local ec=$? + # Remove lockfile only if we own it (PID matches $$) + if [ -f "$LOCKFILE" ] && [ -n "$(cat "$LOCKFILE" 2>/dev/null)" ]; then + if [ "$(cat "$LOCKFILE" 2>/dev/null)" = "$$" ]; then + rm -f "$LOCKFILE" + log "cleanup_on_exit: removed lockfile (we owned it)" + fi + fi + # Kill any direct children that may have been spawned by this process + # (e.g., bash -c commands from Claude's Bash tool that didn't get reaped) + pkill -P $$ 2>/dev/null || true + # Call the main cleanup function to remove temp files + cleanup + exit "$ec" +} +trap cleanup_on_exit EXIT INT TERM + +# Note: EXIT trap is already set above. The cleanup function is still available for +# non-error exits (e.g., normal completion via exit 0 after verdict posted). +# When review succeeds, we want to skip lockfile removal since the verdict was posted. # ============================================================================= # LOG ROTATION @@ -104,6 +131,7 @@ if [ "$PR_STATE" != "open" ]; then log "SKIP: state=${PR_STATE}" worktree_cleanup "$WORKTREE" rm -f "$OUTPUT_FILE" "$SID_FILE" 2>/dev/null || true + rm -f "$LOCKFILE" exit 0 fi @@ -113,7 +141,7 @@ fi CI_STATE=$(ci_commit_status "$PR_SHA") CI_NOTE="" if ! ci_passed "$CI_STATE"; then - ci_required_for_pr "$PR_NUMBER" && { log "SKIP: CI=${CI_STATE}"; exit 0; } + ci_required_for_pr "$PR_NUMBER" && { log "SKIP: CI=${CI_STATE}"; rm -f "$LOCKFILE"; exit 0; } CI_NOTE=" (not required — non-code PR)" fi @@ -123,10 +151,10 @@ fi ALL_COMMENTS=$(forge_api_all "/issues/${PR_NUMBER}/comments") HAS_CMT=$(printf '%s' "$ALL_COMMENTS" | jq --arg s "$PR_SHA" \ '[.[]|select(.body|contains("<!-- reviewed: "+$s+" -->"))]|length') -[ "${HAS_CMT:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: reviewed ${PR_SHA:0:7}"; exit 0; } +[ "${HAS_CMT:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: reviewed ${PR_SHA:0:7}"; rm -f "$LOCKFILE"; exit 0; } HAS_FML=$(forge_api_all "/pulls/${PR_NUMBER}/reviews" | jq --arg s "$PR_SHA" \ '[.[]|select(.commit_id==$s)|select(.state!="COMMENT")]|length') -[ "${HAS_FML:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: formal review"; exit 0; } +[ "${HAS_FML:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: formal review"; rm -f "$LOCKFILE"; exit 0; } # ============================================================================= # RE-REVIEW DETECTION @@ -324,3 +352,7 @@ esac profile_write_journal "review-${PR_NUMBER}" "Review PR #${PR_NUMBER} (${VERDICT})" "${VERDICT,,}" "" || true log "DONE: ${VERDICT} (re-review: ${IS_RE_REVIEW})" + +# Remove lockfile on successful completion (cleanup_on_exit will also do this, +# but we do it here to avoid the trap running twice) +rm -f "$LOCKFILE" diff --git a/site/collect-engagement.sh b/site/collect-engagement.sh index e87e3aa..c4ac11d 100644 --- a/site/collect-engagement.sh +++ b/site/collect-engagement.sh @@ -209,3 +209,72 @@ jq -nc \ log "Engagement report written to ${OUTPUT}: ${UNIQUE_VISITORS} visitors, ${PAGE_VIEWS} page views" echo "Engagement report: ${UNIQUE_VISITORS} unique visitors, ${PAGE_VIEWS} page views → ${OUTPUT}" + +# ── Commit evidence to ops repo via Forgejo API ───────────────────────────── + +commit_evidence_via_forgejo() { + local evidence_file="$1" + local report_date + report_date=$(basename "$evidence_file" .json) + local file_path="evidence/engagement/${report_date}.json" + + # Check if ops repo is available + if [ -z "${OPS_REPO_ROOT:-}" ] || [ ! -d "${OPS_REPO_ROOT}/.git" ]; then + log "SKIP: OPS_REPO_ROOT not set or not a git repo — evidence file not committed" + return 0 + fi + + # Check if Forgejo credentials are available + if [ -z "${FORGE_TOKEN:-}" ] || [ -z "${FORGE_URL:-}" ] || [ -z "${FORGE_OPS_REPO:-}" ]; then + log "SKIP: Forgejo credentials not available (FORGE_TOKEN/FORGE_URL/FORGE_OPS_REPO) — evidence file not committed" + return 0 + fi + + # Read and encode the file content + local content + content=$(base64 < "$evidence_file") + local ops_owner="${OPS_FORGE_OWNER:-${FORGE_REPO%%/*}}" + local ops_repo="${OPS_FORGE_REPO:-${PROJECT_NAME:-disinto}-ops}" + + # Check if file already exists in the ops repo + local existing + existing=$(curl -sf \ + -H "Authorization: token ${FORGE_TOKEN}" \ + "${FORGE_URL}/api/v1/repos/${ops_owner}/${ops_repo}/contents/${file_path}" \ + 2>/dev/null || echo "") + + if [ -n "$existing" ] && printf '%s' "$existing" | jq -e '.sha' >/dev/null 2>&1; then + # Update existing file + local sha + sha=$(printf '%s' "$existing" | jq -r '.sha') + if curl -sf -X PUT \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/repos/${ops_owner}/${ops_repo}/contents/${file_path}" \ + -d "$(jq -nc --arg content "$content" --arg sha "$sha" --arg msg "evidence: engagement ${report_date}" \ + '{message: $msg, content: $content, sha: $sha}')" >/dev/null 2>&1; then + log "Updated evidence file in ops repo: ${file_path}" + return 0 + else + log "ERROR: failed to update evidence file in ops repo" + return 1 + fi + else + # Create new file + if curl -sf -X POST \ + -H "Authorization: token ${FORGE_TOKEN}" \ + -H "Content-Type: application/json" \ + "${FORGE_URL}/api/v1/repos/${ops_owner}/${ops_repo}/contents/${file_path}" \ + -d "$(jq -nc --arg content "$content" --arg msg "evidence: engagement ${report_date}" \ + '{message: $msg, content: $content}')" >/dev/null 2>&1; then + log "Created evidence file in ops repo: ${file_path}" + return 0 + else + log "ERROR: failed to create evidence file in ops repo" + return 1 + fi + fi +} + +# Attempt to commit evidence (non-fatal — data collection succeeded even if commit fails) +commit_evidence_via_forgejo "$OUTPUT" || log "WARNING: evidence commit skipped or failed — file exists locally at ${OUTPUT}" diff --git a/supervisor/AGENTS.md b/supervisor/AGENTS.md index 77f7b64..a315009 100644 --- a/supervisor/AGENTS.md +++ b/supervisor/AGENTS.md @@ -1,4 +1,4 @@ -<!-- last-reviewed: 8ad5aca6bbee77634b3c63523042b1d39cefa96a --> +<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 --> # Supervisor Agent **Role**: Health monitoring and auto-remediation, executed as a formula-driven @@ -24,12 +24,18 @@ Both invoke the same `supervisor-run.sh`. Sources `lib/guard.sh` and calls `chec files for `PHASE:escalate` entries and auto-removes any whose linked issue is confirmed closed (24h grace period after closure to avoid races). Reports **stale crashed worktrees** (worktrees preserved after crash) — supervisor - housekeeping removes them after 24h. Also collects **Woodpecker agent health**: - container status, gRPC error count (last 20m), fast-failure pipelines (<60s, - last 15m), and overall health determination. + housekeeping removes them after 24h. Collects **Woodpecker agent health** + (added #933): container `disinto-woodpecker-agent` health/running status, + gRPC error count in last 20 min, fast-failure pipeline count (<60s, last 15 min), + and overall health verdict (healthy/unhealthy). Unhealthy verdict triggers + automatic container restart + `blocked:ci_exhausted` issue recovery in + `supervisor-run.sh` before the Claude session starts. - `formulas/run-supervisor.toml` — Execution spec: five steps (preflight review, health-assessment, decide-actions, report, journal) with `needs` dependencies. - Claude evaluates all metrics and takes actions in a single interactive session + Claude evaluates all metrics and takes actions in a single interactive session. + Health-assessment now includes P2 **Woodpecker agent unhealthy** classification + (container not running, ≥3 gRPC errors/20m, or ≥3 fast-failure pipelines/15m); + decide-actions documents the pre-session auto-recovery path - `$OPS_REPO_ROOT/knowledge/*.md` — Domain-specific remediation guides (memory, disk, CI, git, dev-agent, review-agent, forge) diff --git a/tests/disinto-init-nomad.bats b/tests/disinto-init-nomad.bats index 21f4303..54c3655 100644 --- a/tests/disinto-init-nomad.bats +++ b/tests/disinto-init-nomad.bats @@ -215,7 +215,44 @@ setup_file() { run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with unknown-service --dry-run [ "$status" -ne 0 ] [[ "$output" == *"unknown service"* ]] - [[ "$output" == *"known: forgejo"* ]] + [[ "$output" == *"known: forgejo, woodpecker-server, woodpecker-agent, agents, staging, chat, edge"* ]] +} + +# S3.4: woodpecker auto-expansion and forgejo auto-inclusion +@test "disinto init --backend=nomad --with woodpecker auto-expands to server+agent" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent"* ]] + [[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent"* ]] +} + +@test "disinto init --backend=nomad --with woodpecker auto-includes forgejo with note" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"Note: --with woodpecker implies --with forgejo"* ]] +} + +@test "disinto init --backend=nomad --with forgejo,woodpecker expands woodpecker" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,woodpecker --dry-run + [ "$status" -eq 0 ] + # Order follows input: forgejo first, then woodpecker expanded + [[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent"* ]] + [[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent"* ]] +} + +@test "disinto init --backend=nomad --with woodpecker seeds both forgejo and woodpecker" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]] + [[ "$output" == *"tools/vault-seed-woodpecker.sh --dry-run"* ]] +} + +@test "disinto init --backend=nomad --with forgejo,woodpecker deploys all three services" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,woodpecker --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"forgejo.hcl"* ]] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-server.hcl"* ]] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-agent.hcl"* ]] } @test "disinto init --backend=nomad --with forgejo (flag=value syntax) works" { @@ -348,3 +385,60 @@ setup_file() { [ "$status" -ne 0 ] [[ "$output" == *"--empty and --import-env/--import-sops/--age-key are mutually exclusive"* ]] } + +# S4.2: agents service auto-expansion and dependencies +@test "disinto init --backend=nomad --with agents auto-includes forgejo and woodpecker" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"services to deploy: forgejo,agents,woodpecker-server,woodpecker-agent"* ]] + [[ "$output" == *"Note: --with agents implies --with forgejo"* ]] + [[ "$output" == *"Note: --with agents implies --with woodpecker"* ]] +} + +@test "disinto init --backend=nomad --with agents deploys in correct order" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents"* ]] +} + +@test "disinto init --backend=nomad --with agents seeds agents service" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]] + [[ "$output" == *"tools/vault-seed-woodpecker.sh --dry-run"* ]] + [[ "$output" == *"tools/vault-seed-agents.sh --dry-run"* ]] +} + +@test "disinto init --backend=nomad --with agents deploys all four services" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"forgejo.hcl"* ]] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-server.hcl"* ]] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-agent.hcl"* ]] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"agents.hcl"* ]] +} + +@test "disinto init --backend=nomad --with woodpecker,agents expands correctly" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker,agents --dry-run + [ "$status" -eq 0 ] + # woodpecker expands to server+agent, agents is already explicit + # forgejo is auto-included by agents + [[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent,agents"* ]] + [[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents"* ]] +} + +# S5.1 / #1035 — edge service seeds ops-repo (dispatcher FORGE_TOKEN) +@test "disinto init --backend=nomad --with edge deploys edge" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with edge --dry-run + [ "$status" -eq 0 ] + # edge depends on all backend services, so all are included + [[ "$output" == *"services to deploy: edge,forgejo"* ]] + [[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents staging chat edge"* ]] + [[ "$output" == *"[deploy] [dry-run] nomad job validate"*"edge.hcl"* ]] +} + +@test "disinto init --backend=nomad --with edge seeds ops-repo" { + run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with edge --dry-run + [ "$status" -eq 0 ] + [[ "$output" == *"tools/vault-seed-ops-repo.sh --dry-run"* ]] +} diff --git a/tests/smoke-edge-subpath.sh b/tests/smoke-edge-subpath.sh new file mode 100755 index 0000000..6a1f383 --- /dev/null +++ b/tests/smoke-edge-subpath.sh @@ -0,0 +1,310 @@ +#!/usr/bin/env bash +# ============================================================================= +# smoke-edge-subpath.sh — End-to-end subpath routing smoke test +# +# Verifies Forgejo, Woodpecker, and chat function correctly under subpaths: +# - Forgejo at /forge/ +# - Woodpecker at /ci/ +# - Chat at /chat/ +# - Staging at /staging/ +# +# Usage: +# smoke-edge-subpath.sh [--base-url BASE_URL] +# +# Environment variables: +# BASE_URL — Edge proxy URL (default: http://localhost) +# EDGE_TIMEOUT — Request timeout in seconds (default: 30) +# EDGE_MAX_RETRIES — Max retries per request (default: 3) +# +# Exit codes: +# 0 — All checks passed +# 1 — One or more checks failed +# ============================================================================= +set -euo pipefail + +# Script directory for relative paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Source common helpers if available +source "${SCRIPT_DIR}/../lib/env.sh" 2>/dev/null || true + +# ───────────────────────────────────────────────────────────────────────────── +# Configuration +# ───────────────────────────────────────────────────────────────────────────── + +BASE_URL="${BASE_URL:-http://localhost}" +EDGE_TIMEOUT="${EDGE_TIMEOUT:-30}" +EDGE_MAX_RETRIES="${EDGE_MAX_RETRIES:-3}" + +# Subpaths to test +FORGE_PATH="/forge/" +CI_PATH="/ci/" +CHAT_PATH="/chat/" +STAGING_PATH="/staging/" + +# Track overall test status +FAILED=0 +PASSED=0 +SKIPPED=0 + +# ───────────────────────────────────────────────────────────────────────────── +# Logging helpers +# ───────────────────────────────────────────────────────────────────────────── + +log_info() { + echo "[INFO] $*" +} + +log_pass() { + echo "[PASS] $*" + ((PASSED++)) || true +} + +log_fail() { + echo "[FAIL] $*" + ((FAILED++)) || true +} + +log_skip() { + echo "[SKIP] $*" + ((SKIPPED++)) || true +} + +log_section() { + echo "" + echo "=== $* ===" + echo "" +} + +# ───────────────────────────────────────────────────────────────────────────── +# HTTP helpers +# ───────────────────────────────────────────────────────────────────────────── + +# Make an HTTP request with retry logic +# Usage: http_request <method> <url> [options...] +# Returns: HTTP status code on stdout +http_request() { + local method="$1" + local url="$2" + shift 2 + + local retries=0 + local response status + + while [ "$retries" -lt "$EDGE_MAX_RETRIES" ]; do + response=$(curl -sS -w '\n%{http_code}' -X "$method" \ + --max-time "$EDGE_TIMEOUT" \ + -o /tmp/edge-response-$$ \ + "$@" 2>&1) || { + retries=$((retries + 1)) + log_info "Retry $retries/$EDGE_MAX_RETRIES for $url" + sleep 1 + continue + } + + status=$(echo "$response" | tail -n1) + + echo "$status" + return 0 + done + + log_fail "Max retries exceeded for $url" + return 1 +} + +# Make a GET request and return status code +http_get() { + local url="$1" + shift || true + http_request "GET" "$url" "$@" +} + +# Make a HEAD request (no body) +http_head() { + local url="$1" + shift || true + http_request "HEAD" "$url" "$@" +} + +# Make a GET request and return the response body +http_get_body() { + local url="$1" + shift || true + curl -sS --max-time "$EDGE_TIMEOUT" "$@" "$url" +} + +# ───────────────────────────────────────────────────────────────────────────── +# Test functions +# ───────────────────────────────────────────────────────────────────────────── + +test_root_redirect() { + log_section "Test 1: Root redirect to /forge/" + + local status + status=$(http_head "$BASE_URL/") + + if [ "$status" = "302" ]; then + log_pass "Root / redirects with 302" + else + log_fail "Expected 302 redirect from /, got status $status" + fi +} + +test_forgejo_subpath() { + log_section "Test 2: Forgejo at /forge/" + + local status + status=$(http_head "$BASE_URL${FORGE_PATH}") + + if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then + log_pass "Forgejo at ${BASE_URL}${FORGE_PATH} returns status $status" + else + log_fail "Forgejo at ${BASE_URL}${FORGE_PATH} returned unexpected status $status" + fi +} + +test_woodpecker_subpath() { + log_section "Test 3: Woodpecker at /ci/" + + local status + status=$(http_head "$BASE_URL${CI_PATH}") + + if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then + log_pass "Woodpecker at ${BASE_URL}${CI_PATH} returns status $status" + else + log_fail "Woodpecker at ${BASE_URL}${CI_PATH} returned unexpected status $status" + fi +} + +test_chat_subpath() { + log_section "Test 4: Chat at /chat/" + + # Test chat login endpoint + local status + status=$(http_head "$BASE_URL${CHAT_PATH}login") + + if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then + log_pass "Chat login at ${BASE_URL}${CHAT_PATH}login returns status $status" + else + log_fail "Chat login at ${BASE_URL}${CHAT_PATH}login returned unexpected status $status" + fi + + # Test chat OAuth callback endpoint + status=$(http_head "$BASE_URL${CHAT_PATH}oauth/callback") + + if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then + log_pass "Chat OAuth callback at ${BASE_URL}${CHAT_PATH}oauth/callback returns status $status" + else + log_fail "Chat OAuth callback at ${BASE_URL}${CHAT_PATH}oauth/callback returned unexpected status $status" + fi +} + +test_staging_subpath() { + log_section "Test 5: Staging at /staging/" + + local status + status=$(http_head "$BASE_URL${STAGING_PATH}") + + if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then + log_pass "Staging at ${BASE_URL}${STAGING_PATH} returns status $status" + else + log_fail "Staging at ${BASE_URL}${STAGING_PATH} returned unexpected status $status" + fi +} + +test_forward_auth_rejection() { + log_section "Test 6: Forward auth on /chat/* rejects unauthenticated requests" + + # Request a protected chat endpoint without auth header + # Should return 401 (Unauthorized) due to forward_auth + local status + status=$(http_head "$BASE_URL${CHAT_PATH}auth/verify") + + if [ "$status" = "401" ]; then + log_pass "Unauthenticated /chat/auth/verify returns 401 (forward_auth working)" + elif [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then + log_skip "Unauthenticated /chat/auth/verify returns $status (forward_auth may be disabled)" + else + log_fail "Expected 401 for unauthenticated /chat/auth/verify, got status $status" + fi +} + +test_forgejo_oauth_callback() { + log_section "Test 7: Forgejo OAuth callback for Woodpecker under subpath" + + # Test that Forgejo OAuth callback path works (Woodpecker OAuth integration) + local status + status=$(http_head "$BASE_URL${FORGE_PATH}login/oauth/callback") + + if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then + log_pass "Forgejo OAuth callback at ${BASE_URL}${FORGE_PATH}login/oauth/callback works" + else + log_fail "Forgejo OAuth callback returned unexpected status $status" + fi +} + +# ───────────────────────────────────────────────────────────────────────────── +# Main +# ───────────────────────────────────────────────────────────────────────────── + +main() { + log_info "Starting subpath routing smoke test" + log_info "Base URL: $BASE_URL" + log_info "Timeout: ${EDGE_TIMEOUT}s, Max retries: ${EDGE_MAX_RETRIES}" + + # Run all tests + test_root_redirect + test_forgejo_subpath + test_woodpecker_subpath + test_chat_subpath + test_staging_subpath + test_forward_auth_rejection + test_forgejo_oauth_callback + + # Summary + log_section "Test Summary" + log_info "Passed: $PASSED" + log_info "Failed: $FAILED" + log_info "Skipped: $SKIPPED" + + if [ "$FAILED" -gt 0 ]; then + log_fail "Some tests failed" + exit 1 + fi + + log_pass "All tests passed!" + exit 0 +} + +# Parse arguments +while [[ $# -gt 0 ]]; do + case "$1" in + --base-url) + BASE_URL="$2" + shift 2 + ;; + --base-url=*) + BASE_URL="${1#*=}" + shift + ;; + --help) + echo "Usage: $0 [options]" + echo "" + echo "Options:" + echo " --base-url URL Set base URL (default: http://localhost)" + echo " --help Show this help message" + echo "" + echo "Environment variables:" + echo " BASE_URL Base URL for edge proxy (default: http://localhost)" + echo " EDGE_TIMEOUT Request timeout in seconds (default: 30)" + echo " EDGE_MAX_RETRIES Max retries per request (default: 3)" + exit 0 + ;; + *) + echo "Unknown option: $1" >&2 + exit 1 + ;; + esac +done + +main diff --git a/tests/smoke-init.sh b/tests/smoke-init.sh index 306f7ee..8cd4fee 100644 --- a/tests/smoke-init.sh +++ b/tests/smoke-init.sh @@ -15,6 +15,7 @@ set -euo pipefail FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)" +export FACTORY_ROOT_REAL="$FACTORY_ROOT" # Always use localhost for mock Forgejo (in case FORGE_URL is set from docker-compose) export FORGE_URL="http://localhost:3000" MOCK_BIN="/tmp/smoke-mock-bin" @@ -30,7 +31,8 @@ cleanup() { rm -rf "$MOCK_BIN" /tmp/smoke-test-repo \ "${FACTORY_ROOT}/projects/smoke-repo.toml" \ /tmp/smoke-claude-shared /tmp/smoke-home-claude \ - /tmp/smoke-env-before-rerun /tmp/smoke-env-before-dryrun + /tmp/smoke-env-before-rerun /tmp/smoke-env-before-dryrun \ + "${FACTORY_ROOT}/docker-compose.yml" # Restore .env only if we created the backup if [ -f "${FACTORY_ROOT}/.env.smoke-backup" ]; then mv "${FACTORY_ROOT}/.env.smoke-backup" "${FACTORY_ROOT}/.env" @@ -423,6 +425,51 @@ export CLAUDE_SHARED_DIR="$ORIG_CLAUDE_SHARED_DIR" export CLAUDE_CONFIG_DIR="$ORIG_CLAUDE_CONFIG_DIR" rm -rf /tmp/smoke-claude-shared /tmp/smoke-home-claude +# ── 8. Test duplicate service name detection ────────────────────────────── +echo "=== 8/8 Testing duplicate service name detection ===" + +# Isolated factory root — do NOT touch the real ${FACTORY_ROOT}/projects/ +SMOKE_DUP_ROOT=$(mktemp -d) +mkdir -p "$SMOKE_DUP_ROOT/projects" +cat > "$SMOKE_DUP_ROOT/projects/duplicate-test.toml" <<'TOMLEOF' +name = "duplicate-test" +description = "dup-detection smoke" + +[ci] +woodpecker_repo_id = "999" + +[agents.llama] +base_url = "http://localhost:8080" +model = "qwen:latest" +roles = ["dev"] +forge_user = "llama-bot" +TOMLEOF + +# Call the generator directly — no `disinto init` to overwrite the TOML. +# FACTORY_ROOT tells generators.sh where projects/ + compose_file live. +( + export FACTORY_ROOT="$SMOKE_DUP_ROOT" + export ENABLE_LLAMA_AGENT=1 + # shellcheck disable=SC1091 + source "${FACTORY_ROOT_REAL:-$(cd "$(dirname "$0")/.." && pwd)}/lib/generators.sh" + # Use a temp file to capture output since pipefail will kill the pipeline + # when _generate_compose_impl returns non-zero + _generate_compose_impl > /tmp/smoke-dup-output.txt 2>&1 || true + if grep -q "Duplicate service name" /tmp/smoke-dup-output.txt; then + pass "Duplicate service detection: conflict between ENABLE_LLAMA_AGENT and [agents.llama] reported" + rm -f /tmp/smoke-dup-output.txt + exit 0 + else + fail "Duplicate service detection: no error raised for ENABLE_LLAMA_AGENT + [agents.llama]" + cat /tmp/smoke-dup-output.txt >&2 + rm -f /tmp/smoke-dup-output.txt + exit 1 + fi +) || FAILED=1 + +rm -rf "$SMOKE_DUP_ROOT" +unset ENABLE_LLAMA_AGENT + # ── Summary ────────────────────────────────────────────────────────────────── echo "" if [ "$FAILED" -ne 0 ]; then diff --git a/tests/test-caddyfile-routing.sh b/tests/test-caddyfile-routing.sh new file mode 100755 index 0000000..7eea806 --- /dev/null +++ b/tests/test-caddyfile-routing.sh @@ -0,0 +1,238 @@ +#!/usr/bin/env bash +# ============================================================================= +# test-caddyfile-routing.sh — Caddyfile routing block unit test +# +# Extracts the Caddyfile template from nomad/jobs/edge.hcl and validates its +# structure without requiring a running Caddy instance. +# +# Checks: +# - Forgejo subpath (/forge/* -> :3000) +# - Woodpecker subpath (/ci/* -> :8000) +# - Staging subpath (/staging/* -> nomadService discovery) +# - Chat subpath (/chat/* with forward_auth and OAuth routes) +# - Root redirect to /forge/ +# +# Usage: +# test-caddyfile-routing.sh +# +# Exit codes: +# 0 — All checks passed +# 1 — One or more checks failed +# ============================================================================= +set -euo pipefail + +# Script directory for relative paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +EDGE_TEMPLATE="${REPO_ROOT}/nomad/jobs/edge.hcl" + +# Track test status +FAILED=0 +PASSED=0 + +# ───────────────────────────────────────────────────────────────────────────── +# Logging helpers +# ───────────────────────────────────────────────────────────────────────────── + +tr_info() { + echo "[INFO] $*" +} + +tr_pass() { + echo "[PASS] $*" + ((PASSED++)) || true +} + +tr_fail() { + echo "[FAIL] $*" + ((FAILED++)) || true +} + +tr_section() { + echo "" + echo "=== $* ===" + echo "" +} + +# ───────────────────────────────────────────────────────────────────────────── +# Caddyfile extraction +# ───────────────────────────────────────────────────────────────────────────── + +extract_caddyfile() { + local template_file="$1" + + # Extract the Caddyfile template (content between <<EOT and EOT markers + # within the template stanza) + local caddyfile + caddyfile=$(sed -n '/data[[:space:]]*=[[:space:]]*<<[Ee][Oo][Tt]/,/^EOT$/p' "$template_file" | sed '1s/.*/# Caddyfile extracted from Nomad template/; $d') + + if [ -z "$caddyfile" ]; then + echo "ERROR: Could not extract Caddyfile template from $template_file" >&2 + return 1 + fi + + echo "$caddyfile" +} + +# ───────────────────────────────────────────────────────────────────────────── +# Validation functions +# ───────────────────────────────────────────────────────────────────────────── + +check_forgejo_routing() { + tr_section "Validating Forgejo routing" + + # Check handle block for /forge/* + if echo "$CADDYFILE" | grep -q "handle /forge/\*"; then + tr_pass "Forgejo handle block (handle /forge/*)" + else + tr_fail "Missing Forgejo handle block (handle /forge/*)" + fi + + # Check reverse_proxy to Forgejo on port 3000 + if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:3000"; then + tr_pass "Forgejo reverse_proxy configured (127.0.0.1:3000)" + else + tr_fail "Missing Forgejo reverse_proxy (127.0.0.1:3000)" + fi +} + +check_woodpecker_routing() { + tr_section "Validating Woodpecker routing" + + # Check handle block for /ci/* + if echo "$CADDYFILE" | grep -q "handle /ci/\*"; then + tr_pass "Woodpecker handle block (handle /ci/*)" + else + tr_fail "Missing Woodpecker handle block (handle /ci/*)" + fi + + # Check reverse_proxy to Woodpecker on port 8000 + if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8000"; then + tr_pass "Woodpecker reverse_proxy configured (127.0.0.1:8000)" + else + tr_fail "Missing Woodpecker reverse_proxy (127.0.0.1:8000)" + fi +} + +check_staging_routing() { + tr_section "Validating Staging routing" + + # Check handle block for /staging/* + if echo "$CADDYFILE" | grep -q "handle /staging/\*"; then + tr_pass "Staging handle block (handle /staging/*)" + else + tr_fail "Missing Staging handle block (handle /staging/*)" + fi + + # Check for uri strip_prefix /staging directive + if echo "$CADDYFILE" | grep -q "uri strip_prefix /staging"; then + tr_pass "Staging uri strip_prefix configured (/staging)" + else + tr_fail "Missing uri strip_prefix /staging for staging" + fi + + # Check for nomadService discovery (dynamic port) + if echo "$CADDYFILE" | grep -q "nomadService"; then + tr_pass "Staging uses Nomad service discovery" + else + tr_fail "Missing Nomad service discovery for staging" + fi +} + +check_chat_routing() { + tr_section "Validating Chat routing" + + # Check login endpoint + if echo "$CADDYFILE" | grep -q "handle /chat/login"; then + tr_pass "Chat login handle block (handle /chat/login)" + else + tr_fail "Missing Chat login handle block (handle /chat/login)" + fi + + # Check OAuth callback endpoint + if echo "$CADDYFILE" | grep -q "handle /chat/oauth/callback"; then + tr_pass "Chat OAuth callback handle block (handle /chat/oauth/callback)" + else + tr_fail "Missing Chat OAuth callback handle block (handle /chat/oauth/callback)" + fi + + # Check catch-all for /chat/* + if echo "$CADDYFILE" | grep -q "handle /chat/\*"; then + tr_pass "Chat catch-all handle block (handle /chat/*)" + else + tr_fail "Missing Chat catch-all handle block (handle /chat/*)" + fi + + # Check reverse_proxy to Chat on port 8080 + if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8080"; then + tr_pass "Chat reverse_proxy configured (127.0.0.1:8080)" + else + tr_fail "Missing Chat reverse_proxy (127.0.0.1:8080)" + fi + + # Check forward_auth block for /chat/* + if echo "$CADDYFILE" | grep -A10 "handle /chat/\*" | grep -q "forward_auth"; then + tr_pass "forward_auth block configured for /chat/*" + else + tr_fail "Missing forward_auth block for /chat/*" + fi + + # Check forward_auth URI + if echo "$CADDYFILE" | grep -q "uri /chat/auth/verify"; then + tr_pass "forward_auth URI configured (/chat/auth/verify)" + else + tr_fail "Missing forward_auth URI (/chat/auth/verify)" + fi +} + +check_root_redirect() { + tr_section "Validating root redirect" + + # Check root redirect to /forge/ + if echo "$CADDYFILE" | grep -q "redir /forge/ 302"; then + tr_pass "Root redirect to /forge/ configured (302)" + else + tr_fail "Missing root redirect to /forge/" + fi +} + +# ───────────────────────────────────────────────────────────────────────────── +# Main +# ───────────────────────────────────────────────────────────────────────────── + +main() { + tr_info "Extracting Caddyfile template from $EDGE_TEMPLATE" + + # Extract Caddyfile + CADDYFILE=$(extract_caddyfile "$EDGE_TEMPLATE") + + if [ -z "$CADDYFILE" ]; then + tr_fail "Could not extract Caddyfile template" + exit 1 + fi + + tr_pass "Caddyfile template extracted successfully" + + # Run all validation checks + check_forgejo_routing + check_woodpecker_routing + check_staging_routing + check_chat_routing + check_root_redirect + + # Summary + tr_section "Test Summary" + tr_info "Passed: $PASSED" + tr_info "Failed: $FAILED" + + if [ "$FAILED" -gt 0 ]; then + tr_fail "Some checks failed" + exit 1 + fi + + tr_pass "All routing blocks validated!" + exit 0 +} + +main diff --git a/tests/test-duplicate-service-detection.sh b/tests/test-duplicate-service-detection.sh new file mode 100755 index 0000000..11fde86 --- /dev/null +++ b/tests/test-duplicate-service-detection.sh @@ -0,0 +1,210 @@ +#!/usr/bin/env bash +# tests/test-duplicate-service-detection.sh — Unit test for duplicate service detection +# +# Tests that the compose generator correctly detects duplicate service names +# between ENABLE_LLAMA_AGENT=1 and [agents.llama] TOML configuration. + +set -euo pipefail + +# Get the absolute path to the disinto root +DISINTO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +TEST_DIR=$(mktemp -d) +trap "rm -rf \"\$TEST_DIR\"" EXIT + +FAILED=0 + +fail() { printf 'FAIL: %s\n' "$*" >&2; FAILED=1; } +pass() { printf 'PASS: %s\n' "$*"; } + +# Test 1: Duplicate between ENABLE_LLAMA_AGENT and [agents.llama] +echo "=== Test 1: Duplicate between ENABLE_LLAMA_AGENT and [agents.llama] ===" + +# Create projects directory and test project TOML with an agent named "llama" +mkdir -p "${TEST_DIR}/projects" +cat > "${TEST_DIR}/projects/test-project.toml" <<'TOMLEOF' +name = "test-project" +description = "Test project for duplicate detection" + +[ci] +woodpecker_repo_id = "123" + +[agents.llama] +base_url = "http://localhost:8080" +model = "qwen:latest" +roles = ["dev"] +forge_user = "llama-bot" +TOMLEOF + +# Create a minimal compose file +cat > "${TEST_DIR}/docker-compose.yml" <<'COMPOSEEOF' +# Test compose file +services: + agents: + image: test:latest + command: echo "hello" + +volumes: + test-data: + +networks: + test-net: +COMPOSEEOF + +# Set up the test environment +export FACTORY_ROOT="${TEST_DIR}" +export PROJECT_NAME="test-project" +export ENABLE_LLAMA_AGENT="1" +export FORGE_TOKEN="" +export FORGE_PASS="" +export CLAUDE_TIMEOUT="7200" +export POLL_INTERVAL="300" +export GARDENER_INTERVAL="21600" +export ARCHITECT_INTERVAL="21600" +export PLANNER_INTERVAL="43200" +export SUPERVISOR_INTERVAL="1200" + +# Source the generators module and run the compose generator directly +source "${DISINTO_ROOT}/lib/generators.sh" + +# Delete the compose file to force regeneration +rm -f "${TEST_DIR}/docker-compose.yml" + +# Run the compose generator directly +if _generate_compose_impl 3000 false 2>&1 | tee "${TEST_DIR}/output.txt"; then + # Check if the output contains the duplicate error message + if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output.txt"; then + pass "Duplicate detection: correctly detected conflict between ENABLE_LLAMA_AGENT and [agents.llama]" + else + fail "Duplicate detection: should have detected conflict between ENABLE_LLAMA_AGENT and [agents.llama]" + cat "${TEST_DIR}/output.txt" >&2 + fi +else + # Generator should fail with non-zero exit code + if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output.txt"; then + pass "Duplicate detection: correctly detected conflict and returned non-zero exit code" + else + fail "Duplicate detection: should have failed with duplicate error" + cat "${TEST_DIR}/output.txt" >&2 + fi +fi + +# Test 2: No duplicate when only ENABLE_LLAMA_AGENT is set (no conflicting TOML) +echo "" +echo "=== Test 2: No duplicate when only ENABLE_LLAMA_AGENT is set ===" + +# Remove the projects directory created in Test 1 +rm -rf "${TEST_DIR}/projects" + +# Create a fresh compose file +cat > "${TEST_DIR}/docker-compose.yml" <<'COMPOSEEOF' +# Test compose file +services: + agents: + image: test:latest + +volumes: + test-data: + +networks: + test-net: +COMPOSEEOF + +# Set ENABLE_LLAMA_AGENT +export ENABLE_LLAMA_AGENT="1" + +# Delete the compose file to force regeneration +rm -f "${TEST_DIR}/docker-compose.yml" + +if _generate_compose_impl 3000 false 2>&1 | tee "${TEST_DIR}/output2.txt"; then + if grep -q "Duplicate" "${TEST_DIR}/output2.txt"; then + fail "No duplicate: should not detect duplicate when only ENABLE_LLAMA_AGENT is set" + else + pass "No duplicate: correctly generated compose without duplicates" + fi +else + # Non-zero exit is fine if there's a legitimate reason (e.g., missing files) + if grep -q "Duplicate" "${TEST_DIR}/output2.txt"; then + fail "No duplicate: should not detect duplicate when only ENABLE_LLAMA_AGENT is set" + else + pass "No duplicate: generator failed for other reason (acceptable)" + fi +fi + +# Test 3: Duplicate between two TOML agents with same name +echo "" +echo "=== Test 3: Duplicate between two TOML agents with same name ===" + +rm -f "${TEST_DIR}/docker-compose.yml" + +# Create projects directory for Test 3 +mkdir -p "${TEST_DIR}/projects" + +cat > "${TEST_DIR}/projects/project1.toml" <<'TOMLEOF' +name = "project1" +description = "First project" + +[ci] +woodpecker_repo_id = "1" + +[agents.llama] +base_url = "http://localhost:8080" +model = "qwen:latest" +roles = ["dev"] +forge_user = "llama-bot1" +TOMLEOF + +cat > "${TEST_DIR}/projects/project2.toml" <<'TOMLEOF' +name = "project2" +description = "Second project" + +[ci] +woodpecker_repo_id = "2" + +[agents.llama] +base_url = "http://localhost:8080" +model = "qwen:latest" +roles = ["dev"] +forge_user = "llama-bot2" +TOMLEOF + +cat > "${TEST_DIR}/docker-compose.yml" <<'COMPOSEEOF' +# Test compose file +services: + agents: + image: test:latest + +volumes: + test-data: + +networks: + test-net: +COMPOSEEOF + +unset ENABLE_LLAMA_AGENT + +# Delete the compose file to force regeneration +rm -f "${TEST_DIR}/docker-compose.yml" + +if _generate_compose_impl 3000 false 2>&1 | tee "${TEST_DIR}/output3.txt"; then + if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output3.txt"; then + pass "Duplicate detection: correctly detected conflict between two [agents.llama] blocks" + else + fail "Duplicate detection: should have detected conflict between two [agents.llama] blocks" + cat "${TEST_DIR}/output3.txt" >&2 + fi +else + if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output3.txt"; then + pass "Duplicate detection: correctly detected conflict and returned non-zero exit code" + else + fail "Duplicate detection: should have failed with duplicate error" + cat "${TEST_DIR}/output3.txt" >&2 + fi +fi + +# Summary +echo "" +if [ "$FAILED" -ne 0 ]; then + echo "=== TESTS FAILED ===" + exit 1 +fi +echo "=== ALL TESTS PASSED ===" diff --git a/tests/test-watchdog-process-group.sh b/tests/test-watchdog-process-group.sh new file mode 100755 index 0000000..54fedf9 --- /dev/null +++ b/tests/test-watchdog-process-group.sh @@ -0,0 +1,129 @@ +#!/usr/bin/env bash +# test-watchdog-process-group.sh — Test that claude_run_with_watchdog kills orphan children +# +# This test verifies that when claude_run_with_watchdog terminates the Claude process, +# all child processes (including those spawned by Claude's Bash tool) are also killed. +# +# Reproducer scenario: +# 1. Create a fake "claude" stub that: +# a. Spawns a long-running child process (sleep 3600) +# b. Writes a result marker to stdout to trigger idle detection +# c. Stays running +# 2. Run claude_run_with_watchdog with the stub +# 3. Before the fix: sleep child survives (orphaned to PID 1) +# 4. After the fix: sleep child dies (killed as part of process group with -PID) +# +# Usage: ./tests/test-watchdog-process-group.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")/.." && pwd)" +TEST_TMP="/tmp/test-watchdog-$$" +LOGFILE="${TEST_TMP}/log.txt" +PASS=true + +# shellcheck disable=SC2317 +cleanup_test() { + rm -rf "$TEST_TMP" +} +trap cleanup_test EXIT INT TERM + +mkdir -p "$TEST_TMP" + +log() { + printf '[TEST] %s\n' "$*" | tee -a "$LOGFILE" +} + +fail() { + printf '[TEST] FAIL: %s\n' "$*" | tee -a "$LOGFILE" + PASS=false +} + +pass() { + printf '[TEST] PASS: %s\n' "$*" | tee -a "$LOGFILE" +} + +# Export required environment variables +export CLAUDE_TIMEOUT=10 # Short timeout for testing +export CLAUDE_IDLE_GRACE=2 # Short grace period for testing +export LOGFILE="${LOGFILE}" # Required by agent-sdk.sh + +# Create a fake claude stub that: +# 1. Spawns a long-running child process (sleep 3600) that will become an orphan if parent is killed +# 2. Writes a result marker to stdout (to trigger the watchdog's idle-after-result path) +# 3. Stays running so the watchdog can kill it +cat > "${TEST_TMP}/fake-claude" << 'FAKE_CLAUDE_EOF' +#!/usr/bin/env bash +# Fake claude that spawns a child and stays running +# Simulates Claude's behavior when it spawns a Bash tool command + +# Write result marker to stdout (triggers watchdog idle detection) +echo '{"type":"result","session_id":"test-session-123","verdict":"APPROVE"}' + +# Spawn a child that simulates Claude's Bash tool hanging +# This is the process that should be killed when the parent is terminated +sleep 3600 & +CHILD_PID=$! + +# Log the child PID for debugging +echo "FAKE_CLAUDE_CHILD_PID=$CHILD_PID" >&2 + +# Stay running - sleep in a loop so the watchdog can kill us +while true; do + sleep 3600 & + wait $! 2>/dev/null || true +done +FAKE_CLAUDE_EOF +chmod +x "${TEST_TMP}/fake-claude" + +log "Testing claude_run_with_watchdog process group cleanup..." + +# Source the library and run claude_run_with_watchdog +cd "$SCRIPT_DIR" +source lib/agent-sdk.sh + +log "Starting claude_run_with_watchdog with fake claude..." + +# Run the function directly (not as a script) +# We need to capture output and redirect stderr +OUTPUT_FILE="${TEST_TMP}/output.txt" +timeout 35 bash -c " + source '${SCRIPT_DIR}/lib/agent-sdk.sh' + CLAUDE_TIMEOUT=10 CLAUDE_IDLE_GRACE=2 LOGFILE='${LOGFILE}' claude_run_with_watchdog '${TEST_TMP}/fake-claude' > '${OUTPUT_FILE}' 2>&1 + exit \$? +" || true + +# Give the watchdog a moment to clean up +log "Waiting for cleanup..." +sleep 5 + +# More precise check: look for sleep 3600 processes +# These would be the orphans from our fake claude +ORPHAN_COUNT=$(pgrep -a sleep 2>/dev/null | grep -c "sleep 3600" 2>/dev/null || echo "0") + +if [ "$ORPHAN_COUNT" -gt 0 ]; then + log "Found $ORPHAN_COUNT orphan sleep 3600 processes:" + pgrep -a sleep | grep "sleep 3600" + fail "Orphan children found - process group cleanup did not work" +else + pass "No orphan children found - process group cleanup worked" +fi + +# Also verify that the fake claude itself is not running +FAKE_CLAUDE_COUNT=$(pgrep -c -f "fake-claude" 2>/dev/null || echo "0") +if [ "$FAKE_CLAUDE_COUNT" -gt 0 ]; then + log "Found $FAKE_CLAUDE_COUNT fake-claude processes still running" + fail "Fake claude process(es) still running" +else + pass "Fake claude process terminated" +fi + +# Summary +echo "" +if [ "$PASS" = true ]; then + log "All tests passed!" + exit 0 +else + log "Some tests failed. See log at $LOGFILE" + exit 1 +fi diff --git a/tests/vault-import.bats b/tests/vault-import.bats index 890a900..e59e92e 100644 --- a/tests/vault-import.bats +++ b/tests/vault-import.bats @@ -137,6 +137,7 @@ setup() { "${VAULT_ADDR}/v1/kv/data/disinto/shared/woodpecker" [ "$status" -eq 0 ] echo "$output" | grep -q "wp-agent-secret" + # Forgejo keys are normalized: WP_FORGEJO_* → forgejo_* (no wp_ prefix in key name) echo "$output" | grep -q "wp-forgejo-client" echo "$output" | grep -q "wp-forgejo-secret" echo "$output" | grep -q "wp-token" @@ -294,6 +295,8 @@ setup() { "deploy-key-test" "npm-test-token" "dockerhub-test-token" + # Note: forgejo-client and forgejo-secret are NOT in the output + # because they are read from Vault, not logged ) for pattern in "${secret_patterns[@]}"; do diff --git a/tools/edge-control/README.md b/tools/edge-control/README.md index 019b385..0c95dda 100644 --- a/tools/edge-control/README.md +++ b/tools/edge-control/README.md @@ -21,6 +21,7 @@ This control plane runs on the public edge host (Debian DO box) and provides: │ │ disinto-register│ │ /var/lib/disinto/ │ │ │ │ (authorized_keys│ │ ├── registry.json (source of truth) │ │ │ │ forced cmd) │ │ ├── registry.lock (flock) │ │ +│ │ │ │ └── allowlist.json (admin-approved names) │ │ │ │ │ │ └── authorized_keys (rebuildable) │ │ │ └────────┬─────────┘ └───────────────────────────────────────────────┘ │ │ │ │ @@ -79,7 +80,7 @@ curl -sL https://raw.githubusercontent.com/disinto-admin/disinto/fix/issue-621/t - `disinto-tunnel` — no password, no shell, only receives reverse tunnels 2. **Creates data directory**: - - `/var/lib/disinto/` with `registry.json`, `registry.lock` + - `/var/lib/disinto/` with `registry.json`, `registry.lock`, `allowlist.json` - Permissions: `root:disinto-register 0750` 3. **Installs Caddy**: @@ -180,6 +181,43 @@ Shows all registered tunnels with their ports and FQDNs. } ``` +## Allowlist + +The allowlist prevents project name squatting by requiring admin approval before a name can be registered. It is **opt-in**: when `allowlist.json` is empty (no project entries), registration works as before. Once the admin adds entries, only approved names are accepted. + +### Setup + +Edit `/var/lib/disinto/allowlist.json` as root: + +```json +{ + "version": 1, + "allowed": { + "myproject": { + "pubkey_fingerprint": "SHA256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + "open-project": { + "pubkey_fingerprint": "" + } + } +} +``` + +- **With `pubkey_fingerprint`**: Only the specified SSH key can register this project name. The fingerprint is the SHA256 output of `ssh-keygen -lf <keyfile>`. +- **With empty `pubkey_fingerprint`**: Any caller may register this project name (name reservation without key binding). +- **Not listed**: Registration is refused with `{"error":"name not approved"}`. + +### Workflow + +1. Admin edits `/var/lib/disinto/allowlist.json` (via ops repo PR, or direct `ssh root@edge`). +2. File is `root:root 0644` — `disinto-register` only reads it; `register.sh` never mutates it. +3. Callers run `register` as usual. The allowlist is checked transparently. + +### Security + +- The allowlist is a **first-come-first-serve defense**: once a name is approved for a key, no one else can claim it. +- It does **not** replace per-operation ownership checks (sibling issue #1094) — it only prevents the initial race. + ## Recovery ### After State Loss @@ -274,6 +312,7 @@ ssh disinto-register@edge.disinto.ai "register myproject $(cat ~/.ssh/id_ed25519 - `lib/ports.sh` — Port allocator over `20000-29999`, jq-based, flockd - `lib/authorized_keys.sh` — Deterministic rebuild of `disinto-tunnel` authorized_keys - `lib/caddy.sh` — POST to Caddy admin API for route mapping +- `/var/lib/disinto/allowlist.json` — Admin-approved project name allowlist (root-owned, read-only by register.sh) ## Dependencies diff --git a/tools/edge-control/install.sh b/tools/edge-control/install.sh index 9571311..cf21456 100755 --- a/tools/edge-control/install.sh +++ b/tools/edge-control/install.sh @@ -7,7 +7,7 @@ # # What it does: # 1. Creates users: disinto-register, disinto-tunnel -# 2. Creates /var/lib/disinto/ with registry.json, registry.lock +# 2. Creates /var/lib/disinto/ with registry.json, registry.lock, allowlist.json # 3. Installs Caddy with Gandi DNS plugin # 4. Sets up SSH authorized_keys for both users # 5. Installs control plane scripts to /opt/disinto-edge/ @@ -44,6 +44,7 @@ REGISTRY_DIR="/var/lib/disinto" CADDY_VERSION="2.8.4" DOMAIN_SUFFIX="disinto.ai" EXTRA_CADDYFILE="/etc/caddy/extra.d/*.caddy" +ADMIN_TAG="admin" usage() { cat <<EOF @@ -57,6 +58,7 @@ Options: --domain-suffix <suffix> Domain suffix for tunnels (default: disinto.ai) --extra-caddyfile <path> Import path for operator-owned Caddy config (default: /etc/caddy/extra.d/*.caddy) + --admin-tag <name> Caller tag for the initial admin key (default: admin) -h, --help Show this help Example: @@ -91,6 +93,10 @@ while [[ $# -gt 0 ]]; do EXTRA_CADDYFILE="$2" shift 2 ;; + --admin-tag) + ADMIN_TAG="$2" + shift 2 + ;; -h|--help) usage ;; @@ -152,8 +158,53 @@ LOCK_FILE="${REGISTRY_DIR}/registry.lock" touch "$LOCK_FILE" chmod 0644 "$LOCK_FILE" +# Initialize allowlist.json (empty = no restrictions until admin populates) +ALLOWLIST_FILE="${REGISTRY_DIR}/allowlist.json" +if [ ! -f "$ALLOWLIST_FILE" ]; then + echo '{"version":1,"allowed":{}}' > "$ALLOWLIST_FILE" + chmod 0644 "$ALLOWLIST_FILE" + chown root:root "$ALLOWLIST_FILE" + log_info "Initialized allowlist: ${ALLOWLIST_FILE}" +fi + # ============================================================================= -# Step 3: Install Caddy with Gandi DNS plugin +# Step 3: Create audit log directory and logrotate config +# ============================================================================= +log_info "Setting up audit log..." + +LOG_DIR="/var/log/disinto" +LOG_FILE="${LOG_DIR}/edge-register.log" + +mkdir -p "$LOG_DIR" +chown root:disinto-register "$LOG_DIR" +chmod 0750 "$LOG_DIR" + +# Touch the log file so it exists from day one +touch "$LOG_FILE" +chmod 0640 "$LOG_FILE" +chown root:disinto-register "$LOG_FILE" + +# Install logrotate config (daily rotation, 30 days retention) +LOGROTATE_CONF="/etc/logrotate.d/disinto-edge" +cat > "$LOGROTATE_CONF" <<EOF +${LOG_FILE} { + daily + rotate 30 + compress + delaycompress + missingok + notifempty + create 0640 root disinto-register + copytruncate +} +EOF +chmod 0644 "$LOGROTATE_CONF" + +log_info "Audit log: ${LOG_FILE}" +log_info "Logrotate config: ${LOGROTATE_CONF}" + +# ============================================================================= +# Step 4: Install Caddy with Gandi DNS plugin # ============================================================================= log_info "Installing Caddy ${CADDY_VERSION} with Gandi DNS plugin..." @@ -284,7 +335,7 @@ systemctl restart caddy 2>/dev/null || { log_info "Caddy configured with admin API on 127.0.0.1:2019" # ============================================================================= -# Step 4: Install control plane scripts +# Step 5: Install control plane scripts # ============================================================================= log_info "Installing control plane scripts to ${INSTALL_DIR}..." @@ -306,7 +357,7 @@ chmod 750 "${INSTALL_DIR}/lib" log_info "Control plane scripts installed" # ============================================================================= -# Step 5: Set up SSH authorized_keys +# Step 6: Set up SSH authorized_keys # ============================================================================= log_info "Setting up SSH authorized_keys..." @@ -348,7 +399,7 @@ source "${INSTALL_DIR}/lib/authorized_keys.sh" rebuild_authorized_keys # ============================================================================= -# Step 6: Configure forced command for disinto-register +# Step 7: Configure forced command for disinto-register # ============================================================================= log_info "Configuring forced command for disinto-register..." @@ -359,8 +410,8 @@ if [ -n "$ADMIN_PUBKEY" ]; then KEY_TYPE="${ADMIN_PUBKEY%% *}" KEY_DATA="${ADMIN_PUBKEY#* }" - # Create forced command entry - FORCED_CMD="restrict,command=\"${INSTALL_DIR}/register.sh\" ${KEY_TYPE} ${KEY_DATA}" + # Create forced command entry with caller attribution tag + FORCED_CMD="restrict,command=\"${INSTALL_DIR}/register.sh --as ${ADMIN_TAG}\" ${KEY_TYPE} ${KEY_DATA}" # Replace the pubkey line echo "$FORCED_CMD" > /home/disinto-register/.ssh/authorized_keys @@ -371,7 +422,7 @@ if [ -n "$ADMIN_PUBKEY" ]; then fi # ============================================================================= -# Step 7: Final configuration +# Step 8: Final configuration # ============================================================================= log_info "Configuring domain suffix: ${DOMAIN_SUFFIX}" diff --git a/tools/edge-control/lib/ports.sh b/tools/edge-control/lib/ports.sh index 7fe447f..1f5efac 100755 --- a/tools/edge-control/lib/ports.sh +++ b/tools/edge-control/lib/ports.sh @@ -54,13 +54,14 @@ _registry_write() { } # Allocate a port for a project -# Usage: allocate_port <project> <pubkey> <fqdn> +# Usage: allocate_port <project> <pubkey> <fqdn> [<registered_by>] # Returns: port number on stdout # Writes: registry.json with project entry allocate_port() { local project="$1" local pubkey="$2" local fqdn="$3" + local registered_by="${4:-unknown}" _ensure_registry_dir @@ -116,11 +117,13 @@ allocate_port() { --arg pubkey "$pubkey" \ --arg fqdn "$fqdn" \ --arg timestamp "$timestamp" \ + --arg registered_by "$registered_by" \ '.projects[$project] = { "port": $port, "fqdn": $fqdn, "pubkey": $pubkey, - "registered_at": $timestamp + "registered_at": $timestamp, + "registered_by": $registered_by }') _registry_write "$new_registry" @@ -184,7 +187,7 @@ list_ports() { local registry registry=$(_registry_read) - echo "$registry" | jq -r '.projects | to_entries | map({name: .key, port: .value.port, fqdn: .value.fqdn}) | .[] | @json' 2>/dev/null + echo "$registry" | jq -r '.projects | to_entries | map({name: .key, port: .value.port, fqdn: .value.fqdn, registered_by: (.value.registered_by // "unknown")}) | .[] | @json' 2>/dev/null } # Get full project info from registry diff --git a/tools/edge-control/register.sh b/tools/edge-control/register.sh index 3ac0d09..850a172 100755 --- a/tools/edge-control/register.sh +++ b/tools/edge-control/register.sh @@ -5,9 +5,13 @@ # This script runs as a forced command for the disinto-register SSH user. # It parses SSH_ORIGINAL_COMMAND and dispatches to register|deregister|list. # +# Per-caller attribution: each admin key's forced-command passes --as <tag>, +# which is stored as registered_by in the registry. Missing --as defaults to +# "unknown" for backwards compatibility. +# # Usage (via SSH): # ssh disinto-register@edge "register <project> <pubkey>" -# ssh disinto-register@edge "deregister <project>" +# ssh disinto-register@edge "deregister <project> <pubkey>" # ssh disinto-register@edge "list" # # Output: JSON on stdout @@ -25,12 +29,68 @@ source "${SCRIPT_DIR}/lib/authorized_keys.sh" # Domain suffix DOMAIN_SUFFIX="${DOMAIN_SUFFIX:-disinto.ai}" +# Reserved project names — operator-adjacent, internal roles, and subdomain-mode prefixes +RESERVED_NAMES=(www api admin root mail chat forge ci edge caddy disinto register tunnel) + +# Allowlist path (root-owned, never mutated by this script) +ALLOWLIST_FILE="${ALLOWLIST_FILE:-/var/lib/disinto/allowlist.json}" + +# Audit log path +AUDIT_LOG="${AUDIT_LOG:-/var/log/disinto/edge-register.log}" + +# Captured error from check_allowlist (used for JSON response) +_ALLOWLIST_ERROR="" + +# Caller tag (set via --as <tag> in forced command) +CALLER="unknown" + +# Parse script arguments (from forced command, not SSH_ORIGINAL_COMMAND) +while [[ $# -gt 0 ]]; do + case $1 in + --as) + CALLER="$2" + shift 2 + ;; + *) + shift + ;; + esac +done + +# Append one line to the audit log. +# Usage: audit_log <action> <project> <port> <pubkey_fp> +# Fails silently — write errors are warned but never abort. +audit_log() { + local action="$1" project="$2" port="$3" pubkey_fp="$4" + local timestamp + timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + + local line="${timestamp} ${action} project=${project} port=${port} pubkey_fp=${pubkey_fp} caller=${CALLER}" + + # Ensure log directory exists + local log_dir + log_dir=$(dirname "$AUDIT_LOG") + if [ ! -d "$log_dir" ]; then + mkdir -p "$log_dir" 2>/dev/null || { + echo "[WARN] audit log: cannot create ${log_dir}" >&2 + return 0 + } + chown root:disinto-register "$log_dir" 2>/dev/null || true + chmod 0750 "$log_dir" + fi + + # Append — write failure is non-fatal + if ! printf '%s\n' "$line" >> "$AUDIT_LOG" 2>/dev/null; then + echo "[WARN] audit log: failed to write to ${AUDIT_LOG}" >&2 + fi +} + # Print usage usage() { cat <<EOF Usage: register <project> <pubkey> Register a new tunnel - deregister <project> Remove a tunnel + deregister <project> <pubkey> Remove a tunnel (requires owner pubkey) list List all registered tunnels Example: @@ -39,23 +99,75 @@ EOF exit 1 } -# TODO(#713): Subdomain fallback — if subpath routing (#704/#708) fails, this -# function would need to register additional routes for forge.<project>, -# ci.<project>, chat.<project> subdomains (or accept a --subdomain parameter). -# See docs/edge-routing-fallback.md for the full pivot plan. +# Check whether the project/pubkey pair is allowed by the allowlist. +# Usage: check_allowlist <project> <pubkey> +# Returns: 0 if allowed, 1 if denied (prints error JSON to stderr) +check_allowlist() { + local project="$1" + local pubkey="$2" + + # If allowlist file does not exist, allow all (opt-in policy) + if [ ! -f "$ALLOWLIST_FILE" ]; then + return 0 + fi + + # Look up the project in the allowlist + local entry + entry=$(jq -c --arg p "$project" '.allowed[$p] // empty' "$ALLOWLIST_FILE" 2>/dev/null) || entry="" + + if [ -z "$entry" ]; then + # Project not in allowlist at all + _ALLOWLIST_ERROR="name not approved" + return 1 + fi + + # Project found — check pubkey fingerprint binding + local bound_fingerprint + bound_fingerprint=$(echo "$entry" | jq -r '.pubkey_fingerprint // ""' 2>/dev/null) + + if [ -n "$bound_fingerprint" ]; then + # Fingerprint is bound — verify caller's pubkey matches + local caller_fingerprint + caller_fingerprint=$(ssh-keygen -lf /dev/stdin <<<"$pubkey" 2>/dev/null | awk '{print $2}') || caller_fingerprint="" + + if [ -z "$caller_fingerprint" ]; then + _ALLOWLIST_ERROR="invalid pubkey for fingerprint check" + return 1 + fi + + if [ "$caller_fingerprint" != "$bound_fingerprint" ]; then + _ALLOWLIST_ERROR="pubkey does not match allowed key for this project" + return 1 + fi + fi + + return 0 +} # Register a new tunnel # Usage: do_register <project> <pubkey> +# When EDGE_ROUTING_MODE=subdomain, also registers forge.<project>, ci.<project>, +# and chat.<project> subdomain routes (see docs/edge-routing-fallback.md). do_register() { local project="$1" local pubkey="$2" - # Validate project name (alphanumeric, hyphens, underscores) - if ! [[ "$project" =~ ^[a-zA-Z0-9_-]+$ ]]; then + # Validate project name — strict DNS label: lowercase alphanumeric, inner hyphens, + # 3-63 chars, no leading/trailing hyphen, no underscore (RFC 1035) + if ! [[ "$project" =~ ^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$ ]]; then echo '{"error":"invalid project name"}' exit 1 fi + # Check against reserved names + local reserved + for reserved in "${RESERVED_NAMES[@]}"; do + if [[ "$project" = "$reserved" ]]; then + echo '{"error":"name reserved"}' + exit 1 + fi + done + # Extract key type and key from pubkey (format: "ssh-ed25519 AAAAC3...") local key_type key key_type=$(echo "$pubkey" | awk '{print $1}') @@ -75,30 +187,65 @@ do_register() { # Full pubkey for registry local full_pubkey="${key_type} ${key}" + # Check allowlist (opt-in: no file = allow all) + if ! check_allowlist "$project" "$full_pubkey"; then + echo "{\"error\":\"${_ALLOWLIST_ERROR}\"}" + exit 1 + fi + # Allocate port (idempotent - returns existing if already registered) local port - port=$(allocate_port "$project" "$full_pubkey" "${project}.${DOMAIN_SUFFIX}") + port=$(allocate_port "$project" "$full_pubkey" "${project}.${DOMAIN_SUFFIX}" "$CALLER") - # Add Caddy route + # Add Caddy route for main project domain add_route "$project" "$port" + # Subdomain mode: register additional routes for per-service subdomains + local routing_mode="${EDGE_ROUTING_MODE:-subpath}" + if [ "$routing_mode" = "subdomain" ]; then + local subdomain + for subdomain in forge ci chat; do + add_route "${subdomain}.${project}" "$port" + done + fi + # Rebuild authorized_keys for tunnel user rebuild_authorized_keys # Reload Caddy reload_caddy - # Return JSON response - echo "{\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\"}" + # Audit log + local pubkey_fp + pubkey_fp=$(ssh-keygen -lf /dev/stdin <<<"$full_pubkey" 2>/dev/null | awk '{print $2}') || pubkey_fp="unknown" + audit_log "register" "$project" "$port" "$pubkey_fp" + + # Build JSON response + local response="{\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\"" + if [ "$routing_mode" = "subdomain" ]; then + response="${response},\"routing_mode\":\"subdomain\"" + response="${response},\"subdomains\":{\"forge\":\"forge.${project}.${DOMAIN_SUFFIX}\",\"ci\":\"ci.${project}.${DOMAIN_SUFFIX}\",\"chat\":\"chat.${project}.${DOMAIN_SUFFIX}\"}" + fi + response="${response}}" + echo "$response" } # Deregister a tunnel -# Usage: do_deregister <project> +# Usage: do_deregister <project> <pubkey> do_deregister() { local project="$1" + local caller_pubkey="$2" - # Get current port before removing - local port + if [ -z "$caller_pubkey" ]; then + echo '{"error":"deregister requires <project> <pubkey>"}' + exit 1 + fi + + # Record who is deregistering before removal + local deregistered_by="$CALLER" + + # Get current port and pubkey before removing + local port pubkey_fp port=$(get_port "$project") if [ -z "$port" ]; then @@ -106,20 +253,42 @@ do_deregister() { exit 1 fi + # Verify caller owns this project — pubkey must match stored value + local stored_pubkey + stored_pubkey=$(get_project_info "$project" | jq -r '.pubkey // empty' 2>/dev/null) || stored_pubkey="" + if [ "$caller_pubkey" != "$stored_pubkey" ]; then + echo '{"error":"pubkey mismatch"}' + exit 1 + fi + + pubkey_fp=$(ssh-keygen -lf /dev/stdin <<<"$stored_pubkey" 2>/dev/null | awk '{print $2}') || pubkey_fp="unknown" + # Remove from registry free_port "$project" >/dev/null - # Remove Caddy route + # Remove Caddy route for main project domain remove_route "$project" + # Subdomain mode: also remove per-service subdomain routes + local routing_mode="${EDGE_ROUTING_MODE:-subpath}" + if [ "$routing_mode" = "subdomain" ]; then + local subdomain + for subdomain in forge ci chat; do + remove_route "${subdomain}.${project}" + done + fi + # Rebuild authorized_keys for tunnel user rebuild_authorized_keys # Reload Caddy reload_caddy + # Audit log + audit_log "deregister" "$project" "$port" "$pubkey_fp" + # Return JSON response - echo "{\"removed\":true,\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\"}" + echo "{\"removed\":true,\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\",\"deregistered_by\":\"${deregistered_by}\"}" } # List all registered tunnels @@ -175,13 +344,17 @@ main() { do_register "$project" "$pubkey" ;; deregister) - # deregister <project> - local project="$args" - if [ -z "$project" ]; then - echo '{"error":"deregister requires <project>"}' + # deregister <project> <pubkey> + local project="${args%% *}" + local pubkey="${args#* }" + if [ "$pubkey" = "$args" ]; then + pubkey="" + fi + if [ -z "$project" ] || [ -z "$pubkey" ]; then + echo '{"error":"deregister requires <project> <pubkey>"}' exit 1 fi - do_deregister "$project" + do_deregister "$project" "$pubkey" ;; list) do_list diff --git a/tools/edge-control/verify-chat-sandbox.sh b/tools/edge-control/verify-chat-sandbox.sh deleted file mode 100755 index 245d1da..0000000 --- a/tools/edge-control/verify-chat-sandbox.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# verify-chat-sandbox.sh — One-shot sandbox verification for disinto-chat (#706) -# -# Runs against a live compose project and asserts hardening constraints. -# Exit 0 if all pass, non-zero otherwise. - -CONTAINER="disinto-chat" -PASS=0 -FAIL=0 - -pass() { printf ' ✓ %s\n' "$1"; PASS=$((PASS + 1)); } -fail() { printf ' ✗ %s\n' "$1"; FAIL=$((FAIL + 1)); } - -echo "=== disinto-chat sandbox verification ===" -echo - -# --- docker inspect checks --- - -inspect_json=$(docker inspect "$CONTAINER" 2>/dev/null) || { - echo "ERROR: container '$CONTAINER' not found or not running" - exit 1 -} - -# ReadonlyRootfs -readonly_rootfs=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['ReadonlyRootfs'])") -if [ "$readonly_rootfs" = "True" ]; then - pass "ReadonlyRootfs=true" -else - fail "ReadonlyRootfs expected true, got $readonly_rootfs" -fi - -# CapAdd — should be null or empty -cap_add=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['CapAdd'])") -if [ "$cap_add" = "None" ] || [ "$cap_add" = "[]" ]; then - pass "CapAdd=null (no extra capabilities)" -else - fail "CapAdd expected null, got $cap_add" -fi - -# CapDrop — should contain ALL -cap_drop=$(echo "$inspect_json" | python3 -c "import sys,json; caps=json.load(sys.stdin)[0]['HostConfig']['CapDrop'] or []; print(' '.join(caps))") -if echo "$cap_drop" | grep -q "ALL"; then - pass "CapDrop contains ALL" -else - fail "CapDrop expected ALL, got: $cap_drop" -fi - -# PidsLimit -pids_limit=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['PidsLimit'])") -if [ "$pids_limit" = "128" ]; then - pass "PidsLimit=128" -else - fail "PidsLimit expected 128, got $pids_limit" -fi - -# Memory limit (512MB = 536870912 bytes) -mem_limit=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['Memory'])") -if [ "$mem_limit" = "536870912" ]; then - pass "Memory=512m" -else - fail "Memory expected 536870912, got $mem_limit" -fi - -# SecurityOpt — must contain no-new-privileges -sec_opt=$(echo "$inspect_json" | python3 -c "import sys,json; opts=json.load(sys.stdin)[0]['HostConfig']['SecurityOpt'] or []; print(' '.join(opts))") -if echo "$sec_opt" | grep -q "no-new-privileges"; then - pass "SecurityOpt contains no-new-privileges" -else - fail "SecurityOpt missing no-new-privileges (got: $sec_opt)" -fi - -# No docker.sock bind mount -binds=$(echo "$inspect_json" | python3 -c "import sys,json; binds=json.load(sys.stdin)[0]['HostConfig']['Binds'] or []; print(' '.join(binds))") -if echo "$binds" | grep -q "docker.sock"; then - fail "docker.sock is bind-mounted" -else - pass "No docker.sock mount" -fi - -echo - -# --- runtime exec checks --- - -# touch /root/x should fail (read-only rootfs + unprivileged user) -if docker exec "$CONTAINER" touch /root/x 2>/dev/null; then - fail "touch /root/x succeeded (should fail)" -else - pass "touch /root/x correctly denied" -fi - -# /var/run/docker.sock must not exist -if docker exec "$CONTAINER" ls /var/run/docker.sock 2>/dev/null; then - fail "/var/run/docker.sock is accessible" -else - pass "/var/run/docker.sock not accessible" -fi - -# /etc/shadow should not be readable -if docker exec "$CONTAINER" cat /etc/shadow 2>/dev/null; then - fail "cat /etc/shadow succeeded (should fail)" -else - pass "cat /etc/shadow correctly denied" -fi - -echo -echo "=== Results: $PASS passed, $FAIL failed ===" - -if [ "$FAIL" -gt 0 ]; then - exit 1 -fi -exit 0 diff --git a/tools/vault-import.sh b/tools/vault-import.sh index f85dd16..dd1b73a 100755 --- a/tools/vault-import.sh +++ b/tools/vault-import.sh @@ -391,7 +391,13 @@ EOF local val="${!key}" if [ -n "$val" ]; then local lowercase_key="${key,,}" - operations+=("woodpecker|$lowercase_key|$env_file|$key") + # Normalize WP_FORGEJO_* → forgejo_* (strip wp_ prefix to match template) + if [[ "$lowercase_key" =~ ^wp_(.+)$ ]]; then + vault_key="${BASH_REMATCH[1]}" + else + vault_key="$lowercase_key" + fi + operations+=("woodpecker|$vault_key|$env_file|$key") fi done diff --git a/tools/vault-seed-agents.sh b/tools/vault-seed-agents.sh new file mode 100755 index 0000000..fbed325 --- /dev/null +++ b/tools/vault-seed-agents.sh @@ -0,0 +1,176 @@ +#!/usr/bin/env bash +# ============================================================================= +# tools/vault-seed-agents.sh — Idempotent seed for all bot KV paths +# +# Part of the Nomad+Vault migration (S4.1, issue #955). Populates +# kv/disinto/bots/<role> with token + pass for each of the 7 agent roles +# plus the vault bot. Handles the "fresh factory, no .env import" case. +# +# Companion to tools/vault-import.sh — when that runs against a box with +# an existing stack, it overwrites seeded values with real ones. +# +# Idempotency contract (per bot): +# - Both token and pass present → skip, log "<role> unchanged". +# - Either missing → generate random values for missing keys, preserve +# existing keys, write back atomically. +# +# Preconditions: +# - Vault reachable + unsealed at $VAULT_ADDR. +# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable. +# - curl, jq, openssl +# +# Usage: +# tools/vault-seed-agents.sh +# tools/vault-seed-agents.sh --dry-run +# +# Exit codes: +# 0 success (seed applied, or already applied) +# 1 precondition / API / mount-mismatch failure +# ============================================================================= +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +# shellcheck source=../lib/hvault.sh +source "${REPO_ROOT}/lib/hvault.sh" + +KV_MOUNT="kv" +TOKEN_BYTES=32 # 32 bytes → 64 hex chars +PASS_BYTES=16 # 16 bytes → 32 hex chars + +# All bot roles seeded by this script. +BOT_ROLES=(dev review gardener architect planner predictor supervisor vault) + +LOG_TAG="[vault-seed-agents]" +log() { printf '%s %s\n' "$LOG_TAG" "$*"; } +die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; } + +# ── Flag parsing ───────────────────────────────────────────────────────────── +# while/shift shape — distinct from forgejo (arity:value case) and +# woodpecker (for-loop). +DRY_RUN=0 +while [ $# -gt 0 ]; do + case "$1" in + --dry-run) DRY_RUN=1 ;; + -h|--help) + printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")" + printf 'Seed kv/disinto/bots/<role> with token + pass for all agent\n' + printf 'roles. Idempotent: existing non-empty values are preserved.\n\n' + printf ' --dry-run Print planned actions without writing.\n' + exit 0 + ;; + *) die "invalid argument: ${1} (try --help)" ;; + esac + shift +done + +# ── Preconditions ──────────────────────────────────────────────────────────── +for bin in curl jq openssl; do + command -v "$bin" >/dev/null 2>&1 \ + || die "required binary not found: ${bin}" +done +[ -n "${VAULT_ADDR:-}" ] \ + || die "VAULT_ADDR unset — e.g. export VAULT_ADDR=http://127.0.0.1:8200" +hvault_token_lookup >/dev/null \ + || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN" + +# ── Step 1: ensure kv/ mount exists and is KV v2 ──────────────────────────── +log "── Step 1: ensure ${KV_MOUNT}/ is KV v2 ──" +export DRY_RUN +hvault_ensure_kv_v2 "$KV_MOUNT" "${LOG_TAG}" \ + || die "KV mount check failed" + +# ── Step 2: seed each bot role ─────────────────────────────────────────────── +total_generated=0 + +# Check if shared forge credentials exist for dev role fallback +shared_forge_exists=0 +shared_forge_raw="$(hvault_get_or_empty "${KV_MOUNT}/data/disinto/shared/forge")" \ + || true +if [ -n "$shared_forge_raw" ]; then + shared_forge_token="$(printf '%s' "$shared_forge_raw" | jq -r '.data.data.token // ""')" + shared_forge_pass="$(printf '%s' "$shared_forge_raw" | jq -r '.data.data.pass // ""')" + if [ -n "$shared_forge_token" ] && [ -n "$shared_forge_pass" ]; then + shared_forge_exists=1 + fi +fi + +for role in "${BOT_ROLES[@]}"; do + kv_logical="disinto/bots/${role}" + kv_api="${KV_MOUNT}/data/${kv_logical}" + + log "── seed ${kv_logical} ──" + + existing_raw="$(hvault_get_or_empty "${kv_api}")" \ + || die "failed to read ${kv_api}" + + existing_token="" + existing_pass="" + existing_data="{}" + if [ -n "$existing_raw" ]; then + existing_data="$(printf '%s' "$existing_raw" | jq '.data.data // {}')" + existing_token="$(printf '%s' "$existing_raw" | jq -r '.data.data.token // ""')" + existing_pass="$(printf '%s' "$existing_raw" | jq -r '.data.data.pass // ""')" + fi + + generated=() + desired_token="$existing_token" + desired_pass="$existing_pass" + + # Special case: dev role uses shared forge credentials if available + if [ "$role" = "dev" ] && [ "$shared_forge_exists" -eq 1 ]; then + # Use shared FORGE_TOKEN + FORGE_PASS for dev role + if [ -z "$existing_token" ]; then + desired_token="$shared_forge_token" + generated+=("token") + fi + if [ -z "$existing_pass" ]; then + desired_pass="$shared_forge_pass" + generated+=("pass") + fi + else + # Generate random values for missing keys + if [ -z "$existing_token" ]; then + generated+=("token") + fi + if [ -z "$existing_pass" ]; then + generated+=("pass") + fi + + for key in "${generated[@]}"; do + case "$key" in + token) desired_token="$(openssl rand -hex "$TOKEN_BYTES")" ;; + pass) desired_pass="$(openssl rand -hex "$PASS_BYTES")" ;; + esac + done + fi + + if [ "${#generated[@]}" -eq 0 ]; then + log "${role}: unchanged" + continue + fi + + if [ "$DRY_RUN" -eq 1 ]; then + log "[dry-run] ${role}: would generate ${generated[*]}" + total_generated=$(( total_generated + ${#generated[@]} )) + continue + fi + + # Merge new keys into existing data to preserve any keys we don't own. + payload="$(printf '%s' "$existing_data" \ + | jq --arg t "$desired_token" --arg p "$desired_pass" \ + '{data: (. + {token: $t, pass: $p})}')" + + _hvault_request POST "${kv_api}" "$payload" >/dev/null \ + || die "failed to write ${kv_api}" + + log "${role}: generated ${generated[*]}" + total_generated=$(( total_generated + ${#generated[@]} )) +done + +if [ "$total_generated" -eq 0 ]; then + log "all bot paths already seeded — no-op" +else + log "done — ${total_generated} key(s) seeded across ${#BOT_ROLES[@]} bot paths" +fi diff --git a/tools/vault-seed-chat.sh b/tools/vault-seed-chat.sh new file mode 100755 index 0000000..08e3837 --- /dev/null +++ b/tools/vault-seed-chat.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash +# ============================================================================= +# tools/vault-seed-chat.sh — Idempotent seed for kv/disinto/shared/chat +# +# Part of the Nomad+Vault migration (S5.2, issue #989). Populates the KV v2 +# path that nomad/jobs/chat.hcl reads from, so a clean-install factory +# (no old-stack secrets to import) still has per-key values for +# CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, and FORWARD_AUTH_SECRET. +# +# Companion to tools/vault-import.sh (S2.2) — when that import runs against +# a box with an existing stack, it overwrites these seeded values with the +# real ones. Order doesn't matter: whichever runs last wins, and both +# scripts are idempotent in the sense that re-running never rotates an +# existing non-empty key. +# +# Uses _hvault_seed_key (lib/hvault.sh) for each key — the helper reads +# existing data and merges to preserve sibling keys (KV v2 replaces .data +# atomically). +# +# Preconditions: +# - Vault reachable + unsealed at $VAULT_ADDR. +# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable. +# - The `kv/` mount is enabled as KV v2. +# +# Requires: VAULT_ADDR, VAULT_TOKEN, curl, jq, openssl +# +# Usage: +# tools/vault-seed-chat.sh +# tools/vault-seed-chat.sh --dry-run +# +# Exit codes: +# 0 success (seed applied, or already applied) +# 1 precondition / API / mount-mismatch failure +# ============================================================================= +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +# shellcheck source=../lib/hvault.sh +source "${REPO_ROOT}/lib/hvault.sh" + +KV_MOUNT="kv" +KV_LOGICAL_PATH="disinto/shared/chat" + +# Keys to seed — array-driven loop (structurally distinct from forgejo's +# sequential if-blocks and agents' role loop). +SEED_KEYS=(chat_oauth_client_id chat_oauth_client_secret forward_auth_secret) + +LOG_TAG="[vault-seed-chat]" +log() { printf '%s %s\n' "$LOG_TAG" "$*"; } +die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; } + +# ── Flag parsing — [[ ]] guard + case: shape distinct from forgejo +# (arity:value case), woodpecker (for-loop), agents (while/shift). +DRY_RUN=0 +if [[ $# -gt 0 ]]; then + case "$1" in + --dry-run) DRY_RUN=1 ;; + -h|--help) + printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")" + printf 'Seed kv/disinto/shared/chat with random OAuth client\n' + printf 'credentials and forward auth secret if missing.\n' + printf 'Idempotent: existing non-empty values are preserved.\n\n' + printf ' --dry-run Show what would be seeded without writing.\n' + exit 0 + ;; + *) die "invalid argument: ${1} (try --help)" ;; + esac +fi + +# ── Preconditions — inline check-or-die (shape distinct from agents' array +# loop and forgejo's continuation-line style) ───────────────────────────── +command -v curl >/dev/null 2>&1 || die "curl not found" +command -v jq >/dev/null 2>&1 || die "jq not found" +command -v openssl >/dev/null 2>&1 || die "openssl not found" +[ -n "${VAULT_ADDR:-}" ] || die "VAULT_ADDR unset — export VAULT_ADDR=http://127.0.0.1:8200" +hvault_token_lookup >/dev/null || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN" + +# ── Step 1/2: ensure kv/ mount exists and is KV v2 ─────────────────────────── +log "── Step 1/2: ensure ${KV_MOUNT}/ is KV v2 ──" +export DRY_RUN +hvault_ensure_kv_v2 "$KV_MOUNT" "${LOG_TAG}" \ + || die "KV mount check failed" + +# ── Step 2/2: seed missing keys via _hvault_seed_key helper ────────────────── +log "── Step 2/2: seed ${KV_LOGICAL_PATH} ──" + +generated=() +for key in "${SEED_KEYS[@]}"; do + if [ "$DRY_RUN" -eq 1 ]; then + # Check existence without writing + existing=$(hvault_kv_get "$KV_LOGICAL_PATH" "$key" 2>/dev/null) || true + if [ -z "$existing" ]; then + generated+=("$key") + log "[dry-run] ${key} would be generated" + else + log "[dry-run] ${key} unchanged" + fi + else + rc=0 + _hvault_seed_key "$KV_LOGICAL_PATH" "$key" || rc=$? + case "$rc" in + 0) generated+=("$key"); log "${key} generated" ;; + 1) log "${key} unchanged" ;; + *) die "API error seeding ${key} (rc=${rc})" ;; + esac + fi +done + +if [ "${#generated[@]}" -eq 0 ]; then + log "all keys present — no-op" +else + log "done — ${#generated[@]} key(s) seeded at kv/${KV_LOGICAL_PATH}" +fi diff --git a/tools/vault-seed-ops-repo.sh b/tools/vault-seed-ops-repo.sh new file mode 100755 index 0000000..09a2fba --- /dev/null +++ b/tools/vault-seed-ops-repo.sh @@ -0,0 +1,149 @@ +#!/usr/bin/env bash +# ============================================================================= +# tools/vault-seed-ops-repo.sh — Idempotent seed for kv/disinto/shared/ops-repo +# +# Part of the Nomad+Vault migration (S5.1, issue #1035). Populates the KV v2 +# path that nomad/jobs/edge.hcl dispatcher task reads from, so the edge +# proxy has FORGE_TOKEN for ops repo access. +# +# Seeds from kv/disinto/bots/vault (the vault bot credentials) — copies the +# token field to kv/disinto/shared/ops-repo. This is the "service" path that +# dispatcher uses, distinct from the "agent" path (bots/vault) used by +# agent tasks under the service-agents policy. +# +# Idempotency contract: +# - Key present with non-empty value → leave untouched, log "token unchanged". +# - Key missing or empty → copy from bots/vault, log "token copied". +# - If bots/vault is also empty → generate a random value, log "token generated". +# +# Preconditions: +# - Vault reachable + unsealed at $VAULT_ADDR. +# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable. +# - The `kv/` mount is enabled as KV v2. +# +# Requires: +# - VAULT_ADDR (e.g. http://127.0.0.1:8200) +# - VAULT_TOKEN (env OR /etc/vault.d/root.token, resolved by lib/hvault.sh) +# - curl, jq, openssl +# +# Usage: +# tools/vault-seed-ops-repo.sh +# tools/vault-seed-ops-repo.sh --dry-run +# +# Exit codes: +# 0 success (seed applied, or already applied) +# 1 precondition / API / mount-mismatch failure +# ============================================================================= +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +# shellcheck source=../lib/hvault.sh +source "${REPO_ROOT}/lib/hvault.sh" + +# KV v2 mount + logical paths +KV_MOUNT="kv" +OPS_REPO_PATH="disinto/shared/ops-repo" +VAULT_BOT_PATH="disinto/bots/vault" + +OPS_REPO_API="${KV_MOUNT}/data/${OPS_REPO_PATH}" +VAULT_BOT_API="${KV_MOUNT}/data/${VAULT_BOT_PATH}" + +log() { printf '[vault-seed-ops-repo] %s\n' "$*"; } +die() { printf '[vault-seed-ops-repo] ERROR: %s\n' "$*" >&2; exit 1; } + +# ── Flag parsing ───────────────────────────────────────────────────────────── +DRY_RUN=0 +case "$#:${1-}" in + 0:) + ;; + 1:--dry-run) + DRY_RUN=1 + ;; + 1:-h|1:--help) + printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")" + printf 'Seed kv/disinto/shared/ops-repo with FORGE_TOKEN.\n\n' + printf 'Copies token from kv/disinto/bots/vault if present;\n' + printf 'otherwise generates a random value. Idempotent:\n' + printf 'existing non-empty values are left untouched.\n\n' + printf ' --dry-run Print planned actions without writing.\n' + exit 0 + ;; + *) + die "invalid arguments: $* (try --help)" + ;; +esac + +# ── Preconditions ──────────────────────────────────────────────────────────── +for bin in curl jq openssl; do + command -v "$bin" >/dev/null 2>&1 \ + || die "required binary not found: ${bin}" +done + +[ -n "${VAULT_ADDR:-}" ] \ + || die "VAULT_ADDR unset — e.g. export VAULT_ADDR=http://127.0.0.1:8200" +hvault_token_lookup >/dev/null \ + || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN" + +# ── Step 1/2: ensure kv/ mount exists and is KV v2 ─────────────────────────── +log "── Step 1/2: ensure ${KV_MOUNT}/ is KV v2 ──" +export DRY_RUN +hvault_ensure_kv_v2 "$KV_MOUNT" "[vault-seed-ops-repo]" \ + || die "KV mount check failed" + +# ── Step 2/2: seed ops-repo from vault bot ─────────────────────────────────── +log "── Step 2/2: seed ${OPS_REPO_API} ──" + +# Read existing ops-repo value +existing_raw="$(hvault_get_or_empty "${OPS_REPO_API}")" \ + || die "failed to read ${OPS_REPO_API}" + +existing_token="" +if [ -n "$existing_raw" ]; then + existing_token="$(printf '%s' "$existing_raw" | jq -r '.data.data.token // ""')" +fi + +desired_token="$existing_token" +action="" + +if [ -z "$existing_token" ]; then + # Token missing — try to copy from vault bot + bot_raw="$(hvault_get_or_empty "${VAULT_BOT_API}")" || true + if [ -n "$bot_raw" ]; then + bot_token="$(printf '%s' "$bot_raw" | jq -r '.data.data.token // ""')" + if [ -n "$bot_token" ]; then + desired_token="$bot_token" + action="copied" + fi + fi + + # If still no token, generate one + if [ -z "$desired_token" ]; then + if [ "$DRY_RUN" -eq 1 ]; then + action="generated (dry-run)" + else + desired_token="$(openssl rand -hex 32)" + action="generated" + fi + fi +fi + +if [ -z "$action" ]; then + log "all keys present at ${OPS_REPO_API} — no-op" + log "token unchanged" + exit 0 +fi + +if [ "$DRY_RUN" -eq 1 ]; then + log "[dry-run] ${OPS_REPO_PATH}: would ${action} token" + exit 0 +fi + +# Write the token +payload="$(jq -n --arg t "$desired_token" '{data: {token: $t}}')" +_hvault_request POST "${OPS_REPO_API}" "$payload" >/dev/null \ + || die "failed to write ${OPS_REPO_API}" + +log "${OPS_REPO_PATH}: ${action} token" +log "done — ${OPS_REPO_API} seeded" diff --git a/vault/policies/AGENTS.md b/vault/policies/AGENTS.md index 692c885..ed271b0 100644 --- a/vault/policies/AGENTS.md +++ b/vault/policies/AGENTS.md @@ -1,4 +1,4 @@ -<!-- last-reviewed: 8ad5aca6bbee77634b3c63523042b1d39cefa96a --> +<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 --> # vault/policies/ — Agent Instructions HashiCorp Vault ACL policies for the disinto factory. One `.hcl` file per @@ -30,6 +30,9 @@ KV v2). Vault addresses KV v2 data at `kv/data/<path>` and metadata at |---|---| | `service-forgejo` | `kv/data/disinto/shared/forgejo/*` | | `service-woodpecker` | `kv/data/disinto/shared/woodpecker/*` | +| `service-agents` | All 7 `kv/data/disinto/bots/<role>/*` namespaces + `kv/data/disinto/shared/forge/*`; composite policy for the `agents` Nomad job (S4.1) | +| `service-chat` | `kv/data/disinto/shared/chat/*`; read-only OAuth client config + forward-auth secret for the chat Nomad job (S5.2, #989) | +| `service-dispatcher` | `kv/data/disinto/runner/*` (list+read) + `kv/data/disinto/shared/ops-repo/*` (read); used by edge dispatcher sidecar (S5.1, #988) | | `bot-<role>` (dev, review, gardener, architect, planner, predictor, supervisor, vault, dev-qwen) | `kv/data/disinto/bots/<role>/*` + `kv/data/disinto/shared/forge/*` | | `runner-<TOKEN>` (GITHUB\_TOKEN, CODEBERG\_TOKEN, CLAWHUB\_TOKEN, DEPLOY\_KEY, NPM\_TOKEN, DOCKER\_HUB\_TOKEN) | `kv/data/disinto/runner/<TOKEN>` (exactly one) | | `dispatcher` | `kv/data/disinto/runner/*` + `kv/data/disinto/shared/ops-repo/*` | diff --git a/vault/policies/bot-architect.hcl b/vault/policies/bot-architect.hcl index 9381b61..9f84de1 100644 --- a/vault/policies/bot-architect.hcl +++ b/vault/policies/bot-architect.hcl @@ -3,14 +3,14 @@ # Architect agent: reads its own bot KV namespace + the shared forge URL. # Attached to the architect-agent Nomad job via workload identity (S2.4). -path "kv/data/disinto/bots/architect/*" { +path "kv/data/disinto/bots/architect" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/architect/*" { +path "kv/metadata/disinto/bots/architect" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-dev-qwen.hcl b/vault/policies/bot-dev-qwen.hcl index b71283d..50f2d2d 100644 --- a/vault/policies/bot-dev-qwen.hcl +++ b/vault/policies/bot-dev-qwen.hcl @@ -5,14 +5,14 @@ # via workload identity (S2.4). KV path mirrors the bot basename: # kv/disinto/bots/dev-qwen/*. -path "kv/data/disinto/bots/dev-qwen/*" { +path "kv/data/disinto/bots/dev-qwen" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/dev-qwen/*" { +path "kv/metadata/disinto/bots/dev-qwen" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-dev.hcl b/vault/policies/bot-dev.hcl index 3771288..35cf6de 100644 --- a/vault/policies/bot-dev.hcl +++ b/vault/policies/bot-dev.hcl @@ -3,14 +3,14 @@ # Dev agent: reads its own bot KV namespace + the shared forge URL. # Attached to the dev-agent Nomad job via workload identity (S2.4). -path "kv/data/disinto/bots/dev/*" { +path "kv/data/disinto/bots/dev" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/dev/*" { +path "kv/metadata/disinto/bots/dev" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-gardener.hcl b/vault/policies/bot-gardener.hcl index f5ef230..ed45431 100644 --- a/vault/policies/bot-gardener.hcl +++ b/vault/policies/bot-gardener.hcl @@ -3,14 +3,14 @@ # Gardener agent: reads its own bot KV namespace + the shared forge URL. # Attached to the gardener-agent Nomad job via workload identity (S2.4). -path "kv/data/disinto/bots/gardener/*" { +path "kv/data/disinto/bots/gardener" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/gardener/*" { +path "kv/metadata/disinto/bots/gardener" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-planner.hcl b/vault/policies/bot-planner.hcl index 440f6aa..ae3e910 100644 --- a/vault/policies/bot-planner.hcl +++ b/vault/policies/bot-planner.hcl @@ -3,14 +3,14 @@ # Planner agent: reads its own bot KV namespace + the shared forge URL. # Attached to the planner-agent Nomad job via workload identity (S2.4). -path "kv/data/disinto/bots/planner/*" { +path "kv/data/disinto/bots/planner" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/planner/*" { +path "kv/metadata/disinto/bots/planner" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-predictor.hcl b/vault/policies/bot-predictor.hcl index 3a3b6b2..7159d72 100644 --- a/vault/policies/bot-predictor.hcl +++ b/vault/policies/bot-predictor.hcl @@ -3,14 +3,14 @@ # Predictor agent: reads its own bot KV namespace + the shared forge URL. # Attached to the predictor-agent Nomad job via workload identity (S2.4). -path "kv/data/disinto/bots/predictor/*" { +path "kv/data/disinto/bots/predictor" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/predictor/*" { +path "kv/metadata/disinto/bots/predictor" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-review.hcl b/vault/policies/bot-review.hcl index 04c7668..f0ddfe4 100644 --- a/vault/policies/bot-review.hcl +++ b/vault/policies/bot-review.hcl @@ -3,14 +3,14 @@ # Review agent: reads its own bot KV namespace + the shared forge URL. # Attached to the review-agent Nomad job via workload identity (S2.4). -path "kv/data/disinto/bots/review/*" { +path "kv/data/disinto/bots/review" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/review/*" { +path "kv/metadata/disinto/bots/review" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-supervisor.hcl b/vault/policies/bot-supervisor.hcl index 36ecc90..4d7f1e2 100644 --- a/vault/policies/bot-supervisor.hcl +++ b/vault/policies/bot-supervisor.hcl @@ -3,14 +3,14 @@ # Supervisor agent: reads its own bot KV namespace + the shared forge URL. # Attached to the supervisor-agent Nomad job via workload identity (S2.4). -path "kv/data/disinto/bots/supervisor/*" { +path "kv/data/disinto/bots/supervisor" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/supervisor/*" { +path "kv/metadata/disinto/bots/supervisor" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/bot-vault.hcl b/vault/policies/bot-vault.hcl index 0a088dd..d2f9fe4 100644 --- a/vault/policies/bot-vault.hcl +++ b/vault/policies/bot-vault.hcl @@ -7,14 +7,14 @@ # NOTE: distinct from the runner-* policies, which gate per-secret access # for vault-runner ephemeral dispatches (Step 5). -path "kv/data/disinto/bots/vault/*" { +path "kv/data/disinto/bots/vault" { capabilities = ["read"] } -path "kv/metadata/disinto/bots/vault/*" { +path "kv/metadata/disinto/bots/vault" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/forge/*" { +path "kv/data/disinto/shared/forge" { capabilities = ["read"] } diff --git a/vault/policies/dispatcher.hcl b/vault/policies/dispatcher.hcl index 6383ae7..a18f1ab 100644 --- a/vault/policies/dispatcher.hcl +++ b/vault/policies/dispatcher.hcl @@ -20,10 +20,10 @@ path "kv/metadata/disinto/runner/*" { capabilities = ["list", "read"] } -path "kv/data/disinto/shared/ops-repo/*" { +path "kv/data/disinto/shared/ops-repo" { capabilities = ["read"] } -path "kv/metadata/disinto/shared/ops-repo/*" { +path "kv/metadata/disinto/shared/ops-repo" { capabilities = ["list", "read"] } diff --git a/vault/policies/service-agents.hcl b/vault/policies/service-agents.hcl new file mode 100644 index 0000000..4c65a13 --- /dev/null +++ b/vault/policies/service-agents.hcl @@ -0,0 +1,76 @@ +# vault/policies/service-agents.hcl +# +# Composite policy for the `agents` Nomad job (S4.1, issue #955). +# Grants read access to all 7 bot KV namespaces + shared forge config, +# so a single job running all agent roles can pull per-bot tokens from +# Vault via workload identity. + +# ── Per-bot KV paths (token + pass per role) ───────────────────────────────── +path "kv/data/disinto/bots/dev" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/dev" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/bots/review" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/review" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/bots/gardener" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/gardener" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/bots/architect" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/architect" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/bots/planner" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/planner" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/bots/predictor" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/predictor" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/bots/supervisor" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/supervisor" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/bots/vault" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/bots/vault" { + capabilities = ["list", "read"] +} + +# ── Shared forge config (URL, bot usernames) ───────────────────────────────── +path "kv/data/disinto/shared/forge" { + capabilities = ["read"] +} diff --git a/vault/policies/service-chat.hcl b/vault/policies/service-chat.hcl new file mode 100644 index 0000000..a021006 --- /dev/null +++ b/vault/policies/service-chat.hcl @@ -0,0 +1,15 @@ +# vault/policies/service-chat.hcl +# +# Read-only access to shared Chat secrets (OAuth client config, forward auth +# secret). Attached to the Chat Nomad job via workload identity (S5.2). +# +# Scope: kv/disinto/shared/chat — entries owned by the operator and +# shared between the chat service and edge proxy. + +path "kv/data/disinto/shared/chat" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/shared/chat" { + capabilities = ["list", "read"] +} diff --git a/vault/policies/service-dispatcher.hcl b/vault/policies/service-dispatcher.hcl new file mode 100644 index 0000000..bdc7ddb --- /dev/null +++ b/vault/policies/service-dispatcher.hcl @@ -0,0 +1,29 @@ +# vault/policies/service-dispatcher.hcl +# +# Edge dispatcher policy: needs to enumerate the runner secret namespace +# (to check secret presence before dispatching) and read the shared +# ops-repo credentials (token + clone URL) it uses to fetch action TOMLs. +# +# Scope: +# - kv/disinto/runner/* — read all per-secret values + list keys +# - kv/disinto/shared/ops-repo/* — read the ops-repo creds bundle +# +# The actual ephemeral runner container created per dispatch gets the +# narrow runner-<NAME> policies, NOT this one. This policy stays bound +# to the long-running dispatcher only. + +path "kv/data/disinto/runner/*" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/runner/*" { + capabilities = ["list", "read"] +} + +path "kv/data/disinto/shared/ops-repo" { + capabilities = ["read"] +} + +path "kv/metadata/disinto/shared/ops-repo" { + capabilities = ["list", "read"] +} diff --git a/vault/policies/service-woodpecker.hcl b/vault/policies/service-woodpecker.hcl index 19c9726..34b3795 100644 --- a/vault/policies/service-woodpecker.hcl +++ b/vault/policies/service-woodpecker.hcl @@ -6,10 +6,10 @@ # Scope: kv/disinto/shared/woodpecker/* — entries owned by the operator # and consumed by woodpecker-server + woodpecker-agent. -path "kv/data/disinto/shared/woodpecker/*" { +path "kv/data/disinto/shared/woodpecker" { capabilities = ["read"] } -path "kv/metadata/disinto/shared/woodpecker/*" { +path "kv/metadata/disinto/shared/woodpecker" { capabilities = ["list", "read"] } diff --git a/vault/roles.yaml b/vault/roles.yaml index 2109504..c058a30 100644 --- a/vault/roles.yaml +++ b/vault/roles.yaml @@ -62,6 +62,21 @@ roles: namespace: default job_id: woodpecker-agent + # ── Agents composite (nomad/jobs/agents.hcl — S4.1) ────────────────────── + # Single job running all 7 agent roles. Uses a composite policy + # (vault/policies/service-agents.hcl) that unions all bot KV paths. + - name: service-agents + policy: service-agents + namespace: default + job_id: agents + + # ── Chat UI (nomad/jobs/chat.hcl — S5.2) ───────────────────────────────── + # Claude chat UI service with OAuth secrets. Uses vault/policies/service-chat.hcl. + - name: service-chat + policy: service-chat + namespace: default + job_id: chat + # ── Per-agent bots (nomad/jobs/bot-<role>.hcl — land in later steps) ─────── # job_id placeholders match the policy name 1:1 until each bot's jobspec # lands. When a bot's jobspec is added under nomad/jobs/, update the @@ -113,10 +128,10 @@ roles: job_id: bot-vault # ── Edge dispatcher ──────────────────────────────────────────────────────── - - name: dispatcher - policy: dispatcher + - name: service-dispatcher + policy: service-dispatcher namespace: default - job_id: dispatcher + job_id: edge # ── Per-secret runner roles ──────────────────────────────────────────────── # vault-runner (Step 5) composes runner-<NAME> policies onto each