Compare commits

..

1 commit

Author: Agent
SHA1: 677f2a97e7
Message: fix: [nomad-step-2] S2-fix-B — extract _hvault_default_env helper to lib/hvault.sh (prereq for other S2 fixes) (#919)
All checks were successful
ci/woodpecker/push/ci Pipeline was successful
ci/woodpecker/push/nomad-validate Pipeline was successful
ci/woodpecker/pr/ci Pipeline was successful
ci/woodpecker/pr/nomad-validate Pipeline was successful
ci/woodpecker/pr/secret-scan Pipeline was successful
Date: 2026-04-16 21:16:40 +00:00
93 changed files with 805 additions and 7464 deletions

.gitignore (vendored): 1 change
View file

@ -20,6 +20,7 @@ metrics/supervisor-metrics.jsonl
# OS
.DS_Store
dev/ci-fixes-*.json
gardener/dust.jsonl
# Individual encrypted secrets (managed by disinto secrets add)
secrets/

View file

@ -294,45 +294,6 @@ def main() -> int:
"9f6ae8e7811575b964279d8820494eb0": "Verification helper: for loop done pattern",
# Standard lib source block shared across formula-driven agent run scripts
"330e5809a00b95ade1a5fce2d749b94b": "Standard lib source block (env.sh, formula-session.sh, worktree.sh, guard.sh, agent-sdk.sh)",
# Test data for duplicate service detection tests (#850)
# Intentionally duplicated TOML blocks in smoke-init.sh and test-duplicate-service-detection.sh
"334967b8b4f1a8d3b0b9b8e0912f3bfb": "Test TOML: [agents.llama] block header (smoke-init.sh + test-duplicate-service-detection.sh)",
"d82f30077e5bb23b5fc01db003033d5d": "Test TOML: [agents.llama] block body (smoke-init.sh + test-duplicate-service-detection.sh)",
# Common vault-seed script patterns: logging helpers + flag parsing
# Used in tools/vault-seed-woodpecker.sh + lib/init/nomad/wp-oauth-register.sh
"843a1cbf987952697d4e05e96ed2b2d5": "Logging helpers + DRY_RUN init (vault-seed-woodpecker + wp-oauth-register)",
"ee51df9642f2ef37af73b0c15f4d8406": "Logging helpers + DRY_RUN loop start (vault-seed-woodpecker + wp-oauth-register)",
"9a57368f3c1dfd29ec328596b86962a0": "Flag parsing loop + case start (vault-seed-woodpecker + wp-oauth-register)",
"9d72d40ff303cbed0b7e628fc15381c3": "Case loop + dry-run handler (vault-seed-woodpecker + wp-oauth-register)",
"5b52ddbbf47948e3cbc1b383f0909588": "Help + invalid arg handler end (vault-seed-woodpecker + wp-oauth-register)",
# forgejo-bootstrap.sh follows wp-oauth-register.sh pattern (issue #1069)
"2b80185e4ae2b54e2e01f33e5555c688": "Standard header (set -euo pipefail, SCRIPT_DIR, REPO_ROOT) (forgejo-bootstrap + wp-oauth-register)",
"38a1f20a60d69f0d6bfb06a0532b3bd7": "Logging helpers + DRY_RUN init (forgejo-bootstrap + wp-oauth-register)",
"4dd3c526fa29bdaa88b274c3d7d01032": "Flag parsing loop + case start (forgejo-bootstrap + wp-oauth-register)",
# Common vault-seed script preamble + precondition patterns
# Shared across tools/vault-seed-{forgejo,agents,woodpecker}.sh
"dff3675c151fcdbd2fef798826ae919b": "Vault-seed preamble: set -euo + path setup + source hvault.sh + KV_MOUNT",
"1cd9f0d083e24e6e6b2071db9b6dae09": "Vault-seed preconditions: binary check loop + VAULT_ADDR guard",
"63bfa88d71764c95c65a9a248f3e40ab": "Vault-seed preconditions: binary check end + VAULT_ADDR die",
"34873ad3570b211ce1d90468ab6ac94c": "Vault-seed preconditions: VAULT_ADDR die + hvault_token_lookup",
"71a52270f249e843cda48ad896d9f781": "Vault-seed preconditions: VAULT_ADDR + hvault_token_lookup + die",
# Common vault-seed script flag parsing patterns
# Shared across tools/vault-seed-{forgejo,ops-repo}.sh
"6906b7787796c2ccb8dd622e2ad4e7bf": "vault-seed DRY_RUN init + case pattern (forgejo + ops-repo)",
"a0df5283b616b964f8bc32fd99ec1b5a": "vault-seed case pattern start (forgejo + ops-repo)",
"e15e3272fdd9f0f46ce9e726aea9f853": "vault-seed case pattern dry-run handler (forgejo + ops-repo)",
"c9f22385cc49a3dac1d336bc14c6315b": "vault-seed DRY_RUN assignment (forgejo + ops-repo)",
"106f4071e88f841b3208b01144cd1c39": "vault-seed case pattern dry-run end (forgejo + ops-repo)",
"c15506dcb6bb340b25d1c39d442dd2e6": "vault-seed help text + invalid arg handler (forgejo + ops-repo)",
"1feecd3b3caf00045fae938ddf2811de": "vault-seed invalid arg handler (forgejo + ops-repo)",
"919780d5e7182715344f5aa02b191294": "vault-seed invalid arg + esac pattern (forgejo + ops-repo)",
"8dce1d292bce8e60ef4c0665b62945b0": "vault-seed esac + binary check loop (forgejo + ops-repo)",
"ca043687143a5b47bd54e65a99ce8ee8": "vault-seed binary check loop start (forgejo + ops-repo)",
"aefd9f655411a955395e6e5995ddbe6f": "vault-seed binary check pattern (forgejo + ops-repo)",
"60f0c46deb5491599457efb4048918e5": "vault-seed VAULT_ADDR + hvault_token_lookup check (forgejo + ops-repo)",
"f6838f581ef6b4d82b55268389032769": "vault-seed VAULT_ADDR + hvault_token_lookup die (forgejo + ops-repo)",
# Common shell control-flow: if → return 1 → fi → fi (env.sh + register.sh)
"a8bdb7f1a5d8cbd0a5921b17b6cf6f4d": "Common shell control-flow (return 1 / fi / fi / return 0 / }) (env.sh + register.sh)",
}
if not sh_files:
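For context on how a whitelist like the one above is typically consumed, here is a minimal sketch of a duplicate-block checker that hashes normalized shell snippets and skips documented duplicates. The block-splitting heuristic and all function names are assumptions for illustration, not the actual script.

import hashlib
import sys
from pathlib import Path

# Same shape as the dict above: md5 of a normalized block -> human-readable description.
KNOWN_DUPLICATES = {
    "330e5809a00b95ade1a5fce2d749b94b": "Standard lib source block (example entry from the table above)",
}

def _normalize(block: str) -> str:
    # Drop blank lines and trailing whitespace so formatting noise does not change the hash.
    return "\n".join(line.rstrip() for line in block.splitlines() if line.strip())

def find_duplicates(sh_files):
    # Report identical multi-line blocks shared by two or more scripts, unless whitelisted.
    seen = {}  # digest -> first file the block appeared in
    found = False
    for path in sh_files:
        text = Path(path).read_text()
        # Assumed heuristic: blank-line-separated paragraphs are candidate blocks.
        for block in text.split("\n\n"):
            norm = _normalize(block)
            if norm.count("\n") < 3:  # skip short blocks
                continue
            digest = hashlib.md5(norm.encode()).hexdigest()
            if digest in KNOWN_DUPLICATES:
                continue  # documented, intentional duplication
            if digest in seen and seen[digest] != path:
                print(f"duplicate block in {path} (also in {seen[digest]})", file=sys.stderr)
                found = True
            else:
                seen.setdefault(digest, path)
    return found

if __name__ == "__main__":
    sys.exit(1 if find_duplicates(sys.argv[1:]) else 0)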

View file

@ -1,317 +0,0 @@
# =============================================================================
# .woodpecker/edge-subpath.yml — Edge subpath routing static checks
#
# Static validation for edge subpath routing configuration. This pipeline does
# NOT run live service curls — it validates the configuration that would be
# used by a deployed edge proxy.
#
# Checks:
# 1. shellcheck — syntax check on tests/smoke-edge-subpath.sh
# 2. caddy validate — validate the Caddyfile template syntax
# 3. caddyfile-routing-test — verify Caddyfile routing block shape
# 4. test-caddyfile-routing — run standalone unit test for Caddyfile structure
#
# Triggers:
# - Pull requests that modify edge-related files
#
# Environment variables (inherited from WOODPECKER_ENVIRONMENT):
# EDGE_BASE_URL — Edge proxy URL for reference (default: http://localhost)
# EDGE_TIMEOUT — Request timeout in seconds (default: 30)
# EDGE_MAX_RETRIES — Max retries per request (default: 3)
# =============================================================================
when:
event: pull_request
steps:
# ── 1. ShellCheck on smoke script ────────────────────────────────────────
# `shellcheck` validates bash syntax, style, and common pitfalls.
# Exit codes:
# 0 — all checks passed
# 1 — one or more issues found
- name: shellcheck-smoke
image: koalaman/shellcheck-alpine:stable
commands:
- shellcheck --severity=warning tests/smoke-edge-subpath.sh tests/test-caddyfile-routing.sh
# ── 2. Caddyfile template rendering ───────────────────────────────────────
# Render a mock Caddyfile for validation. The template uses Nomad's
# templating syntax ({{ range ... }}) which must be processed before Caddy
# can validate it. We render a mock version with Nomad templates expanded
# to static values for validation purposes.
- name: render-caddyfile
image: alpine:3.19
commands:
- apk add --no-cache coreutils
- |
set -e
mkdir -p edge-render
# Render mock Caddyfile with Nomad templates expanded
{
echo '# Caddyfile — edge proxy configuration (Nomad-rendered)'
echo '# Staging upstream discovered via Nomad service registration.'
echo ''
echo ':80 {'
echo ' # Redirect root to Forgejo'
echo ' handle / {'
echo ' redir /forge/ 302'
echo ' }'
echo ''
echo ' # Reverse proxy to Forgejo'
echo ' handle /forge/* {'
echo ' reverse_proxy 127.0.0.1:3000'
echo ' }'
echo ''
echo ' # Reverse proxy to Woodpecker CI'
echo ' handle /ci/* {'
echo ' reverse_proxy 127.0.0.1:8000'
echo ' }'
echo ''
echo ' # Reverse proxy to staging — dynamic port via Nomad service discovery'
echo ' handle /staging/* {'
echo ' reverse_proxy 127.0.0.1:8081'
echo ' }'
echo ''
echo ' # Chat service — reverse proxy to disinto-chat backend (#705)'
echo ' # OAuth routes bypass forward_auth — unauthenticated users need these (#709)'
echo ' handle /chat/login {'
echo ' reverse_proxy 127.0.0.1:8080'
echo ' }'
echo ' handle /chat/oauth/callback {'
echo ' reverse_proxy 127.0.0.1:8080'
echo ' }'
echo ' # Defense-in-depth: forward_auth stamps X-Forwarded-User from session (#709)'
echo ' handle /chat/* {'
echo ' forward_auth 127.0.0.1:8080 {'
echo ' uri /chat/auth/verify'
echo ' copy_headers X-Forwarded-User'
echo ' header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET}'
echo ' }'
echo ' reverse_proxy 127.0.0.1:8080'
echo ' }'
echo '}'
} > edge-render/Caddyfile
cp edge-render/Caddyfile edge-render/Caddyfile.rendered
echo "Caddyfile rendered successfully"
# ── 3. Caddy config validation ───────────────────────────────────────────
# `caddy validate` checks Caddyfile syntax and configuration.
# This validates the rendered Caddyfile against Caddy's parser.
# Exit codes:
# 0 — configuration is valid
# 1 — configuration has errors
- name: caddy-validate
image: alpine:3.19
commands:
- apk add --no-cache ca-certificates curl
- curl -sS -o /tmp/caddy "https://caddyserver.com/api/download?os=linux&arch=amd64"
- chmod +x /tmp/caddy
- /tmp/caddy version
- /tmp/caddy validate --config edge-render/Caddyfile.rendered --adapter caddyfile
# ── 4. Caddyfile routing block shape test ─────────────────────────────────
# Verify that the Caddyfile contains all required routing blocks:
# - /forge/ — Forgejo subpath
# - /ci/ — Woodpecker subpath
# - /staging/ — Staging subpath
# - /chat/ — Chat subpath with forward_auth
#
# This is a unit test that validates the expected structure without
# requiring a running Caddy instance.
- name: caddyfile-routing-test
image: alpine:3.19
commands:
- apk add --no-cache grep coreutils
- |
set -e
CADDYFILE="edge-render/Caddyfile.rendered"
echo "=== Validating Caddyfile routing blocks ==="
# Check that all required subpath handlers exist
# POSIX-safe loop (alpine /bin/sh has no arrays)
FAILED=0
for handler in "handle /forge/\*" "handle /ci/\*" "handle /staging/\*" "handle /chat/login" "handle /chat/oauth/callback" "handle /chat/\*"; do
if grep -q "$handler" "$CADDYFILE"; then
echo "[PASS] Found handler: $handler"
else
echo "[FAIL] Missing handler: $handler"
FAILED=1
fi
done
# Check forward_auth block exists for /chat/*
if grep -A5 "handle /chat/\*" "$CADDYFILE" | grep -q "forward_auth"; then
echo "[PASS] forward_auth block found for /chat/*"
else
echo "[FAIL] forward_auth block missing for /chat/*"
FAILED=1
fi
# Check reverse_proxy to Forgejo (port 3000)
if grep -q "reverse_proxy 127.0.0.1:3000" "$CADDYFILE"; then
echo "[PASS] Forgejo reverse_proxy configured (port 3000)"
else
echo "[FAIL] Forgejo reverse_proxy not configured"
FAILED=1
fi
# Check reverse_proxy to Woodpecker (port 8000)
if grep -q "reverse_proxy 127.0.0.1:8000" "$CADDYFILE"; then
echo "[PASS] Woodpecker reverse_proxy configured (port 8000)"
else
echo "[FAIL] Woodpecker reverse_proxy not configured"
FAILED=1
fi
# Check reverse_proxy to Chat (port 8080)
if grep -q "reverse_proxy 127.0.0.1:8080" "$CADDYFILE"; then
echo "[PASS] Chat reverse_proxy configured (port 8080)"
else
echo "[FAIL] Chat reverse_proxy not configured"
FAILED=1
fi
# Check root redirect to /forge/
if grep -q "redir /forge/ 302" "$CADDYFILE"; then
echo "[PASS] Root redirect to /forge/ configured"
else
echo "[FAIL] Root redirect to /forge/ not configured"
FAILED=1
fi
echo ""
if [ $FAILED -eq 0 ]; then
echo "=== All routing blocks validated ==="
exit 0
else
echo "=== Routing block validation failed ===" >&2
exit 1
fi
# ── 5. Standalone Caddyfile routing test ─────────────────────────────────
# Run the standalone unit test for Caddyfile routing block validation.
# This test extracts the Caddyfile template from edge.hcl and validates
# its structure without requiring a running Caddy instance.
- name: test-caddyfile-routing
image: alpine:3.19
commands:
- apk add --no-cache grep coreutils
- |
set -e
EDGE_TEMPLATE="nomad/jobs/edge.hcl"
echo "=== Extracting Caddyfile template from $EDGE_TEMPLATE ==="
# Extract the Caddyfile template (content between <<EOT and EOT markers)
CADDYFILE=$(sed -n '/data[[:space:]]*=[[:space:]]*<<[Ee][Oo][Tt]/,/^EOT$/p' "$EDGE_TEMPLATE" | sed '1s/.*/# Caddyfile extracted from Nomad template/; $d')
if [ -z "$CADDYFILE" ]; then
echo "ERROR: Could not extract Caddyfile template from $EDGE_TEMPLATE" >&2
exit 1
fi
echo "Caddyfile template extracted successfully"
echo ""
FAILED=0
# Check Forgejo subpath
if echo "$CADDYFILE" | grep -q "handle /forge/\*"; then
echo "[PASS] Forgejo handle block"
else
echo "[FAIL] Forgejo handle block"
FAILED=1
fi
if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:3000"; then
echo "[PASS] Forgejo reverse_proxy (port 3000)"
else
echo "[FAIL] Forgejo reverse_proxy (port 3000)"
FAILED=1
fi
# Check Woodpecker subpath
if echo "$CADDYFILE" | grep -q "handle /ci/\*"; then
echo "[PASS] Woodpecker handle block"
else
echo "[FAIL] Woodpecker handle block"
FAILED=1
fi
if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8000"; then
echo "[PASS] Woodpecker reverse_proxy (port 8000)"
else
echo "[FAIL] Woodpecker reverse_proxy (port 8000)"
FAILED=1
fi
# Check Staging subpath
if echo "$CADDYFILE" | grep -q "handle /staging/\*"; then
echo "[PASS] Staging handle block"
else
echo "[FAIL] Staging handle block"
FAILED=1
fi
if echo "$CADDYFILE" | grep -q "nomadService"; then
echo "[PASS] Staging Nomad service discovery"
else
echo "[FAIL] Staging Nomad service discovery"
FAILED=1
fi
# Check Chat subpath
if echo "$CADDYFILE" | grep -q "handle /chat/login"; then
echo "[PASS] Chat login handle block"
else
echo "[FAIL] Chat login handle block"
FAILED=1
fi
if echo "$CADDYFILE" | grep -q "handle /chat/oauth/callback"; then
echo "[PASS] Chat OAuth callback handle block"
else
echo "[FAIL] Chat OAuth callback handle block"
FAILED=1
fi
if echo "$CADDYFILE" | grep -q "handle /chat/\*"; then
echo "[PASS] Chat catch-all handle block"
else
echo "[FAIL] Chat catch-all handle block"
FAILED=1
fi
if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8080"; then
echo "[PASS] Chat reverse_proxy (port 8080)"
else
echo "[FAIL] Chat reverse_proxy (port 8080)"
FAILED=1
fi
# Check forward_auth for chat
if echo "$CADDYFILE" | grep -A10 "handle /chat/\*" | grep -q "forward_auth"; then
echo "[PASS] forward_auth block for /chat/*"
else
echo "[FAIL] forward_auth block for /chat/*"
FAILED=1
fi
# Check root redirect
if echo "$CADDYFILE" | grep -q "redir /forge/ 302"; then
echo "[PASS] Root redirect to /forge/"
else
echo "[FAIL] Root redirect to /forge/"
FAILED=1
fi
echo ""
if [ $FAILED -eq 0 ]; then
echo "=== All routing blocks validated ==="
exit 0
else
echo "=== Routing block validation failed ===" >&2
exit 1
fi
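As background for the forward_auth checks above: the Caddyfile sends every /chat/* request to a verify endpoint before proxying it, and only a 2xx answer lets the request through. Below is a minimal sketch of such a verify handler. The /chat/auth/verify path and the X-Forward-Auth-Secret / X-Forwarded-User headers come from the configuration above; the session-cookie mechanism and everything else is an illustrative assumption, not the actual chat server code.

import os
from http.server import BaseHTTPRequestHandler, HTTPServer

FORWARD_AUTH_SECRET = os.environ.get("FORWARD_AUTH_SECRET", "")
_sessions = {"example-session-id": "disinto-admin"}  # session id -> username (illustrative)

class VerifyHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        if self.path != "/chat/auth/verify":
            self.send_error(404)
            return
        # Caddy forwards the shared secret it was configured with (header_up in the Caddyfile).
        if FORWARD_AUTH_SECRET and self.headers.get("X-Forward-Auth-Secret") != FORWARD_AUTH_SECRET:
            self.send_error(403)
            return
        # Assumed session mechanism: a cookie carrying a server-side session id.
        cookie = self.headers.get("Cookie", "")
        session_id = next((p.split("=", 1)[1] for p in cookie.split("; ") if p.startswith("session=")), None)
        user = _sessions.get(session_id)
        if not user:
            self.send_error(401)  # Caddy blocks the request; the user must log in via /chat/login
            return
        # A 2xx tells Caddy to proxy the request; copy_headers picks this header up.
        self.send_response(204)
        self.send_header("X-Forwarded-User", user)
        self.end_headers()

if __name__ == "__main__":
    HTTPServer(("127.0.0.1", 8080), VerifyHandler).serve_forever()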

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Disinto — Agent Instructions
## What this repo is
@ -37,20 +37,19 @@ disinto/ (code repo)
│ examples/ — example vault action TOMLs (promote, publish, release, webhook-call)
├── lib/ env.sh, agent-sdk.sh, ci-helpers.sh, ci-debug.sh, load-project.sh, parse-deps.sh, guard.sh, mirrors.sh, pr-lifecycle.sh, issue-lifecycle.sh, worktree.sh, formula-session.sh, stack-lock.sh, forge-setup.sh, forge-push.sh, ops-setup.sh, ci-setup.sh, generators.sh, hire-agent.sh, release.sh, build-graph.py, branch-protection.sh, secret-scan.sh, tea-helpers.sh, action-vault.sh, ci-log-reader.py, git-creds.sh, sprint-filer.sh, hvault.sh
│ hooks/ — Claude Code session hooks (on-compact-reinject, on-idle-stop, on-phase-change, on-pretooluse-guard, on-session-end, on-stop-failure)
│ init/nomad/ — cluster-up.sh, install.sh, vault-init.sh, lib-systemd.sh (Nomad+Vault Step 0 installers, #821-#825); wp-oauth-register.sh (Forgejo OAuth2 app + Vault KV seeder for Woodpecker, S3.3); deploy.sh (dependency-ordered Nomad job deploy + health-wait, S4)
├── nomad/ server.hcl, client.hcl (allow_privileged for woodpecker-agent, S3-fix-5), vault.hcl — HCL configs deployed to /etc/nomad.d/ and /etc/vault.d/ by lib/init/nomad/cluster-up.sh
│ jobs/ — Nomad jobspecs: forgejo.hcl (Vault secrets via template, S2.4); woodpecker-server.hcl + woodpecker-agent.hcl (host-net, docker.sock, Vault KV, S3.1-S3.2); agents.hcl (7 roles, llama, Vault-templated bot tokens, S4.1); vault-runner.hcl (parameterized batch dispatch, S5.3); staging.hcl (Caddy file-server, dynamic port — edge discovers via service registration, S5.2); chat.hcl (Claude chat UI, tmpfs via mount block, Vault OAuth secrets, S5.2); edge.hcl (Caddy proxy + dispatcher sidecar, S5.1)
│ init/nomad/ — cluster-up.sh, install.sh, vault-init.sh, lib-systemd.sh (Nomad+Vault Step 0 installers, #821-#825)
├── nomad/ server.hcl, client.hcl, vault.hcl — HCL configs deployed to /etc/nomad.d/ and /etc/vault.d/ by lib/init/nomad/cluster-up.sh
│ jobs/ — Nomad jobspecs (forgejo.hcl reads Vault secrets via template stanza, S2.4)
├── projects/ *.toml.example — templates; *.toml — local per-box config (gitignored)
├── formulas/ Issue templates (TOML specs for multi-step agent tasks)
├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge (Caddy + chat server subprocess + dispatcher), chat (server.py, ui/ — copied into edge image at build time)
├── tools/ Operational tools: edge-control/ (register.sh, install.sh, verify-chat-sandbox.sh; register.sh enforces: reserved-name blocklist, admin-approved allowlist via /var/lib/disinto/allowlist.json, per-caller attribution via --as <tag> forced-command arg stored as registered_by, append-only audit log at /var/log/disinto/edge-register.log, ownership check on deregister requiring pubkey match)
│ vault-apply-policies.sh, vault-apply-roles.sh, vault-import.sh — Vault provisioning (S2.1/S2.2)
│ vault-seed-<svc>.sh — per-service Vault secret seeders; auto-invoked by `bin/disinto --with <svc>` (add a new file to support a new service)
├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge dispatcher, chat (server.py, entrypoint-chat.sh, Dockerfile, ui/)
├── tools/ Operational tools: edge-control/ (register.sh, install.sh, verify-chat-sandbox.sh)
│ vault-apply-policies.sh, vault-apply-roles.sh, vault-import.sh, vault-seed-forgejo.sh — Vault provisioning (S2.1/S2.2)
├── docs/ Protocol docs (PHASE-PROTOCOL.md, EVIDENCE-ARCHITECTURE.md)
├── site/ disinto.ai website content
├── tests/ Test files (mock-forgejo.py, smoke-init.sh, lib-hvault.bats, lib-generators.bats, vault-import.bats, disinto-init-nomad.bats)
├── tests/ Test files (mock-forgejo.py, smoke-init.sh, lib-hvault.bats, disinto-init-nomad.bats)
├── templates/ Issue templates
├── bin/ The `disinto` CLI script (`--with <svc>` deploys services + runs their Vault seeders)
├── bin/ The `disinto` CLI script
├── disinto-factory/ Setup documentation and skill
├── state/ Runtime state
├── .woodpecker/ Woodpecker CI pipeline configs

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Architect — Agent Instructions
## What this agent is

View file

@ -12,7 +12,6 @@
# disinto secrets <subcommand> Manage encrypted secrets
# disinto run <action-id> Run action in ephemeral runner container
# disinto ci-logs <pipeline> [--step <name>] Read CI logs from Woodpecker SQLite
# disinto backup create <outfile> Export factory state for migration
#
# Usage:
# disinto init https://github.com/user/repo
@ -40,9 +39,7 @@ source "${FACTORY_ROOT}/lib/generators.sh"
source "${FACTORY_ROOT}/lib/forge-push.sh"
source "${FACTORY_ROOT}/lib/ci-setup.sh"
source "${FACTORY_ROOT}/lib/release.sh"
source "${FACTORY_ROOT}/lib/backup.sh"
source "${FACTORY_ROOT}/lib/claude-config.sh"
source "${FACTORY_ROOT}/lib/disinto/backup.sh" # backup create/import
# ── Helpers ──────────────────────────────────────────────────────────────────
@ -65,9 +62,7 @@ Usage:
disinto hire-an-agent <agent-name> <role> [--formula <path>] [--local-model <url>] [--model <name>]
Hire a new agent (create user + .profile repo; re-run to rotate credentials)
disinto agent <subcommand> Manage agent state (enable/disable)
disinto backup create <outfile> Export factory state (issues + ops bundle)
disinto edge <verb> [options] Manage edge tunnel registrations
disinto backup <subcommand> Backup and restore factory state
Edge subcommands:
register [project] Register a new tunnel (generates keypair if needed)
@ -87,7 +82,7 @@ Init options:
--ci-id <n> Woodpecker CI repo ID (default: 0 = no CI)
--forge-url <url> Forge base URL (default: http://localhost:3000)
--backend <value> Orchestration backend: docker (default) | nomad
--with <services> (nomad) Deploy services: forgejo,woodpecker,agents,staging,chat,edge[,...] (S1.3, S3.4, S4.2, S5.2, S5.5)
--with <services> (nomad) Deploy services: forgejo[,...] (S1.3)
--empty (nomad) Bring up cluster only, no jobs (S0.4)
--bare Skip compose generation (bare-metal setup)
--build Use local docker build instead of registry images (dev mode)
@ -106,18 +101,6 @@ Hire an agent options:
CI logs options:
--step <name> Filter logs to a specific step (e.g., smoke-init)
Backup subcommands:
create <file> Create backup of factory state to tarball
import <file> Restore factory state from backup tarball
Import behavior:
- Unpacks tarball to temp directory
- Creates disinto repo via Forgejo API (mirror config is manual)
- Creates disinto-ops repo and pushes refs from bundle
- Imports issues from issues/*.json (idempotent - skips existing)
- Logs issue number mapping (Forgejo auto-assigns numbers)
- Prints summary: created X repos, pushed Y refs, imported Z issues, skipped W
EOF
exit 1
}
@ -687,7 +670,6 @@ _disinto_init_nomad() {
local import_env="${4:-}" import_sops="${5:-}" age_key="${6:-}"
local cluster_up="${FACTORY_ROOT}/lib/init/nomad/cluster-up.sh"
local deploy_sh="${FACTORY_ROOT}/lib/init/nomad/deploy.sh"
local vault_engines_sh="${FACTORY_ROOT}/lib/init/nomad/vault-engines.sh"
local vault_policies_sh="${FACTORY_ROOT}/tools/vault-apply-policies.sh"
local vault_auth_sh="${FACTORY_ROOT}/lib/init/nomad/vault-nomad-auth.sh"
local vault_import_sh="${FACTORY_ROOT}/tools/vault-import.sh"
@ -708,22 +690,15 @@ _disinto_init_nomad() {
# --empty combined with --with or any --import-* flag, so reaching
# this branch with those set is a bug in the caller.
#
# On the default (non-empty) path, vault-engines.sh (enables the kv/
# mount), vault-apply-policies.sh, and vault-nomad-auth.sh are invoked
# unconditionally — they are idempotent and cheap to re-run, and
# subsequent --with deployments depend on them. vault-import.sh is
# invoked only when an --import-* flag is set. vault-engines.sh runs
# first because every policy and role below references kv/disinto/*
# paths, which 403 if the engine is not yet mounted (issue #912).
# On the default (non-empty) path, vault-apply-policies.sh and
# vault-nomad-auth.sh are invoked unconditionally — they are idempotent
# and cheap to re-run, and subsequent --with deployments depend on
# them. vault-import.sh is invoked only when an --import-* flag is set.
local import_any=false
if [ -n "$import_env" ] || [ -n "$import_sops" ]; then
import_any=true
fi
if [ "$empty" != "true" ]; then
if [ ! -x "$vault_engines_sh" ]; then
echo "Error: ${vault_engines_sh} not found or not executable" >&2
exit 1
fi
if [ ! -x "$vault_policies_sh" ]; then
echo "Error: ${vault_policies_sh} not found or not executable" >&2
exit 1
@ -762,15 +737,10 @@ _disinto_init_nomad() {
exit 0
fi
# Vault engines + policies + auth are invoked on every nomad real-run
# path regardless of --import-* flags (they're idempotent; S2.1 + S2.3).
# Engines runs first because policies/roles/templates all reference the
# kv/ mount it enables (issue #912). Mirror that ordering in the
# dry-run plan so the operator sees the full sequence Step 2 will
# execute.
echo "── Vault engines dry-run ──────────────────────────────"
echo "[engines] [dry-run] ${vault_engines_sh} --dry-run"
echo ""
# Vault policies + auth are invoked on every nomad real-run path
# regardless of --import-* flags (they're idempotent; S2.1 + S2.3).
# Mirror that ordering in the dry-run plan so the operator sees the
# full sequence Step 2 will execute.
echo "── Vault policies dry-run ─────────────────────────────"
echo "[policies] [dry-run] ${vault_policies_sh} --dry-run"
echo ""
@ -800,37 +770,19 @@ _disinto_init_nomad() {
fi
if [ -n "$with_services" ]; then
# Interleaved seed/deploy per service (S2.6, #928, #948): match the
# real-run path so dry-run output accurately represents execution order.
# Build ordered deploy list: only include services present in with_services
local DEPLOY_ORDER=""
for ordered_svc in forgejo woodpecker-server woodpecker-agent agents staging chat edge; do
if echo ",$with_services," | grep -q ",$ordered_svc,"; then
DEPLOY_ORDER="${DEPLOY_ORDER:+${DEPLOY_ORDER} }${ordered_svc}"
fi
done
local IFS=' '
echo "[deploy] deployment order: ${DEPLOY_ORDER}"
for svc in $DEPLOY_ORDER; do
# Seed this service (if seed script exists)
local seed_name="$svc"
case "$svc" in
woodpecker-server|woodpecker-agent) seed_name="woodpecker" ;;
agents) seed_name="agents" ;;
chat) seed_name="chat" ;;
edge) seed_name="ops-repo" ;;
esac
local seed_script="${FACTORY_ROOT}/tools/vault-seed-${seed_name}.sh"
if [ -x "$seed_script" ]; then
echo "── Vault seed dry-run ─────────────────────────────────"
echo "[seed] [dry-run] ${seed_script} --dry-run"
echo ""
fi
# Deploy this service
echo "── Deploy services dry-run ────────────────────────────"
echo "[deploy] services to deploy: ${with_services}"
local IFS=','
for svc in $with_services; do
svc=$(echo "$svc" | xargs) # trim whitespace
# Validate known services first
case "$svc" in
forgejo) ;;
*)
echo "Error: unknown service '${svc}' — known: forgejo" >&2
exit 1
;;
esac
local jobspec_path="${FACTORY_ROOT}/nomad/jobs/${svc}.hcl"
if [ ! -f "$jobspec_path" ]; then
echo "Error: jobspec not found: ${jobspec_path}" >&2
@ -838,40 +790,9 @@ _disinto_init_nomad() {
fi
echo "[deploy] [dry-run] nomad job validate ${jobspec_path}"
echo "[deploy] [dry-run] nomad job run -detach ${jobspec_path}"
# Post-deploy: forgejo-bootstrap
if [ "$svc" = "forgejo" ]; then
local bootstrap_script="${FACTORY_ROOT}/lib/init/nomad/forgejo-bootstrap.sh"
echo "[deploy] [dry-run] [post-deploy] would run ${bootstrap_script}"
fi
done
echo "[deploy] dry-run complete"
fi
# Dry-run vault-runner (unconditionally, not gated by --with)
echo ""
echo "── Vault-runner dry-run ───────────────────────────────────"
local vault_runner_path="${FACTORY_ROOT}/nomad/jobs/vault-runner.hcl"
if [ -f "$vault_runner_path" ]; then
echo "[deploy] vault-runner: [dry-run] nomad job validate ${vault_runner_path}"
echo "[deploy] vault-runner: [dry-run] nomad job run -detach ${vault_runner_path}"
else
echo "[deploy] vault-runner: jobspec not found, skipping"
fi
# Build custom images dry-run (if agents, chat, or edge services are included)
if echo ",$with_services," | grep -qE ",(agents|chat|edge),"; then
echo ""
echo "── Build images dry-run ──────────────────────────────"
if echo ",$with_services," | grep -q ",agents,"; then
echo "[build] [dry-run] docker build -t disinto/agents:local -f ${FACTORY_ROOT}/docker/agents/Dockerfile ${FACTORY_ROOT}"
fi
if echo ",$with_services," | grep -q ",chat,"; then
echo "[build] [dry-run] docker build -t disinto/chat:local -f ${FACTORY_ROOT}/docker/chat/Dockerfile ${FACTORY_ROOT}/docker/chat"
fi
if echo ",$with_services," | grep -q ",edge,"; then
echo "[build] [dry-run] docker build -t disinto/edge:local -f ${FACTORY_ROOT}/docker/edge/Dockerfile ${FACTORY_ROOT}/docker/edge"
fi
fi
exit 0
fi
@ -893,22 +814,6 @@ _disinto_init_nomad() {
exit 0
fi
# Enable Vault secret engines (S2.1 / issue #912) — must precede
# policies/auth/import because every policy and every import target
# addresses paths under kv/. Idempotent, safe to re-run.
echo ""
echo "── Enabling Vault secret engines ──────────────────────"
local -a engines_cmd=("$vault_engines_sh")
if [ "$(id -u)" -eq 0 ]; then
"${engines_cmd[@]}" || exit $?
else
if ! command -v sudo >/dev/null 2>&1; then
echo "Error: vault-engines.sh must run as root and sudo is not installed" >&2
exit 1
fi
sudo -n -- "${engines_cmd[@]}" || exit $?
fi
# Apply Vault policies (S2.1) — idempotent, safe to re-run.
echo ""
echo "── Applying Vault policies ────────────────────────────"
@ -959,97 +864,36 @@ _disinto_init_nomad() {
echo "[import] no --import-env/--import-sops — skipping; set them or seed kv/disinto/* manually before deploying secret-dependent services"
fi
# Build custom images required by Nomad jobs (S4.2, S5.2, S5.5) — before deploy.
# Single-node factory dev box: no multi-node pull needed, no registry auth.
# Can upgrade to approach B (registry push/pull) later if multi-node.
if echo ",$with_services," | grep -qE ",(agents|chat|edge),"; then
echo ""
echo "── Building custom images ─────────────────────────────"
if echo ",$with_services," | grep -q ",agents,"; then
local tag="disinto/agents:local"
echo "── Building $tag ─────────────────────────────"
docker build -t "$tag" -f "${FACTORY_ROOT}/docker/agents/Dockerfile" "${FACTORY_ROOT}" 2>&1 | tail -5
fi
if echo ",$with_services," | grep -q ",chat,"; then
local tag="disinto/chat:local"
echo "── Building $tag ─────────────────────────────"
docker build -t "$tag" -f "${FACTORY_ROOT}/docker/chat/Dockerfile" "${FACTORY_ROOT}/docker/chat" 2>&1 | tail -5
fi
if echo ",$with_services," | grep -q ",edge,"; then
local tag="disinto/edge:local"
echo "── Building $tag ─────────────────────────────"
docker build -t "$tag" -f "${FACTORY_ROOT}/docker/edge/Dockerfile" "${FACTORY_ROOT}/docker/edge" 2>&1 | tail -5
fi
fi
# Interleaved seed/deploy per service (S2.6, #928, #948).
# We interleave seed + deploy per service (rather than batching all seeds, then all deploys)
# so that OAuth-dependent services can reach their dependencies during seeding.
# E.g., seed-forgejo → deploy-forgejo → seed-woodpecker (OAuth can now reach
# running forgejo) → deploy-woodpecker.
# Deploy services if requested
if [ -n "$with_services" ]; then
local vault_addr="${VAULT_ADDR:-http://127.0.0.1:8200}"
# Build ordered deploy list (S3.4, S4.2, S5.2, S5.5): forgejo → woodpecker-server → woodpecker-agent → agents → staging → chat → edge
local DEPLOY_ORDER=""
for ordered_svc in forgejo woodpecker-server woodpecker-agent agents staging chat edge; do
if echo ",$with_services," | grep -q ",$ordered_svc,"; then
DEPLOY_ORDER="${DEPLOY_ORDER:+${DEPLOY_ORDER} }${ordered_svc}"
fi
done
local IFS=' '
for svc in $DEPLOY_ORDER; do
# Seed this service (if seed script exists)
local seed_name="$svc"
case "$svc" in
woodpecker-server|woodpecker-agent) seed_name="woodpecker" ;;
agents) seed_name="agents" ;;
chat) seed_name="chat" ;;
edge) seed_name="ops-repo" ;;
esac
local seed_script="${FACTORY_ROOT}/tools/vault-seed-${seed_name}.sh"
if [ -x "$seed_script" ]; then
echo ""
echo "── Seeding Vault for ${seed_name} ───────────────────────────"
if [ "$(id -u)" -eq 0 ]; then
VAULT_ADDR="$vault_addr" "$seed_script" || exit $?
else
if ! command -v sudo >/dev/null 2>&1; then
echo "Error: vault-seed-${seed_name}.sh must run as root and sudo is not installed" >&2
echo "── Deploying services ─────────────────────────────────"
local -a deploy_cmd=("$deploy_sh")
# Split comma-separated service list into positional args
local IFS=','
for svc in $with_services; do
svc=$(echo "$svc" | xargs) # trim whitespace
if ! echo "$svc" | grep -qE '^[a-zA-Z0-9_-]+$'; then
echo "Error: invalid service name '${svc}' — must match ^[a-zA-Z0-9_-]+$" >&2
exit 1
fi
sudo -n -- env "VAULT_ADDR=$vault_addr" "$seed_script" || exit $?
fi
fi
# Deploy this service
echo ""
echo "── Deploying ${svc} ───────────────────────────────────────"
# Seed host volumes before deployment (if needed)
# Validate known services FIRST (before jobspec check)
case "$svc" in
staging)
# Seed site-content host volume (/srv/disinto/docker) with static content
# The staging jobspec mounts this volume read-only to /srv/site
local site_content_src="${FACTORY_ROOT}/docker/index.html"
local site_content_dst="/srv/disinto/docker"
if [ -f "$site_content_src" ] && [ -d "$site_content_dst" ]; then
if ! cmp -s "$site_content_src" "${site_content_dst}/index.html" 2>/dev/null; then
echo "[staging] seeding site-content volume..."
cp "$site_content_src" "${site_content_dst}/index.html"
fi
fi
forgejo) ;;
*)
echo "Error: unknown service '${svc}' — known: forgejo" >&2
exit 1
;;
esac
# Check jobspec exists
local jobspec_path="${FACTORY_ROOT}/nomad/jobs/${svc}.hcl"
if [ ! -f "$jobspec_path" ]; then
echo "Error: jobspec not found: ${jobspec_path}" >&2
exit 1
fi
deploy_cmd+=("$svc")
done
local -a deploy_cmd=("$deploy_sh" "$svc")
if [ "$(id -u)" -eq 0 ]; then
"${deploy_cmd[@]}" || exit $?
else
@ -1057,49 +901,7 @@ _disinto_init_nomad() {
echo "Error: deploy.sh must run as root and sudo is not installed" >&2
exit 1
fi
sudo -n --preserve-env=FORGE_ADMIN_PASS,FORGE_TOKEN,FORGE_URL -- "${deploy_cmd[@]}" || exit $?
fi
# Post-deploy: bootstrap Forgejo admin user after forgejo deployment
if [ "$svc" = "forgejo" ]; then
echo ""
echo "── Bootstrapping Forgejo admin user ───────────────────────"
local bootstrap_script="${FACTORY_ROOT}/lib/init/nomad/forgejo-bootstrap.sh"
if [ -x "$bootstrap_script" ]; then
if [ "$(id -u)" -eq 0 ]; then
"$bootstrap_script" || exit $?
else
if ! command -v sudo >/dev/null 2>&1; then
echo "Error: forgejo-bootstrap.sh must run as root and sudo is not installed" >&2
exit 1
fi
sudo -n --preserve-env=FORGE_ADMIN_PASS,FORGE_TOKEN,FORGE_URL -- "$bootstrap_script" || exit $?
fi
else
echo "warning: forgejo-bootstrap.sh not found or not executable" >&2
fi
fi
done
# Run vault-runner (unconditionally, not gated by --with) — infrastructure job
# vault-runner is always present since it's needed for vault action dispatch
echo ""
echo "── Running vault-runner ────────────────────────────────────"
local vault_runner_path="${FACTORY_ROOT}/nomad/jobs/vault-runner.hcl"
if [ -f "$vault_runner_path" ]; then
echo "[deploy] vault-runner: running Nomad job (infrastructure)"
local -a vault_runner_cmd=("$deploy_sh" "vault-runner")
if [ "$(id -u)" -eq 0 ]; then
"${vault_runner_cmd[@]}" || exit $?
else
if ! command -v sudo >/dev/null 2>&1; then
echo "Error: deploy.sh must run as root and sudo is not installed" >&2
exit 1
fi
sudo -n -- "${vault_runner_cmd[@]}" || exit $?
fi
else
echo "[deploy] vault-runner: jobspec not found, skipping"
sudo -n -- "${deploy_cmd[@]}" || exit $?
fi
# Print final summary
@ -1117,24 +919,9 @@ _disinto_init_nomad() {
echo "Imported: (none — seed kv/disinto/* manually before deploying secret-dependent services)"
fi
echo "Deployed: ${with_services}"
if echo ",$with_services," | grep -q ",forgejo,"; then
if echo "$with_services" | grep -q "forgejo"; then
echo "Ports: forgejo: 3000"
fi
if echo ",$with_services," | grep -q ",woodpecker-server,"; then
echo " woodpecker-server: 8000"
fi
if echo ",$with_services," | grep -q ",woodpecker-agent,"; then
echo " woodpecker-agent: (agent connected)"
fi
if echo ",$with_services," | grep -q ",agents,"; then
echo " agents: (polling loop running)"
fi
if echo ",$with_services," | grep -q ",staging,"; then
echo " staging: (internal, no external port)"
fi
if echo ",$with_services," | grep -q ",chat,"; then
echo " chat: 8080"
fi
echo "────────────────────────────────────────────────────────"
fi
@ -1220,70 +1007,6 @@ disinto_init() {
exit 1
fi
# Normalize --with services (S3.4): expand 'woodpecker' shorthand to
# 'woodpecker-server,woodpecker-agent', auto-include forgejo when
# woodpecker is requested (OAuth dependency), and validate all names.
if [ -n "$with_services" ]; then
# Expand 'woodpecker' (bare) → 'woodpecker-server,woodpecker-agent'.
# Must not match already-expanded 'woodpecker-server'/'woodpecker-agent'.
local expanded=""
local IFS=','
for _svc in $with_services; do
_svc=$(echo "$_svc" | xargs)
case "$_svc" in
woodpecker) _svc="woodpecker-server,woodpecker-agent" ;;
agents) _svc="agents" ;;
esac
expanded="${expanded:+${expanded},}${_svc}"
done
with_services="$expanded"
unset IFS
# Auto-include forgejo when woodpecker is requested
if echo ",$with_services," | grep -q ",woodpecker-server,\|,woodpecker-agent," \
&& ! echo ",$with_services," | grep -q ",forgejo,"; then
echo "Note: --with woodpecker implies --with forgejo (OAuth dependency)"
with_services="forgejo,${with_services}"
fi
# Auto-include forgejo and woodpecker when agents is requested
if echo ",$with_services," | grep -q ",agents,"; then
if ! echo ",$with_services," | grep -q ",forgejo,"; then
echo "Note: --with agents implies --with forgejo (agents need forge)"
with_services="forgejo,${with_services}"
fi
if ! echo ",$with_services," | grep -q ",woodpecker-server,\|,woodpecker-agent,"; then
echo "Note: --with agents implies --with woodpecker (agents need CI)"
with_services="${with_services},woodpecker-server,woodpecker-agent"
fi
fi
# Auto-include all dependencies when edge is requested (S5.5)
if echo ",$with_services," | grep -q ",edge,"; then
# Edge depends on all backend services
for dep in forgejo woodpecker-server woodpecker-agent agents staging chat; do
if ! echo ",$with_services," | grep -q ",${dep},"; then
echo "Note: --with edge implies --with ${dep} (edge depends on all backend services)"
with_services="${with_services},${dep}"
fi
done
fi
# Validate all service names are known
local IFS=','
for _svc in $with_services; do
_svc=$(echo "$_svc" | xargs)
case "$_svc" in
forgejo|woodpecker-server|woodpecker-agent|agents|staging|chat|edge) ;;
*)
echo "Error: unknown service '${_svc}' — known: forgejo, woodpecker-server, woodpecker-agent, agents, staging, chat, edge" >&2
exit 1
;;
esac
done
unset IFS
fi
# --import-* flag validation (S2.5). These three flags form an import
# triple and must be consistent before dispatch: sops encryption is
# useless without the age key to decrypt it, so either both --import-sops
@ -1474,36 +1197,6 @@ p.write_text(text)
exit 0
fi
# Configure Forgejo and Woodpecker URLs when EDGE_TUNNEL_FQDN is set.
# In subdomain mode, uses per-service FQDNs at root path instead of subpath URLs.
# Must run BEFORE generate_compose so the .env file is available for variable substitution.
if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then
local routing_mode="${EDGE_ROUTING_MODE:-subpath}"
# Create .env file if it doesn't exist yet (needed before compose generation)
if [ "$bare" = false ] && [ ! -f "${FACTORY_ROOT}/.env" ]; then
touch "${FACTORY_ROOT}/.env"
fi
if [ "$routing_mode" = "subdomain" ]; then
# Subdomain mode: Forgejo at forge.<project>.disinto.ai (root path)
if ! grep -q '^FORGEJO_ROOT_URL=' "${FACTORY_ROOT}/.env" 2>/dev/null; then
echo "FORGEJO_ROOT_URL=https://${EDGE_TUNNEL_FQDN_FORGE:-forge.${EDGE_TUNNEL_FQDN}}/" >> "${FACTORY_ROOT}/.env"
fi
# Subdomain mode: Woodpecker at ci.<project>.disinto.ai (root path)
if ! grep -q '^WOODPECKER_HOST=' "${FACTORY_ROOT}/.env" 2>/dev/null; then
echo "WOODPECKER_HOST=https://${EDGE_TUNNEL_FQDN_CI:-ci.${EDGE_TUNNEL_FQDN}}" >> "${FACTORY_ROOT}/.env"
fi
else
# Subpath mode: Forgejo ROOT_URL with /forge/ subpath (trailing slash required)
if ! grep -q '^FORGEJO_ROOT_URL=' "${FACTORY_ROOT}/.env" 2>/dev/null; then
echo "FORGEJO_ROOT_URL=https://${EDGE_TUNNEL_FQDN}/forge/" >> "${FACTORY_ROOT}/.env"
fi
# Subpath mode: Woodpecker WOODPECKER_HOST with /ci subpath (no trailing slash for v3)
if ! grep -q '^WOODPECKER_HOST=' "${FACTORY_ROOT}/.env" 2>/dev/null; then
echo "WOODPECKER_HOST=https://${EDGE_TUNNEL_FQDN}/ci" >> "${FACTORY_ROOT}/.env"
fi
fi
fi
# Generate compose files (unless --bare)
if [ "$bare" = false ]; then
local forge_port
@ -1518,6 +1211,18 @@ p.write_text(text)
touch "${FACTORY_ROOT}/.env"
fi
# Configure Forgejo and Woodpecker subpath URLs when EDGE_TUNNEL_FQDN is set
if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then
# Forgejo ROOT_URL with /forge/ subpath (note trailing slash - Forgejo needs it)
if ! grep -q '^FORGEJO_ROOT_URL=' "${FACTORY_ROOT}/.env" 2>/dev/null; then
echo "FORGEJO_ROOT_URL=https://${EDGE_TUNNEL_FQDN}/forge/" >> "${FACTORY_ROOT}/.env"
fi
# Woodpecker WOODPECKER_HOST with /ci subpath (no trailing slash for v3)
if ! grep -q '^WOODPECKER_HOST=' "${FACTORY_ROOT}/.env" 2>/dev/null; then
echo "WOODPECKER_HOST=https://${EDGE_TUNNEL_FQDN}/ci" >> "${FACTORY_ROOT}/.env"
fi
fi
# Prompt for FORGE_ADMIN_PASS before setup_forge
# This ensures the password is set before Forgejo user creation
prompt_admin_password "${FACTORY_ROOT}/.env"
@ -1621,15 +1326,9 @@ p.write_text(text)
create_woodpecker_oauth "$forge_url" "$forge_repo"
# Create OAuth2 app on Forgejo for disinto-chat (#708)
# In subdomain mode, callback is at chat.<project> root instead of /chat/ subpath.
local chat_redirect_uri
if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then
local chat_routing_mode="${EDGE_ROUTING_MODE:-subpath}"
if [ "$chat_routing_mode" = "subdomain" ]; then
chat_redirect_uri="https://${EDGE_TUNNEL_FQDN_CHAT:-chat.${EDGE_TUNNEL_FQDN}}/oauth/callback"
else
chat_redirect_uri="https://${EDGE_TUNNEL_FQDN}/chat/oauth/callback"
fi
else
chat_redirect_uri="http://localhost/chat/oauth/callback"
fi
@ -2829,29 +2528,15 @@ disinto_edge() {
# Write to .env (replace existing entries to avoid duplicates)
local tmp_env
tmp_env=$(mktemp)
grep -Ev "^EDGE_TUNNEL_(HOST|PORT|FQDN|FQDN_FORGE|FQDN_CI|FQDN_CHAT)=" "$env_file" > "$tmp_env" 2>/dev/null || true
grep -Ev "^EDGE_TUNNEL_(HOST|PORT|FQDN)=" "$env_file" > "$tmp_env" 2>/dev/null || true
mv "$tmp_env" "$env_file"
echo "EDGE_TUNNEL_HOST=${edge_host}" >> "$env_file"
echo "EDGE_TUNNEL_PORT=${port}" >> "$env_file"
echo "EDGE_TUNNEL_FQDN=${fqdn}" >> "$env_file"
# Subdomain mode: write per-service FQDNs (#1028)
local reg_routing_mode="${EDGE_ROUTING_MODE:-subpath}"
if [ "$reg_routing_mode" = "subdomain" ]; then
echo "EDGE_TUNNEL_FQDN_FORGE=forge.${fqdn}" >> "$env_file"
echo "EDGE_TUNNEL_FQDN_CI=ci.${fqdn}" >> "$env_file"
echo "EDGE_TUNNEL_FQDN_CHAT=chat.${fqdn}" >> "$env_file"
fi
echo "Registered: ${project}"
echo " Port: ${port}"
echo " FQDN: ${fqdn}"
if [ "$reg_routing_mode" = "subdomain" ]; then
echo " Mode: subdomain"
echo " Forge: forge.${fqdn}"
echo " CI: ci.${fqdn}"
echo " Chat: chat.${fqdn}"
fi
echo " Saved to: ${env_file}"
;;
@ -2885,23 +2570,12 @@ disinto_edge() {
edge_host="${EDGE_HOST:-edge.disinto.ai}"
fi
# Read tunnel pubkey for ownership proof
local secrets_dir="${FACTORY_ROOT}/secrets"
local tunnel_pubkey="${secrets_dir}/tunnel_key.pub"
if [ ! -f "$tunnel_pubkey" ]; then
echo "Error: tunnel keypair not found at ${tunnel_pubkey}" >&2
echo "Cannot prove ownership without the tunnel public key." >&2
exit 1
fi
local pubkey
pubkey=$(tr -d '\n' < "$tunnel_pubkey")
# SSH to edge host and deregister
echo "Deregistering tunnel for ${project} on ${edge_host}..."
local response
response=$(ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes \
"disinto-register@${edge_host}" \
"deregister ${project} ${pubkey}" 2>&1) || {
"deregister ${project}" 2>&1) || {
echo "Error: failed to deregister tunnel" >&2
echo "Response: ${response}" >&2
exit 1
@ -2984,33 +2658,6 @@ EOF
esac
}
# ── backup command ────────────────────────────────────────────────────────────
# Usage: disinto backup <subcommand> [args]
# Subcommands:
# create <outfile.tar.gz> Create backup of factory state
# import <infile.tar.gz> Restore factory state from backup
disinto_backup() {
local subcmd="${1:-}"
shift || true
case "$subcmd" in
create)
backup_create "$@"
;;
import)
backup_import "$@"
;;
*)
echo "Usage: disinto backup <subcommand> [args]" >&2
echo "" >&2
echo "Subcommands:" >&2
echo " create <outfile.tar.gz> Create backup of factory state" >&2
echo " import <infile.tar.gz> Restore factory state from backup" >&2
exit 1
;;
esac
}
# ── Main dispatch ────────────────────────────────────────────────────────────
case "${1:-}" in
@ -3027,7 +2674,6 @@ case "${1:-}" in
hire-an-agent) shift; disinto_hire_an_agent "$@" ;;
agent) shift; disinto_agent "$@" ;;
edge) shift; disinto_edge "$@" ;;
backup) shift; disinto_backup "$@" ;;
-h|--help) usage ;;
*) usage ;;
esac

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Dev Agent
**Role**: Implement issues autonomously — write code, push branches, address

View file

@ -15,6 +15,7 @@ services:
- project-repos:/home/agent/repos
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
- woodpecker-data:/woodpecker-data:ro
@ -77,6 +78,7 @@ services:
- project-repos:/home/agent/repos
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
- woodpecker-data:/woodpecker-data:ro
@ -137,6 +139,7 @@ services:
- project-repos:/home/agent/repos
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
- woodpecker-data:/woodpecker-data:ro
@ -208,8 +211,8 @@ services:
edge:
build:
context: .
dockerfile: docker/edge/Dockerfile
context: docker/edge
dockerfile: Dockerfile
image: disinto/edge:latest
container_name: disinto-edge
security_opt:
@ -220,8 +223,6 @@ services:
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/root/.claude.json:ro
- ${CLAUDE_DIR:-${HOME}/.claude}:/root/.claude:ro
- disinto-logs:/opt/disinto-logs
# Chat history persistence (merged from chat container, #1083)
- ${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history
environment:
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
@ -233,17 +234,6 @@ services:
- PRIMARY_BRANCH=main
- DISINTO_CONTAINER=1
- FORGE_ADMIN_USERS=disinto-admin,vault-bot,admin
# Chat env vars (merged from chat container into edge, #1083)
- CHAT_HOST=127.0.0.1
- CHAT_PORT=8080
- CHAT_OAUTH_CLIENT_ID=${CHAT_OAUTH_CLIENT_ID:-}
- CHAT_OAUTH_CLIENT_SECRET=${CHAT_OAUTH_CLIENT_SECRET:-}
- DISINTO_CHAT_ALLOWED_USERS=${DISINTO_CHAT_ALLOWED_USERS:-}
- FORWARD_AUTH_SECRET=${FORWARD_AUTH_SECRET:-}
- EDGE_TUNNEL_FQDN=${EDGE_TUNNEL_FQDN:-}
- EDGE_TUNNEL_FQDN_CHAT=${EDGE_TUNNEL_FQDN_CHAT:-}
- EDGE_ROUTING_MODE=${EDGE_ROUTING_MODE:-subpath}
# Rate limiting removed (#1084)
ports:
- "80:80"
- "443:443"

View file

@ -1,26 +1,21 @@
FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y --no-install-recommends \
bash curl git jq tmux nodejs npm python3 python3-pip openssh-client ca-certificates age shellcheck procps gosu \
bash curl git jq tmux python3 python3-pip openssh-client ca-certificates age shellcheck procps gosu \
&& pip3 install --break-system-packages networkx tomlkit \
&& rm -rf /var/lib/apt/lists/*
# Pre-built binaries (copied from docker/agents/bin/)
# SOPS — encrypted data decryption tool
# Download sops binary (replaces manual COPY of vendored binary)
ARG SOPS_VERSION=3.9.4
RUN curl -fsSL "https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.linux.amd64" \
-o /usr/local/bin/sops && chmod +x /usr/local/bin/sops
COPY docker/agents/bin/sops /usr/local/bin/sops
RUN chmod +x /usr/local/bin/sops
# tea CLI — official Gitea/Forgejo CLI for issue/label/comment operations
# Download tea binary (replaces manual COPY of vendored binary)
ARG TEA_VERSION=0.9.2
RUN curl -fsSL "https://dl.gitea.com/tea/${TEA_VERSION}/tea-${TEA_VERSION}-linux-amd64" \
-o /usr/local/bin/tea && chmod +x /usr/local/bin/tea
COPY docker/agents/bin/tea /usr/local/bin/tea
RUN chmod +x /usr/local/bin/tea
# Install Claude Code CLI — agent runtime for all LLM backends (llama, Claude API).
# The CLI is the execution environment; ANTHROPIC_BASE_URL selects the model provider.
RUN npm install -g @anthropic-ai/claude-code@2.1.84
# Claude CLI is mounted from the host via docker-compose volume.
# No internet access to cli.anthropic.com required at build time.
# Non-root user
RUN useradd -m -u 1000 -s /bin/bash agent

docker/chat/Dockerfile (new file, 35 lines)
View file

@ -0,0 +1,35 @@
# disinto-chat — minimal HTTP backend for Claude chat UI
#
# Small Debian slim base with Python runtime.
# Chosen for simplicity and small image size (~100MB).
#
# Image size: ~100MB (well under the 200MB ceiling)
#
# The claude binary is mounted from the host at runtime via docker-compose,
# not baked into the image — same pattern as the agents container.
FROM debian:bookworm-slim
# Install Python (no build-time network access needed)
RUN apt-get update && apt-get install -y --no-install-recommends \
python3 \
&& rm -rf /var/lib/apt/lists/*
# Non-root user — fixed UID 10001 for sandbox hardening (#706)
RUN useradd -m -u 10001 -s /bin/bash chat
# Copy application files
COPY server.py /usr/local/bin/server.py
COPY entrypoint-chat.sh /entrypoint-chat.sh
COPY ui/ /var/chat/ui/
RUN chmod +x /entrypoint-chat.sh /usr/local/bin/server.py
USER chat
WORKDIR /var/chat
EXPOSE 8080
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')" || exit 1
ENTRYPOINT ["/entrypoint-chat.sh"]

docker/chat/entrypoint-chat.sh (new executable file, 37 lines)
View file

@ -0,0 +1,37 @@
#!/usr/bin/env bash
set -euo pipefail
# entrypoint-chat.sh — Start the disinto-chat backend server
#
# Exec-replace pattern: this script is the container entrypoint and runs
# the server directly (no wrapper needed). Logs to stdout for docker logs.
LOGFILE="/tmp/chat.log"
log() {
printf '[%s] %s\n' "$(date -u '+%Y-%m-%d %H:%M:%S UTC')" "$*" | tee -a "$LOGFILE"
}
# Sandbox sanity checks (#706) — fail fast if isolation is broken
if [ -e /var/run/docker.sock ]; then
log "FATAL: /var/run/docker.sock is accessible — sandbox violation"
exit 1
fi
if [ "$(id -u)" = "0" ]; then
log "FATAL: running as root (uid 0) — sandbox violation"
exit 1
fi
# Verify Claude CLI is available (expected via volume mount from host).
if ! command -v claude &>/dev/null; then
log "FATAL: claude CLI not found in PATH"
log "Mount the host binary into the container, e.g.:"
log " volumes:"
log " - /usr/local/bin/claude:/usr/local/bin/claude:ro"
exit 1
fi
log "Claude CLI: $(claude --version 2>&1 || true)"
# Start the Python server (exec-replace so signals propagate correctly)
log "Starting disinto-chat server on port 8080..."
exec python3 /usr/local/bin/server.py

View file

@ -20,15 +20,9 @@ OAuth flow:
6. Redirects to /chat/
The claude binary is expected to be mounted from the host at /usr/local/bin/claude.
Workspace access:
- CHAT_WORKSPACE_DIR environment variable: bind-mounted project working tree
- Claude invocation uses --permission-mode acceptEdits for code modification
- CWD is set to workspace directory when configured, enabling Claude to
inspect, explain, or modify code scoped to that tree only
"""
import asyncio
import datetime
import json
import os
import re
@ -36,33 +30,21 @@ import secrets
import subprocess
import sys
import time
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, parse_qs, urlencode
import socket
import struct
import base64
import hashlib
# Configuration
HOST = os.environ.get("CHAT_HOST", "127.0.0.1")
HOST = os.environ.get("CHAT_HOST", "0.0.0.0")
PORT = int(os.environ.get("CHAT_PORT", 8080))
UI_DIR = "/var/chat/ui"
STATIC_DIR = os.path.join(UI_DIR, "static")
CLAUDE_BIN = "/usr/local/bin/claude"
# Workspace directory: bind-mounted project working tree for Claude access
# Defaults to empty; when set, Claude can read/write to this directory
WORKSPACE_DIR = os.environ.get("CHAT_WORKSPACE_DIR", "")
# OAuth configuration
FORGE_URL = os.environ.get("FORGE_URL", "http://localhost:3000")
CHAT_OAUTH_CLIENT_ID = os.environ.get("CHAT_OAUTH_CLIENT_ID", "")
CHAT_OAUTH_CLIENT_SECRET = os.environ.get("CHAT_OAUTH_CLIENT_SECRET", "")
EDGE_TUNNEL_FQDN = os.environ.get("EDGE_TUNNEL_FQDN", "")
EDGE_TUNNEL_FQDN_CHAT = os.environ.get("EDGE_TUNNEL_FQDN_CHAT", "")
EDGE_ROUTING_MODE = os.environ.get("EDGE_ROUTING_MODE", "subpath")
# Shared secret for Caddy forward_auth verify endpoint (#709).
# When set, only requests carrying this value in X-Forward-Auth-Secret are
@ -70,6 +52,10 @@ EDGE_ROUTING_MODE = os.environ.get("EDGE_ROUTING_MODE", "subpath")
# (acceptable during local dev; production MUST set this).
FORWARD_AUTH_SECRET = os.environ.get("FORWARD_AUTH_SECRET", "")
# Rate limiting / cost caps (#711)
CHAT_MAX_REQUESTS_PER_HOUR = int(os.environ.get("CHAT_MAX_REQUESTS_PER_HOUR", 60))
CHAT_MAX_REQUESTS_PER_DAY = int(os.environ.get("CHAT_MAX_REQUESTS_PER_DAY", 500))
CHAT_MAX_TOKENS_PER_DAY = int(os.environ.get("CHAT_MAX_TOKENS_PER_DAY", 1000000))
# Allowed users - disinto-admin always allowed; CSV allowlist extends it
_allowed_csv = os.environ.get("DISINTO_CHAT_ALLOWED_USERS", "")
@ -95,10 +81,11 @@ _sessions = {}
# Pending OAuth state tokens: state -> expires (float)
_oauth_states = {}
# WebSocket message queues per user
# user -> asyncio.Queue (for streaming messages to connected clients)
_websocket_queues = {}
# Per-user rate limiting state (#711)
# user -> list of request timestamps (for sliding-window hourly/daily caps)
_request_log = {}
# user -> {"tokens": int, "date": "YYYY-MM-DD"}
_daily_tokens = {}
# MIME types for static files
MIME_TYPES = {
@ -112,22 +99,9 @@ MIME_TYPES = {
".ico": "image/x-icon",
}
# WebSocket subprotocol for chat streaming
WEBSOCKET_SUBPROTOCOL = "chat-stream-v1"
# WebSocket opcodes
OPCODE_CONTINUATION = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xA
def _build_callback_uri():
"""Build the OAuth callback URI based on tunnel configuration."""
if EDGE_ROUTING_MODE == "subdomain" and EDGE_TUNNEL_FQDN_CHAT:
return f"https://{EDGE_TUNNEL_FQDN_CHAT}/oauth/callback"
if EDGE_TUNNEL_FQDN:
return f"https://{EDGE_TUNNEL_FQDN}/chat/oauth/callback"
return "http://localhost/chat/oauth/callback"
@ -213,9 +187,69 @@ def _fetch_user(access_token):
return None
# =============================================================================
# Rate Limiting Functions (#711)
# =============================================================================
def _check_rate_limit(user):
"""Check per-user rate limits. Returns (allowed, retry_after, reason) (#711).
Checks hourly request cap, daily request cap, and daily token cap.
"""
now = time.time()
one_hour_ago = now - 3600
today = datetime.date.today().isoformat()
# Prune old entries from request log
timestamps = _request_log.get(user, [])
timestamps = [t for t in timestamps if t > now - 86400]
_request_log[user] = timestamps
# Hourly request cap
hourly = [t for t in timestamps if t > one_hour_ago]
if len(hourly) >= CHAT_MAX_REQUESTS_PER_HOUR:
oldest_in_window = min(hourly)
retry_after = int(oldest_in_window + 3600 - now) + 1
return False, max(retry_after, 1), "hourly request limit"
# Daily request cap
start_of_day = time.mktime(datetime.date.today().timetuple())
daily = [t for t in timestamps if t >= start_of_day]
if len(daily) >= CHAT_MAX_REQUESTS_PER_DAY:
next_day = start_of_day + 86400
retry_after = int(next_day - now) + 1
return False, max(retry_after, 1), "daily request limit"
# Daily token cap
token_info = _daily_tokens.get(user, {"tokens": 0, "date": today})
if token_info["date"] != today:
token_info = {"tokens": 0, "date": today}
_daily_tokens[user] = token_info
if token_info["tokens"] >= CHAT_MAX_TOKENS_PER_DAY:
next_day = start_of_day + 86400
retry_after = int(next_day - now) + 1
return False, max(retry_after, 1), "daily token limit"
return True, 0, ""
def _record_request(user):
"""Record a request timestamp for the user (#711)."""
_request_log.setdefault(user, []).append(time.time())
def _record_tokens(user, tokens):
"""Record token usage for the user (#711)."""
today = datetime.date.today().isoformat()
token_info = _daily_tokens.get(user, {"tokens": 0, "date": today})
if token_info["date"] != today:
token_info = {"tokens": 0, "date": today}
token_info["tokens"] += tokens
_daily_tokens[user] = token_info
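# Illustrative only, not part of this change: a sketch of how a request handler would be
# expected to use the three helpers above. handle_chat_message and run_claude are assumed
# names for this example; the real call site is elsewhere in this file.
def _example_handle_chat_message(user, prompt, run_claude):
    allowed, retry_after, reason = _check_rate_limit(user)
    if not allowed:
        # A 429 plus Retry-After lets the UI tell the user when to try again (#711).
        return 429, {"Retry-After": str(retry_after)}, f"rate limited: {reason}"
    _record_request(user)
    output = run_claude(prompt)  # e.g. claude --print --output-format stream-json
    text, total_tokens = _parse_stream_json(output)
    _record_tokens(user, total_tokens)
    return 200, {}, text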
def _parse_stream_json(output):
"""Parse stream-json output from claude --print.
"""Parse stream-json output from claude --print (#711).
Returns (text_content, total_tokens). Falls back gracefully if the
usage event is absent or malformed.
@ -261,313 +295,6 @@ def _parse_stream_json(output):
return "".join(text_parts), total_tokens
# =============================================================================
# WebSocket Handler Class
# =============================================================================
class _WebSocketHandler:
"""Handle WebSocket connections for chat streaming."""
def __init__(self, reader, writer, user, message_queue):
self.reader = reader
self.writer = writer
self.user = user
self.message_queue = message_queue
self.closed = False
async def accept_connection(self, sec_websocket_key, sec_websocket_protocol=None):
"""Accept the WebSocket handshake.
The HTTP request has already been parsed by BaseHTTPRequestHandler,
so we use the provided key and protocol instead of re-reading from socket.
"""
# Validate subprotocol
if sec_websocket_protocol and sec_websocket_protocol != WEBSOCKET_SUBPROTOCOL:
self._send_http_error(
400,
"Bad Request",
f"Unsupported subprotocol. Expected: {WEBSOCKET_SUBPROTOCOL}",
)
self._close_connection()
return False
# Generate accept key
accept_key = self._generate_accept_key(sec_websocket_key)
# Send handshake response
response = (
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
f"Sec-WebSocket-Accept: {accept_key}\r\n"
)
if sec_websocket_protocol:
response += f"Sec-WebSocket-Protocol: {sec_websocket_protocol}\r\n"
response += "\r\n"
self.writer.write(response.encode("utf-8"))
await self.writer.drain()
return True
def _generate_accept_key(self, sec_key):
"""Generate the Sec-WebSocket-Accept key."""
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
combined = sec_key + GUID
sha1 = hashlib.sha1(combined.encode("utf-8"))
return base64.b64encode(sha1.digest()).decode("utf-8")
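# Worked example (the well-known test vector from RFC 6455, section 1.3):
#   _generate_accept_key("dGhlIHNhbXBsZSBub25jZQ==") -> "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="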
async def _read_line(self):
"""Read a line from the socket."""
data = await self.reader.read(1)
line = ""
while data:
if data == b"\r":
data = await self.reader.read(1)
continue
if data == b"\n":
return line
line += data.decode("utf-8", errors="replace")
data = await self.reader.read(1)
return line
def _send_http_error(self, code, title, message):
"""Send an HTTP error response."""
response = (
f"HTTP/1.1 {code} {title}\r\n"
"Content-Type: text/plain; charset=utf-8\r\n"
"Content-Length: " + str(len(message)) + "\r\n"
"\r\n"
+ message
)
try:
self.writer.write(response.encode("utf-8"))
self.writer.drain()
except Exception:
pass
def _close_connection(self):
"""Close the connection."""
try:
self.writer.close()
except Exception:
pass
async def send_text(self, data):
"""Send a text frame."""
if self.closed:
return
try:
frame = self._encode_frame(OPCODE_TEXT, data.encode("utf-8"))
self.writer.write(frame)
await self.writer.drain()
except Exception as e:
print(f"WebSocket send error: {e}", file=sys.stderr)
async def send_binary(self, data):
"""Send a binary frame."""
if self.closed:
return
try:
if isinstance(data, str):
data = data.encode("utf-8")
frame = self._encode_frame(OPCODE_BINARY, data)
self.writer.write(frame)
await self.writer.drain()
except Exception as e:
print(f"WebSocket send error: {e}", file=sys.stderr)
def _encode_frame(self, opcode, payload):
"""Encode a WebSocket frame."""
frame = bytearray()
frame.append(0x80 | opcode) # FIN + opcode
length = len(payload)
if length < 126:
frame.append(length)
elif length < 65536:
frame.append(126)
frame.extend(struct.pack(">H", length))
else:
frame.append(127)
frame.extend(struct.pack(">Q", length))
frame.extend(payload)
return bytes(frame)
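# Worked example: _encode_frame(OPCODE_TEXT, b"hi") -> b"\x81\x02hi"
#   0x81 = FIN bit (0x80) | text opcode (0x01); 0x02 = payload length (2 bytes)
# Server-to-client frames are sent unmasked, hence no mask bit or masking key.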
async def _decode_frame(self):
"""Decode a WebSocket frame. Returns (opcode, payload)."""
try:
# Read first two bytes (use readexactly for guaranteed length)
header = await self.reader.readexactly(2)
fin = (header[0] >> 7) & 1
opcode = header[0] & 0x0F
masked = (header[1] >> 7) & 1
length = header[1] & 0x7F
# Extended payload length
if length == 126:
ext = await self.reader.readexactly(2)
length = struct.unpack(">H", ext)[0]
elif length == 127:
ext = await self.reader.readexactly(8)
length = struct.unpack(">Q", ext)[0]
# Masking key
if masked:
mask_key = await self.reader.readexactly(4)
# Payload
payload = await self.reader.readexactly(length)
# Unmask if needed
if masked:
payload = bytes(b ^ mask_key[i % 4] for i, b in enumerate(payload))
return opcode, payload
except Exception as e:
print(f"WebSocket decode error: {e}", file=sys.stderr)
return None, None
async def handle_connection(self):
"""Handle the WebSocket connection loop."""
try:
while not self.closed:
opcode, payload = await self._decode_frame()
if opcode is None:
break
if opcode == OPCODE_CLOSE:
await self._send_close()
break
elif opcode == OPCODE_PING:
await self._send_pong(payload)
elif opcode == OPCODE_PONG:
pass # Ignore pong
elif opcode in (OPCODE_TEXT, OPCODE_BINARY):
# Handle text messages from client (e.g., chat_request)
try:
msg = payload.decode("utf-8")
data = json.loads(msg)
if data.get("type") == "chat_request":
# Invoke Claude with the message
await self._handle_chat_request(data.get("message", ""))
except (json.JSONDecodeError, UnicodeDecodeError):
pass
# Check if we should stop waiting for messages
if self.closed:
break
except Exception as e:
print(f"WebSocket connection error: {e}", file=sys.stderr)
finally:
self._close_connection()
# Clean up the message queue on disconnect
if self.user in _websocket_queues:
del _websocket_queues[self.user]
async def _send_close(self):
"""Send a close frame."""
try:
# Close code 1000 = normal closure
frame = self._encode_frame(OPCODE_CLOSE, struct.pack(">H", 1000))
self.writer.write(frame)
await self.writer.drain()
except Exception:
pass
async def _send_pong(self, payload):
"""Send a pong frame."""
try:
frame = self._encode_frame(OPCODE_PONG, payload)
self.writer.write(frame)
await self.writer.drain()
except Exception:
pass
async def _handle_chat_request(self, message):
"""Handle a chat_request WebSocket frame by invoking Claude."""
if not message:
return
# Validate Claude binary exists
if not os.path.exists(CLAUDE_BIN):
await self.send_text(json.dumps({
"type": "error",
"message": "Claude CLI not found",
}))
return
try:
# Build claude command with permission mode (acceptEdits allows file edits)
claude_args = [CLAUDE_BIN, "--print", "--output-format", "stream-json", "--permission-mode", "acceptEdits", message]
# Spawn claude --print with stream-json for streaming output
# Set cwd to workspace directory if configured, allowing Claude to access project code
cwd = WORKSPACE_DIR if WORKSPACE_DIR else None
proc = subprocess.Popen(
claude_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
cwd=cwd,
bufsize=1,
)
# Stream output line by line
for line in iter(proc.stdout.readline, ""):
line = line.strip()
if not line:
continue
try:
event = json.loads(line)
etype = event.get("type", "")
# Extract text content from content_block_delta events
if etype == "content_block_delta":
delta = event.get("delta", {})
if delta.get("type") == "text_delta":
text = delta.get("text", "")
if text:
# Send tokens to client
await self.send_text(text)
# Check for usage event to know when complete
if etype == "result":
pass # Will send complete after loop
except json.JSONDecodeError:
pass
# Wait for process to complete
proc.wait()
if proc.returncode != 0:
await self.send_text(json.dumps({
"type": "error",
"message": f"Claude CLI failed with exit code {proc.returncode}",
}))
return
# Send complete signal
await self.send_text(json.dumps({
"type": "complete",
}))
except FileNotFoundError:
await self.send_text(json.dumps({
"type": "error",
"message": "Claude CLI not found",
}))
except Exception as e:
await self.send_text(json.dumps({
"type": "error",
"message": str(e),
}))
# =============================================================================
# Conversation History Functions (#710)
# =============================================================================
@ -817,9 +544,9 @@ class ChatHandler(BaseHTTPRequestHandler):
self.serve_static(path)
return
# WebSocket upgrade endpoint
if path == "/chat/ws" or path == "/ws" or path.startswith("/ws"):
self.handle_websocket_upgrade()
# Reserved WebSocket endpoint (future use)
if path == "/ws" or path.startswith("/ws"):
self.send_error_page(501, "WebSocket upgrade not yet implemented")
return
# 404 for unknown paths
@ -1009,13 +736,33 @@ class ChatHandler(BaseHTTPRequestHandler):
except IOError as e:
self.send_error_page(500, f"Error reading file: {e}")
def _send_rate_limit_response(self, retry_after, reason):
"""Send a 429 response with Retry-After header and HTMX fragment (#711)."""
body = (
f'<div class="rate-limit-error">'
f"Rate limit exceeded: {reason}. "
f"Please try again in {retry_after} seconds."
f"</div>"
)
self.send_response(429)
self.send_header("Retry-After", str(retry_after))
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", str(len(body.encode("utf-8"))))
self.end_headers()
self.wfile.write(body.encode("utf-8"))
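# Illustrative wire response (abridged; Server/Date headers omitted, values are examples):
#   HTTP/1.1 429 Too Many Requests
#   Retry-After: 120
#   Content-Type: text/html; charset=utf-8
#   Content-Length: <byte length of the div fragment>
#
#   <div class="rate-limit-error">Rate limit exceeded: hourly request limit. Please try again in 120 seconds.</div>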
def handle_chat(self, user):
"""
Handle chat requests by spawning `claude --print` with the user message.
Streams tokens over WebSocket if connected.
Enforces per-user rate limits and tracks token usage (#711).
"""
# Check rate limits before processing (#711)
allowed, retry_after, reason = _check_rate_limit(user)
if not allowed:
self._send_rate_limit_response(retry_after, reason)
return
# Read request body
content_length = int(self.headers.get("Content-Length", 0))
if content_length == 0:
@ -1052,63 +799,23 @@ class ChatHandler(BaseHTTPRequestHandler):
if not conv_id or not _validate_conversation_id(conv_id):
conv_id = _generate_conversation_id()
# Record request for rate limiting (#711)
_record_request(user)
try:
# Save user message to history
_write_message(user, conv_id, "user", message)
# Build claude command with permission mode (acceptEdits allows file edits)
claude_args = [CLAUDE_BIN, "--print", "--output-format", "stream-json", "--permission-mode", "acceptEdits", message]
# Spawn claude --print with stream-json for token tracking (#711)
# Set cwd to workspace directory if configured, allowing Claude to access project code
cwd = WORKSPACE_DIR if WORKSPACE_DIR else None
proc = subprocess.Popen(
claude_args,
[CLAUDE_BIN, "--print", "--output-format", "stream-json", message],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
cwd=cwd,
bufsize=1, # Line buffered
)
# Stream output line by line
response_parts = []
total_tokens = 0
for line in iter(proc.stdout.readline, ""):
line = line.strip()
if not line:
continue
try:
event = json.loads(line)
etype = event.get("type", "")
raw_output = proc.stdout.read()
# Extract text content from content_block_delta events
if etype == "content_block_delta":
delta = event.get("delta", {})
if delta.get("type") == "text_delta":
text = delta.get("text", "")
if text:
response_parts.append(text)
# Stream to WebSocket if connected
if user in _websocket_queues:
try:
_websocket_queues[user].put_nowait(text)
except Exception:
pass # Client disconnected
# Parse usage from result event
if etype == "result":
usage = event.get("usage", {})
total_tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
elif "usage" in event:
usage = event["usage"]
if isinstance(usage, dict):
total_tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
except json.JSONDecodeError:
pass
# Wait for process to complete
error_output = proc.stderr.read()
if error_output:
print(f"Claude stderr: {error_output}", file=sys.stderr)
@ -1119,12 +826,20 @@ class ChatHandler(BaseHTTPRequestHandler):
self.send_error_page(500, f"Claude CLI failed with exit code {proc.returncode}")
return
# Combine response parts
response = "".join(response_parts)
# Parse stream-json for text and token usage (#711)
response, total_tokens = _parse_stream_json(raw_output)
# Track token usage - does not block *this* request (#711)
if total_tokens > 0:
_record_tokens(user, total_tokens)
print(
f"Token usage: user={user} tokens={total_tokens}",
file=sys.stderr,
)
# Fall back to raw output if stream-json parsing yielded no text
if not response:
response = proc.stdout.getvalue() if hasattr(proc.stdout, 'getvalue') else ""
response = raw_output
# Save assistant response to history
_write_message(user, conv_id, "assistant", response)
@ -1194,106 +909,6 @@ class ChatHandler(BaseHTTPRequestHandler):
self.end_headers()
self.wfile.write(json.dumps({"conversation_id": conv_id}, ensure_ascii=False).encode("utf-8"))
@staticmethod
def push_to_websocket(user, message):
"""Push a message to a WebSocket connection for a user.
This is called from the chat handler to stream tokens to connected clients.
The message is added to the user's WebSocket message queue.
"""
# Get the message queue from the WebSocket handler's queue
# We store the queue in a global dict keyed by user
if user in _websocket_queues:
_websocket_queues[user].put_nowait(message)
def handle_websocket_upgrade(self):
"""Handle WebSocket upgrade request for chat streaming."""
# Check session cookie
user = _validate_session(self.headers.get("Cookie"))
if not user:
self.send_error_page(401, "Unauthorized: no valid session")
return
# Create message queue for this user
_websocket_queues[user] = asyncio.Queue()
# Get WebSocket upgrade headers from the HTTP request
sec_websocket_key = self.headers.get("Sec-WebSocket-Key", "")
sec_websocket_protocol = self.headers.get("Sec-WebSocket-Protocol", "")
# Validate Sec-WebSocket-Key
if not sec_websocket_key:
self.send_error_page(400, "Bad Request", "Missing Sec-WebSocket-Key")
return
# Get the socket from the connection
sock = self.connection
sock.setblocking(False)
# Create async server to handle the connection
async def handle_ws():
try:
# Wrap the socket in asyncio streams using open_connection
reader, writer = await asyncio.open_connection(sock=sock)
# Create WebSocket handler
ws_handler = _WebSocketHandler(reader, writer, user, _websocket_queues[user])
# Accept the connection (pass headers from HTTP request)
if not await ws_handler.accept_connection(sec_websocket_key, sec_websocket_protocol):
return
# Start a task to read from the queue and send to client
async def send_stream():
while not ws_handler.closed:
try:
data = await asyncio.wait_for(ws_handler.message_queue.get(), timeout=1.0)
await ws_handler.send_text(data)
except asyncio.TimeoutError:
# Send ping to keep connection alive
try:
frame = ws_handler._encode_frame(OPCODE_PING, b"")
writer.write(frame)
await writer.drain()
except Exception:
break
except Exception as e:
print(f"Send stream error: {e}", file=sys.stderr)
break
# Start sending task
send_task = asyncio.create_task(send_stream())
# Handle incoming WebSocket frames
await ws_handler.handle_connection()
# Cancel send task
send_task.cancel()
try:
await send_task
except asyncio.CancelledError:
pass
except Exception as e:
print(f"WebSocket handler error: {e}", file=sys.stderr)
finally:
try:
writer.close()
await writer.wait_closed()
except Exception:
pass
# Run the async handler in a thread
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(handle_ws())
except Exception as e:
print(f"WebSocket error: {e}", file=sys.stderr)
finally:
loop.close()
sock.close()
def do_DELETE(self):
"""Handle DELETE requests."""
parsed = urlparse(self.path)
@ -1329,6 +944,12 @@ def main():
print("forward_auth secret configured (#709)", file=sys.stderr)
else:
print("WARNING: FORWARD_AUTH_SECRET not set - verify endpoint unrestricted", file=sys.stderr)
print(
f"Rate limits (#711): {CHAT_MAX_REQUESTS_PER_HOUR}/hr, "
f"{CHAT_MAX_REQUESTS_PER_DAY}/day, "
f"{CHAT_MAX_TOKENS_PER_DAY} tokens/day",
file=sys.stderr,
)
httpd.serve_forever()

View file

@ -430,10 +430,6 @@
return div.innerHTML.replace(/\n/g, '<br>');
}
// WebSocket connection for streaming
let ws = null;
let wsMessageId = null;
// Send message handler
async function sendMessage() {
const message = textarea.value.trim();
@ -453,14 +449,6 @@
await createNewConversation();
}
// Try WebSocket streaming first, fall back to fetch
if (window.location.protocol === 'https:' || window.location.hostname === 'localhost') {
if (tryWebSocketSend(message)) {
return;
}
}
// Fallback to fetch
try {
// Use fetch with URLSearchParams for application/x-www-form-urlencoded
const params = new URLSearchParams();
@ -497,111 +485,6 @@
}
}
// Try to send message via WebSocket streaming
function tryWebSocketSend(message) {
try {
// Generate a unique message ID for this request
wsMessageId = Date.now().toString(36) + Math.random().toString(36).substr(2);
// Connect to WebSocket
const wsUrl = window.location.protocol === 'https:'
? `wss://${window.location.host}/chat/ws`
: `ws://${window.location.host}/chat/ws`;
ws = new WebSocket(wsUrl);
ws.onopen = function() {
// Send the message as JSON with message ID
const data = {
type: 'chat_request',
message_id: wsMessageId,
message: message,
conversation_id: currentConversationId
};
ws.send(JSON.stringify(data));
};
ws.onmessage = function(event) {
try {
const data = JSON.parse(event.data);
if (data.type === 'token') {
// Stream a token to the UI
addTokenToLastMessage(data.token);
} else if (data.type === 'complete') {
// Streaming complete
closeWebSocket();
textarea.disabled = false;
sendBtn.disabled = false;
sendBtn.textContent = 'Send';
textarea.focus();
messagesDiv.scrollTop = messagesDiv.scrollHeight;
loadConversations();
} else if (data.type === 'error') {
addSystemMessage(`Error: ${data.message}`);
closeWebSocket();
textarea.disabled = false;
sendBtn.disabled = false;
sendBtn.textContent = 'Send';
textarea.focus();
}
} catch (e) {
console.error('Failed to parse WebSocket message:', e);
}
};
ws.onerror = function(error) {
console.error('WebSocket error:', error);
addSystemMessage('WebSocket connection error. Falling back to regular chat.');
closeWebSocket();
sendMessage(); // Retry with fetch
};
ws.onclose = function() {
wsMessageId = null;
};
return true; // WebSocket attempt started
} catch (error) {
console.error('Failed to create WebSocket:', error);
return false; // Fall back to fetch
}
}
// Add a token to the last assistant message (for streaming)
function addTokenToLastMessage(token) {
const messages = messagesDiv.querySelectorAll('.message.assistant');
if (messages.length === 0) {
// No assistant message yet, create one
const msgDiv = document.createElement('div');
msgDiv.className = 'message assistant';
msgDiv.innerHTML = `
<div class="role">assistant</div>
<div class="content streaming"></div>
`;
messagesDiv.appendChild(msgDiv);
}
const lastMsg = messagesDiv.querySelector('.message.assistant .content.streaming');
if (lastMsg) {
lastMsg.textContent += token;
messagesDiv.scrollTop = messagesDiv.scrollHeight;
}
}
// Close WebSocket connection
function closeWebSocket() {
if (ws) {
ws.onopen = null;
ws.onmessage = null;
ws.onerror = null;
ws.onclose = null;
ws.close();
ws = null;
}
}
// Event listeners
sendBtn.addEventListener('click', sendMessage);

View file

@ -1,12 +1,6 @@
FROM caddy:latest
RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh \
nodejs npm
# Claude Code CLI — chat backend runtime (merged from docker/chat, #1083)
RUN npm install -g @anthropic-ai/claude-code@2.1.84
COPY docker/edge/entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh
# Chat server and UI (merged from docker/chat into edge, #1083)
COPY docker/chat/server.py /usr/local/bin/chat-server.py
COPY docker/chat/ui/ /var/chat/ui/
RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh
COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh
VOLUME /data

View file

@ -560,168 +560,10 @@ _launch_runner_docker() {
# _launch_runner_nomad ACTION_ID SECRETS_CSV MOUNTS_CSV
#
# Dispatches a vault-runner batch job via `nomad job dispatch`.
# Polls `nomad job status` until terminal state (completed/failed).
# Reads exit code from allocation and writes <action-id>.result.json.
#
# Usage: _launch_runner_nomad <action_id> <secrets_csv> <mounts_csv>
# Returns: exit code of the nomad job (0=success, non-zero=failure)
# Nomad backend stub — will be implemented in migration Step 5.
_launch_runner_nomad() {
local action_id="$1"
local secrets_csv="$2"
local mounts_csv="$3"
log "Dispatching vault-runner batch job via Nomad for action: ${action_id}"
# Dispatch the parameterized batch job
# The vault-runner job expects meta: action_id, secrets_csv
# Note: mounts_csv is not passed as meta (not declared in vault-runner.hcl)
local dispatch_output
dispatch_output=$(nomad job dispatch \
-detach \
-meta action_id="$action_id" \
-meta secrets_csv="$secrets_csv" \
vault-runner 2>&1) || {
log "ERROR: Failed to dispatch vault-runner job for ${action_id}"
log "Dispatch output: ${dispatch_output}"
write_result "$action_id" 1 "Nomad dispatch failed: ${dispatch_output}"
echo "nomad backend not yet implemented" >&2
return 1
}
# Extract dispatched job ID from output (format: "vault-runner/dispatch-<timestamp>-<uuid>")
local dispatched_job_id
dispatched_job_id=$(echo "$dispatch_output" | grep -oP '(?<=Dispatched Job ID = ).+' || true)
if [ -z "$dispatched_job_id" ]; then
log "ERROR: Could not extract dispatched job ID from nomad output"
log "Dispatch output: ${dispatch_output}"
write_result "$action_id" 1 "Could not extract dispatched job ID from nomad output"
return 1
fi
log "Dispatched vault-runner with job ID: ${dispatched_job_id}"
# Poll job status until terminal state
# Batch jobs transition: running -> completed/failed
local max_wait=300 # 5 minutes max wait
local elapsed=0
local poll_interval=5
local alloc_id=""
log "Polling nomad job status for ${dispatched_job_id}..."
while [ "$elapsed" -lt "$max_wait" ]; do
# Get job status with JSON output for the dispatched child job
local job_status_json
job_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || {
log "ERROR: Failed to get job status for ${dispatched_job_id}"
write_result "$action_id" 1 "Failed to get job status for ${dispatched_job_id}"
return 1
}
# Check job status field (transitions to "dead" on completion)
local job_state
job_state=$(echo "$job_status_json" | jq -r '.Status // empty' 2>/dev/null) || job_state=""
# Check allocation state directly
alloc_id=$(echo "$job_status_json" | jq -r '.Allocations[0]?.ID // empty' 2>/dev/null) || alloc_id=""
if [ -n "$alloc_id" ]; then
local alloc_state
alloc_state=$(nomad alloc status -short "$alloc_id" 2>/dev/null || true)
case "$alloc_state" in
*completed*|*success*|*dead*)
log "Allocation ${alloc_id} reached terminal state: ${alloc_state}"
break
;;
*running*|*pending*|*starting*)
log "Allocation ${alloc_id} still running (state: ${alloc_state})..."
;;
*failed*|*crashed*)
log "Allocation ${alloc_id} failed (state: ${alloc_state})"
break
;;
esac
fi
# Also check job-level state
case "$job_state" in
dead)
log "Job ${dispatched_job_id} reached terminal state: ${job_state}"
break
;;
failed)
log "Job ${dispatched_job_id} failed"
break
;;
esac
sleep "$poll_interval"
elapsed=$((elapsed + poll_interval))
done
if [ "$elapsed" -ge "$max_wait" ]; then
log "ERROR: Timeout waiting for vault-runner job to complete"
write_result "$action_id" 1 "Timeout waiting for nomad job to complete"
return 1
fi
# Get final job status and exit code
local final_status_json
final_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || {
log "ERROR: Failed to get final job status"
write_result "$action_id" 1 "Failed to get final job status"
return 1
}
# Get allocation exit code
local exit_code=0
local logs=""
if [ -n "$alloc_id" ]; then
# Get allocation logs
logs=$(nomad alloc logs -short "$alloc_id" 2>/dev/null || true)
# Try to get exit code from alloc status JSON
# Nomad alloc status -json has .TaskStates["<task_name>"].Events[].ExitCode
local alloc_exit_code
alloc_exit_code=$(nomad alloc status -json "$alloc_id" 2>/dev/null | jq -r '.TaskStates["runner"].Events[-1].ExitCode // empty' 2>/dev/null) || alloc_exit_code=""
if [ -n "$alloc_exit_code" ] && [ "$alloc_exit_code" != "null" ]; then
exit_code="$alloc_exit_code"
fi
fi
# If we couldn't get exit code from alloc, check job state as fallback
# Note: "dead" = terminal state for batch jobs (includes successful completion)
# Only "failed" indicates actual failure
if [ "$exit_code" -eq 0 ]; then
local final_state
final_state=$(echo "$final_status_json" | jq -r '.Status // empty' 2>/dev/null) || final_state=""
case "$final_state" in
failed)
exit_code=1
;;
esac
fi
# Truncate logs if too long
if [ ${#logs} -gt 1000 ]; then
logs="${logs: -1000}"
fi
# Write result file
write_result "$action_id" "$exit_code" "$logs"
if [ "$exit_code" -eq 0 ]; then
log "Vault-runner job completed successfully for action: ${action_id}"
else
log "Vault-runner job failed for action: ${action_id} (exit code: ${exit_code})"
fi
return "$exit_code"
}
# Launch runner for the given action (backend-agnostic orchestrator)
@ -1209,8 +1051,11 @@ main() {
# Validate backend selection at startup
case "$DISPATCHER_BACKEND" in
docker|nomad)
log "Using ${DISPATCHER_BACKEND} backend for vault-runner dispatch"
docker) ;;
nomad)
log "ERROR: nomad backend not yet implemented"
echo "nomad backend not yet implemented" >&2
exit 1
;;
*)
log "ERROR: unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND}"

View file

@ -173,15 +173,11 @@ PROJECT_TOML="${PROJECT_TOML:-projects/disinto.toml}"
sleep 1200 # 20 minutes
done) &
# ── Load optional secrets from secrets/*.enc (#777) ────────────────────
# Engagement collection (collect-engagement.sh) requires CADDY_ secrets to
# SCP access logs from a remote edge host. When age key or secrets dir is
# missing, or any secret fails to decrypt, log a warning and skip the cron.
# Caddy itself does not depend on these secrets.
# ── Load required secrets from secrets/*.enc (#777) ────────────────────
# Edge container declares its required secrets; missing ones cause a hard fail.
_AGE_KEY_FILE="${HOME}/.config/sops/age/keys.txt"
_SECRETS_DIR="/opt/disinto/secrets"
EDGE_REQUIRED_SECRETS="CADDY_SSH_KEY CADDY_SSH_HOST CADDY_SSH_USER CADDY_ACCESS_LOG"
EDGE_ENGAGEMENT_READY=0 # Assume not ready until proven otherwise
_edge_decrypt_secret() {
local enc_path="${_SECRETS_DIR}/${1}.enc"
@ -196,25 +192,22 @@ if [ -f "$_AGE_KEY_FILE" ] && [ -d "$_SECRETS_DIR" ]; then
export "$_secret_name=$_val"
done
if [ -n "$_missing" ]; then
echo "WARN: required engagement secrets missing from secrets/*.enc:${_missing}" >&2
echo " collect-engagement cron will be skipped. Run 'disinto secrets add <NAME>' to enable." >&2
EDGE_ENGAGEMENT_READY=0
else
echo "edge: loaded required engagement secrets: ${EDGE_REQUIRED_SECRETS}" >&2
EDGE_ENGAGEMENT_READY=1
echo "FATAL: required secrets missing from secrets/*.enc:${_missing}" >&2
echo " Run 'disinto secrets add <NAME>' for each missing secret." >&2
echo " If migrating from .env.vault.enc, run 'disinto secrets migrate-from-vault' first." >&2
exit 1
fi
echo "edge: loaded required secrets: ${EDGE_REQUIRED_SECRETS}" >&2
else
echo "WARN: age key (${_AGE_KEY_FILE}) or secrets dir (${_SECRETS_DIR}) not found — engagement secrets unavailable" >&2
echo " collect-engagement cron will be skipped. Run 'disinto secrets add <NAME>' to enable." >&2
EDGE_ENGAGEMENT_READY=0
echo "FATAL: age key (${_AGE_KEY_FILE}) or secrets dir (${_SECRETS_DIR}) not found — cannot load required secrets" >&2
echo " Ensure age is installed and secrets/*.enc files are present." >&2
exit 1
fi
# Start daily engagement collection cron loop in background (#745)
# Runs collect-engagement.sh daily at ~23:50 UTC via a sleep loop that
# calculates seconds until the next 23:50 window. SSH key from secrets/*.enc (#777).
# Guarded: only start if EDGE_ENGAGEMENT_READY=1.
if [ "$EDGE_ENGAGEMENT_READY" -eq 1 ]; then
(while true; do
(while true; do
# Calculate seconds until next 23:50 UTC
_now=$(date -u +%s)
_target=$(date -u -d "today 23:50" +%s 2>/dev/null || date -u -d "23:50" +%s 2>/dev/null || echo 0)
@ -239,20 +232,7 @@ if [ "$EDGE_ENGAGEMENT_READY" -eq 1 ]; then
echo "edge: collect-engagement: fetched log is empty, skipping parse" >&2
fi
rm -f "$_fetch_log"
done) &
else
echo "edge: collect-engagement cron skipped (EDGE_ENGAGEMENT_READY=0)" >&2
fi
# Start chat server in background (#1083 — merged from docker/chat into edge)
(python3 /usr/local/bin/chat-server.py 2>&1 | tee -a /opt/disinto-logs/chat.log) &
# Nomad template renders Caddyfile to /local/Caddyfile via service discovery;
# copy it into the expected location if present (compose uses the mounted path).
if [ -f /local/Caddyfile ]; then
cp /local/Caddyfile /etc/caddy/Caddyfile
echo "edge: using Nomad-rendered Caddyfile from /local/Caddyfile" >&2
fi
done) &
# Caddy as main process — run in foreground via wait so background jobs survive
# (exec replaces the shell, which can orphan backgrounded subshells)

View file

@ -1,183 +0,0 @@
# Nomad Cutover Runbook
End-to-end procedure to cut over the disinto factory from docker-compose on
disinto-dev-box to Nomad on disinto-nomad-box.
**Target**: disinto-nomad-box (10.10.10.216) becomes production; disinto-dev-box
stays warm for rollback.
**Downtime budget**: <5 min blue-green flip.
**Data scope**: Forgejo issues + disinto-ops git bundle only. Everything else is
regenerated or discarded. OAuth secrets are regenerated on fresh init (all
sessions invalidated).
---
## 1. Pre-cutover readiness checklist
- [ ] Nomad + Vault stack healthy on a fresh wipe+init (step 5 verified)
- [ ] Codeberg mirror current — `git log` parity between dev-box Forgejo and
Codeberg
- [ ] SSH key pair generated for nomad-box, registered on DO edge (see §4.6)
- [ ] Companion tools landed:
- `disinto backup create` (#1057)
- `disinto backup import` (#1058)
- [ ] Backup tarball produced and tested against a scratch LXC (see §3)
---
## 2. Pre-cutover artifact: backup
On disinto-dev-box:
```bash
./bin/disinto backup create /tmp/disinto-backup-$(date +%Y%m%d).tar.gz
```
Copy the tarball to nomad-box (and optionally to a local workstation for
safekeeping):
```bash
scp /tmp/disinto-backup-*.tar.gz nomad-box:/tmp/
```
---
## 3. Pre-cutover dry-run
On a throwaway LXC:
```bash
lxc launch ubuntu:24.04 cutover-dryrun
# inside the container:
disinto init --backend=nomad --import-env .env --with edge
./bin/disinto backup import /tmp/disinto-backup-*.tar.gz
```
Verify (a minimal check sketch follows this list):
- Issue count matches source Forgejo
- disinto-ops repo refs match source bundle
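A minimal spot-check for both items, assuming the source Forgejo is reachable at
`https://self.disinto.ai` and a read token is at hand (the repo slug, paths, and token
handling below are illustrative):

```bash
# Issue count on the source Forgejo (X-Total-Count header on the list endpoint)
curl -s -D - -o /dev/null -H "Authorization: token ${FORGE_TOKEN}" \
  "https://self.disinto.ai/api/v1/repos/disinto-admin/disinto/issues?state=all&limit=1" \
  | grep -i '^x-total-count'

# Ref parity: compare the source bundle against the imported disinto-ops repo
git ls-remote /tmp/disinto-ops.bundle | sort > /tmp/refs.source
git ls-remote /path/to/imported/disinto-ops | sort > /tmp/refs.imported
diff /tmp/refs.source /tmp/refs.imported && echo "refs match"
```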
Destroy the LXC once satisfied:
```bash
lxc delete cutover-dryrun --force
```
---
## 4. Cutover T-0 (operator executes; <5 min target)
### 4.1 Stop dev-box services
```bash
# On disinto-dev-box — stop, do NOT remove volumes (rollback needs them)
docker-compose stop
```
### 4.2 Provision nomad-box (if not already done)
```bash
# On disinto-nomad-box
disinto init --backend=nomad --import-env .env --with edge
```
### 4.3 Import backup
```bash
# On disinto-nomad-box
./bin/disinto backup import /tmp/disinto-backup-*.tar.gz
```
### 4.4 Configure Codeberg pull mirror
Manual, one-time step in the new Forgejo UI:
1. Create a mirror repository pointing at the Codeberg upstream
2. Confirm initial sync completes
### 4.5 Claude login
```bash
# On disinto-nomad-box
claude login
```
Set up Anthropic OAuth so agents can authenticate.
### 4.6 Autossh tunnel swap
> **Operator step** — cross-host, no dev-agent involvement. Do NOT automate.
1. Stop the tunnel on dev-box:
```bash
# On disinto-dev-box
systemctl stop reverse-tunnel
```
2. Copy or regenerate the tunnel unit on nomad-box:
```bash
# Copy from dev-box, or let init regenerate it
scp dev-box:/etc/systemd/system/reverse-tunnel.service \
nomad-box:/etc/systemd/system/
```
3. Register nomad-box's public key on DO edge:
```bash
# On DO edge box — same restricted-command as the dev-box key
echo "<nomad-box-pubkey>" >> /home/johba/.ssh/authorized_keys
```
4. Start the tunnel on nomad-box:
```bash
# On disinto-nomad-box
systemctl enable --now reverse-tunnel
```
5. Verify end-to-end:
```bash
curl https://self.disinto.ai/api/v1/version
# Should return the new box's Forgejo version
```
---
## 5. Post-cutover smoke
- [ ] `curl https://self.disinto.ai` → Forgejo welcome page
- [ ] Create a test PR → Woodpecker pipeline runs → agents assign and work
- [ ] Claude chat login via Forgejo OAuth succeeds
---
## 6. Rollback (if any step 4 gate fails)
1. Stop the tunnel on nomad-box:
```bash
systemctl stop reverse-tunnel # on nomad-box
```
2. Restore the tunnel on dev-box:
```bash
systemctl start reverse-tunnel # on dev-box
```
3. Bring dev-box services back up:
```bash
docker-compose up -d # on dev-box
```
4. DO Caddy config is unchanged — traffic restores in <5 min.
5. File a post-mortem issue. Keep nomad-box state intact for debugging.
---
## 7. Post-stable cleanup (T+1 week)
- `docker-compose down -v` on dev-box
- Archive `/var/lib/docker/volumes/disinto_*` to cold storage (see the sketch after this list)
- Delete disinto-dev-box LXC or keep as permanent rollback reserve (operator
decision)
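A minimal sketch for the volume-archive step (destination path is illustrative; run it
before `docker-compose down -v`, since `down -v` deletes the named volumes):

```bash
# On disinto-dev-box: pack all disinto_* named volumes into one cold-storage tarball
sudo sh -c 'cd /var/lib/docker/volumes && tar -czf /srv/cold-storage/disinto-volumes-$(date +%Y%m%d).tar.gz disinto_*'
```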

View file

@ -29,7 +29,7 @@ and injected into your prompt above. Review them now.
1. Read the injected metrics data carefully (System Resources, Docker,
Active Sessions, Phase Files, Stale Phase Cleanup, Lock Files, Agent Logs,
CI Pipelines, Open PRs, Issue Status, Stale Worktrees, **Woodpecker Agent Health**).
CI Pipelines, Open PRs, Issue Status, Stale Worktrees).
Note: preflight.sh auto-removes PHASE:escalate files for closed issues
(24h grace period). Check the "Stale Phase Cleanup" section for any
files cleaned or in grace period this run.
@ -75,10 +75,6 @@ Categorize every finding from the metrics into priority levels.
- Dev/action sessions in PHASE:escalate for > 24h (session timeout)
(Note: PHASE:escalate files for closed issues are auto-cleaned by preflight;
this check covers sessions where the issue is still open)
- **Woodpecker agent unhealthy** (see "Woodpecker Agent Health" section in preflight):
- Container not running or in unhealthy state
- gRPC errors >= 3 in last 20 minutes
- Fast-failure pipelines (duration < 60s) >= 3 in last 15 minutes
### P3 — Factory degraded
- PRs stale: CI finished >20min ago AND no git push to the PR branch since CI completed
@ -104,15 +100,6 @@ For each finding from the health assessment, decide and execute an action.
### Auto-fixable (execute these directly)
**P2 Woodpecker agent unhealthy:**
The supervisor-run.sh script automatically handles WP agent recovery:
- Detects unhealthy state via preflight.sh health checks
- Restarts container via `docker restart`
- Scans for `blocked: ci_exhausted` issues updated in last 30 minutes
- Unassigns and removes blocked label from affected issues
- Posts recovery comment with infra-flake context
- Avoids duplicate restarts via 5-minute cooldown in history file
**P0 Memory crisis:**
# Kill stale one-shot claude processes (>3h old)
pgrep -f "claude -p" --older 10800 2>/dev/null | xargs kill 2>/dev/null || true
@ -261,11 +248,6 @@ Format:
- <what was fixed>
(or "No actions needed")
### WP Agent Recovery (if applicable)
- WP agent restart: <time of restart or "none">
- Issues recovered: <count>
- Reason: <health check reason or "healthy">
### Vault items filed
- vault/pending/<id>.md <reason>
(or "None")

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Gardener Agent
**Role**: Backlog grooming — detect duplicate issues, missing acceptance

View file

View file

@ -1 +1,117 @@
[]
[
{
"action": "edit_body",
"issue": 900,
"body": "Flagged by AI reviewer in PR #897.\n\n## Problem\n\nThe policy at `vault/policies/service-forgejo.hcl` grants:\n\n```hcl\npath \"kv/data/disinto/shared/forgejo/*\" {\n capabilities = [\"read\"]\n}\n```\n\nBut the consul-template stanza in `nomad/jobs/forgejo.hcl` reads:\n\n```\n{{- with secret \"kv/data/disinto/shared/forgejo\" -}}\n```\n\nVault glob `/*` requires at least one path segment after `forgejo/` (e.g. `forgejo/subkey`). It does **not** match the bare path `kv/data/disinto/shared/forgejo` that the template actually calls. Vault ACL longest-prefix matching: `forgejo/*` is never hit for a request to `forgejo`.\n\nRuntime consequence: consul-template `with` block receives a 403 permission denied → evaluates to empty (false) → `else` branch renders `seed-me` placeholder values → Forgejo starts with obviously-wrong secrets despite `vault-seed-forgejo.sh` having run successfully.\n\n## Fix\n\nReplace the glob with an exact path in `vault/policies/service-forgejo.hcl`:\n\n```hcl\npath \"kv/data/disinto/shared/forgejo\" {\n capabilities = [\"read\"]\n}\n\npath \"kv/metadata/disinto/shared/forgejo\" {\n capabilities = [\"list\", \"read\"]\n}\n```\n\n(The `/*` glob is only useful if future subkeys are written under `forgejo/`; the current design stores both secrets in a single KV document at the `forgejo` path.)\n\nThis is a pre-existing defect in `vault/policies/service-forgejo.hcl`; that file was not changed by PR #897.\n\n---\n*Auto-created from AI review*\n\n## Affected files\n- `vault/policies/service-forgejo.hcl` — replace glob path with exact path + metadata path\n\n## Acceptance criteria\n- [ ] `vault/policies/service-forgejo.hcl` grants exact path `kv/data/disinto/shared/forgejo` (not `forgejo/*`)\n- [ ] Metadata path `kv/metadata/disinto/shared/forgejo` is also granted read+list\n- [ ] consul-template `with secret \"kv/data/disinto/shared/forgejo\"` resolves without 403 (verified via `vault policy read service-forgejo`)\n- [ ] `shellcheck` clean (no shell changes expected)\n"
},
{
"action": "add_label",
"issue": 900,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 898,
"body": "Flagged by AI reviewer in PR #889.\n\n## Problem\n\n`tools/vault-import.sh` serializes each entry in `ops_data` as `\"${source_value}|${status}\"` (line 498). Extraction at lines 510-511 uses `${data%%|*}` (first field) and `${data##*|}` (last field). If `source_value` contains a literal `|`, `${data%%|*}` truncates it to the first segment, silently writing a corrupted value to Vault.\n\nThe same separator is used in `paths_to_write` (line 519) to join multiple kv-pairs for a path. When `IFS=\"|\"` splits the string back into an array (line 540), a value containing `|` is split across array elements, corrupting the write.\n\n## Failure mode\n\nAny secret value with a pipe character (e.g. a generated password or composed token like `abc|xyz`) is silently truncated or misrouted on import. No error is emitted.\n\n## Fix\n\nReplace the `|`-delimited string with a bash indexed array for accumulating per-path kv pairs, eliminating the need for a delimiter that conflicts with possible value characters.\n\n---\n*Auto-created from AI review of PR #889*\n\n## Affected files\n- `tools/vault-import.sh` — replace pipe-delimited string accumulation with bash indexed arrays (lines ~498540)\n\n## Acceptance criteria\n- [ ] A secret value containing `|` (e.g. `abc|xyz`) is imported to Vault without truncation or corruption\n- [ ] No regression for values without `|`\n- [ ] `shellcheck` clean\n"
},
{
"action": "add_label",
"issue": 898,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 893,
"body": "Flagged by AI reviewer in PR #892.\n\n## Problem\n\n`disinto init --build` generates the `agents:` service by first emitting `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` and then running a `sed -i` substitution (`lib/generators.sh:793`) that replaces the `image:` line with a `build:` block. The substitution does not add `pull_policy: build`.\n\nResult: `docker compose up` with `--build`-generated compose files still uses the cached image for the base `agents:` service, even when `docker/agents/` source has changed — the same silent-stale-image bug that #887 fixed for the three local-model service stanzas.\n\n## Fix\n\nThe `sed` substitution on line 793 should also inject `pull_policy: build` after the emitted `build:` block.\n\n---\n*Auto-created from AI review of PR #892*\n\n## Affected files\n- `lib/generators.sh` (line ~793) — add `pull_policy: build` to the agents service sed substitution\n\n## Acceptance criteria\n- [ ] `disinto init --build`-generated compose file includes `pull_policy: build` in the `agents:` service stanza\n- [ ] `docker compose up` rebuilds the agents image from local source when `docker/agents/` changes\n- [ ] Non-`--build` compose generation is unchanged\n- [ ] `shellcheck` clean\n"
},
{
"action": "add_label",
"issue": 893,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 890,
"body": "Flagged by AI reviewer in PR #888.\n\n## Problem\n\n`lib/hvault.sh` functions `hvault_kv_get`, `hvault_kv_put`, and `hvault_kv_list` all hardcode `secret/data/` and `secret/metadata/` as KV v2 path prefixes (lines 117, 157, 173).\n\nThe Nomad+Vault migration (S2.1, #879) establishes `kv/` as the mount name for all factory secrets — every policy in `vault/policies/*.hcl` grants ACL on `kv/data/disinto/...` paths.\n\nIf any agent calls `hvault_kv_get` after the migration, Vault will route the request to `secret/data/...` but the token only holds ACL for `kv/data/...`, producing a 403 Forbidden.\n\n## Fix\n\nChange the mount prefix in `hvault_kv_get`, `hvault_kv_put`, and `hvault_kv_list` from `secret/` to `kv/`, or make the mount name configurable via `VAULT_KV_MOUNT` (defaulting to `kv`). Coordinate with S2.2 (#880) which writes secrets into the `kv/` mount.\n\n---\n*Auto-created from AI review of PR #888*\n\n## Affected files\n- `lib/hvault.sh` — change `secret/data/` and `secret/metadata/` prefixes to `kv/data/` and `kv/metadata/` (lines ~117, 157, 173); optionally make configurable via `VAULT_KV_MOUNT`\n\n## Acceptance criteria\n- [ ] `hvault_kv_get`, `hvault_kv_put`, `hvault_kv_list` use `kv/` mount prefix (not `secret/`)\n- [ ] Agents can read/write KV paths that policies in `vault/policies/*.hcl` grant (no 403)\n- [ ] Optionally: `VAULT_KV_MOUNT` env var overrides the mount name (defaults to `kv`)\n- [ ] `shellcheck` clean\n"
},
{
"action": "add_label",
"issue": 890,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 877,
"body": "Flagged by AI reviewer in PR #875.\n\n## Problem\n\n`validate_projects_dir()` in `docker/agents/entrypoint.sh` uses a command substitution that triggers `set -e` before the intended error-logging branch runs:\n\n```bash\ntoml_count=$(compgen -G \"${DISINTO_DIR}/projects/*.toml\" 2>/dev/null | wc -l)\n```\n\nWhen no `.toml` files are present, `compgen -G` exits 1. With `pipefail`, the pipeline exits 1. `set -e` causes the script to exit before `if [ \"$toml_count\" -eq 0 ]` is evaluated, so the FATAL diagnostic messages are never printed. The container still fast-fails (correct outcome), but the operator sees no explanation.\n\nEvery other `compgen -G` usage in the file uses the safer conditional pattern (lines 259, 322).\n\n## Fix\n\nReplace the `wc -l` pattern with:\n\n```bash\nif ! compgen -G \"${DISINTO_DIR}/projects/*.toml\" >/dev/null 2>&1; then\n log \"FATAL: No real .toml files found in ${DISINTO_DIR}/projects/\"\n ...\n exit 1\nfi\n```\n\n---\n*Auto-created from AI review*\n\n## Affected files\n- `docker/agents/entrypoint.sh` — fix `validate_projects_dir()` to use conditional compgen pattern instead of `wc -l` pipeline\n\n## Acceptance criteria\n- [ ] When no `.toml` files are present, the FATAL message is printed before the container exits\n- [ ] Container still exits non-zero in that case\n- [ ] Matches the pattern already used at lines 259 and 322\n- [ ] `shellcheck` clean\n"
},
{
"action": "add_label",
"issue": 877,
"label": "backlog"
},
{
"action": "add_label",
"issue": 773,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 883,
"body": "Part of the Nomad+Vault migration. **Step 2 — Vault policies + workload identity + secrets import.**\n\n~~**Blocked by: #880 (S2.2), #881 (S2.3).**~~ Dependencies closed; unblocked.\n\n## Goal\n\nWire the Step-2 building blocks (import, auth, policies) into `bin/disinto init --backend=nomad` so a single command on a fresh LXC provisions cluster + policies + auth + imports secrets + deploys services.\n\n## Scope\n\nAdd flags to `disinto init --backend=nomad`:\n\n- `--import-env PATH` — points at an existing `.env` (from old stack).\n- `--import-sops PATH` — points at the sops-encrypted `.env.vault.enc`.\n- `--age-key PATH` — points at the sops age keyfile (required if `--import-sops` is set).\n\nFlow when any of `--import-*` is set:\n\n1. `cluster-up.sh` (Step 0, unchanged).\n2. `tools/vault-apply-policies.sh` (S2.1, idempotent).\n3. `lib/init/nomad/vault-nomad-auth.sh` (S2.3, idempotent).\n4. `tools/vault-import.sh --env PATH --sops PATH --age-key PATH` (S2.2).\n5. If `--with <service>` was also passed, `lib/init/nomad/deploy.sh <service>` (Step 1, unchanged).\n6. Final summary: cluster + policies + auth + imported secrets count + deployed services + ports.\n\nFlow when **no** import flags are set:\n- Skip step 4; still apply policies + auth.\n- Log: `[import] no --import-env/--import-sops — skipping; set them or seed kv/disinto/* manually before deploying secret-dependent services`.\n\nFlag validation:\n- `--import-sops` without `--age-key` → error.\n- `--age-key` without `--import-sops` → error.\n- `--import-env` alone (no sops) → OK.\n- `--backend=docker` + any `--import-*` → error.\n\n## Affected files\n- `bin/disinto` — add `--import-env`, `--import-sops`, `--age-key` flags to `init --backend=nomad`\n- `docs/nomad-migration.md` (new) — cutover-day invocation shape\n- `lib/init/nomad/vault-nomad-auth.sh` (S2.3) — called as step 3\n- `tools/vault-import.sh` (S2.2) — called as step 4\n- `tools/vault-apply-policies.sh` (S2.1) — called as step 2\n\n## Acceptance criteria\n- [ ] `disinto init --backend=nomad --import-env /tmp/.env --import-sops /tmp/.enc --age-key /tmp/keys.txt --with forgejo` completes: cluster up, policies applied, JWT auth configured, KV populated, Forgejo deployed reading Vault secrets\n- [ ] Re-running is a no-op at every layer\n- [ ] `--import-sops` without `--age-key` exits with a clear error\n- [ ] `--backend=docker` with `--import-env` exits with a clear error\n- [ ] `--dry-run` prints the full plan, touches nothing\n- [ ] Never logs a secret value\n- [ ] `shellcheck` clean\n"
},
{
"action": "remove_label",
"issue": 883,
"label": "blocked"
},
{
"action": "add_label",
"issue": 883,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 884,
"body": "Part of the Nomad+Vault migration. **Step 2 — Vault policies + workload identity + secrets import.**\n\nS2.1 (#879) is now closed; this step has no blocking dependencies.\n\n## Goal\n\nExtend the Woodpecker CI to validate Vault policy HCL files under `vault/policies/` and role definitions.\n\n## Scope\n\nExtend `.woodpecker/nomad-validate.yml`:\n\n- `vault policy fmt -check vault/policies/*.hcl` — fails on unformatted HCL.\n- `for f in vault/policies/*.hcl; do vault policy validate \"$f\"; done` — syntax + semantic validation (requires a dev-mode vault spun inline).\n- If `vault/roles.yaml` exists: yamllint check + custom validator that each role references a policy file that actually exists in `vault/policies/`.\n- Secret-scan gate: ensure no policy file contains what looks like a literal secret.\n- Trigger: on any PR touching `vault/policies/`, `vault/roles.yaml`, or `lib/init/nomad/vault-*.sh`.\n\nAlso:\n- Add `vault/policies/AGENTS.md` cross-reference: policy lifecycle (add policy HCL → update roles.yaml → add Vault KV path), what CI enforces, common failure modes.\n\n## Non-goals\n\n- No runtime check against a real cluster.\n- No enforcement of specific naming conventions beyond what S2.1 docs describe.\n\n## Affected files\n- `.woodpecker/nomad-validate.yml` — add vault policy fmt + validate + roles.yaml gates\n- `vault/policies/AGENTS.md` (new) — policy lifecycle documentation\n\n## Acceptance criteria\n- [ ] Deliberately broken policy HCL (typo in `path` block) fails CI with the vault-fmt error\n- [ ] Policy that references a non-existent capability (e.g. `\"frobnicate\"`) fails validation\n- [ ] `vault/roles.yaml` referencing a policy not in `vault/policies/` fails CI\n- [ ] Clean PRs pass within normal pipeline time budget\n- [ ] Existing S0.5 + S1.4 CI gates unaffected\n- [ ] `shellcheck` clean on any shell added\n"
},
{
"action": "remove_label",
"issue": 884,
"label": "blocked"
},
{
"action": "add_label",
"issue": 884,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 846,
"body": "## Problem\n\nLlama-backed sidecar agents can be activated through two different mechanisms:\n\n1. **Legacy:** `ENABLE_LLAMA_AGENT=1` env flag toggles a hardcoded `agents-llama` service block in `docker-compose.yml`.\n2. **Modern:** `[agents.X]` TOML block consumed by `hire-an-agent`, emitting a service per block.\n\nNeither the docs nor the CLI explain which path wins. Setting both produces a YAML `mapping key \"agents-llama\" already defined` error from compose because the service block is duplicated.\n\n## Sub-symptom: env-var naming collision\n\nThe two paths key secrets differently:\n\n- Legacy: `FORGE_TOKEN_LLAMA`, `FORGE_PASS_LLAMA`.\n- Modern: `FORGE_TOKEN_<FORGE_USER_UPPER>` — e.g. `FORGE_TOKEN_DEV_QWEN`.\n\nA user migrating between paths ends up with two sets of secrets in `.env`, neither cleanly mapped to the currently-active service block. Silent auth failures (401 from Forgejo) follow.\n\n## Proposal\n\n- Pick the TOML `[agents.X]` path as canonical.\n- Remove the `ENABLE_LLAMA_AGENT` branch and its hardcoded service block from the generator.\n- Detection of `ENABLE_LLAMA_AGENT` in `.env` at `disinto up` time: hard-fail immediately with a migration message (option (a) — simpler, no external consumers depend on this flag).\n\n~~Dependencies: #845, #847~~ — both now closed; unblocked.\n\nRelated: #845, #847.\n\n## Affected files\n- `lib/generators.sh` — remove `ENABLE_LLAMA_AGENT` branch and hardcoded `agents-llama:` service block\n- `docker/agents/entrypoint.sh` — detect `ENABLE_LLAMA_AGENT` in env, emit migration error\n- `.env.example` — remove `ENABLE_LLAMA_AGENT`\n- `docs/agents-llama.md` — update to document TOML `[agents.X]` as the one canonical path\n\n## Acceptance criteria\n- [ ] One documented activation path: TOML `[agents.X]` block\n- [ ] `ENABLE_LLAMA_AGENT` removed from compose generator; presence in `.env` at startup triggers a clear migration error naming the replacement\n- [ ] `.env.example` and `docs/agents-llama.md` updated\n- [ ] `shellcheck` clean\n"
},
{
"action": "remove_label",
"issue": 846,
"label": "blocked"
},
{
"action": "add_label",
"issue": 846,
"label": "backlog"
},
{
"action": "edit_body",
"issue": 850,
"body": "## Problem\n\nWhen the compose generator emits the same service name twice — e.g. both the legacy `ENABLE_LLAMA_AGENT=1` branch and a matching `[agents.llama]` TOML block produce an `agents-llama:` key — the failure is deferred all the way to `docker compose` YAML parsing:\n\n```\nfailed to parse /home/johba/disinto/docker-compose.yml: yaml: construct errors:\n line 4: line 431: mapping key \"agents-llama\" already defined at line 155\n```\n\nBy then, the user has already paid the cost of: pre-build binary downloads, generator run, Caddyfile regeneration. The only hint about what went wrong is a line number in a generated file. Root cause (dual activation) is not surfaced.\n\n## Fix\n\nAdd a generate-time guard to `lib/generators.sh`:\n\n- After collecting all service blocks to emit, compare the set of service names against duplicates.\n- If a duplicate is detected, abort with a clear message naming both source of truth (e.g. `\"agents-llama\" emitted twice — from ENABLE_LLAMA_AGENT=1 and from [agents.llama] in projects/disinto.toml; remove one`).\n\nEven after #846 resolves (one canonical activation path), this guard remains valuable as a safety net against future regressions or user misconfiguration (e.g. two TOML blocks with same `forge_user`).\n\n## Prior art: PR #872 (closed, branch `fix/issue-850` retained)\n\ndev-qwen's first attempt (`db009e3`) landed the dup-detection logic in `lib/generators.sh` correctly (unit test `tests/test-duplicate-service-detection.sh` passes all 3 cases), but the smoke test fails on CI.\n\n**Why the smoke test fails:** sections 1-7 of `smoke-init.sh` already run `bin/disinto init`, materializing `docker-compose.yml`. Section 8 re-invokes `bin/disinto init` to verify the dup guard fires — but `_generate_compose_impl` early-returns with `\"Compose: already exists, skipping\"` before reaching the dup-check.\n\n**Suggested fix:** in `tests/smoke-init.sh` section 8 (around line 452, before the second `bin/disinto init` invocation), add:\n\n```bash\nrm -f \"${FACTORY_ROOT}/docker-compose.yml\"\n```\n\nso the generator actually runs and the dup-detection path is exercised. Do **not** hoist the dup-check above the early-return.\n\nThe branch `fix/issue-850` is preserved as a starting point — pick up from `db009e3` and patch the smoke-test cleanup.\n\nRelated: #846.\n\n## Affected files\n- `lib/generators.sh` — duplicate service name check after collecting all service blocks\n- `tests/smoke-init.sh` — section 8: add `rm -f docker-compose.yml` before second `disinto init`\n- `tests/test-duplicate-service-detection.sh` (likely already correct from prior art)\n\n## Acceptance criteria\n- [ ] Running `disinto up` with a known duplicate activation produces a clear generator-time error naming both conflicting sources\n- [ ] Exit code non-zero before `docker compose` is invoked\n- [ ] Smoke test section 8 passes on CI (dup guard is actually exercised)\n- [ ] `shellcheck` clean\n"
},
{
"action": "remove_label",
"issue": 850,
"label": "blocked"
},
{
"action": "add_label",
"issue": 850,
"label": "backlog"
}
]

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Shared Helpers (`lib/`)
All agents source `lib/env.sh` as their first action. Additional helpers are
@ -7,7 +7,7 @@ sourced as needed.
| File | What it provides | Sourced by |
|---|---|---|
| `lib/env.sh` | Loads `.env`, sets `FACTORY_ROOT`, exports project config (`FORGE_REPO`, `PROJECT_NAME`, etc.), defines `log()`, `forge_api()`, `forge_api_all()` (paginates all pages; accepts optional second TOKEN parameter, defaults to `$FORGE_TOKEN`; handles invalid/empty JSON responses gracefully — returns empty on parse error instead of crashing), `woodpecker_api()`, `wpdb()`, `memory_guard()` (skips agent if RAM < threshold), `load_secret()` (secret-source abstraction see below). Auto-loads project TOML if `PROJECT_TOML` is set. Exports per-agent tokens (`FORGE_PLANNER_TOKEN`, `FORGE_GARDENER_TOKEN`, `FORGE_VAULT_TOKEN`, `FORGE_SUPERVISOR_TOKEN`, `FORGE_PREDICTOR_TOKEN`) each falls back to `$FORGE_TOKEN` if not set. **Vault-only token guard (AD-006)**: `unset GITHUB_TOKEN CLAWHUB_TOKEN` so agents never hold external-action tokens only the runner container receives them. **Container note**: when `DISINTO_CONTAINER=1`, `.env` is NOT re-sourced compose already injects env vars (including `FORGE_URL=http://forgejo:3000`) and re-sourcing would clobber them. **Save/restore scope (#364)**: only `FORGE_URL` is preserved across `.env` re-sourcing (compose injects `http://forgejo:3000`, `.env` has `http://localhost:3000`). `FORGE_TOKEN` is NOT preserved so refreshed tokens in `.env` take effect immediately. **Per-agent token override (#762)**: agent run scripts export `FORGE_TOKEN_OVERRIDE=<agent-specific-token>` BEFORE sourcing `env.sh`; `env.sh` applies this override at lines 98-100, ensuring the correct identity survives any re-sourcing of `env.sh` by nested shells or `claude -p` invocations. **Required env var**: `FORGE_PASS` bot password for git HTTP push (Forgejo 11.x rejects API tokens for `git push`, #361). **Hard preconditions (#674)**: `USER` and `HOME` must be exported by the entrypoint before sourcing. When `PROJECT_TOML` is set, `PROJECT_REPO_ROOT`, `PRIMARY_BRANCH`, and `OPS_REPO_ROOT` must also be set (by entrypoint or TOML). **`load_secret NAME [DEFAULT]` (#793)**: backend-agnostic secret resolution. Precedence: (1) `/secrets/<NAME>.env` Nomad-rendered template, (2) current environment already set by `.env.enc` / compose, (3) `secrets/<NAME>.enc` age-encrypted per-key file (decrypted on demand, cached in process env), (4) DEFAULT or empty. Consumers call `$(load_secret GITHUB_TOKEN)` instead of `${GITHUB_TOKEN}` identical behavior whether secrets come from Docker compose injection or Nomad Vault templates. | Every agent |
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs <pipeline_number> [--step <name>]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. `ci_get_step_logs <pipeline_num> <step_id>` — fetches per-step logs via Woodpecker REST API (`/repos/{id}/logs/{pipeline}/{step_id}`); returns raw log data for a single step. Used by `pr_poll_ci()` to build per-workflow/per-step CI diagnostics (#1051). | dev-poll, review-poll, review-pr |
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs <pipeline_number> [--step <name>]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. | dev-poll, review-poll, review-pr |
| `lib/ci-debug.sh` | CLI tool for Woodpecker CI: `list`, `status`, `logs`, `failures` subcommands. Not sourced — run directly. | Humans / dev-agent (tool access) |
| `lib/ci-log-reader.py` | Python tool: reads CI logs from Woodpecker SQLite database. `<pipeline_number> [--step <name>]` — returns last 200 lines from failed steps (or specified step). Used by `ci_get_logs()` in ci-helpers.sh. Requires `WOODPECKER_DATA_DIR` (default: /woodpecker-data). | ci-helpers.sh |
| `lib/load-project.sh` | Parses a `projects/*.toml` file into env vars (`PROJECT_NAME`, `FORGE_REPO`, `WOODPECKER_REPO_ID`, monitoring toggles, mirror config, etc.). Also exports `FORGE_REPO_OWNER` (the owner component of `FORGE_REPO`, e.g. `disinto-admin` from `disinto-admin/disinto`). Reads `repo_root` and `ops_repo_root` from the TOML for host-CLI callers. **Container path handling (#674)**: no longer derives `PROJECT_REPO_ROOT` or `OPS_REPO_ROOT` inside the script — container entrypoints export the correct paths before agent scripts source `env.sh`, and the `DISINTO_CONTAINER` guard (line 90) skips TOML overrides when those vars are already set. | env.sh (when `PROJECT_TOML` is set) |
@ -20,7 +20,7 @@ sourced as needed.
| `lib/stack-lock.sh` | File-based lock protocol for singleton project stack access. `stack_lock_acquire(holder, project)` — polls until free, breaks stale heartbeats (>10 min old), claims lock. `stack_lock_release(project)` — deletes lock file. `stack_lock_check(project)` — inspect current lock state. `stack_lock_heartbeat(project)` — update heartbeat timestamp (callers must call every 2 min while holding). Lock files at `~/data/locks/<project>-stack.lock`. | docker/edge/dispatcher.sh, reproduce formula |
| `lib/tea-helpers.sh` | `tea_file_issue(title, body, labels...)` — create issue via tea CLI with secret scanning; sets `FILED_ISSUE_NUM`. `tea_relabel(issue_num, labels...)` — replace labels using tea's `edit` subcommand (not `label`). `tea_comment(issue_num, body)` — add comment with secret scanning. `tea_close(issue_num)` — close issue. All use `TEA_LOGIN` and `FORGE_REPO` from env.sh. Labels by name (no ID lookup). Tea binary download verified via sha256 checksum. Sourced by env.sh when `tea` binary is available. | env.sh (conditional) |
| `lib/worktree.sh` | Reusable git worktree management: `worktree_create(path, branch, [base_ref])` — create worktree, checkout base, fetch submodules. `worktree_recover(path, branch, [remote])` — detect existing worktree, reuse if on correct branch (sets `_WORKTREE_REUSED`), otherwise clean and recreate. `worktree_cleanup(path)` — `git worktree remove --force`, clear Claude Code project cache (`~/.claude/projects/` matching path). `worktree_cleanup_stale([max_age_hours])` — scan `/tmp` for orphaned worktrees older than threshold, skip preserved and active tmux worktrees, prune. `worktree_preserve(path, reason)` — mark worktree as preserved for debugging (writes `.worktree-preserved` marker, skipped by stale cleanup). | dev-agent.sh, supervisor-run.sh, planner-run.sh, predictor-run.sh, gardener-run.sh |
| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. `pr_poll_ci()` builds a **per-workflow/per-step CI diagnostics prompt** (#1051): on failure, each failed workflow gets its own section with step name, exit code (annotated with standard meanings for 126/127/128), and step-local log tail (via `ci_get_step_logs`); passing workflows are listed explicitly so agents don't waste fix attempts on them. Falls back to legacy combined-log fetch if per-step API is unavailable. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) |
| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) |
| `lib/issue-lifecycle.sh` | Reusable issue lifecycle library: `issue_claim()` (add in-progress, remove backlog), `issue_release()` (remove in-progress, add backlog), `issue_block()` (post diagnostic comment with secret redaction, add blocked label), `issue_close()`, `issue_check_deps()` (parse deps, check transitive closure; sets `_ISSUE_BLOCKED_BY`, `_ISSUE_SUGGESTION`), `issue_suggest_next()` (find next unblocked backlog issue; sets `_ISSUE_NEXT`), `issue_post_refusal()` (structured refusal comment with dedup). Label IDs cached in globals on first lookup. Sources `lib/secret-scan.sh`. | dev-agent.sh (future) |
| `lib/action-vault.sh` | **Vault PR helper** — create vault action PRs on ops repo via Forgejo API (works from containers without SSH). `vault_request <action_id> <toml_content>` validates TOML (using `validate_vault_action` from `action-vault/vault-env.sh`), creates branch `vault/<action-id>`, writes `vault/actions/<action-id>.toml`, creates PR targeting `main` with title `vault: <action-id>` and body from context field, returns PR number. Idempotent: if PR exists, returns existing number. **Low-tier bypass**: if the action's `blast_radius` classifies as `low` (via `action-vault/classify.sh`), `vault_request` calls `_vault_commit_direct()` which commits directly to ops `main` using `FORGE_ADMIN_TOKEN` — no PR, no approval wait. Returns `0` (not a PR number) for direct commits. Requires `FORGE_TOKEN`, `FORGE_ADMIN_TOKEN` (low-tier only), `FORGE_URL`, `FORGE_REPO`, `FORGE_OPS_REPO`. Uses the calling agent's own token (saves/restores `FORGE_TOKEN` around sourcing `vault-env.sh`), so approval workflow respects individual agent identities. | dev-agent (vault actions), future vault dispatcher |
| `lib/branch-protection.sh` | Branch protection helpers for Forgejo repos. `setup_vault_branch_protection()` — configures admin-only merge protection on main (require 1 approval, restrict merge to admin role, block direct pushes). `setup_profile_branch_protection()` — same protection for `.profile` repos. `verify_branch_protection()` — checks protection is correctly configured. `remove_branch_protection()` — removes protection (cleanup/testing). Handles race condition after initial push: retries with backoff if Forgejo hasn't processed the branch yet. Requires `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OPS_REPO`. | bin/disinto (hire-an-agent) |
@ -30,11 +30,9 @@ sourced as needed.
| `lib/git-creds.sh` | Shared git credential helper configuration. `configure_git_creds([HOME_DIR] [RUN_AS_CMD])` — writes a static credential helper script and configures git globally to use password-based HTTP auth (Forgejo 11.x rejects API tokens for `git push`, #361). **Retry on cold boot (#741)**: resolves bot username from `FORGE_TOKEN` with 5 retries (exponential backoff 1-5s); fails loudly and returns 1 if Forgejo is unreachable — never falls back to a wrong hardcoded default (exports `BOT_USER` on success). `repair_baked_cred_urls([--as RUN_AS_CMD] DIR ...)` — rewrites any git remote URLs that have credentials baked in to use clean URLs instead; uses `safe.directory` bypass for root-owned repos (#671). Requires `FORGE_PASS`, `FORGE_URL`, `FORGE_TOKEN`. | entrypoints (agents, edge) |
| `lib/ops-setup.sh` | `setup_ops_repo()` — creates ops repo on Forgejo if it doesn't exist, configures bot collaborators, clones/initializes ops repo locally, seeds directory structure (vault, knowledge, evidence, sprints). Evidence subdirectories seeded: engagement/, red-team/, holdout/, evolution/, user-test/. Also seeds sprints/ for architect output. Exports `_ACTUAL_OPS_SLUG`. `migrate_ops_repo(ops_root, [primary_branch])` — idempotent migration helper that seeds missing directories and .gitkeep files on existing ops repos (pre-#407 deployments). | bin/disinto (init) |
| `lib/ci-setup.sh` | `_install_cron_impl()` — installs crontab entries for bare-metal deployments (compose mode uses polling loop instead). `_create_forgejo_oauth_app()` — generic helper to create an OAuth2 app on Forgejo (shared by Woodpecker and chat). `_create_woodpecker_oauth_impl()` — creates Woodpecker OAuth2 app (thin wrapper). `_create_chat_oauth_impl()` — creates disinto-chat OAuth2 app, writes `CHAT_OAUTH_CLIENT_ID`/`CHAT_OAUTH_CLIENT_SECRET` to `.env` (#708). `_generate_woodpecker_token_impl()` — auto-generates WOODPECKER_TOKEN via OAuth2 flow. `_activate_woodpecker_repo_impl()` — activates repo in Woodpecker. All gated by `_load_ci_context()` which validates required env vars. | bin/disinto (init) |
| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (**duplicate service detection**: tracks service names during generation, aborts with `ERROR: Duplicate service name '$name' detected` on conflict; detection state is reset between calls so idempotent reinvocation is safe, #850) (uses `codeberg.org/forgejo/forgejo:11.0` tag; `CLAUDE_BIN_DIR` volume mount removed from agents/llama services — only `reproduce` and `edge` still use the host-mounted CLI (#992); adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); subdomain fallback: `EDGE_ROUTING_MODE` (default `subpath`) and per-service `EDGE_TUNNEL_FQDN_*` vars injected into edge service (#1028); chat service rate limiting removed (#1084); chat workspace dir bind-mount: `${CHAT_WORKSPACE_DIR:-./workspace}:/var/workspace` + `CHAT_WORKSPACE_DIR` env var injected so Claude can access project working tree (#1027); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) |
| `lib/backup.sh` | Factory backup creation. `backup_create <outfile.tar.gz>` — exports factory state: fetches all issues (open+closed) from the project and ops repos via Forgejo API, bundles the ops repo as a git bundle, and writes a tarball. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_REPO`, `FORGE_OPS_REPO`, `OPS_REPO_ROOT`. Sourced by `bin/disinto backup create` (#1057). | bin/disinto (backup create) |
| `lib/disinto/backup.sh` | Factory backup restore. `backup_import <infile.tar.gz>` — restores from a backup tarball: creates missing repos via Forgejo API, imports issues (idempotent — skips by number if present), unpacks ops repo git bundle. Idempotent: running twice produces same end state with no errors. Requires `FORGE_URL`, `FORGE_TOKEN`. Sourced by `bin/disinto backup import` (#1058). | bin/disinto (backup import) |
| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); cost-cap env vars `CHAT_MAX_REQUESTS_PER_HOUR`, `CHAT_MAX_REQUESTS_PER_DAY`, `CHAT_MAX_TOKENS_PER_DAY` (#711); subdomain fallback comment for `EDGE_TUNNEL_FQDN_*` vars (#713); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) |
| `lib/sprint-filer.sh` | Post-merge sub-issue filer for sprint PRs. Invoked by the `.woodpecker/ops-filer.yml` pipeline after a sprint PR merges to ops repo `main`. Parses `<!-- filer:begin --> ... <!-- filer:end -->` blocks from sprint PR bodies to extract sub-issue definitions, creates them on the project repo using `FORGE_FILER_TOKEN` (narrow-scope `filer-bot` identity with `issues:write` only), adds `in-progress` label to the parent vision issue, and handles vision lifecycle closure when all sub-issues are closed. Uses `filer_api_all()` for paginated fetches. Idempotent: uses `<!-- decomposed-from: #<vision>, sprint: <slug>, id: <id> -->` markers to skip already-filed issues. Requires `FORGE_FILER_TOKEN`, `FORGE_API`, `FORGE_API_BASE`, `FORGE_OPS_REPO`. | `.woodpecker/ops-filer.yml` (CI pipeline on ops repo) |
| `lib/hire-agent.sh` | `disinto_hire_an_agent()` — user creation, `.profile` repo setup, formula copying, branch protection, and state marker creation for hiring a new agent. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`, `PROJECT_NAME`. Extracted from `bin/disinto`. | bin/disinto (hire) |
| `lib/release.sh` | `disinto_release()` — vault TOML creation, branch setup on ops repo, PR creation, and auto-merge request for a versioned release. `_assert_release_globals()` validates required env vars. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_OPS_REPO`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. Extracted from `bin/disinto`. | bin/disinto (release) |
| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_get_or_empty(PATH)` — GET /v1/PATH; 200→raw body, 404→empty, else structured error + return 1 (used by sync scripts to distinguish "absent, create" from hard failure without tripping errexit, #881). `hvault_ensure_kv_v2(MOUNT, [LOG_PREFIX])` — idempotent KV v2 mount assertion: enables mount if absent, fails loudly if present as wrong type/version. Extracted from all `vault-seed-*.sh` scripts to eliminate dup-detector violations. Respects `DRY_RUN=1`. `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. `_hvault_seed_key(PATH, KEY, [GENERATOR])` — seed one KV key if absent; reads existing data and merges to preserve sibling keys (KV v2 replaces atomically); returns 0=created, 1=unchanged, 2=API error (#992). All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | `tools/vault-apply-policies.sh`, `tools/vault-apply-roles.sh`, `lib/init/nomad/vault-nomad-auth.sh`, `tools/vault-seed-*.sh` |
| `lib/init/nomad/` | Nomad+Vault installer scripts. `cluster-up.sh` — idempotent Step-0 orchestrator that runs all steps in order (installs packages, writes HCL, enables systemd units, unseals Vault); uses `poll_until_healthy()` helper for deduped readiness polling; `HOST_VOLUME_DIRS` array now includes `/srv/disinto/docker` (for staging file-server, S5.2, #989, #992). `install.sh` — installs pinned Nomad+Vault apt packages. `vault-init.sh` — initializes Vault (unseal keys → `/etc/vault.d/`), creates dev-persisted unseal unit. `lib-systemd.sh` — shared systemd unit helpers. `systemd-nomad.sh`, `systemd-vault.sh` — write and enable service units. `vault-nomad-auth.sh` — Step-2 script that enables Vault's JWT auth at path `jwt-nomad`, writes the JWKS/algs config pointing at Nomad's workload-identity signer, delegates role sync to `tools/vault-apply-roles.sh`, installs `/etc/nomad.d/server.hcl`, and SIGHUPs `nomad.service` if the file changed (#881). `wp-oauth-register.sh` — S3.3 script that creates the Woodpecker OAuth2 app in Forgejo and stores `forgejo_client`/`forgejo_secret` in Vault KV v2 at `kv/disinto/shared/woodpecker`; idempotent (skips if app or secrets already present); called by `bin/disinto --with woodpecker`. `deploy.sh` — S4 dependency-ordered Nomad job deploy + health-wait; takes a list of jobspec basenames, submits each to Nomad and polls until healthy before proceeding to the next; supports `--dry-run` and per-job timeout overrides via `JOB_READY_TIMEOUT_<JOBNAME>`; global default timeout `JOB_READY_TIMEOUT_SECS` is 360s (raised from 240s for chat cold-start, #1036); invoked by `bin/disinto --with <svc>` and `cluster-up.sh`; deploy order now covers staging, chat, edge (S5.5, #992). Idempotent: each step checks current state before acting. Sourced and called by `cluster-up.sh`; not sourced by agents. | `bin/disinto init --backend=nomad` |
| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_get_or_empty(PATH)` — GET /v1/PATH; 200→raw body, 404→empty, else structured error + return 1 (used by sync scripts to distinguish "absent, create" from hard failure without tripping errexit, #881). `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | `tools/vault-apply-policies.sh`, `tools/vault-apply-roles.sh`, `lib/init/nomad/vault-nomad-auth.sh` |
| `lib/init/nomad/` | Nomad+Vault installer scripts. `cluster-up.sh` — idempotent Step-0 orchestrator that runs all steps in order (installs packages, writes HCL, enables systemd units, unseals Vault); uses `poll_until_healthy()` helper for deduped readiness polling. `install.sh` — installs pinned Nomad+Vault apt packages. `vault-init.sh` — initializes Vault (unseal keys → `/etc/vault.d/`), creates dev-persisted unseal unit. `lib-systemd.sh` — shared systemd unit helpers. `systemd-nomad.sh`, `systemd-vault.sh` — write and enable service units. `vault-nomad-auth.sh` — Step-2 script that enables Vault's JWT auth at path `jwt-nomad`, writes the JWKS/algs config pointing at Nomad's workload-identity signer, delegates role sync to `tools/vault-apply-roles.sh`, installs `/etc/nomad.d/server.hcl`, and SIGHUPs `nomad.service` if the file changed (#881). Idempotent: each step checks current state before acting. Sourced and called by `cluster-up.sh`; not sourced by agents. | `bin/disinto init --backend=nomad` |
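The `load_secret` precedence above is compact enough to sketch. The following is a minimal illustration, not the actual `lib/env.sh` implementation: the `NAME=value` layout of the `/secrets/<NAME>.env` template, the age key path, and the `age -d` invocation are assumptions.

```bash
# Illustrative sketch of the documented load_secret NAME [DEFAULT] precedence (#793).
load_secret() {
  local name="$1" default="${2:-}"
  # (1) Nomad-rendered template file (assumed to contain NAME=value)
  if [ -r "/secrets/${name}.env" ]; then
    grep -m1 "^${name}=" "/secrets/${name}.env" | cut -d= -f2-
    return 0
  fi
  # (2) already injected into the environment (.env.enc / compose)
  if [ -n "${!name:-}" ]; then
    printf '%s\n' "${!name}"
    return 0
  fi
  # (3) per-key age-encrypted file, decrypted on demand and cached in the process env
  if [ -r "secrets/${name}.enc" ]; then
    local val
    val="$(age -d -i "${AGE_KEY_FILE:-$HOME/.config/sops/age/keys.txt}" "secrets/${name}.enc")" || return 1
    export "${name}=${val}"
    printf '%s\n' "$val"
    return 0
  fi
  # (4) DEFAULT or empty
  printf '%s\n' "$default"
}

GITHUB_TOKEN_VALUE="$(load_secret GITHUB_TOKEN)"   # works under compose and Nomad alike
```

The `lib/hvault.sh` row is similarly easy to exercise once `VAULT_ADDR`/`VAULT_TOKEN` are available (or the `/etc/vault.d/root.token` fallback applies). A hedged usage sketch; the KV path and key names are placeholders:

```bash
source "${FACTORY_ROOT}/lib/hvault.sh"

hvault_token_lookup >/dev/null || { echo "no usable Vault token" >&2; exit 1; }

hvault_kv_put  "disinto/shared/example" "api_key=abc123" "owner=demo"   # write KV v2 secret
hvault_kv_get  "disinto/shared/example" "api_key"                       # -> abc123
hvault_kv_list "disinto/shared"                                         # -> keys under the prefix
```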

View file

@ -52,9 +52,8 @@ claude_run_with_watchdog() {
out_file=$(mktemp) || return 1
trap 'rm -f "$out_file"' RETURN
# Start claude in new process group (setsid creates new session, $pid is PGID leader)
# All children of claude will inherit this process group
setsid "${cmd[@]}" > "$out_file" 2>>"$LOGFILE" &
# Start claude in background, capturing stdout to temp file
"${cmd[@]}" > "$out_file" 2>>"$LOGFILE" &
pid=$!
# Background watchdog: poll for final result marker
@ -85,12 +84,12 @@ claude_run_with_watchdog() {
sleep "$grace"
if kill -0 "$pid" 2>/dev/null; then
log "watchdog: claude -p idle for ${grace}s after final result; SIGTERM"
kill -TERM -- "-$pid" 2>/dev/null || true
kill -TERM "$pid" 2>/dev/null || true
# Give it a moment to clean up
sleep 5
if kill -0 "$pid" 2>/dev/null; then
log "watchdog: force kill after SIGTERM timeout"
kill -KILL -- "-$pid" 2>/dev/null || true
kill -KILL "$pid" 2>/dev/null || true
fi
fi
fi
@ -101,16 +100,16 @@ claude_run_with_watchdog() {
timeout --foreground "${CLAUDE_TIMEOUT:-7200}" tail --pid="$pid" -f /dev/null 2>/dev/null
rc=$?
# Clean up the watchdog (target process group if it spawned children)
kill -- "-$grace_pid" 2>/dev/null || true
# Clean up the watchdog
kill "$grace_pid" 2>/dev/null || true
wait "$grace_pid" 2>/dev/null || true
# When timeout fires (rc=124), explicitly kill the orphaned claude process group
# When timeout fires (rc=124), explicitly kill the orphaned claude process
# tail --pid is a passive waiter, not a supervisor
if [ "$rc" -eq 124 ]; then
kill -TERM -- "-$pid" 2>/dev/null || true
kill "$pid" 2>/dev/null || true
sleep 1
kill -KILL -- "-$pid" 2>/dev/null || true
kill -KILL "$pid" 2>/dev/null || true
fi
# Output the captured stdout
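Abstracted away from the diff above, the watchdog reduces to one reusable pattern: start the child as its own process group, wait on it passively with `timeout` plus `tail --pid`, and kill the whole group if the wait expires. A minimal sketch with a placeholder command and timeout:

```bash
# Sketch of the bounded-wait pattern only; not the real claude_run_with_watchdog.
setsid some_long_command > out.log 2>&1 &   # new session: $pid doubles as the PGID
pid=$!

# Passive wait: tail --pid exits when $pid does; timeout bounds how long we wait.
timeout --foreground 7200 tail --pid="$pid" -f /dev/null
rc=$?

if [ "$rc" -eq 124 ]; then
  # timeout fired; tail never kills anything, so terminate the whole process group
  kill -TERM -- "-$pid" 2>/dev/null || true
  sleep 1
  kill -KILL -- "-$pid" 2>/dev/null || true
fi
```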

View file

@ -1,136 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# disinto backup — export factory state for migration
#
# Usage: source this file, then call backup_create <outfile.tar.gz>
# Requires: FORGE_URL, FORGE_TOKEN, FORGE_REPO, FORGE_OPS_REPO, OPS_REPO_ROOT
# =============================================================================
set -euo pipefail
# Fetch all issues (open + closed) for a repo slug and emit the normalized JSON array.
# Usage: _backup_fetch_issues <org/repo>
_backup_fetch_issues() {
local repo_slug="$1"
local api_url="${FORGE_API_BASE}/repos/${repo_slug}"
local all_issues="[]"
for state in open closed; do
local page=1
while true; do
local page_items
page_items=$(curl -sf -X GET \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${api_url}/issues?state=${state}&type=issues&limit=50&page=${page}") || {
echo "ERROR: failed to fetch ${state} issues from ${repo_slug} (page ${page})" >&2
return 1
}
local count
count=$(printf '%s' "$page_items" | jq 'length' 2>/dev/null) || count=0
[ -z "$count" ] && count=0
[ "$count" -eq 0 ] && break
all_issues=$(printf '%s\n%s' "$all_issues" "$page_items" | jq -s 'add')
[ "$count" -lt 50 ] && break
page=$((page + 1))
done
done
# Normalize to the schema: number, title, body, labels, state
printf '%s' "$all_issues" | jq '[.[] | {
number: .number,
title: .title,
body: .body,
labels: [.labels[]?.name],
state: .state
}] | sort_by(.number)'
}
# Create a backup tarball of factory state.
# Usage: backup_create <outfile.tar.gz>
backup_create() {
local outfile="${1:-}"
if [ -z "$outfile" ]; then
echo "Error: output file required" >&2
echo "Usage: disinto backup create <outfile.tar.gz>" >&2
return 1
fi
# Resolve to absolute path before cd-ing into tmpdir
case "$outfile" in
/*) ;;
*) outfile="$(pwd)/${outfile}" ;;
esac
# Validate required env
: "${FORGE_URL:?FORGE_URL must be set}"
: "${FORGE_TOKEN:?FORGE_TOKEN must be set}"
: "${FORGE_REPO:?FORGE_REPO must be set}"
local forge_ops_repo="${FORGE_OPS_REPO:-${FORGE_REPO}-ops}"
local ops_repo_root="${OPS_REPO_ROOT:-}"
if [ -z "$ops_repo_root" ] || [ ! -d "$ops_repo_root/.git" ]; then
echo "Error: OPS_REPO_ROOT (${ops_repo_root:-<unset>}) is not a valid git repo" >&2
return 1
fi
local tmpdir
tmpdir=$(mktemp -d)
trap 'rm -rf "$tmpdir"' EXIT
local project_name="${FORGE_REPO##*/}"
echo "=== disinto backup create ==="
echo "Forge: ${FORGE_URL}"
echo "Repos: ${FORGE_REPO}, ${forge_ops_repo}"
# ── 1. Export issues ──────────────────────────────────────────────────────
mkdir -p "${tmpdir}/issues"
echo "Fetching issues for ${FORGE_REPO}..."
_backup_fetch_issues "$FORGE_REPO" > "${tmpdir}/issues/${project_name}.json"
local main_count
main_count=$(jq 'length' "${tmpdir}/issues/${project_name}.json")
echo " ${main_count} issues exported"
echo "Fetching issues for ${forge_ops_repo}..."
_backup_fetch_issues "$forge_ops_repo" > "${tmpdir}/issues/${project_name}-ops.json"
local ops_count
ops_count=$(jq 'length' "${tmpdir}/issues/${project_name}-ops.json")
echo " ${ops_count} issues exported"
# ── 2. Git bundle of ops repo ────────────────────────────────────────────
mkdir -p "${tmpdir}/repos"
echo "Creating git bundle for ${forge_ops_repo}..."
git -C "$ops_repo_root" bundle create "${tmpdir}/repos/${project_name}-ops.bundle" --all 2>&1
echo " bundle created ($(du -h "${tmpdir}/repos/${project_name}-ops.bundle" | cut -f1))"
# ── 3. Metadata ──────────────────────────────────────────────────────────
local created_at
created_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
jq -n \
--arg created_at "$created_at" \
--arg source_host "$(hostname)" \
--argjson schema_version 1 \
--arg forgejo_url "$FORGE_URL" \
'{
created_at: $created_at,
source_host: $source_host,
schema_version: $schema_version,
forgejo_url: $forgejo_url
}' > "${tmpdir}/metadata.json"
# ── 4. Pack tarball ──────────────────────────────────────────────────────
echo "Creating tarball: ${outfile}"
tar -czf "$outfile" -C "$tmpdir" metadata.json issues repos
local size
size=$(du -h "$outfile" | cut -f1)
echo "=== Backup complete: ${outfile} (${size}) ==="
# Clean up before returning — the EXIT trap references the local $tmpdir
# which goes out of scope after return, causing 'unbound variable' under set -u.
trap - EXIT
rm -rf "$tmpdir"
}
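The closing comment in `backup_create` above points at a real bash footgun: a single-quoted EXIT trap expands its variables when the trap fires, not when it is installed, so a `local` temp dir is gone by then. A tiny illustration of the failure mode it avoids (names are illustrative only):

```bash
#!/usr/bin/env bash
set -euo pipefail

demo() {
  local tmpdir
  tmpdir=$(mktemp -d)
  trap 'rm -rf "$tmpdir"' EXIT   # "$tmpdir" is expanded only when the trap runs
}
demo
# At script exit the trap fires, but the local tmpdir has gone out of scope:
# under set -u this aborts with "tmpdir: unbound variable".
```

Clearing the trap and removing the directory explicitly before returning, as the function above does, sidesteps the problem.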

View file

@ -247,31 +247,6 @@ ci_promote() {
echo "$new_num"
}
# ci_get_step_logs <pipeline_num> <step_id>
# Fetches logs for a single CI step via the Woodpecker API.
# Requires: WOODPECKER_REPO_ID, woodpecker_api() (from env.sh)
# Returns: 0 on success, 1 on failure. Outputs log text to stdout.
#
# Usage:
# ci_get_step_logs 1423 5 # Get logs for step ID 5 in pipeline 1423
ci_get_step_logs() {
local pipeline_num="$1" step_id="$2"
if [ -z "$pipeline_num" ] || [ -z "$step_id" ]; then
echo "Usage: ci_get_step_logs <pipeline_num> <step_id>" >&2
return 1
fi
if [ -z "${WOODPECKER_REPO_ID:-}" ] || [ "${WOODPECKER_REPO_ID}" = "0" ]; then
echo "ERROR: WOODPECKER_REPO_ID not set or zero" >&2
return 1
fi
woodpecker_api "/repos/${WOODPECKER_REPO_ID}/logs/${pipeline_num}/${step_id}" \
--max-time 15 2>/dev/null \
| jq -r '.[].data // empty' 2>/dev/null
}
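# Editorial sketch (not part of ci-helpers.sh): how a caller such as pr_poll_ci (#1051)
# might combine pipeline metadata with ci_get_step_logs to build per-step diagnostics.
# The JSON shape (.workflows[].children[]) is an assumption about the Woodpecker API,
# not something this file documents.
#
#   pipeline_json="$(woodpecker_api "/repos/${WOODPECKER_REPO_ID}/pipelines/${pipeline_num}")"
#   while read -r step_id step_name; do
#     echo "── failed step: ${step_name} (id ${step_id}) ──"
#     ci_get_step_logs "$pipeline_num" "$step_id" | tail -n 50
#   done < <(printf '%s' "$pipeline_json" \
#     | jq -r '.workflows[].children[] | select(.state == "failure") | "\(.id) \(.name)"')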
# ci_get_logs <pipeline_number> [--step <step_name>]
# Reads CI logs from the Woodpecker SQLite database.
# Requires: WOODPECKER_DATA_DIR env var or mounted volume at /woodpecker-data

View file

@ -142,7 +142,6 @@ _create_forgejo_oauth_app() {
# Set up Woodpecker CI to use Forgejo as its forge backend.
# Creates an OAuth2 app on Forgejo for Woodpecker, activates the repo.
# Respects EDGE_ROUTING_MODE: in subdomain mode, uses EDGE_TUNNEL_FQDN_CI for redirect URI.
# Usage: create_woodpecker_oauth <forge_url> <repo_slug>
_create_woodpecker_oauth_impl() {
local forge_url="$1"
@ -151,13 +150,7 @@ _create_woodpecker_oauth_impl() {
echo ""
echo "── Woodpecker OAuth2 setup ────────────────────────────"
local wp_redirect_uri="http://localhost:8000/authorize"
local routing_mode="${EDGE_ROUTING_MODE:-subpath}"
if [ "$routing_mode" = "subdomain" ] && [ -n "${EDGE_TUNNEL_FQDN_CI:-}" ]; then
wp_redirect_uri="https://${EDGE_TUNNEL_FQDN_CI}/authorize"
fi
_create_forgejo_oauth_app "woodpecker-ci" "$wp_redirect_uri" || return 0
_create_forgejo_oauth_app "woodpecker-ci" "http://localhost:8000/authorize" || return 0
local client_id="${_OAUTH_CLIENT_ID}"
local client_secret="${_OAUTH_CLIENT_SECRET}"
@ -165,15 +158,10 @@ _create_woodpecker_oauth_impl() {
# WP_FORGEJO_CLIENT/SECRET match the docker-compose.yml variable references
# WOODPECKER_HOST must be host-accessible URL to match OAuth2 redirect_uri
local env_file="${FACTORY_ROOT}/.env"
local wp_host="http://localhost:8000"
if [ "$routing_mode" = "subdomain" ] && [ -n "${EDGE_TUNNEL_FQDN_CI:-}" ]; then
wp_host="https://${EDGE_TUNNEL_FQDN_CI}"
fi
local wp_vars=(
"WOODPECKER_FORGEJO=true"
"WOODPECKER_FORGEJO_URL=${forge_url}"
"WOODPECKER_HOST=${wp_host}"
"WOODPECKER_HOST=http://localhost:8000"
)
if [ -n "${client_id:-}" ]; then
wp_vars+=("WP_FORGEJO_CLIENT=${client_id}")

View file

@ -1,391 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# backup.sh — backup/restore utilities for disinto factory state
#
# Subcommands:
# create <outfile.tar.gz> Create backup of factory state
# import <infile.tar.gz> Restore factory state from backup
#
# Usage:
# source "${FACTORY_ROOT}/lib/disinto/backup.sh"
# backup_import <tarball>
#
# Environment:
# FORGE_URL - Forgejo instance URL (target)
# FORGE_TOKEN - Admin token for target Forgejo
#
# Idempotency:
# - Repos: created via API if missing
# - Issues: check if exists by number, skip if present
# - Runs twice = same end state, no errors
# =============================================================================
set -euo pipefail
# ── Helper: log with timestamp ───────────────────────────────────────────────
backup_log() {
local msg="$1"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $msg"
}
# ── Helper: create repo if it doesn't exist ─────────────────────────────────
# Usage: backup_create_repo_if_missing <slug>
# Returns: 0 if repo exists or was created, 1 on error
backup_create_repo_if_missing() {
local slug="$1"
local org_name="${slug%%/*}"
local repo_name="${slug##*/}"
# Check if repo exists
if curl -sf --max-time 5 \
-H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_URL}/api/v1/repos/${slug}" >/dev/null 2>&1; then
backup_log "Repo ${slug} already exists"
return 0
fi
backup_log "Creating repo ${slug}..."
# Create org if needed
curl -sf -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/orgs" \
-d "{\"username\":\"${org_name}\",\"visibility\":\"public\"}" >/dev/null 2>&1 || true
# Create repo
local response
response=$(curl -sf -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/orgs/${org_name}/repos" \
-d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" 2>/dev/null) \
|| response=""
if [ -n "$response" ] && echo "$response" | grep -q '"id":\|[0-9]'; then
backup_log "Created repo ${slug}"
BACKUP_CREATED_REPOS=$((BACKUP_CREATED_REPOS + 1))
return 0
fi
# Fallback: admin endpoint
response=$(curl -sf -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users/${org_name}/repos" \
-d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" 2>/dev/null) \
|| response=""
if [ -n "$response" ] && echo "$response" | grep -q '"id":\|[0-9]'; then
backup_log "Created repo ${slug} (via admin API)"
BACKUP_CREATED_REPOS=$((BACKUP_CREATED_REPOS + 1))
return 0
fi
backup_log "ERROR: failed to create repo ${slug}" >&2
return 1
}
# ── Helper: check if issue exists by number ──────────────────────────────────
# Usage: backup_issue_exists <slug> <issue_number>
# Returns: 0 if exists, 1 if not
backup_issue_exists() {
local slug="$1"
local issue_num="$2"
curl -sf --max-time 5 \
-H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_URL}/api/v1/repos/${slug}/issues/${issue_num}" >/dev/null 2>&1
}
# ── Helper: create issue with specific number (if Forgejo supports it) ───────
# Note: Forgejo API auto-assigns next integer; we accept renumbering and log mapping
# Usage: backup_create_issue <slug> <original_number> <title> <body> [labels...]
# Returns: new_issue_number on success, 0 on failure
backup_create_issue() {
local slug="$1"
local original_num="$2"
local title="$3"
local body="$4"
shift 4
# Build labels array
local -a labels=()
for label in "$@"; do
# Resolve label name to ID
local label_id
label_id=$(curl -sf --max-time 5 \
-H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_URL}/api/v1/repos/${slug}/labels" 2>/dev/null \
| jq -r ".[] | select(.name == \"${label}\") | .id" 2>/dev/null) || label_id=""
if [ -n "$label_id" ] && [ "$label_id" != "null" ]; then
labels+=("$label_id")
fi
done
# Build payload
local payload
if [ ${#labels[@]} -gt 0 ]; then
payload=$(jq -n \
--arg title "$title" \
--arg body "$body" \
--argjson labels "$(printf '%s\n' "${labels[@]}" | jq -R . | jq -s .)" \
'{title: $title, body: $body, labels: $labels}')
else
payload=$(jq -n --arg title "$title" --arg body "$body" '{title: $title, body: $body, labels: []}')
fi
local response
response=$(curl -sf -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/repos/${slug}/issues" \
-d "$payload" 2>/dev/null) || {
backup_log "ERROR: failed to create issue '${title}'" >&2
return 1
}
local new_num
new_num=$(printf '%s' "$response" | jq -r '.number // empty')
# Log the mapping
echo "${original_num}:${new_num}" >> "${BACKUP_MAPPING_FILE}"
backup_log "Created issue '${title}' as #${new_num} (original: #${original_num})"
echo "$new_num"
}
# ── Step 1: Unpack tarball to temp dir ───────────────────────────────────────
# Usage: backup_unpack_tarball <tarball>
# Returns: temp dir path via BACKUP_TEMP_DIR
backup_unpack_tarball() {
local tarball="$1"
if [ ! -f "$tarball" ]; then
backup_log "ERROR: tarball not found: ${tarball}" >&2
return 1
fi
BACKUP_TEMP_DIR=$(mktemp -d -t disinto-backup.XXXXXX)
backup_log "Unpacking ${tarball} to ${BACKUP_TEMP_DIR}"
if ! tar -xzf "$tarball" -C "$BACKUP_TEMP_DIR"; then
backup_log "ERROR: failed to unpack tarball" >&2
rm -rf "$BACKUP_TEMP_DIR"
return 1
fi
# Verify expected structure
if [ ! -d "${BACKUP_TEMP_DIR}/repos" ]; then
backup_log "ERROR: tarball missing 'repos/' directory" >&2
rm -rf "$BACKUP_TEMP_DIR"
return 1
fi
backup_log "Tarball unpacked successfully"
}
# ── Step 2: disinto repo — create via Forgejo API, trigger sync (manual) ─────
# Usage: backup_import_disinto_repo
# Returns: 0 on success, 1 on failure
backup_import_disinto_repo() {
backup_log "Step 2: Configuring disinto repo..."
# Create disinto repo if missing
backup_create_repo_if_missing "disinto-admin/disinto"
# Note: Manual mirror configuration recommended (avoids SSH deploy-key handling)
backup_log "Note: Configure Codeberg → Forgejo pull mirror manually"
backup_log " Run on Forgejo admin panel: Repository Settings → Repository Mirroring"
backup_log " Source: ssh://git@codeberg.org/johba/disinto.git"
backup_log " Mirror: disinto-admin/disinto"
backup_log " Or use: git clone --mirror ssh://git@codeberg.org/johba/disinto.git"
backup_log " cd disinto.git && git push --mirror ${FORGE_URL}/disinto-admin/disinto.git"
return 0
}
# ── Step 3: disinto-ops repo — create empty, push from bundle ────────────────
# Usage: backup_import_disinto_ops_repo
# Returns: 0 on success, 1 on failure
backup_import_disinto_ops_repo() {
backup_log "Step 3: Configuring disinto-ops repo from bundle..."
local bundle_path="${BACKUP_TEMP_DIR}/repos/disinto-ops.bundle"
if [ ! -f "$bundle_path" ]; then
backup_log "WARNING: Bundle not found at ${bundle_path}, skipping"
return 0
fi
# Create ops repo if missing
backup_create_repo_if_missing "disinto-admin/disinto-ops"
# Clone bundle and push to Forgejo
local clone_dir
clone_dir=$(mktemp -d -t disinto-ops-clone.XXXXXX)
backup_log "Cloning bundle to ${clone_dir}"
if ! git clone --bare "$bundle_path" "$clone_dir/disinto-ops.git"; then
backup_log "ERROR: failed to clone bundle"
rm -rf "$clone_dir"
return 1
fi
# Push all refs to Forgejo
backup_log "Pushing refs to Forgejo..."
if ! cd "$clone_dir/disinto-ops.git" && \
git push --mirror "${FORGE_URL}/disinto-admin/disinto-ops.git" 2>&1; then
backup_log "ERROR: failed to push refs"
rm -rf "$clone_dir"
return 1
fi
local ref_count
ref_count=$(cd "$clone_dir/disinto-ops.git" && git show-ref | wc -l)
BACKUP_PUSHED_REFS=$((BACKUP_PUSHED_REFS + ref_count))
backup_log "Pushed ${ref_count} refs to disinto-ops"
rm -rf "$clone_dir"
return 0
}
# ── Step 4: Import issues from backup ────────────────────────────────────────
# Usage: backup_import_issues <slug> <issues_file>
# issues_file is a JSON array of issues (per create schema)
# Returns: 0 on success
backup_import_issues() {
local slug="$1"
local issues_file="$2"
if [ ! -f "$issues_file" ]; then
backup_log "No issues file found, skipping"
return 0
fi
local count
count=$(jq 'length' "$issues_file")
backup_log "Importing ${count} issues from ${issues_file}"
local created=0
local skipped=0
for i in $(seq 0 $((count - 1))); do
local issue_num title body
issue_num=$(jq -r ".[${i}].number" "$issues_file")
title=$(jq -r ".[${i}].title" "$issues_file")
body=$(jq -r ".[${i}].body" "$issues_file")
if [ -z "$issue_num" ] || [ "$issue_num" = "null" ]; then
backup_log "WARNING: skipping issue without number at index ${i}"
continue
fi
# Check if issue already exists
if backup_issue_exists "$slug" "$issue_num"; then
backup_log "Issue #${issue_num} already exists, skipping"
skipped=$((skipped + 1))
continue
fi
# Extract labels
local -a labels=()
while IFS= read -r label; do
[ -n "$label" ] && labels+=("$label")
done < <(jq -r ".[${i}].labels[]? // empty" "$issues_file")
# Create issue
local new_num
if new_num=$(backup_create_issue "$slug" "$issue_num" "$title" "$body" "${labels[@]}"); then
created=$((created + 1))
fi
done
BACKUP_CREATED_ISSUES=$((BACKUP_CREATED_ISSUES + created))
BACKUP_SKIPPED_ISSUES=$((BACKUP_SKIPPED_ISSUES + skipped))
backup_log "Created ${created} issues, skipped ${skipped}"
}
# ── Main: import subcommand ──────────────────────────────────────────────────
# Usage: backup_import <tarball>
backup_import() {
local tarball="$1"
# Validate required environment
[ -n "${FORGE_URL:-}" ] || { echo "Error: FORGE_URL not set" >&2; exit 1; }
[ -n "${FORGE_TOKEN:-}" ] || { echo "Error: FORGE_TOKEN not set" >&2; exit 1; }
backup_log "=== Backup Import Started ==="
backup_log "Target: ${FORGE_URL}"
backup_log "Tarball: ${tarball}"
# Initialize counters
BACKUP_CREATED_REPOS=0
BACKUP_PUSHED_REFS=0
BACKUP_CREATED_ISSUES=0
BACKUP_SKIPPED_ISSUES=0
# Create temp dir for mapping file
BACKUP_MAPPING_FILE=$(mktemp -t disinto-mapping.XXXXXX.json)
echo '{"mappings": []}' > "$BACKUP_MAPPING_FILE"
# Step 1: Unpack tarball
if ! backup_unpack_tarball "$tarball"; then
exit 1
fi
# Step 2: disinto repo
if ! backup_import_disinto_repo; then
exit 1
fi
# Step 3: disinto-ops repo
if ! backup_import_disinto_ops_repo; then
exit 1
fi
# Step 4: Import issues — iterate issues/<slug>.json files, each is a JSON array
for issues_file in "${BACKUP_TEMP_DIR}/issues"/*.json; do
[ -f "$issues_file" ] || continue
local slug_filename
slug_filename=$(basename "$issues_file" .json)
# Map slug-filename → forgejo-slug: "disinto" → "disinto-admin/disinto",
# "disinto-ops" → "disinto-admin/disinto-ops"
local slug
case "$slug_filename" in
"disinto") slug="${FORGE_REPO}" ;;
"disinto-ops") slug="${FORGE_OPS_REPO}" ;;
*) slug="disinto-admin/${slug_filename}" ;;
esac
backup_log "Processing issues from ${slug_filename}.json (${slug})"
backup_import_issues "$slug" "$issues_file"
done
# Summary
backup_log "=== Backup Import Complete ==="
backup_log "Created ${BACKUP_CREATED_REPOS} repos"
backup_log "Pushed ${BACKUP_PUSHED_REFS} refs"
backup_log "Imported ${BACKUP_CREATED_ISSUES} issues"
backup_log "Skipped ${BACKUP_SKIPPED_ISSUES} (already present)"
backup_log "Issue mapping saved to: ${BACKUP_MAPPING_FILE}"
# Cleanup
rm -rf "$BACKUP_TEMP_DIR"
exit 0
}
# ── Entry point: if sourced, don't run; if executed directly, run import ────
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
if [ $# -lt 1 ]; then
echo "Usage: $0 <tarball>" >&2
exit 1
fi
backup_import "$1"
fi
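A short usage sketch for the import path above; the tarball path and token are placeholders, and both invocation styles follow the script's own header and entry point:

```bash
# Restore into a target Forgejo (idempotent: re-running skips issues that already exist).
export FORGE_URL=http://localhost:3000
export FORGE_TOKEN="<admin token for the target Forgejo>"
export FORGE_REPO=disinto-admin/disinto
export FORGE_OPS_REPO=disinto-admin/disinto-ops

source "${FACTORY_ROOT}/lib/disinto/backup.sh"
backup_import /tmp/factory-backup.tar.gz

# The script can also be executed directly with the tarball as its only argument:
#   bash "${FACTORY_ROOT}/lib/disinto/backup.sh" /tmp/factory-backup.tar.gz
```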

View file

@ -26,28 +26,6 @@ PROJECT_NAME="${PROJECT_NAME:-project}"
# PRIMARY_BRANCH defaults to main (env.sh may have set it to 'master')
PRIMARY_BRANCH="${PRIMARY_BRANCH:-main}"
# Track service names for duplicate detection
declare -A _seen_services
declare -A _service_sources
# Record a service name and its source; return 0 if unique, 1 if duplicate
_record_service() {
local service_name="$1"
local source="$2"
if [ -n "${_seen_services[$service_name]:-}" ]; then
local original_source="${_service_sources[$service_name]}"
echo "ERROR: Duplicate service name '$service_name' detected —" >&2
echo " '$service_name' emitted twice — from $original_source and from $source" >&2
echo " Remove one of the conflicting activations to proceed." >&2
return 1
fi
_seen_services[$service_name]=1
_service_sources[$service_name]="$source"
return 0
}
# Helper: extract woodpecker_repo_id from a project TOML file
# Returns empty string if not found or file doesn't exist
_get_woodpecker_repo_id() {
@ -119,16 +97,6 @@ _generate_local_model_services() {
POLL_INTERVAL) poll_interval_val="$value" ;;
---)
if [ -n "$service_name" ] && [ -n "$base_url" ]; then
# Record service for duplicate detection using the full service name
local full_service_name="agents-${service_name}"
local toml_basename
toml_basename=$(basename "$toml")
if ! _record_service "$full_service_name" "[agents.$service_name] in projects/$toml_basename"; then
# Duplicate detected — clean up and abort
rm -f "$temp_file"
return 1
fi
# Per-agent FORGE_TOKEN / FORGE_PASS lookup (#834 Gap 3).
# Two hired llama agents must not share the same Forgejo identity,
# so we key the env-var lookup by forge_user (which hire-agent.sh
@ -169,6 +137,7 @@ _generate_local_model_services() {
- project-repos-${service_name}:/home/agent/repos
- \${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:\${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
- \${CLAUDE_CONFIG_FILE:-\${HOME}/.claude.json}:/home/agent/.claude.json:ro
- \${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
- \${AGENT_SSH_DIR:-\${HOME}/.ssh}:/home/agent/.ssh:ro
- ./projects:/home/agent/disinto/projects:ro
- ./.env:/home/agent/disinto/.env:ro
@ -313,20 +282,6 @@ _generate_compose_impl() {
return 0
fi
# Reset duplicate detection state for fresh run
_seen_services=()
_service_sources=()
# Initialize duplicate detection with base services defined in the template
_record_service "forgejo" "base compose template" || return 1
_record_service "woodpecker" "base compose template" || return 1
_record_service "woodpecker-agent" "base compose template" || return 1
_record_service "agents" "base compose template" || return 1
_record_service "runner" "base compose template" || return 1
_record_service "edge" "base compose template" || return 1
_record_service "staging" "base compose template" || return 1
_record_service "staging-deploy" "base compose template" || return 1
# Extract primary woodpecker_repo_id from project TOML files
local wp_repo_id
wp_repo_id=$(_get_primary_woodpecker_repo_id)
@ -404,9 +359,6 @@ services:
WOODPECKER_SERVER: localhost:9000
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
WOODPECKER_GRPC_SECURE: "false"
WOODPECKER_GRPC_KEEPALIVE_TIME: "10s"
WOODPECKER_GRPC_KEEPALIVE_TIMEOUT: "20s"
WOODPECKER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS: "true"
WOODPECKER_HEALTHCHECK_ADDR: ":3333"
WOODPECKER_BACKEND_DOCKER_NETWORK: ${WOODPECKER_CI_NETWORK:-disinto_disinto-net}
WOODPECKER_MAX_WORKFLOWS: 1
@ -430,6 +382,7 @@ services:
- project-repos:/home/agent/repos
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
- woodpecker-data:/woodpecker-data:ro
@ -485,76 +438,6 @@ services:
COMPOSEEOF
# ── Conditional agents-llama block (ENABLE_LLAMA_AGENT=1) ──────────────
# This legacy flag was removed in #846 but kept for duplicate detection testing
if [ "${ENABLE_LLAMA_AGENT:-0}" = "1" ]; then
if ! _record_service "agents-llama" "ENABLE_LLAMA_AGENT=1"; then
return 1
fi
cat >> "$compose_file" <<'COMPOSEEOF'
agents-llama:
image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}
container_name: disinto-agents-llama
restart: unless-stopped
security_opt:
- apparmor=unconfined
volumes:
- agent-data:/home/agent/data
- project-repos:/home/agent/repos
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
- woodpecker-data:/woodpecker-data:ro
- ./projects:/home/agent/disinto/projects:ro
- ./.env:/home/agent/disinto/.env:ro
- ./state:/home/agent/disinto/state
environment:
FORGE_URL: http://forgejo:3000
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
FORGE_TOKEN: ${FORGE_TOKEN:-}
FORGE_REVIEW_TOKEN: ${FORGE_REVIEW_TOKEN:-}
FORGE_PLANNER_TOKEN: ${FORGE_PLANNER_TOKEN:-}
FORGE_GARDENER_TOKEN: ${FORGE_GARDENER_TOKEN:-}
FORGE_VAULT_TOKEN: ${FORGE_VAULT_TOKEN:-}
FORGE_SUPERVISOR_TOKEN: ${FORGE_SUPERVISOR_TOKEN:-}
FORGE_PREDICTOR_TOKEN: ${FORGE_PREDICTOR_TOKEN:-}
FORGE_ARCHITECT_TOKEN: ${FORGE_ARCHITECT_TOKEN:-}
FORGE_BOT_USERNAMES: ${FORGE_BOT_USERNAMES:-}
WOODPECKER_TOKEN: ${WOODPECKER_TOKEN:-}
CLAUDE_TIMEOUT: ${CLAUDE_TIMEOUT:-7200}
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: ${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1}
ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-}
FORGE_PASS: ${FORGE_PASS:-}
FORGE_ADMIN_PASS: ${FORGE_ADMIN_PASS:-}
FACTORY_REPO: ${FORGE_REPO:-disinto-admin/disinto}
DISINTO_CONTAINER: "1"
PROJECT_NAME: ${PROJECT_NAME:-project}
PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
WOODPECKER_DATA_DIR: /woodpecker-data
WOODPECKER_REPO_ID: "PLACEHOLDER_WP_REPO_ID"
CLAUDE_CONFIG_DIR: ${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
POLL_INTERVAL: ${POLL_INTERVAL:-300}
GARDENER_INTERVAL: ${GARDENER_INTERVAL:-21600}
ARCHITECT_INTERVAL: ${ARCHITECT_INTERVAL:-21600}
PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200}
healthcheck:
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
interval: 60s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
forgejo:
condition: service_healthy
woodpecker:
condition: service_started
networks:
- disinto-net
COMPOSEEOF
fi
# Resume the rest of the compose file (runner onward)
cat >> "$compose_file" <<'COMPOSEEOF'
@ -606,21 +489,11 @@ COMPOSEEOF
- EDGE_TUNNEL_USER=${EDGE_TUNNEL_USER:-tunnel}
- EDGE_TUNNEL_PORT=${EDGE_TUNNEL_PORT:-}
- EDGE_TUNNEL_FQDN=${EDGE_TUNNEL_FQDN:-}
# Subdomain fallback (#1028): per-service FQDNs for subdomain routing mode.
# Set EDGE_ROUTING_MODE=subdomain to activate. See docs/edge-routing-fallback.md.
- EDGE_ROUTING_MODE=${EDGE_ROUTING_MODE:-subpath}
- EDGE_TUNNEL_FQDN_FORGE=${EDGE_TUNNEL_FQDN_FORGE:-}
- EDGE_TUNNEL_FQDN_CI=${EDGE_TUNNEL_FQDN_CI:-}
- EDGE_TUNNEL_FQDN_CHAT=${EDGE_TUNNEL_FQDN_CHAT:-}
# Subdomain fallback (#713): if subpath routing (#704/#708) fails, add:
# EDGE_TUNNEL_FQDN_FORGE, EDGE_TUNNEL_FQDN_CI, EDGE_TUNNEL_FQDN_CHAT
# See docs/edge-routing-fallback.md for the full pivot plan.
# Shared secret for Caddy ↔ chat forward_auth (#709)
- FORWARD_AUTH_SECRET=${FORWARD_AUTH_SECRET:-}
# Chat env vars (merged from chat container into edge, #1083)
- CHAT_HOST=127.0.0.1
- CHAT_PORT=8080
- CHAT_OAUTH_CLIENT_ID=${CHAT_OAUTH_CLIENT_ID:-}
- CHAT_OAUTH_CLIENT_SECRET=${CHAT_OAUTH_CLIENT_SECRET:-}
- DISINTO_CHAT_ALLOWED_USERS=${DISINTO_CHAT_ALLOWED_USERS:-}
# Rate limiting removed (#1084)
volumes:
- ./docker/Caddyfile:/etc/caddy/Caddyfile
- caddy_data:/data
@ -628,8 +501,6 @@ COMPOSEEOF
- ./secrets/tunnel_key:/run/secrets/tunnel_key:ro
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
# Chat history persistence (merged from chat container, #1083)
- ${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history
healthcheck:
test: ["CMD", "curl", "-fsS", "http://localhost:2019/config/"]
interval: 30s
@ -681,7 +552,6 @@ COMPOSEEOF
# Chat container — Claude chat UI backend (#705)
# Internal service only; edge proxy routes to chat:8080
# Sandbox hardened per #706 — no docker.sock, read-only rootfs, minimal caps
# Rate limiting removed (#1084)
chat:
build:
context: ./docker/chat
@ -705,9 +575,6 @@ COMPOSEEOF
- chat-config:/var/chat/config
# Chat history persistence: per-user NDJSON files on bind-mounted host volume
- ${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history
# Workspace directory: bind-mounted project working tree for Claude access (#1027)
# Mounted when CHAT_WORKSPACE_DIR is set (defaults to ./workspace)
- ${CHAT_WORKSPACE_DIR:-./workspace}:/var/workspace
environment:
CHAT_HOST: "0.0.0.0"
CHAT_PORT: "8080"
@ -715,14 +582,13 @@ COMPOSEEOF
CHAT_OAUTH_CLIENT_ID: ${CHAT_OAUTH_CLIENT_ID:-}
CHAT_OAUTH_CLIENT_SECRET: ${CHAT_OAUTH_CLIENT_SECRET:-}
EDGE_TUNNEL_FQDN: ${EDGE_TUNNEL_FQDN:-}
EDGE_TUNNEL_FQDN_CHAT: ${EDGE_TUNNEL_FQDN_CHAT:-}
EDGE_ROUTING_MODE: ${EDGE_ROUTING_MODE:-subpath}
DISINTO_CHAT_ALLOWED_USERS: ${DISINTO_CHAT_ALLOWED_USERS:-}
# Shared secret for Caddy forward_auth verify endpoint (#709)
FORWARD_AUTH_SECRET: ${FORWARD_AUTH_SECRET:-}
# Rate limiting removed (#1084)
# Workspace directory for Claude code access (#1027)
CHAT_WORKSPACE_DIR: ${CHAT_WORKSPACE_DIR:-./workspace}
# Cost caps / rate limiting (#711)
CHAT_MAX_REQUESTS_PER_HOUR: ${CHAT_MAX_REQUESTS_PER_HOUR:-60}
CHAT_MAX_REQUESTS_PER_DAY: ${CHAT_MAX_REQUESTS_PER_DAY:-500}
CHAT_MAX_TOKENS_PER_DAY: ${CHAT_MAX_TOKENS_PER_DAY:-1000000}
healthcheck:
test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"]
interval: 30s
@ -738,6 +604,7 @@ volumes:
agent-data:
project-repos:
caddy_data:
chat-config:
networks:
disinto-net:
@ -766,19 +633,16 @@ COMPOSEEOF
fi
# Append local-model agent services if any are configured
if ! _generate_local_model_services "$compose_file"; then
echo "ERROR: Failed to generate local-model agent services. See errors above." >&2
return 1
fi
_generate_local_model_services "$compose_file"
# Resolve the Claude CLI binary path and persist as CLAUDE_BIN_DIR in .env.
# Only used by reproduce and edge services which still use host-mounted CLI.
# docker-compose.yml references ${CLAUDE_BIN_DIR} so the value must be set.
local claude_bin
claude_bin="$(command -v claude 2>/dev/null || true)"
if [ -n "$claude_bin" ]; then
claude_bin="$(readlink -f "$claude_bin")"
else
echo "Warning: claude CLI not found in PATH — reproduce/edge services will fail to start" >&2
echo "Warning: claude CLI not found in PATH — set CLAUDE_BIN_DIR in .env manually" >&2
claude_bin="/usr/local/bin/claude"
fi
# Persist CLAUDE_BIN_DIR into .env so docker-compose can resolve it.
@ -795,8 +659,9 @@ COMPOSEEOF
# In build mode, replace image: with build: for locally-built images
if [ "$use_build" = true ]; then
sed -i 's|^\( agents:\)|\1|' "$compose_file"
sed -i '/^ image: ghcr\.io\/disinto\/agents:/{s|image: ghcr\.io/disinto/agents:.*|build:\n context: .\n dockerfile: docker/agents/Dockerfile\n pull_policy: build|}' "$compose_file"
sed -i '/^ image: ghcr\.io\/disinto\/edge:/{s|image: ghcr\.io/disinto/edge:.*|build:\n context: .\n dockerfile: docker/edge/Dockerfile\n pull_policy: build|}' "$compose_file"
sed -i '/^ image: ghcr\.io\/disinto\/edge:/{s|image: ghcr\.io/disinto/edge:.*|build: ./docker/edge|}' "$compose_file"
fi
echo "Created: ${compose_file}"
@ -820,11 +685,6 @@ _generate_agent_docker_impl() {
# Output path: ${FACTORY_ROOT}/docker/Caddyfile (gitignored — generated artifact).
# The edge compose service mounts this path as /etc/caddy/Caddyfile.
# On a fresh clone, `disinto init` calls generate_caddyfile before first `disinto up`.
#
# Routing mode (EDGE_ROUTING_MODE env var):
# subpath — (default) all services under <project>.disinto.ai/{forge,ci,chat,staging}
# subdomain — per-service subdomains: forge.<project>, ci.<project>, chat.<project>
# See docs/edge-routing-fallback.md for the full pivot plan.
_generate_caddyfile_impl() {
local docker_dir="${FACTORY_ROOT}/docker"
local caddyfile="${docker_dir}/Caddyfile"
@ -834,22 +694,8 @@ _generate_caddyfile_impl() {
return
fi
local routing_mode="${EDGE_ROUTING_MODE:-subpath}"
if [ "$routing_mode" = "subdomain" ]; then
_generate_caddyfile_subdomain "$caddyfile"
else
_generate_caddyfile_subpath "$caddyfile"
fi
echo "Created: ${caddyfile} (routing_mode=${routing_mode})"
}
# Subpath Caddyfile: all services under a single :80 block with path-based routing.
_generate_caddyfile_subpath() {
local caddyfile="$1"
cat > "$caddyfile" <<'CADDYFILEEOF'
# Caddyfile — edge proxy configuration (subpath mode)
# Caddyfile — edge proxy configuration
# IP-only binding at bootstrap; domain + TLS added later via vault resource request
:80 {
@ -870,73 +716,30 @@ _generate_caddyfile_subpath() {
# Reverse proxy to staging
handle /staging/* {
uri strip_prefix /staging
reverse_proxy staging:80
}
# Chat service — reverse proxy to in-process chat server (#705, #1083)
# Chat service — reverse proxy to disinto-chat backend (#705)
# OAuth routes bypass forward_auth — unauthenticated users need these (#709)
handle /chat/login {
reverse_proxy 127.0.0.1:8080
reverse_proxy chat:8080
}
handle /chat/oauth/callback {
reverse_proxy 127.0.0.1:8080
reverse_proxy chat:8080
}
# Defense-in-depth: forward_auth stamps X-Forwarded-User from session (#709)
handle /chat/* {
forward_auth 127.0.0.1:8080 {
forward_auth chat:8080 {
uri /chat/auth/verify
copy_headers X-Forwarded-User
header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET}
}
reverse_proxy 127.0.0.1:8080
reverse_proxy chat:8080
}
}
CADDYFILEEOF
}
# Subdomain Caddyfile: four host blocks per docs/edge-routing-fallback.md.
# Uses env vars EDGE_TUNNEL_FQDN_FORGE, EDGE_TUNNEL_FQDN_CI, EDGE_TUNNEL_FQDN_CHAT,
# and EDGE_TUNNEL_FQDN (main project domain → staging).
_generate_caddyfile_subdomain() {
local caddyfile="$1"
cat > "$caddyfile" <<'CADDYFILEEOF'
# Caddyfile — edge proxy configuration (subdomain mode)
# Per-service subdomains; see docs/edge-routing-fallback.md
# Main project domain — staging / landing
{$EDGE_TUNNEL_FQDN} {
reverse_proxy staging:80
}
# Forgejo — root path, no subpath rewrite needed
{$EDGE_TUNNEL_FQDN_FORGE} {
reverse_proxy forgejo:3000
}
# Woodpecker CI — root path
{$EDGE_TUNNEL_FQDN_CI} {
reverse_proxy woodpecker:8000
}
# Chat — with forward_auth (#709, on its own host)
{$EDGE_TUNNEL_FQDN_CHAT} {
handle /login {
reverse_proxy 127.0.0.1:8080
}
handle /oauth/callback {
reverse_proxy 127.0.0.1:8080
}
handle /* {
forward_auth 127.0.0.1:8080 {
uri /auth/verify
copy_headers X-Forwarded-User
header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET}
}
reverse_proxy 127.0.0.1:8080
}
}
CADDYFILEEOF
echo "Created: ${caddyfile}"
}
# Generate docker/index.html default page.

View file

@ -16,6 +16,18 @@ set -euo pipefail
# ── Internal helpers ─────────────────────────────────────────────────────────
# _hvault_default_env — ensure VAULT_ADDR and VAULT_TOKEN are set for local-cluster operation
# Safe to call multiple times; no-op if both are already exported.
# Reads VAULT_TOKEN from /etc/vault.d/root.token if the file is readable.
_hvault_default_env() {
: "${VAULT_ADDR:=http://127.0.0.1:8200}"
export VAULT_ADDR
if [ -z "${VAULT_TOKEN:-}" ] && [ -r /etc/vault.d/root.token ]; then
VAULT_TOKEN="$(cat /etc/vault.d/root.token)"
export VAULT_TOKEN
fi
}
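# Usage sketch (illustrative only; the real callers live under lib/init/):
# an init-time script sources this library, defaults the local-cluster env,
# then probes connectivity before touching any KV path. The KV path below is
# a hypothetical example, not a required location.
_hvault_example_init_caller() {
    _hvault_default_env                                # VAULT_ADDR/VAULT_TOKEN defaults
    hvault_token_lookup >/dev/null || return 1         # connectivity + auth probe
    hvault_kv_get "disinto/shared/example" >/dev/null 2>&1 || true
}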
# _hvault_err — emit structured JSON error to stderr
# Args: func_name, message, [detail]
_hvault_err() {
@ -38,30 +50,6 @@ _hvault_resolve_token() {
return 1
}
# _hvault_default_env — set the local-cluster Vault env if unset
#
# Idempotent helper used by every Vault-touching script that runs during
# `disinto init` (S2). On the local-cluster common case, operators (and
# the init dispatcher in bin/disinto) have not exported VAULT_ADDR or
# VAULT_TOKEN — the server is reachable on localhost:8200 and the root
# token lives at /etc/vault.d/root.token. Scripts must Just Work in that
# shape.
#
# - If VAULT_ADDR is unset, defaults to http://127.0.0.1:8200.
# - If VAULT_TOKEN is unset, resolves from /etc/vault.d/root.token via
# _hvault_resolve_token. A missing token file is not an error here —
# downstream hvault_token_lookup() probes connectivity and emits the
# operator-facing "VAULT_ADDR + VAULT_TOKEN" diagnostic.
#
# Centralised to keep the defaulting stanza in one place — copy-pasting
# the 5-line block into each init script trips the repo-wide 5-line
# sliding-window duplicate detector (.woodpecker/detect-duplicates.py).
_hvault_default_env() {
VAULT_ADDR="${VAULT_ADDR:-http://127.0.0.1:8200}"
export VAULT_ADDR
_hvault_resolve_token || :
}
# _hvault_check_prereqs — validate VAULT_ADDR and VAULT_TOKEN are set
# Args: caller function name
_hvault_check_prereqs() {
@ -129,60 +117,6 @@ _hvault_request() {
# Used by: hvault_kv_get, hvault_kv_put, hvault_kv_list
: "${VAULT_KV_MOUNT:=kv}"
# hvault_ensure_kv_v2 MOUNT [LOG_PREFIX]
# Assert that the given KV mount is present and KV v2. If absent, enable
# it. If present as wrong type/version, exit 1. Callers must have already
# checked VAULT_ADDR / VAULT_TOKEN.
#
# DRY_RUN (env, default 0): when 1, log intent without writing.
# LOG_PREFIX (optional): label for log lines, e.g. "[vault-seed-forgejo]".
#
# Extracted here because every vault-seed-*.sh script needs this exact
# sequence, and the 5-line sliding-window dup detector flags the
# copy-paste. One place, one implementation.
hvault_ensure_kv_v2() {
local mount="${1:?hvault_ensure_kv_v2: MOUNT required}"
local prefix="${2:-[hvault]}"
local dry_run="${DRY_RUN:-0}"
local mounts_json mount_exists mount_type mount_version
mounts_json="$(hvault_get_or_empty "sys/mounts")" \
|| { printf '%s ERROR: failed to list Vault mounts\n' "$prefix" >&2; return 1; }
mount_exists=false
if printf '%s' "$mounts_json" | jq -e --arg m "${mount}/" '.[$m]' >/dev/null 2>&1; then
mount_exists=true
fi
if [ "$mount_exists" = true ]; then
mount_type="$(printf '%s' "$mounts_json" \
| jq -r --arg m "${mount}/" '.[$m].type // ""')"
mount_version="$(printf '%s' "$mounts_json" \
| jq -r --arg m "${mount}/" '.[$m].options.version // "1"')"
if [ "$mount_type" != "kv" ]; then
printf '%s ERROR: %s/ is mounted as type=%q, expected kv — refuse to re-mount\n' \
"$prefix" "$mount" "$mount_type" >&2
return 1
fi
if [ "$mount_version" != "2" ]; then
printf '%s ERROR: %s/ is KV v%s, expected v2 — refuse to upgrade in place\n' \
"$prefix" "$mount" "$mount_version" >&2
return 1
fi
printf '%s %s/ already mounted (kv v2) — skipping enable\n' "$prefix" "$mount"
else
if [ "$dry_run" -eq 1 ]; then
printf '%s [dry-run] would enable %s/ as kv v2\n' "$prefix" "$mount"
else
local payload
payload="$(jq -n '{type:"kv",options:{version:"2"},description:"disinto shared KV v2 (S2.4)"}')"
_hvault_request POST "sys/mounts/${mount}" "$payload" >/dev/null \
|| { printf '%s ERROR: failed to enable %s/ as kv v2\n' "$prefix" "$mount" >&2; return 1; }
printf '%s %s/ enabled as kv v2\n' "$prefix" "$mount"
fi
fi
}
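# Usage sketch (illustrative): how a vault-seed-*.sh script might call the
# helper before writing secrets. The mount default and log-prefix convention
# mirror the comments above; the function name is a hypothetical example.
_hvault_example_seed_preamble() {
    local kv_mount="${VAULT_KV_MOUNT:-kv}"
    hvault_ensure_kv_v2 "$kv_mount" "[vault-seed-example]" || return 1
    # ...subsequent hvault_kv_put / _hvault_seed_key calls would follow here...
}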
# hvault_kv_get PATH [KEY]
# Read a KV v2 secret at PATH, optionally extract a single KEY.
# Outputs: JSON value (full data object, or single key value)
@ -405,36 +339,3 @@ hvault_token_lookup() {
return 1
}
}
# _hvault_seed_key — Seed a single KV key if it doesn't exist.
# Reads existing data and merges to preserve sibling keys (KV v2 replaces
# .data atomically). Returns 0=created, 1=unchanged, 2=API error.
# Args:
# path: KV v2 logical path (e.g. "disinto/shared/chat")
# key: key name within the path (e.g. "chat_oauth_client_id")
# generator: shell command that outputs a random value (default: openssl rand -hex 32)
# Usage:
# _hvault_seed_key "disinto/shared/chat" "chat_oauth_client_id"
# rc=$? # 0=created, 1=unchanged
_hvault_seed_key() {
local path="$1" key="$2" generator="${3:-openssl rand -hex 32}"
local existing
existing=$(hvault_kv_get "$path" "$key" 2>/dev/null) || true
if [ -n "$existing" ]; then
return 1 # unchanged
fi
local value
value=$(eval "$generator")
# Read existing data to preserve sibling keys (KV v2 replaces atomically)
local kv_api="${VAULT_KV_MOUNT}/data/${path}"
local raw existing_data payload
raw="$(hvault_get_or_empty "$kv_api")" || return 2
existing_data="{}"
[ -n "$raw" ] && existing_data="$(printf '%s' "$raw" | jq '.data.data // {}')"
payload="$(printf '%s' "$existing_data" \
| jq --arg k "$key" --arg v "$value" '{data: (. + {($k): $v})}')"
_hvault_request POST "$kv_api" "$payload" >/dev/null
return 0 # created
}
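# Usage sketch (illustrative): seed one key with a custom generator and branch
# on the documented return codes. The path, key, and function name are
# hypothetical examples; the `|| rc=$?` form keeps the call safe under set -e.
_hvault_example_seed_caller() {
    local rc=0
    _hvault_seed_key "disinto/shared/example" "api_token" "openssl rand -base64 24" || rc=$?
    case "$rc" in
        0) echo "created" ;;
        1) echo "unchanged" ;;
        *) echo "API error" >&2; return 1 ;;
    esac
}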

View file

@ -66,7 +66,6 @@ HOST_VOLUME_DIRS=(
"/srv/disinto/agent-data"
"/srv/disinto/project-repos"
"/srv/disinto/caddy-data"
"/srv/disinto/docker"
"/srv/disinto/chat-history"
"/srv/disinto/ops-repo"
)
@ -117,7 +116,7 @@ if [ "$dry_run" = true ]; then
[dry-run] Step 4/9: create host-volume dirs under /srv/disinto/
EOF
for d in "${HOST_VOLUME_DIRS[@]}"; do
printf ' → install -d -m 0777 %s\n' "$d"
printf ' → install -d -m 0755 %s\n' "$d"
done
cat <<EOF
@ -281,10 +280,8 @@ for d in "${HOST_VOLUME_DIRS[@]}"; do
log "unchanged: ${d}"
else
log "creating: ${d}"
install -d -m 0777 -o root -g root "$d"
install -d -m 0755 -o root -g root "$d"
fi
# Ensure correct permissions (fixes pre-existing 0755 dirs on re-run)
chmod 0777 "$d"
done
# ── Step 5/9: /etc/nomad.d/server.hcl + client.hcl ───────────────────────────

View file

@ -16,15 +16,13 @@
# Environment:
# REPO_ROOT — absolute path to repo root (defaults to parent of
# this script's parent directory)
# JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 360)
# JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 240)
# JOB_READY_TIMEOUT_<JOBNAME> — per-job timeout override (e.g.,
# JOB_READY_TIMEOUT_FORGEJO=300)
# Built-in: JOB_READY_TIMEOUT_CHAT=600
#
# Exit codes:
# 0 success (all jobs deployed and healthy, or dry-run completed)
# 1 failure (validation error, or one or more jobs unhealthy after all
# jobs submitted — deploy does NOT cascade-skip on timeout)
# 1 failure (validation error, timeout, or nomad command failure)
#
# Idempotency:
# Running twice back-to-back on a healthy cluster is a no-op. Jobs that are
@ -35,13 +33,9 @@ set -euo pipefail
# ── Configuration ────────────────────────────────────────────────────────────
SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="${REPO_ROOT:-$(cd "${SCRIPT_ROOT}/../../.." && pwd)}"
JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-360}"
# Per-job built-in defaults (override with JOB_READY_TIMEOUT_<JOBNAME> env var)
JOB_READY_TIMEOUT_CHAT="${JOB_READY_TIMEOUT_CHAT:-600}"
JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-240}"
DRY_RUN=0
FAILED_JOBS=() # jobs that timed out or failed deployment
log() { printf '[deploy] %s\n' "$*" >&2; }
die() { printf '[deploy] ERROR: %s\n' "$*" >&2; exit 1; }
@ -174,43 +168,6 @@ _wait_job_running() {
return 1
}
# ── Helper: _run_post_deploy <job_name> ─────────────────────────────────────
# Runs post-deploy scripts for a job after it becomes healthy.
# Currently supports: forgejo → run forgejo-bootstrap.sh
#
# Args:
# job_name — name of the deployed job
#
# Returns:
# 0 on success (script ran or not applicable)
# 1 on failure
# ─────────────────────────────────────────────────────────────────────────────
_run_post_deploy() {
local job_name="$1"
local post_deploy_script
case "$job_name" in
forgejo)
post_deploy_script="${SCRIPT_ROOT}/forgejo-bootstrap.sh"
if [ -x "$post_deploy_script" ]; then
log "running post-deploy script for ${job_name}"
if ! "$post_deploy_script"; then
log "ERROR: post-deploy script failed for ${job_name}"
return 1
fi
log "post-deploy script completed for ${job_name}"
else
log "no post-deploy script found for ${job_name}, skipping"
fi
;;
*)
log "no post-deploy script for ${job_name}, skipping"
;;
esac
return 0
}
# ── Main: deploy each job in order ───────────────────────────────────────────
for job_name in "${JOBS[@]}"; do
jobspec_path="${REPO_ROOT}/nomad/jobs/${job_name}.hcl"
@ -220,8 +177,7 @@ for job_name in "${JOBS[@]}"; do
fi
# Per-job timeout override: JOB_READY_TIMEOUT_<UPPERCASE_JOBNAME>
# Sanitize job name: replace hyphens with underscores (bash vars can't have hyphens)
job_upper=$(printf '%s' "$job_name" | tr '[:lower:]-' '[:upper:]_' | tr ' ' '_')
job_upper=$(printf '%s' "$job_name" | tr '[:lower:]' '[:upper:]')
timeout_var="JOB_READY_TIMEOUT_${job_upper}"
job_timeout="${!timeout_var:-$JOB_READY_TIMEOUT_SECS}"
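# Example (illustrative): a job named "woodpecker-agent" sanitizes to the
# override variable JOB_READY_TIMEOUT_WOODPECKER_AGENT, so an operator can run
#   JOB_READY_TIMEOUT_WOODPECKER_AGENT=480 lib/init/nomad/deploy.sh
# to give just that job an eight-minute window while others keep the default.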
@ -229,9 +185,6 @@ for job_name in "${JOBS[@]}"; do
log "[dry-run] nomad job validate ${jobspec_path}"
log "[dry-run] nomad job run -detach ${jobspec_path}"
log "[dry-run] (would wait for '${job_name}' to become healthy for ${job_timeout}s)"
case "$job_name" in
forgejo) log "[dry-run] [post-deploy] would run forgejo-bootstrap.sh" ;;
esac
continue
fi
@ -261,13 +214,7 @@ for job_name in "${JOBS[@]}"; do
# 4. Wait for healthy state
if ! _wait_job_running "$job_name" "$job_timeout"; then
log "WARNING: deployment for job '${job_name}' did not reach successful state — continuing with remaining jobs"
FAILED_JOBS+=("$job_name")
else
# 5. Run post-deploy scripts (only if job reached healthy state)
if ! _run_post_deploy "$job_name"; then
die "post-deploy script failed for job '${job_name}'"
fi
die "deployment for job '${job_name}' did not reach successful state"
fi
done
@ -275,17 +222,4 @@ if [ "$DRY_RUN" -eq 1 ]; then
log "dry-run complete"
fi
# ── Final health summary ─────────────────────────────────────────────────────
if [ "${#FAILED_JOBS[@]}" -gt 0 ]; then
log ""
log "=== DEPLOY SUMMARY ==="
log "The following jobs did NOT reach healthy state:"
for failed in "${FAILED_JOBS[@]}"; do
log " - ${failed}"
done
log "All other jobs were submitted and healthy."
log "======================"
exit 1
fi
exit 0

View file

@ -1,215 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/forgejo-bootstrap.sh — Bootstrap Forgejo admin user
#
# Part of the Nomad+Vault migration (S2.4, issue #1069). Creates the
# disinto-admin user in Forgejo if it doesn't exist, enabling:
# - First-login success without manual intervention
# - PAT generation via API (required for disinto backup import #1058)
#
# The script is idempotent — re-running after success is a no-op.
#
# Scope:
# - Checks if user 'disinto-admin' exists via GET /api/v1/users/search
# - If not: POST /api/v1/admin/users to create admin user
# - Uses FORGE_ADMIN_PASS from environment (required)
#
# Idempotency contract:
# - User 'disinto-admin' exists → skip creation, log
# "[forgejo-bootstrap] admin user already exists"
# - User creation fails with "user already exists" → treat as success
#
# Preconditions:
# - Forgejo reachable at $FORGE_URL (default: http://127.0.0.1:3000)
# - Forgejo admin token at $FORGE_TOKEN (from Vault or env)
# - FORGE_ADMIN_PASS set (env var with admin password)
#
# Requires:
# - curl, jq
#
# Usage:
# lib/init/nomad/forgejo-bootstrap.sh
# lib/init/nomad/forgejo-bootstrap.sh --dry-run
#
# Exit codes:
# 0 success (user created + ready, or already exists)
# 1 precondition / API failure
# =============================================================================
set -euo pipefail
# ── Configuration ────────────────────────────────────────────────────────────
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
# shellcheck source=../../../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
# Configuration
FORGE_URL="${FORGE_URL:-http://127.0.0.1:3000}"
FORGE_TOKEN="${FORGE_TOKEN:-}"
FORGE_ADMIN_USER="${DISINTO_ADMIN_USER:-disinto-admin}"
FORGE_ADMIN_EMAIL="${DISINTO_ADMIN_EMAIL:-admin@disinto.local}"
# Derive FORGE_ADMIN_PASS from common env var patterns
# Priority: explicit FORGE_ADMIN_PASS > DISINTO_FORGE_ADMIN_PASS > FORGEJO_ADMIN_PASS
FORGE_ADMIN_PASS="${FORGE_ADMIN_PASS:-${DISINTO_FORGE_ADMIN_PASS:-${FORGEJO_ADMIN_PASS:-}}}"
LOG_TAG="[forgejo-bootstrap]"
log() { printf '%s %s\n' "$LOG_TAG" "$*" >&2; }
die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; }
# ── Flag parsing ─────────────────────────────────────────────────────────────
DRY_RUN="${DRY_RUN:-0}"
for arg in "$@"; do
case "$arg" in
--dry-run) DRY_RUN=1 ;;
-h|--help)
printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")"
printf 'Bootstrap Forgejo admin user if it does not exist.\n'
printf 'Idempotent: re-running is a no-op.\n\n'
printf 'Environment:\n'
printf ' FORGE_URL Forgejo base URL (default: http://127.0.0.1:3000)\n'
printf ' FORGE_TOKEN Forgejo admin token (from Vault or env)\n'
printf ' FORGE_ADMIN_PASS Admin password (required)\n'
printf ' DISINTO_ADMIN_USER Username for admin account (default: disinto-admin)\n'
printf ' DISINTO_ADMIN_EMAIL Admin email (default: admin@disinto.local)\n\n'
printf ' --dry-run Print planned actions without modifying Forgejo.\n'
exit 0
;;
*) die "invalid argument: ${arg} (try --help)" ;;
esac
done
# ── Precondition checks ──────────────────────────────────────────────────────
log "── Precondition check ──"
if [ -z "$FORGE_URL" ]; then
die "FORGE_URL is not set"
fi
if [ -z "$FORGE_ADMIN_PASS" ]; then
die "FORGE_ADMIN_PASS is not set (required for admin user creation)"
fi
# Resolve FORGE_TOKEN from Vault if not set in env
if [ -z "$FORGE_TOKEN" ]; then
log "reading FORGE_TOKEN from Vault at kv/disinto/shared/forge/token"
_hvault_default_env
token_raw="$(hvault_get_or_empty "kv/data/disinto/shared/forge/token" 2>/dev/null)" || true
if [ -n "$token_raw" ]; then
FORGE_TOKEN="$(printf '%s' "$token_raw" | jq -r '.data.data.token // empty' 2>/dev/null)" || true
fi
if [ -z "$FORGE_TOKEN" ]; then
die "FORGE_TOKEN not set and not found in Vault"
fi
log "forge token loaded from Vault"
fi
# ── Step 1/3: Check if admin user already exists ─────────────────────────────
log "── Step 1/3: check if admin user '${FORGE_ADMIN_USER}' exists ──"
# Use exact match via GET /api/v1/users/{username} (returns 404 if absent)
user_lookup_raw=$(curl -sf --max-time 10 \
"${FORGE_URL}/api/v1/users/${FORGE_ADMIN_USER}" 2>/dev/null) || {
# curl -f exits 22 on an HTTP error (a 404 here means the user doesn't exist)
if [ $? -eq 22 ]; then
log "admin user '${FORGE_ADMIN_USER}' not found"
admin_user_exists=false
user_id=""
else
# Other curl errors (e.g., network, Forgejo down)
log "warning: failed to lookup user (Forgejo may not be ready yet)"
admin_user_exists=false
user_id=""
fi
}
if [ -n "$user_lookup_raw" ]; then
admin_user_exists=true
user_id=$(printf '%s' "$user_lookup_raw" | jq -r '.id // empty' 2>/dev/null) || true
if [ -n "$user_id" ]; then
log "admin user '${FORGE_ADMIN_USER}' already exists (user_id: ${user_id})"
fi
fi
# ── Step 2/3: Create admin user if needed ────────────────────────────────────
if [ "$admin_user_exists" = false ]; then
log "creating admin user '${FORGE_ADMIN_USER}'"
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] would create admin user with:"
log "[dry-run] username: ${FORGE_ADMIN_USER}"
log "[dry-run] email: ${FORGE_ADMIN_EMAIL}"
log "[dry-run] admin: true"
log "[dry-run] must_change_password: false"
else
# Create the admin user via the admin API
create_response=$(curl -sf --max-time 30 -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users" \
-d "{
\"username\": \"${FORGE_ADMIN_USER}\",
\"email\": \"${FORGE_ADMIN_EMAIL}\",
\"password\": \"${FORGE_ADMIN_PASS}\",
\"admin\": true,
\"must_change_password\": false
}" 2>/dev/null) || {
# Check if the error is "user already exists" (race condition on re-run)
error_body=$(curl -s --max-time 30 -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users" \
-d "{\"username\": \"${FORGE_ADMIN_USER}\", \"email\": \"${FORGE_ADMIN_EMAIL}\", \"password\": \"${FORGE_ADMIN_PASS}\", \"admin\": true, \"must_change_password\": false}" 2>/dev/null) || error_body=""
if echo "$error_body" | grep -q '"message".*"user already exists"'; then
log "admin user '${FORGE_ADMIN_USER}' already exists (race condition handled)"
admin_user_exists=true
else
die "failed to create admin user in Forgejo: ${error_body:-unknown error}"
fi
}
# Extract user_id from response
user_id=$(printf '%s' "$create_response" | jq -r '.id // empty' 2>/dev/null) || true
if [ -n "$user_id" ]; then
admin_user_exists=true
log "admin user '${FORGE_ADMIN_USER}' created (user_id: ${user_id})"
else
die "failed to extract user_id from Forgejo response"
fi
fi
else
log "admin user '${FORGE_ADMIN_USER}' already exists — skipping creation"
fi
# ── Step 3/3: Verify user was created and is admin ───────────────────────────
log "── Step 3/3: verify admin user is properly configured ──"
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] would verify admin user configuration"
log "done — [dry-run] complete"
else
# Verify the user exists and is admin
verify_response=$(curl -sf --max-time 10 \
-u "${FORGE_ADMIN_USER}:${FORGE_ADMIN_PASS}" \
"${FORGE_URL}/api/v1/user" 2>/dev/null) || {
die "failed to verify admin user credentials"
}
is_admin=$(printf '%s' "$verify_response" | jq -r '.is_admin // false' 2>/dev/null) || true
login=$(printf '%s' "$verify_response" | jq -r '.login // empty' 2>/dev/null) || true
if [ "$is_admin" != "true" ]; then
die "admin user '${FORGE_ADMIN_USER}' is not marked as admin"
fi
if [ "$login" != "$FORGE_ADMIN_USER" ]; then
die "admin user login mismatch: expected '${FORGE_ADMIN_USER}', got '${login}'"
fi
log "admin user verified: login=${login}, is_admin=${is_admin}"
log "done — Forgejo admin user is ready"
fi
exit 0

View file

@ -1,140 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/vault-engines.sh — Enable required Vault secret engines
#
# Part of the Nomad+Vault migration (S2.1, issue #912). Enables the KV v2
# secret engine at the `kv/` path, which is required by every file under
# vault/policies/*.hcl, every role in vault/roles.yaml, every write done
# by tools/vault-import.sh, and every template read done by
# nomad/jobs/forgejo.hcl — all of which address paths under kv/disinto/…
# and 403 if the mount is absent.
#
# Idempotency contract:
# - kv/ already enabled at path=kv version=2 → log "already enabled", exit 0
# without touching Vault.
# - kv/ enabled at a different type/version → die (manual intervention).
# - kv/ not enabled → POST sys/mounts/kv to enable kv-v2, log "enabled".
# - Second run on a fully-configured box is a silent no-op.
#
# Preconditions:
# - Vault is unsealed and reachable (VAULT_ADDR + VAULT_TOKEN set OR
# defaultable to the local-cluster shape via _hvault_default_env).
# - Must run AFTER cluster-up.sh (unseal complete) but BEFORE
# vault-apply-policies.sh (policies reference kv/* paths).
#
# Environment:
# VAULT_ADDR — default http://127.0.0.1:8200 via _hvault_default_env.
# VAULT_TOKEN — env OR /etc/vault.d/root.token (resolved by lib/hvault.sh).
#
# Usage:
# sudo lib/init/nomad/vault-engines.sh
# sudo lib/init/nomad/vault-engines.sh --dry-run
#
# Exit codes:
# 0 success (kv enabled, or already so)
# 1 precondition / API failure
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
# shellcheck source=../../hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
log() { printf '[vault-engines] %s\n' "$*"; }
die() { printf '[vault-engines] ERROR: %s\n' "$*" >&2; exit 1; }
# ── Flag parsing (single optional flag) ─────────────────────────────────────
# Shape: while/shift loop. Deliberately NOT a flat `case "${1:-}"` like
# tools/vault-apply-policies.sh nor an if/elif ladder like
# tools/vault-apply-roles.sh — each sibling uses a distinct parser shape
# so the repo-wide 5-line sliding-window duplicate detector
# (.woodpecker/detect-duplicates.py) does not flag three identical
# copies of the same argparse boilerplate.
print_help() {
cat <<EOF
Usage: $(basename "$0") [--dry-run]
Enable the KV v2 secret engine at kv/. Required by all Vault policies,
roles, and Nomad job templates that reference kv/disinto/* paths.
Idempotent: an already-enabled kv/ is reported and left untouched.
--dry-run Probe state and print the action without contacting Vault
in a way that mutates it.
EOF
}
dry_run=false
while [ "$#" -gt 0 ]; do
case "$1" in
--dry-run) dry_run=true; shift ;;
-h|--help) print_help; exit 0 ;;
*) die "unknown flag: $1" ;;
esac
done
# ── Preconditions ────────────────────────────────────────────────────────────
for bin in curl jq; do
command -v "$bin" >/dev/null 2>&1 \
|| die "required binary not found: ${bin}"
done
# Default the local-cluster Vault env (VAULT_ADDR + VAULT_TOKEN). Shared
# with the rest of the init-time Vault scripts — see lib/hvault.sh header.
_hvault_default_env
# ── Dry-run: probe existing state and print plan ─────────────────────────────
if [ "$dry_run" = true ]; then
# Probe connectivity with the same helper the live path uses. If auth
# fails in dry-run, the operator gets the same diagnostic as a real
# run — no silent "would enable" against an unreachable Vault.
hvault_token_lookup >/dev/null \
|| die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
mounts_raw="$(hvault_get_or_empty "sys/mounts")" \
|| die "failed to list secret engines"
if [ -n "$mounts_raw" ] \
&& printf '%s' "$mounts_raw" | jq -e '."kv/"' >/dev/null 2>&1; then
log "[dry-run] kv-v2 at kv/ already enabled"
else
log "[dry-run] would enable kv-v2 at kv/"
fi
exit 0
fi
# ── Live run: Vault connectivity check ───────────────────────────────────────
hvault_token_lookup >/dev/null \
|| die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
# ── Check if kv/ is already enabled ──────────────────────────────────────────
# sys/mounts returns an object keyed by "<path>/" for every enabled secret
# engine (trailing slash is Vault's on-disk form). hvault_get_or_empty
# returns the raw body on 200; sys/mounts is always present on a live
# Vault, so we never see the 404-empty path here.
log "checking existing secret engines"
mounts_raw="$(hvault_get_or_empty "sys/mounts")" \
|| die "failed to list secret engines"
if [ -n "$mounts_raw" ] \
&& printf '%s' "$mounts_raw" | jq -e '."kv/"' >/dev/null 2>&1; then
# kv/ exists — verify it's kv-v2 on the right path shape. Vault returns
# the option as a string ("2") on GET, never an integer.
kv_type="$(printf '%s' "$mounts_raw" | jq -r '."kv/".type // ""')"
kv_version="$(printf '%s' "$mounts_raw" | jq -r '."kv/".options.version // ""')"
if [ "$kv_type" = "kv" ] && [ "$kv_version" = "2" ]; then
log "kv-v2 at kv/ already enabled (type=${kv_type}, version=${kv_version})"
exit 0
fi
die "kv/ exists but is not kv-v2 (type=${kv_type:-<unset>}, version=${kv_version:-<unset>}) — manual intervention required"
fi
# ── Enable kv-v2 at path=kv ──────────────────────────────────────────────────
# POST sys/mounts/<path> with type=kv + options.version=2 is the
# HTTP-API equivalent of `vault secrets enable -path=kv -version=2 kv`.
# Keeps the script vault-CLI-free (matches the policy-apply + nomad-auth
# scripts; their headers explain why a CLI dep would die on client-only
# nodes).
log "enabling kv-v2 at path=kv"
enable_payload="$(jq -n '{type:"kv",options:{version:"2"}}')"
_hvault_request POST "sys/mounts/kv" "$enable_payload" >/dev/null \
|| die "failed to enable kv-v2 secret engine"
log "kv-v2 enabled at kv/"
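# Equivalent raw API call (illustrative; the script goes through _hvault_request
# so it stays vault-CLI-free), shown only to make the HTTP shape concrete:
#   curl -sf -X POST -H "X-Vault-Token: ${VAULT_TOKEN}" \
#     -d '{"type":"kv","options":{"version":"2"}}' \
#     "${VAULT_ADDR}/v1/sys/mounts/kv"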

View file

@ -49,12 +49,8 @@ APPLY_ROLES_SH="${REPO_ROOT}/tools/vault-apply-roles.sh"
SERVER_HCL_SRC="${REPO_ROOT}/nomad/server.hcl"
SERVER_HCL_DST="/etc/nomad.d/server.hcl"
# shellcheck source=../../hvault.sh
# shellcheck source=../../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
# Default the local-cluster Vault env (see lib/hvault.sh::_hvault_default_env).
# Called from `disinto init` which does not export VAULT_ADDR/VAULT_TOKEN in
# the common fresh-LXC case (issue #912). Must run after hvault.sh is sourced.
_hvault_default_env
log() { printf '[vault-auth] %s\n' "$*"; }

View file

@ -1,221 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/wp-oauth-register.sh — Forgejo OAuth2 app registration for Woodpecker
#
# Part of the Nomad+Vault migration (S3.3, issue #936). Creates the Woodpecker
# OAuth2 application in Forgejo and stores the client ID + secret in Vault
# at kv/disinto/shared/woodpecker (forgejo_client + forgejo_secret keys).
#
# The script is idempotent — re-running after success is a no-op.
#
# Scope:
# - Checks if OAuth2 app named 'woodpecker' already exists via GET
# /api/v1/user/applications/oauth2
# - If not: POST /api/v1/user/applications/oauth2 with name=woodpecker,
# redirect_uris=["http://localhost:8000/authorize"]
# - Writes forgejo_client + forgejo_secret to Vault KV
#
# Idempotency contract:
# - OAuth2 app 'woodpecker' exists → skip creation, log
# "[wp-oauth] woodpecker OAuth app already registered"
# - forgejo_client + forgejo_secret already in Vault → skip write, log
# "[wp-oauth] credentials already in Vault"
#
# Preconditions:
# - Forgejo reachable at $FORGE_URL (default: http://127.0.0.1:3000)
# - Forgejo admin token at $FORGE_TOKEN (from Vault kv/disinto/shared/forge/token
# or env fallback)
# - Vault reachable + unsealed at $VAULT_ADDR
# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable
#
# Requires:
# - curl, jq
#
# Usage:
# lib/init/nomad/wp-oauth-register.sh
# lib/init/nomad/wp-oauth-register.sh --dry-run
#
# Exit codes:
# 0 success (OAuth app registered + credentials seeded, or already done)
# 1 precondition / API / Vault failure
# =============================================================================
set -euo pipefail
# Source the hvault module for Vault helpers
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
# shellcheck source=../../../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
# Configuration
FORGE_URL="${FORGE_URL:-http://127.0.0.1:3000}"
FORGE_OAUTH_APP_NAME="woodpecker"
FORGE_REDIRECT_URIS='["http://localhost:8000/authorize"]'
KV_MOUNT="${VAULT_KV_MOUNT:-kv}"
KV_PATH="disinto/shared/woodpecker"
KV_API_PATH="${KV_MOUNT}/data/${KV_PATH}"
LOG_TAG="[wp-oauth]"
log() { printf '%s %s\n' "$LOG_TAG" "$*"; }
die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; }
# ── Flag parsing ─────────────────────────────────────────────────────────────
DRY_RUN="${DRY_RUN:-0}"
for arg in "$@"; do
case "$arg" in
--dry-run) DRY_RUN=1 ;;
-h|--help)
printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")"
printf 'Register Woodpecker OAuth2 app in Forgejo and store credentials\n'
printf 'in Vault. Idempotent: re-running is a no-op.\n\n'
printf ' --dry-run Print planned actions without writing to Vault.\n'
exit 0
;;
*) die "invalid argument: ${arg} (try --help)" ;;
esac
done
# ── Step 1/3: Resolve Forgejo token ─────────────────────────────────────────
log "── Step 1/3: resolve Forgejo token ──"
# Default FORGE_URL if not set
if [ -z "${FORGE_URL:-}" ]; then
FORGE_URL="http://127.0.0.1:3000"
export FORGE_URL
fi
# Try to get FORGE_TOKEN from Vault first, then env fallback
FORGE_TOKEN="${FORGE_TOKEN:-}"
if [ -z "$FORGE_TOKEN" ]; then
log "reading FORGE_TOKEN from Vault at ${KV_MOUNT}/disinto/shared/forge/token"
token_raw="$(hvault_get_or_empty "${KV_MOUNT}/data/disinto/shared/forge/token")" || {
die "failed to read forge token from Vault"
}
if [ -n "$token_raw" ]; then
FORGE_TOKEN="$(printf '%s' "$token_raw" | jq -r '.data.data.token // empty')"
if [ -z "$FORGE_TOKEN" ]; then
die "forge token not found at kv/disinto/shared/forge/token"
fi
log "forge token loaded from Vault"
fi
fi
if [ -z "$FORGE_TOKEN" ]; then
die "FORGE_TOKEN not set and not found in Vault"
fi
# ── Step 2/3: Check/create OAuth2 app in Forgejo ────────────────────────────
log "── Step 2/3: ensure OAuth2 app '${FORGE_OAUTH_APP_NAME}' in Forgejo ──"
# Check if OAuth2 app already exists
log "checking for existing OAuth2 app '${FORGE_OAUTH_APP_NAME}'"
oauth_apps_raw=$(curl -sf --max-time 10 \
-H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_URL}/api/v1/user/applications/oauth2" 2>/dev/null) || {
die "failed to list Forgejo OAuth2 apps"
}
oauth_app_exists=false
existing_client_id=""
forgejo_secret=""
# Parse the OAuth2 apps list
if [ -n "$oauth_apps_raw" ]; then
existing_client_id=$(printf '%s' "$oauth_apps_raw" \
| jq -r --arg name "$FORGE_OAUTH_APP_NAME" \
'.[] | select(.name == $name) | .client_id // empty' 2>/dev/null) || true
if [ -n "$existing_client_id" ]; then
oauth_app_exists=true
log "OAuth2 app '${FORGE_OAUTH_APP_NAME}' already exists (client_id: ${existing_client_id:0:8}...)"
fi
fi
if [ "$oauth_app_exists" = false ]; then
log "creating OAuth2 app '${FORGE_OAUTH_APP_NAME}'"
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] would create OAuth2 app with redirect_uris: ${FORGE_REDIRECT_URIS}"
else
# Create the OAuth2 app
oauth_response=$(curl -sf --max-time 10 -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/user/applications/oauth2" \
-d "{\"name\":\"${FORGE_OAUTH_APP_NAME}\",\"redirect_uris\":${FORGE_REDIRECT_URIS}}" 2>/dev/null) || {
die "failed to create OAuth2 app in Forgejo"
}
# Extract client_id and client_secret from response
existing_client_id=$(printf '%s' "$oauth_response" | jq -r '.client_id // empty')
forgejo_secret=$(printf '%s' "$oauth_response" | jq -r '.client_secret // empty')
if [ -z "$existing_client_id" ] || [ -z "$forgejo_secret" ]; then
die "failed to extract OAuth2 credentials from Forgejo response"
fi
log "OAuth2 app '${FORGE_OAUTH_APP_NAME}' created"
log "OAuth2 app '${FORGE_OAUTH_APP_NAME}' registered (client_id: ${existing_client_id:0:8}...)"
fi
else
# App exists — the OAuth2 client_secret is only returned by Forgejo at
# creation time and cannot be re-fetched via the API. If the secret is not
# already available (Vault is checked in Step 3 below), generate a new one
# and store it in Vault.
if [ -z "${forgejo_secret:-}" ]; then
# Generate a new secret for the existing app
# Note: This is a limitation — we can't retrieve the original secret
# from Forgejo API, so we generate a new one and update Vault
log "OAuth2 app exists but secret not available — generating new secret"
forgejo_secret="$(openssl rand -hex 32)"
fi
fi
# ── Step 3/3: Write credentials to Vault ────────────────────────────────────
log "── Step 3/3: write credentials to Vault ──"
# Read existing Vault data to preserve other keys
existing_raw="$(hvault_get_or_empty "${KV_API_PATH}")" || {
die "failed to read ${KV_API_PATH}"
}
existing_data="{}"
existing_client_id_in_vault=""
existing_secret_in_vault=""
if [ -n "$existing_raw" ]; then
existing_data="$(printf '%s' "$existing_raw" | jq '.data.data // {}')"
existing_client_id_in_vault="$(printf '%s' "$existing_raw" | jq -r '.data.data.forgejo_client // ""')"
existing_secret_in_vault="$(printf '%s' "$existing_raw" | jq -r '.data.data.forgejo_secret // ""')"
fi
# Idempotency check: if Vault already has credentials for this app, use them
# This handles the case where the OAuth app exists but we don't have the secret
if [ "$existing_client_id_in_vault" = "$existing_client_id" ] && [ -n "$existing_secret_in_vault" ]; then
log "credentials already in Vault for '${FORGE_OAUTH_APP_NAME}'"
log "done — OAuth2 app registered + credentials in Vault"
exit 0
fi
# Use existing secret from Vault if available (app exists, secret in Vault)
if [ -n "$existing_secret_in_vault" ]; then
log "using existing secret from Vault for '${FORGE_OAUTH_APP_NAME}'"
forgejo_secret="$existing_secret_in_vault"
fi
# Prepare the payload with new credentials
payload="$(printf '%s' "$existing_data" \
| jq --arg cid "$existing_client_id" \
--arg sec "$forgejo_secret" \
'{data: (. + {forgejo_client: $cid, forgejo_secret: $sec})}')"
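# Illustrative merge result (hypothetical values): if Vault already held
#   {"agent_secret": "abc..."}
# at this path, the payload above becomes
#   {"data": {"agent_secret": "abc...", "forgejo_client": "<id>", "forgejo_secret": "<secret>"}}
# so sibling keys survive the KV v2 whole-.data replacement.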
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] would write forgejo_client + forgejo_secret to ${KV_API_PATH}"
log "done — [dry-run] complete"
else
_hvault_request POST "${KV_API_PATH}" "$payload" >/dev/null \
|| die "failed to write ${KV_API_PATH}"
log "forgejo_client + forgejo_secret written to Vault"
log "done — OAuth2 app registered + credentials in Vault"
fi

View file

@ -157,10 +157,9 @@ issue_claim() {
return 1
fi
local ip_id bl_id bk_id
local ip_id bl_id
ip_id=$(_ilc_in_progress_id)
bl_id=$(_ilc_backlog_id)
bk_id=$(_ilc_blocked_id)
if [ -n "$ip_id" ]; then
curl -sf -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
@ -173,12 +172,6 @@ issue_claim() {
-H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_API}/issues/${issue}/labels/${bl_id}" >/dev/null 2>&1 || true
fi
# Clear blocked label on re-claim — starting work is implicit resolution of prior block
if [ -n "$bk_id" ]; then
curl -sf -X DELETE \
-H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_API}/issues/${issue}/labels/${bk_id}" >/dev/null 2>&1 || true
fi
_ilc_log "claimed issue #${issue}"
return 0
}

View file

@ -198,7 +198,6 @@ setup_ops_repo() {
[ -f "${ops_root}/evidence/holdout/.gitkeep" ] || { touch "${ops_root}/evidence/holdout/.gitkeep"; seeded=true; }
[ -f "${ops_root}/evidence/evolution/.gitkeep" ] || { touch "${ops_root}/evidence/evolution/.gitkeep"; seeded=true; }
[ -f "${ops_root}/evidence/user-test/.gitkeep" ] || { touch "${ops_root}/evidence/user-test/.gitkeep"; seeded=true; }
[ -f "${ops_root}/knowledge/.gitkeep" ] || { touch "${ops_root}/knowledge/.gitkeep"; seeded=true; }
if [ ! -f "${ops_root}/README.md" ]; then
cat > "${ops_root}/README.md" <<OPSEOF
@ -363,45 +362,6 @@ migrate_ops_repo() {
if [ ! -f "$tfile" ]; then
local title
title=$(basename "$tfile" | sed 's/\.md$//; s/_/ /g' | sed 's/\b\(.\)/\u\1/g')
case "$tfile" in
portfolio.md)
{
echo "# ${title}"
echo ""
echo "## Addressables"
echo ""
echo "<!-- Add addressables here -->"
echo ""
echo "## Observables"
echo ""
echo "<!-- Add observables here -->"
} > "$tfile"
;;
RESOURCES.md)
{
echo "# ${title}"
echo ""
echo "## Accounts"
echo ""
echo "<!-- Add account references here -->"
echo ""
echo "## Tokens"
echo ""
echo "<!-- Add token references here -->"
echo ""
echo "## Infrastructure"
echo ""
echo "<!-- Add infrastructure inventory here -->"
} > "$tfile"
;;
prerequisites.md)
{
echo "# ${title}"
echo ""
echo "<!-- Add dependency graph here -->"
} > "$tfile"
;;
*)
{
echo "# ${title}"
echo ""
@ -409,8 +369,6 @@ migrate_ops_repo() {
echo ""
echo "<!-- Add content here -->"
} > "$tfile"
;;
esac
echo " + Created: ${tfile}"
migrated=true
fi

View file

@ -429,100 +429,19 @@ pr_walk_to_merge() {
_prl_log "CI failed — invoking agent (attempt ${ci_fix_count}/${max_ci_fixes})"
# Build per-workflow/per-step CI diagnostics prompt
local ci_prompt_body=""
local passing_workflows=""
local built_diagnostics=false
if [ -n "$_PR_CI_PIPELINE" ] && [ -n "${WOODPECKER_REPO_ID:-}" ]; then
local pip_json
pip_json=$(woodpecker_api "/repos/${WOODPECKER_REPO_ID}/pipelines/${_PR_CI_PIPELINE}" 2>/dev/null) || pip_json=""
if [ -n "$pip_json" ]; then
local wf_count
wf_count=$(printf '%s' "$pip_json" | jq '[.workflows[]?] | length' 2>/dev/null) || wf_count=0
if [ "$wf_count" -gt 0 ]; then
built_diagnostics=true
local wf_idx=0
while [ "$wf_idx" -lt "$wf_count" ]; do
local wf_name wf_state
wf_name=$(printf '%s' "$pip_json" | jq -r ".workflows[$wf_idx].name // \"workflow-$wf_idx\"" 2>/dev/null)
wf_state=$(printf '%s' "$pip_json" | jq -r ".workflows[$wf_idx].state // \"unknown\"" 2>/dev/null)
if [ "$wf_state" = "failure" ] || [ "$wf_state" = "error" ] || [ "$wf_state" = "killed" ]; then
# Collect failed children for this workflow
local failed_children
failed_children=$(printf '%s' "$pip_json" | jq -r "
.workflows[$wf_idx].children[]? |
select(.state == \"failure\" or .state == \"error\" or .state == \"killed\") |
\"\(.name)\t\(.exit_code)\t\(.pid)\"" 2>/dev/null) || failed_children=""
ci_prompt_body="${ci_prompt_body}
--- Failed workflow: ${wf_name} ---"
if [ -n "$failed_children" ]; then
while IFS=$'\t' read -r step_name step_exit step_pid; do
[ -z "$step_name" ] && continue
local exit_annotation=""
case "$step_exit" in
126) exit_annotation=" (permission denied or not executable)" ;;
127) exit_annotation=" (command not found)" ;;
128) exit_annotation=" (invalid exit argument / signal+128)" ;;
esac
ci_prompt_body="${ci_prompt_body}
Step: ${step_name}
Exit code: ${step_exit}${exit_annotation}"
# Fetch per-step logs
if [ -n "$step_pid" ] && [ "$step_pid" != "null" ]; then
local step_logs
step_logs=$(ci_get_step_logs "$_PR_CI_PIPELINE" "$step_pid" 2>/dev/null | tail -50) || step_logs=""
if [ -n "$step_logs" ]; then
ci_prompt_body="${ci_prompt_body}
Log tail (last 50 lines):
\`\`\`
${step_logs}
\`\`\`"
fi
fi
done <<< "$failed_children"
else
ci_prompt_body="${ci_prompt_body}
(no failed step details available)"
fi
else
# Track passing/other workflows
if [ -n "$passing_workflows" ]; then
passing_workflows="${passing_workflows}, ${wf_name}"
else
passing_workflows="${wf_name}"
fi
fi
wf_idx=$((wf_idx + 1))
done
fi
fi
fi
# Fallback: use legacy log fetch if per-workflow diagnostics unavailable
if [ "$built_diagnostics" = false ]; then
# Get CI logs from SQLite database if available
local ci_logs=""
if [ -n "$_PR_CI_PIPELINE" ] && [ -n "${FACTORY_ROOT:-}" ]; then
ci_logs=$(ci_get_logs "$_PR_CI_PIPELINE" 2>/dev/null | tail -50) || ci_logs=""
fi
local logs_section=""
if [ -n "$ci_logs" ]; then
ci_prompt_body="
logs_section="
CI Log Output (last 50 lines):
\`\`\`
${ci_logs}
\`\`\`"
fi
fi
local passing_line=""
if [ -n "$passing_workflows" ]; then
passing_line="
Passing workflows (do not modify): ${passing_workflows}
\`\`\`
"
fi
@ -531,10 +450,9 @@ Passing workflows (do not modify): ${passing_workflows}
Pipeline: #${_PR_CI_PIPELINE:-?}
Failure type: ${_PR_CI_FAILURE_TYPE:-unknown}
${passing_line}
Error log:
${_PR_CI_ERROR_LOG:-No logs available.}
${ci_prompt_body}
${_PR_CI_ERROR_LOG:-No logs available.}${logs_section}
Fix the issue, run tests, commit, rebase on ${PRIMARY_BRANCH}, and push:
git fetch ${remote} ${PRIMARY_BRANCH} && git rebase ${remote}/${PRIMARY_BRANCH}

View file

@ -1,27 +1,21 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# nomad/ — Agent Instructions
Nomad + Vault HCL for the factory's single-node cluster. These files are
the source of truth that `lib/init/nomad/cluster-up.sh` copies onto a
factory box under `/etc/nomad.d/` and `/etc/vault.d/` at init time.
This directory covers the **Nomad+Vault migration (Steps 0–5)** —
see issues #821–#992 for the step breakdown.
This directory covers the **Nomad+Vault migration (Steps 0–2)** —
see issues #821–#884 for the step breakdown.
## What lives here
| File/Dir | Deployed to | Owned by |
|---|---|---|
| `server.hcl` | `/etc/nomad.d/server.hcl` | agent role, bind, ports, `data_dir` (S0.2) |
| `client.hcl` | `/etc/nomad.d/client.hcl` | Docker driver cfg + `host_volume` declarations (S0.2); `allow_privileged = true` for woodpecker-agent Docker-in-Docker (S3-fix-5, #961) |
| `client.hcl` | `/etc/nomad.d/client.hcl` | Docker driver cfg + `host_volume` declarations (S0.2) |
| `vault.hcl` | `/etc/vault.d/vault.hcl` | Vault storage, listener, UI, `disable_mlock` (S0.3) |
| `jobs/forgejo.hcl` | submitted via `lib/init/nomad/deploy.sh` | Forgejo job; reads creds from Vault via consul-template stanza (S2.4) |
| `jobs/woodpecker-server.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI server; host networking, Vault KV for `WOODPECKER_AGENT_SECRET` + Forgejo OAuth creds (S3.1) |
| `jobs/woodpecker-agent.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI agent; host networking, `docker.sock` mount, Vault KV for `WOODPECKER_AGENT_SECRET`; `WOODPECKER_SERVER` uses `${attr.unique.network.ip-address}:9000` (Nomad interpolation) — port binds to LXC alloc IP, not localhost (S3.2, S3-fix-6, #964) |
| `jobs/agents.hcl` | submitted via `lib/init/nomad/deploy.sh` | All 7 agent roles (dev, review, gardener, planner, predictor, supervisor, architect) + llama variant; Vault-templated bot tokens via `service-agents` policy; `force_pull = false` — image is built locally by `bin/disinto --with agents`, no registry (S4.1, S4-fix-2, S4-fix-5, #955, #972, #978) |
| `jobs/staging.hcl` | submitted via `lib/init/nomad/deploy.sh` | Caddy file-server mounting `docker/` as `/srv/site:ro`; no Vault integration; **dynamic host port** (no static 80 — edge owns 80/443, collision fixed in S5-fix-7 #1018); edge discovers via Nomad service registration (S5.2, #989) |
| `jobs/chat.hcl` | submitted via `lib/init/nomad/deploy.sh` | Claude chat UI; custom `disinto/chat:local` image; sandbox hardening (cap_drop ALL, **tmpfs via mount block** not `tmpfs=` arg — S5-fix-5 #1012, pids_limit 128); Vault-templated OAuth secrets via `service-chat` policy (S5.2, #989); rate limiting removed (#1084); **workspace volume** `chat-workspace` host_volume bind-mounted to `/var/workspace` for Claude project access (#1027) — operator must register `host_volume "chat-workspace"` in `client.hcl` on each node |
| `jobs/edge.hcl` | submitted via `lib/init/nomad/deploy.sh` | Caddy reverse proxy + dispatcher sidecar; routes /forge, /woodpecker, /staging, /chat; uses `disinto/edge:local` image built by `bin/disinto --with edge`; **both Caddy and dispatcher tasks use `network_mode = "host"`** — upstreams are `127.0.0.1:<port>` (forgejo :3000, woodpecker :8000, chat :8080), not Docker hostnames (#1031, #1034); `FORGE_URL` rendered via Nomad service discovery template (not static env) to handle bridge vs. host network differences (#1034); dispatcher Vault secret path changed to `kv/data/disinto/shared/ops-repo` (#1041); Vault-templated ops-repo creds via `service-dispatcher` policy (S5.1, #988); `/staging/*` strips `/staging` prefix before proxying (#1079); WebSocket endpoint `/chat/ws` added for streaming (#1026) |
Nomad auto-merges every `*.hcl` under `-config=/etc/nomad.d/`, so the
split between `server.hcl` and `client.hcl` is for readability, not
@ -36,6 +30,8 @@ convention, KV path summary, and JWT-auth role bindings (S2.1/S2.3).
## Not yet implemented
- **Additional jobspecs** (woodpecker, agents, caddy) — Step 1 brought up
Forgejo; remaining services land in later steps.
- **TLS, ACLs, gossip encryption** — deliberately absent for now; land
alongside multi-node support.

View file

@ -49,12 +49,6 @@ client {
read_only = false
}
# staging static content (docker/ directory with images, HTML, etc.)
host_volume "site-content" {
path = "/srv/disinto/docker"
read_only = true
}
# disinto chat transcripts + attachments.
host_volume "chat-history" {
path = "/srv/disinto/chat-history"
@ -70,11 +64,11 @@ client {
# Docker task driver. `volumes.enabled = true` is required so jobspecs
# can mount host_volume declarations defined above. `allow_privileged`
# is true — woodpecker-agent requires `privileged = true` to access
# docker.sock and spawn CI pipeline containers.
# stays false — no factory workload needs privileged containers today,
# and flipping it is an audit-worthy change.
plugin "docker" {
config {
allow_privileged = true
allow_privileged = false
volumes {
enabled = true

View file

@ -1,207 +0,0 @@
# =============================================================================
# nomad/jobs/agents.hcl — All-role agent polling loop (Nomad service job)
#
# Part of the Nomad+Vault migration (S4.1, issue #955). Runs the main bot
# polling loop with all 7 agent roles (review, dev, gardener, architect,
# planner, predictor, supervisor) against the local llama server.
#
# Host_volume contract:
# This job mounts agent-data, project-repos, and ops-repo from
# nomad/client.hcl. Paths under /srv/disinto/* are created by
# lib/init/nomad/cluster-up.sh before any job references them.
#
# Vault integration (S4.1):
# - vault { role = "service-agents" } at group scope workload-identity
# JWT exchanged for a Vault token carrying the composite service-agents
# policy (vault/policies/service-agents.hcl), which grants read access
# to all 7 bot KV namespaces + vault bot + shared forge config.
# - template stanza renders per-bot FORGE_*_TOKEN + FORGE_PASS from Vault
# KV v2 at kv/disinto/bots/<role>.
# - Seeded on fresh boxes by tools/vault-seed-agents.sh.
#
# Not the runtime yet: docker-compose.yml is still the factory's live stack
# until cutover. This file exists so CI can validate it and S4.2 can wire
# `disinto init --backend=nomad --with agents` to `nomad job run` it.
# =============================================================================
job "agents" {
type = "service"
datacenters = ["dc1"]
group "agents" {
count = 1
# Vault workload identity (S4.1, issue #955)
# Composite role covering all 7 bot identities + vault bot. Role defined
# in vault/roles.yaml, policy in vault/policies/service-agents.hcl.
# Bound claim pins nomad_job_id = "agents".
vault {
role = "service-agents"
}
# No network port — agents are outbound-only (poll forgejo, call llama).
# No service discovery block — nothing health-checks agents over HTTP.
volume "agent-data" {
type = "host"
source = "agent-data"
read_only = false
}
volume "project-repos" {
type = "host"
source = "project-repos"
read_only = false
}
volume "ops-repo" {
type = "host"
source = "ops-repo"
read_only = true
}
# Conservative restart — fail fast to the scheduler.
restart {
attempts = 3
interval = "5m"
delay = "15s"
mode = "delay"
}
# Service registration
# Agents are outbound-only (poll forgejo, call llama) — no HTTP/TCP
# endpoint to probe. The Nomad native provider only supports tcp/http
# checks, not script checks. Registering without a check block means
# Nomad tracks health via task lifecycle: task running = healthy,
# task dead = service deregistered. This matches the docker-compose
# pgrep healthcheck semantics (process alive = healthy).
service {
name = "agents"
provider = "nomad"
}
task "agents" {
driver = "docker"
config {
image = "disinto/agents:local"
force_pull = false
# apparmor=unconfined matches docker-compose — Claude Code needs
# ptrace for node.js inspector and /proc access.
security_opt = ["apparmor=unconfined"]
}
volume_mount {
volume = "agent-data"
destination = "/home/agent/data"
read_only = false
}
volume_mount {
volume = "project-repos"
destination = "/home/agent/repos"
read_only = false
}
volume_mount {
volume = "ops-repo"
destination = "/home/agent/repos/_factory/disinto-ops"
read_only = true
}
# Non-secret env
env {
FORGE_URL = "http://forgejo:3000"
FORGE_REPO = "disinto-admin/disinto"
ANTHROPIC_BASE_URL = "http://10.10.10.1:8081"
ANTHROPIC_API_KEY = "sk-no-key-required"
CLAUDE_MODEL = "unsloth/Qwen3.5-35B-A3B"
AGENT_ROLES = "review,dev,gardener,architect,planner,predictor,supervisor"
POLL_INTERVAL = "300"
DISINTO_CONTAINER = "1"
PROJECT_NAME = "project"
PROJECT_REPO_ROOT = "/home/agent/repos/project"
CLAUDE_TIMEOUT = "7200"
# llama-specific Claude Code tuning
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC = "1"
CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS = "1"
CLAUDE_AUTOCOMPACT_PCT_OVERRIDE = "60"
}
# Vault-templated bot tokens (S4.1, issue #955)
# Renders per-bot FORGE_*_TOKEN + FORGE_PASS from Vault KV v2.
# Each `with secret ...` block reads one bot's KV path; the `else`
# branch emits short placeholders on fresh installs where the path
# is absent. Seed with tools/vault-seed-agents.sh.
#
# Placeholder values kept < 16 chars to avoid secret-scan CI failures.
# error_on_missing_key = false prevents template-pending hangs.
template {
destination = "secrets/bots.env"
env = true
change_mode = "restart"
error_on_missing_key = false
data = <<EOT
{{- with secret "kv/data/disinto/bots/dev" -}}
FORGE_TOKEN={{ .Data.data.token }}
FORGE_PASS={{ .Data.data.pass }}
{{- else -}}
# WARNING: run tools/vault-seed-agents.sh
FORGE_TOKEN=seed-me
FORGE_PASS=seed-me
{{- end }}
{{ with secret "kv/data/disinto/bots/review" -}}
FORGE_REVIEW_TOKEN={{ .Data.data.token }}
{{- else -}}
FORGE_REVIEW_TOKEN=seed-me
{{- end }}
{{ with secret "kv/data/disinto/bots/gardener" -}}
FORGE_GARDENER_TOKEN={{ .Data.data.token }}
{{- else -}}
FORGE_GARDENER_TOKEN=seed-me
{{- end }}
{{ with secret "kv/data/disinto/bots/architect" -}}
FORGE_ARCHITECT_TOKEN={{ .Data.data.token }}
{{- else -}}
FORGE_ARCHITECT_TOKEN=seed-me
{{- end }}
{{ with secret "kv/data/disinto/bots/planner" -}}
FORGE_PLANNER_TOKEN={{ .Data.data.token }}
{{- else -}}
FORGE_PLANNER_TOKEN=seed-me
{{- end }}
{{ with secret "kv/data/disinto/bots/predictor" -}}
FORGE_PREDICTOR_TOKEN={{ .Data.data.token }}
{{- else -}}
FORGE_PREDICTOR_TOKEN=seed-me
{{- end }}
{{ with secret "kv/data/disinto/bots/supervisor" -}}
FORGE_SUPERVISOR_TOKEN={{ .Data.data.token }}
{{- else -}}
FORGE_SUPERVISOR_TOKEN=seed-me
{{- end }}
{{ with secret "kv/data/disinto/bots/vault" -}}
FORGE_VAULT_TOKEN={{ .Data.data.token }}
{{- else -}}
FORGE_VAULT_TOKEN=seed-me
{{- end }}
EOT
}
# Agents run Claude/llama sessions — need CPU + memory headroom.
resources {
cpu = 500
memory = 1024
}
}
}
}

View file

@ -1,188 +0,0 @@
# =============================================================================
# nomad/jobs/chat.hcl — Claude chat UI (Nomad service job)
#
# Part of the Nomad+Vault migration (S5.2, issue #989). Lightweight service
# job for the Claude chat UI with sandbox hardening (#706).
#
# Build:
# Custom image built from docker/chat/Dockerfile as disinto/chat:local
# (same :local pattern as disinto/agents:local).
#
# Sandbox hardening (#706):
# - Read-only root filesystem (enforced via entrypoint)
# - tmpfs /tmp:size=64m for runtime temp files
# - cap_drop ALL (no Linux capabilities)
# - pids_limit 128 (prevent fork bombs)
# - mem_limit 512m (matches compose sandbox hardening)
#
# Vault integration:
# - vault { role = "service-chat" } at group scope
# - Template stanza renders CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET,
# FORWARD_AUTH_SECRET from kv/disinto/shared/chat
# - Seeded on fresh boxes by tools/vault-seed-chat.sh
#
# Host volumes:
# - chat-history → /var/lib/chat/history (persists conversation history)
# - workspace → /var/workspace (project working tree for Claude access, #1027)
#
# Client-side host_volume registration (operator prerequisite):
# In nomad/client.hcl on each Nomad node:
# host_volume "chat-workspace" {
# path = "/var/disinto/chat-workspace"
# read_only = false
# }
# Nodes without the host_volume registered will not schedule the workspace mount.
#
# Not the runtime yet: docker-compose.yml is still the factory's live stack
# until cutover. This file exists so CI can validate it and S5.2 can wire
# `disinto init --backend=nomad --with chat` to `nomad job run` it.
# =============================================================================
job "chat" {
type = "service"
datacenters = ["dc1"]
group "chat" {
count = 1
# Vault workload identity (S5.2, issue #989)
# Role `service-chat` defined in vault/roles.yaml, policy in
# vault/policies/service-chat.hcl. Bound claim pins nomad_job_id = "chat".
vault {
role = "service-chat"
}
# Network
# External port 8080 for chat UI access (via edge proxy or direct).
network {
port "http" {
static = 8080
to = 8080
}
}
# Host volumes
# chat-history volume: declared in nomad/client.hcl, path
# /srv/disinto/chat-history on the factory box.
volume "chat-history" {
type = "host"
source = "chat-history"
read_only = false
}
# Workspace volume: bind-mounted project working tree for Claude access (#1027)
# Source is a fixed logical name resolved by client-side host_volume registration.
volume "workspace" {
type = "host"
source = "chat-workspace"
read_only = false
}
# Metadata (per-dispatch env var via NOMAD_META_*)
# CHAT_WORKSPACE_DIR: project working tree path, injected into task env
# as NOMAD_META_CHAT_WORKSPACE_DIR for the workspace volume mount target.
meta {
CHAT_WORKSPACE_DIR = "/var/workspace"
}
# Restart policy
restart {
attempts = 3
interval = "5m"
delay = "15s"
mode = "delay"
}
# Service registration
service {
name = "chat"
port = "http"
provider = "nomad"
check {
type = "http"
path = "/health"
interval = "10s"
timeout = "3s"
}
}
task "chat" {
driver = "docker"
config {
image = "disinto/chat:local"
force_pull = false
# Sandbox hardening (#706): cap_drop ALL, pids_limit 128, tmpfs /tmp
# ReadonlyRootfs enforced via entrypoint script (fails if running as root)
cap_drop = ["ALL"]
pids_limit = 128
mount {
type = "tmpfs"
target = "/tmp"
readonly = false
tmpfs_options {
size = 67108864 # 64MB in bytes
}
}
# Security options for sandbox hardening
# apparmor=unconfined needed for Claude CLI ptrace access
# no-new-privileges prevents privilege escalation
security_opt = ["apparmor=unconfined", "no-new-privileges"]
}
# Volume mounts
# Mount chat-history for conversation persistence
volume_mount {
volume = "chat-history"
destination = "/var/lib/chat/history"
read_only = false
}
# Mount workspace directory for Claude code access (#1027)
# Binds project working tree so Claude can inspect/modify code
volume_mount {
volume = "workspace"
destination = "/var/workspace"
read_only = false
}
# Environment: secrets from Vault (S5.2)
# CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, FORWARD_AUTH_SECRET
# rendered from kv/disinto/shared/chat via template stanza.
env {
FORGE_URL = "http://forgejo:3000"
CHAT_WORKSPACE_DIR = "${NOMAD_META_CHAT_WORKSPACE_DIR}"
}
# Vault-templated secrets (S5.2, issue #989)
# Renders chat-secrets.env from Vault KV v2 at kv/disinto/shared/chat.
# Placeholder values kept < 16 chars to avoid secret-scan CI failures.
template {
destination = "secrets/chat-secrets.env"
env = true
change_mode = "restart"
error_on_missing_key = false
data = <<EOT
{{- with secret "kv/data/disinto/shared/chat" -}}
CHAT_OAUTH_CLIENT_ID={{ .Data.data.chat_oauth_client_id }}
CHAT_OAUTH_CLIENT_SECRET={{ .Data.data.chat_oauth_client_secret }}
FORWARD_AUTH_SECRET={{ .Data.data.forward_auth_secret }}
{{- else -}}
# WARNING: run tools/vault-seed-chat.sh
CHAT_OAUTH_CLIENT_ID=seed-me
CHAT_OAUTH_CLIENT_SECRET=seed-me
FORWARD_AUTH_SECRET=seed-me
{{- end -}}
EOT
}
# Sandbox hardening (S5.2, #706)
# Memory = 512MB (matches docker-compose sandbox hardening)
resources {
cpu = 200
memory = 512
}
}
}
}

View file

@ -1,285 +0,0 @@
# =============================================================================
# nomad/jobs/edge.hcl — Edge proxy (Caddy + dispatcher sidecar) (Nomad service job)
#
# Part of the Nomad+Vault migration (S5.1, issue #988). Caddy reverse proxy
# routes traffic to Forgejo, Woodpecker, staging, and chat services. The
# dispatcher sidecar polls disinto-ops for vault actions and dispatches them
# via Nomad batch jobs.
#
# Host networking (issue #1031):
# Caddy uses network_mode = "host" so upstreams are reached at
# 127.0.0.1:<port> (forgejo :3000, woodpecker :8000, chat :8080).
# Staging uses Nomad service discovery (S5-fix-7, issue #1018).
#
# Host_volume contract:
# This job mounts caddy-data from nomad/client.hcl. Path
# /srv/disinto/caddy-data is created by lib/init/nomad/cluster-up.sh before
# any job references it. Keep the `source = "caddy-data"` below in sync
# with the host_volume stanza in client.hcl.
#
# Build step (S5.1):
# docker/edge/Dockerfile is custom (adds bash, jq, curl, git, docker-cli,
# python3, openssh-client, autossh to caddy:latest). Build as
# disinto/edge:local using the same pattern as disinto/agents:local.
# Command: docker build -t disinto/edge:local -f docker/edge/Dockerfile docker/edge
#
# Not the runtime yet: docker-compose.yml is still the factory's live stack
# until cutover. This file exists so CI can validate it and S5.2 can wire
# `disinto init --backend=nomad --with edge` to `nomad job run` it.
# =============================================================================
job "edge" {
type = "service"
datacenters = ["dc1"]
group "edge" {
count = 1
# Vault workload identity for dispatcher (S5.1, issue #988)
# Service role for dispatcher task to fetch vault actions from KV v2.
# Role defined in vault/roles.yaml, policy in vault/policies/dispatcher.hcl.
vault {
role = "service-dispatcher"
}
# Network ports (S5.1, issue #988)
# Caddy listens on :80 and :443. Expose both on the host.
network {
port "http" {
static = 80
to = 80
}
port "https" {
static = 443
to = 443
}
}
# Host-volume mounts (S5.1, issue #988)
# caddy-data: ACME certificates, Caddy config state.
volume "caddy-data" {
type = "host"
source = "caddy-data"
read_only = false
}
# ops-repo: disinto-ops clone for vault actions polling.
volume "ops-repo" {
type = "host"
source = "ops-repo"
read_only = false
}
# Conservative restart policy
# Caddy should be stable; dispatcher may restart on errors.
restart {
attempts = 3
interval = "5m"
delay = "15s"
mode = "delay"
}
# Service registration
# Caddy is an HTTP reverse proxy; health check on port 80.
service {
name = "edge"
port = "http"
provider = "nomad"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
}
}
# Caddy task (S5.1, issue #988)
task "caddy" {
driver = "docker"
config {
# Use pre-built disinto/edge:local image (custom Dockerfile adds
# bash, jq, curl, git, docker-cli, python3, openssh-client, autossh).
image = "disinto/edge:local"
force_pull = false
network_mode = "host"
ports = ["http", "https"]
# apparmor=unconfined matches docker-compose - needed for autossh
# in the entrypoint script.
security_opt = ["apparmor=unconfined"]
}
# Mount caddy-data volume for ACME state and config directory.
# Caddyfile is mounted at /etc/caddy/Caddyfile by entrypoint-edge.sh.
volume_mount {
volume = "caddy-data"
destination = "/data"
read_only = false
}
# Caddyfile via Nomad service discovery (S5-fix-7, issue #1018)
# Renders staging upstream from Nomad service registration instead of
# hardcoded staging:80. Caddy picks up /local/Caddyfile via entrypoint.
# Forge URL via Nomad service discovery (issue #1034) resolves forgejo
# service address/port dynamically for bridge network compatibility.
template {
destination = "local/forge.env"
env = true
change_mode = "restart"
data = <<EOT
{{ range service "forgejo" -}}
FORGE_URL=http://{{ .Address }}:{{ .Port }}
{{- end }}
EOT
}
template {
destination = "local/Caddyfile"
change_mode = "restart"
data = <<EOT
# Caddyfile - edge proxy configuration (Nomad-rendered)
# Staging upstream discovered via Nomad service registration.
:80 {
# Redirect root to Forgejo
handle / {
redir /forge/ 302
}
# Reverse proxy to Forgejo
handle /forge/* {
reverse_proxy 127.0.0.1:3000
}
# Reverse proxy to Woodpecker CI
handle /ci/* {
reverse_proxy 127.0.0.1:8000
}
# Reverse proxy to staging dynamic port via Nomad service discovery
handle /staging/* {
uri strip_prefix /staging
{{ range nomadService "staging" }} reverse_proxy {{ .Address }}:{{ .Port }}
{{ end }} }
# Chat service reverse proxy to disinto-chat backend (#705)
# OAuth routes bypass forward_auth - unauthenticated users need these (#709)
handle /chat/login {
reverse_proxy 127.0.0.1:8080
}
handle /chat/oauth/callback {
reverse_proxy 127.0.0.1:8080
}
# WebSocket endpoint for streaming (#1026)
handle /chat/ws {
header_up Upgrade $http.upgrade
header_up Connection $http.connection
reverse_proxy 127.0.0.1:8080
}
# Defense-in-depth: forward_auth stamps X-Forwarded-User from session (#709)
handle /chat/* {
forward_auth 127.0.0.1:8080 {
uri /chat/auth/verify
copy_headers X-Forwarded-User
header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET}
}
reverse_proxy 127.0.0.1:8080
}
}
EOT
}
# Non-secret env
env {
FORGE_REPO = "disinto-admin/disinto"
DISINTO_CONTAINER = "1"
PROJECT_NAME = "disinto"
}
# Caddy needs CPU + memory headroom for reverse proxy work.
resources {
cpu = 200
memory = 256
}
}
# Dispatcher task (S5.1, issue #988)
task "dispatcher" {
driver = "docker"
config {
# Use same disinto/agents:local image as other agents.
image = "disinto/agents:local"
force_pull = false
network_mode = "host"
# apparmor=unconfined matches docker-compose.
security_opt = ["apparmor=unconfined"]
# Mount docker.sock via bind-volume (not host volume) for legacy
# docker backend compat. Nomad host volumes require named volumes
# from client.hcl; socket files cannot be host volumes.
volumes = ["/var/run/docker.sock:/var/run/docker.sock:ro"]
}
# Mount ops-repo for vault actions polling.
volume_mount {
volume = "ops-repo"
destination = "/home/agent/repos/disinto-ops"
read_only = false
}
# Forge URL via Nomad service discovery (issue #1034)
# Resolves forgejo service address/port dynamically for bridge network
# compatibility. Template-scoped to dispatcher task (Nomad doesn't
# propagate templates across tasks).
template {
destination = "local/forge.env"
env = true
change_mode = "restart"
data = <<EOT
{{ range service "forgejo" -}}
FORGE_URL=http://{{ .Address }}:{{ .Port }}
{{- end }}
EOT
}
# Vault-templated secrets (S5.1, issue #988)
# Renders FORGE_TOKEN from Vault KV v2 for ops repo access.
template {
destination = "secrets/dispatcher.env"
env = true
change_mode = "restart"
error_on_missing_key = false
data = <<EOT
{{- with secret "kv/data/disinto/shared/ops-repo" -}}
FORGE_TOKEN={{ .Data.data.token }}
{{- else -}}
# WARNING: kv/disinto/shared/ops-repo is empty - run tools/vault-seed-ops-repo.sh
FORGE_TOKEN=seed-me
{{- end }}
EOT
}
# Non-secret env
env {
DISPATCHER_BACKEND = "nomad"
FORGE_REPO = "disinto-admin/disinto"
FORGE_OPS_REPO = "disinto-admin/disinto-ops"
PRIMARY_BRANCH = "main"
DISINTO_CONTAINER = "1"
OPS_REPO_ROOT = "/home/agent/repos/disinto-ops"
FORGE_ADMIN_USERS = "vault-bot,admin"
}
# Dispatcher is lightweight - minimal CPU + memory.
resources {
cpu = 100
memory = 256
}
}
}
}

View file

@ -154,17 +154,10 @@ job "forgejo" {
# this file. "seed-me" is < 16 chars and still distinctive enough
# to surface in a `grep FORGEJO__security__` audit. The template
# comment below carries the operator-facing fix pointer.
# `error_on_missing_key = false` stops consul-template from blocking
# the alloc on template-pending when the Vault KV path exists but a
# referenced key is absent (or the path itself is absent and the
# else-branch placeholders are used). Without this, a fresh-LXC
# `disinto init --with forgejo` against an empty Vault hangs on
# template-pending until deploy.sh times out (issue #912, bug #4).
template {
destination = "secrets/forgejo.env"
env = true
change_mode = "restart"
error_on_missing_key = false
data = <<EOT
{{- with secret "kv/data/disinto/shared/forgejo" -}}
FORGEJO__security__SECRET_KEY={{ .Data.data.secret_key }}

View file

@ -1,86 +0,0 @@
# =============================================================================
# nomad/jobs/staging.hcl - Staging file server (Nomad service job)
#
# Part of the Nomad+Vault migration (S5.2, issue #989). Lightweight service job
# for the staging file server using Caddy as a static file server.
#
# Mount contract:
# This job mounts the `docker/` directory as `/srv/site` (read-only).
# The docker/ directory contains static content (images, HTML, etc.)
# served to staging environment users.
#
# Network:
#   Dynamic host port - edge discovers via Nomad service registration.
# No static port to avoid collisions with edge (which owns 80/443).
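#   After deploy, the registered address/port can be checked with the Nomad
#   CLI (sketch): nomad service info staging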
#
# Not the runtime yet: docker-compose.yml is still the factory's live stack
# until cutover. This file exists so CI can validate it and S5.2 can wire
# `disinto init --backend=nomad --with staging` to `nomad job run` it.
# =============================================================================
job "staging" {
type = "service"
datacenters = ["dc1"]
group "staging" {
count = 1
# No Vault integration needed - no secrets required (static file server)
# Internal service - dynamic host port. Edge discovers via Nomad service.
network {
port "http" {
to = 80
}
}
volume "site-content" {
type = "host"
source = "site-content"
read_only = true
}
restart {
attempts = 3
interval = "5m"
delay = "15s"
mode = "delay"
}
service {
name = "staging"
port = "http"
provider = "nomad"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
}
}
task "staging" {
driver = "docker"
config {
image = "caddy:alpine"
ports = ["http"]
command = "caddy"
args = ["file-server", "--root", "/srv/site"]
}
# Mount docker/ directory as /srv/site:ro (static content)
volume_mount {
volume = "site-content"
destination = "/srv/site"
read_only = true
}
resources {
cpu = 100
memory = 256
}
}
}
}

View file

@ -1,137 +0,0 @@
# =============================================================================
# nomad/jobs/vault-runner.hcl - Parameterized batch job for vault action dispatch
#
# Part of the Nomad+Vault migration (S5.3, issue #990). Replaces the
# `docker run --rm vault-runner-${action_id}` pattern in dispatcher.sh with
# a Nomad-native parameterized batch job. Dispatched by the edge dispatcher
# (S5.4) via `nomad job dispatch`.
#
# Parameterized meta:
#   action_id   - vault action identifier (used by entrypoint-runner.sh)
#   secrets_csv - comma-separated secret names (e.g. "GITHUB_TOKEN,DEPLOY_KEY")
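#
# Dispatch sketch (the action id and secret names below are illustrative):
#   nomad job dispatch \
#     -meta action_id=example-action \
#     -meta secrets_csv="GITHUB_TOKEN,DEPLOY_KEY" \
#     vault-runner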
#
# Vault integration (approach A - pre-defined templates):
# All 6 known runner secrets are rendered via template stanzas with
# error_on_missing_key = false. Secrets not granted by the dispatch's
# Vault policies render as empty strings. The dispatcher (S5.4) sets
# vault { policies = [...] } per-dispatch based on the action TOML's
# secrets=[...] list, scoping access to only the declared secrets.
#
# Cleanup: Nomad garbage-collects completed batch dispatches automatically.
# =============================================================================
job "vault-runner" {
type = "batch"
datacenters = ["dc1"]
parameterized {
meta_required = ["action_id", "secrets_csv"]
}
group "runner" {
count = 1
# Vault workload identity
# Per-dispatch policies are composed by the dispatcher (S5.4) based on the
# action TOML's secrets=[...] list. Each policy grants read access to
# exactly one kv/data/disinto/runner/<NAME> path. Roles defined in
# vault/roles.yaml (runner-<NAME>), policies in vault/policies/.
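#   As a sketch, each per-secret policy is expected to look roughly like:
#     path "kv/data/disinto/runner/GITHUB_TOKEN" {
#       capabilities = ["read"]
#     }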
vault {}
volume "ops-repo" {
type = "host"
source = "ops-repo"
read_only = true
}
# No restart for batch - fail fast, let the dispatcher handle retries.
restart {
attempts = 0
mode = "fail"
}
task "runner" {
driver = "docker"
config {
image = "disinto/agents:local"
force_pull = false
entrypoint = ["bash"]
args = [
"/home/agent/disinto/docker/runner/entrypoint-runner.sh",
"${NOMAD_META_action_id}",
]
}
volume_mount {
volume = "ops-repo"
destination = "/home/agent/ops"
read_only = true
}
# Non-secret env
env {
DISINTO_CONTAINER = "1"
FACTORY_ROOT = "/home/agent/disinto"
OPS_REPO_ROOT = "/home/agent/ops"
}
# Vault-templated runner secrets (approach A)
# Pre-defined templates for all 6 known runner secrets. Each renders
# from kv/data/disinto/runner/<NAME>. Secrets not granted by the
# dispatch's Vault policies produce empty env vars (harmless).
# error_on_missing_key = false prevents template-pending hangs when
# a secret path is absent or the policy doesn't grant access.
#
# Placeholder values kept < 16 chars to avoid secret-scan CI failures.
template {
destination = "secrets/runner.env"
env = true
error_on_missing_key = false
data = <<EOT
{{- with secret "kv/data/disinto/runner/GITHUB_TOKEN" -}}
GITHUB_TOKEN={{ .Data.data.value }}
{{- else -}}
GITHUB_TOKEN=
{{- end }}
{{ with secret "kv/data/disinto/runner/CODEBERG_TOKEN" -}}
CODEBERG_TOKEN={{ .Data.data.value }}
{{- else -}}
CODEBERG_TOKEN=
{{- end }}
{{ with secret "kv/data/disinto/runner/CLAWHUB_TOKEN" -}}
CLAWHUB_TOKEN={{ .Data.data.value }}
{{- else -}}
CLAWHUB_TOKEN=
{{- end }}
{{ with secret "kv/data/disinto/runner/DEPLOY_KEY" -}}
DEPLOY_KEY={{ .Data.data.value }}
{{- else -}}
DEPLOY_KEY=
{{- end }}
{{ with secret "kv/data/disinto/runner/NPM_TOKEN" -}}
NPM_TOKEN={{ .Data.data.value }}
{{- else -}}
NPM_TOKEN=
{{- end }}
{{ with secret "kv/data/disinto/runner/DOCKER_HUB_TOKEN" -}}
DOCKER_HUB_TOKEN={{ .Data.data.value }}
{{- else -}}
DOCKER_HUB_TOKEN=
{{- end }}
EOT
}
# Formula execution headroom matches agents.hcl baseline.
resources {
cpu = 500
memory = 1024
}
}
}
}

View file

@ -1,147 +0,0 @@
# =============================================================================
# nomad/jobs/woodpecker-agent.hcl - Woodpecker CI agent (Nomad service job)
#
# Part of the Nomad+Vault migration (S3.2, issue #935).
# Drop-in for the current docker-compose setup with host networking +
# docker.sock mount, enabling the agent to spawn containers via the
# mounted socket.
#
# Host networking:
# Uses network_mode = "host" to match the compose setup. The Woodpecker
# server gRPC endpoint is addressed via Nomad service discovery using
# the host's IP address (10.10.10.x:9000), since the server's port
# binding in Nomad binds to the allocation's IP, not localhost.
#
# Vault integration:
#   - vault { role = "service-woodpecker-agent" } at the group scope - the
# task's workload-identity JWT is exchanged for a Vault token carrying
# the policy named on that role. Role + policy are defined in
# vault/roles.yaml + vault/policies/service-woodpecker.hcl.
# - template stanza pulls WOODPECKER_AGENT_SECRET from Vault KV v2
# at kv/disinto/shared/woodpecker and writes it to secrets/agent.env.
# Seeded on fresh boxes by tools/vault-seed-woodpecker.sh.
# =============================================================================
job "woodpecker-agent" {
type = "service"
datacenters = ["dc1"]
group "woodpecker-agent" {
count = 1
# Vault workload identity
# `role = "service-woodpecker-agent"` is defined in vault/roles.yaml and
# applied by tools/vault-apply-roles.sh. The role's bound
#   claim pins nomad_job_id = "woodpecker-agent" - renaming this
# jobspec's `job "woodpecker-agent"` without updating vault/roles.yaml
# will make token exchange fail at placement with a "claim mismatch"
# error.
vault {
role = "service-woodpecker-agent"
}
# Health check port: static 3333 for Nomad service discovery. The agent
# exposes :3333/healthz for Nomad to probe.
network {
port "healthz" {
static = 3333
}
}
# Native Nomad service discovery for the health check endpoint.
service {
name = "woodpecker-agent"
port = "healthz"
provider = "nomad"
check {
type = "http"
path = "/healthz"
interval = "10s"
timeout = "3s"
}
}
# Conservative restart policy - fail fast to the scheduler instead of
# spinning on a broken image/config. 3 attempts over 5m, then back off.
restart {
attempts = 3
interval = "5m"
delay = "15s"
mode = "delay"
}
task "woodpecker-agent" {
driver = "docker"
config {
image = "woodpeckerci/woodpecker-agent:v3"
network_mode = "host"
privileged = true
volumes = ["/var/run/docker.sock:/var/run/docker.sock"]
}
# Non-secret env - server address, gRPC security, concurrency limit,
# and health check endpoint. Nothing sensitive here.
#
# WOODPECKER_SERVER uses Nomad's attribute template to get the host's
# IP address (10.10.10.x). The server's gRPC port 9000 is bound via
# Nomad's port stanza to the allocation's IP (not localhost), so the
# agent must use the LXC's eth0 IP, not 127.0.0.1.
env {
WOODPECKER_SERVER = "${attr.unique.network.ip-address}:9000"
WOODPECKER_GRPC_SECURE = "false"
WOODPECKER_GRPC_KEEPALIVE_TIME = "10s"
WOODPECKER_GRPC_KEEPALIVE_TIMEOUT = "20s"
WOODPECKER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS = "true"
WOODPECKER_MAX_WORKFLOWS = "1"
WOODPECKER_HEALTHCHECK_ADDR = ":3333"
}
# Vault-templated agent secret
# Renders <task-dir>/secrets/agent.env (per-alloc secrets dir,
# never on disk on the host root filesystem, never in `nomad job
# inspect` output). `env = true` merges WOODPECKER_AGENT_SECRET
# from the file into the task environment.
#
# Vault path: `kv/data/disinto/shared/woodpecker`. The literal
# `/data/` segment is required by consul-template for KV v2 mounts.
#
# Empty-Vault fallback (`with ... else ...`): on a fresh LXC where
# the KV path is absent, consul-template's `with` short-circuits to
# the `else` branch. Emitting a visible placeholder means the
# container still boots, but with an obviously-bad secret that an
# operator will spot better than the agent failing silently with
# auth errors. Seed the path with tools/vault-seed-woodpecker.sh
# to replace the placeholder.
#
# Placeholder values are kept short on purpose: the repo-wide
# secret-scan (.woodpecker/secret-scan.yml + lib/secret-scan.sh)
# flags `TOKEN=<16+ non-space chars>` as a plaintext secret, so a
# descriptive long placeholder would fail CI on every PR that touched
# this file. "seed-me" is < 16 chars and still distinctive enough
# to surface in a `grep WOODPECKER` audit.
template {
destination = "secrets/agent.env"
env = true
change_mode = "restart"
error_on_missing_key = false
data = <<EOT
{{- with secret "kv/data/disinto/shared/woodpecker" -}}
WOODPECKER_AGENT_SECRET={{ .Data.data.agent_secret }}
{{- else -}}
# WARNING: kv/disinto/shared/woodpecker is empty - run tools/vault-seed-woodpecker.sh
WOODPECKER_AGENT_SECRET=seed-me
{{- end -}}
EOT
}
# Baseline - tune once we have real usage numbers under Nomad.
# Conservative limits so an unhealthy agent can't starve the node.
resources {
cpu = 200
memory = 256
}
}
}
}

View file

@ -1,173 +0,0 @@
# =============================================================================
# nomad/jobs/woodpecker-server.hcl - Woodpecker CI server (Nomad service job)
#
# Part of the Nomad+Vault migration (S3.1, issue #934).
# Runs the Woodpecker CI web UI + gRPC endpoint as a Nomad service job,
# reading its Forgejo OAuth + agent secret from Vault via workload identity.
#
# Host_volume contract:
# This job mounts the `woodpecker-data` host_volume declared in
# nomad/client.hcl. That volume is backed by /srv/disinto/woodpecker-data
# on the factory box, created by lib/init/nomad/cluster-up.sh before any
# job references it. Keep the `source = "woodpecker-data"` below in sync
# with the host_volume stanza in client.hcl — drift = scheduling failures.
#
# Vault integration (S2.4 pattern):
#   - vault { role = "service-woodpecker" } at the group scope - the task's
# workload-identity JWT is exchanged for a Vault token carrying the
# policy named on that role. Role + policy are defined in
# vault/roles.yaml + vault/policies/service-woodpecker.hcl.
# - template { destination = "secrets/wp.env" env = true } pulls
# WOODPECKER_AGENT_SECRET, WOODPECKER_FORGEJO_CLIENT, and
# WOODPECKER_FORGEJO_SECRET out of Vault KV v2 at
# kv/disinto/shared/woodpecker and merges them into the task env.
# Agent secret seeded by tools/vault-seed-woodpecker.sh; OAuth
# client/secret seeded by S3.3 (wp-oauth-register.sh).
# - Non-secret env (DB driver, Forgejo URL, host URL, open registration)
#     stays inline below - not sensitive, not worth round-tripping through
# Vault.
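#
#   Manual seeding sketch for kv/disinto/shared/woodpecker (illustrative; the
#   seeder scripts named above are the supported path, and the key names
#   mirror the template stanza below):
#     vault kv put kv/disinto/shared/woodpecker \
#       agent_secret=<value> forgejo_client=<value> forgejo_secret=<value>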
#
# Not the runtime yet: docker-compose.yml is still the factory's live stack
# until cutover. This file exists so CI can validate it and S3.4 can wire
# `disinto init --backend=nomad --with woodpecker` to `nomad job run` it.
# =============================================================================
job "woodpecker-server" {
type = "service"
datacenters = ["dc1"]
group "woodpecker-server" {
count = 1
# Vault workload identity (S2.4 pattern)
# `role = "service-woodpecker"` is defined in vault/roles.yaml and
# applied by tools/vault-apply-roles.sh (S2.3). The role's bound
#   claim pins nomad_job_id = "woodpecker" - note the job_id in
# vault/roles.yaml is "woodpecker" (matching the roles.yaml entry),
# but the actual Nomad job name here is "woodpecker-server". Update
# vault/roles.yaml job_id to "woodpecker-server" if the bound claim
# enforces an exact match at placement.
vault {
role = "service-woodpecker"
}
# HTTP UI (:8000) + gRPC agent endpoint (:9000). Static ports match
# docker-compose's published ports so the rest of the factory keeps
# reaching woodpecker at the same host:port during and after cutover.
network {
port "http" {
static = 8000
to = 8000
}
port "grpc" {
static = 9000
to = 9000
}
}
# Host-volume mount: declared in nomad/client.hcl, path
# /srv/disinto/woodpecker-data on the factory box.
volume "woodpecker-data" {
type = "host"
source = "woodpecker-data"
read_only = false
}
# Conservative restart policy - fail fast to the scheduler instead of
# spinning on a broken image/config. 3 attempts over 5m, then back off.
restart {
attempts = 3
interval = "5m"
delay = "15s"
mode = "delay"
}
# Native Nomad service discovery (no Consul in this factory cluster).
# Health check gates the service as healthy only after the HTTP API is
# up; initial_status is deliberately unset so Nomad waits for the first
# probe to pass before marking the allocation healthy on boot.
service {
name = "woodpecker"
port = "http"
provider = "nomad"
check {
type = "http"
path = "/healthz"
interval = "10s"
timeout = "3s"
}
}
task "woodpecker-server" {
driver = "docker"
config {
image = "woodpeckerci/woodpecker-server:v3"
ports = ["http", "grpc"]
}
volume_mount {
volume = "woodpecker-data"
destination = "/var/lib/woodpecker"
read_only = false
}
# Non-secret env - Forgejo integration flags, public URL, DB driver.
# Nothing sensitive here, so this stays inline. Secret-bearing env
# (agent secret, OAuth client/secret) lives in the template stanza
# below and is merged into task env.
env {
WOODPECKER_FORGEJO = "true"
WOODPECKER_FORGEJO_URL = "http://forgejo:3000"
WOODPECKER_HOST = "http://woodpecker:8000"
WOODPECKER_OPEN = "true"
WOODPECKER_DATABASE_DRIVER = "sqlite3"
WOODPECKER_DATABASE_DATASOURCE = "/var/lib/woodpecker/woodpecker.sqlite"
}
# Vault-templated secrets env (S2.4 pattern)
# Renders `<task-dir>/secrets/wp.env` (per-alloc secrets dir, never on
# disk on the host root filesystem). `env = true` merges every KEY=VAL
# line into the task environment. `change_mode = "restart"` re-runs the
# task whenever a watched secret's value in Vault changes.
#
# Vault path: `kv/data/disinto/shared/woodpecker`. The literal `/data/`
# segment is required by consul-template for KV v2 mounts.
#
# Empty-Vault fallback (`with ... else ...`): on a fresh LXC where
# the KV path is absent, consul-template's `with` short-circuits to
# the `else` branch. Emitting visible placeholders means the container
# still boots, but with obviously-bad secrets. Seed the path with
# tools/vault-seed-woodpecker.sh (agent_secret) and S3.3's
# wp-oauth-register.sh (forgejo_client, forgejo_secret).
#
# Placeholder values are kept short on purpose: the repo-wide
# secret-scan flags `TOKEN=<16+ non-space chars>` as a plaintext
# secret; "seed-me" is < 16 chars and still distinctive.
template {
destination = "secrets/wp.env"
env = true
change_mode = "restart"
error_on_missing_key = false
data = <<EOT
{{- with secret "kv/data/disinto/shared/woodpecker" -}}
WOODPECKER_AGENT_SECRET={{ .Data.data.agent_secret }}
WOODPECKER_FORGEJO_CLIENT={{ .Data.data.forgejo_client }}
WOODPECKER_FORGEJO_SECRET={{ .Data.data.forgejo_secret }}
{{- else -}}
# WARNING: kv/disinto/shared/woodpecker is empty - run tools/vault-seed-woodpecker.sh + S3.3
WOODPECKER_AGENT_SECRET=seed-me
WOODPECKER_FORGEJO_CLIENT=seed-me
WOODPECKER_FORGEJO_SECRET=seed-me
{{- end -}}
EOT
}
resources {
cpu = 300
memory = 512
}
}
}
}

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Planner Agent
**Role**: Strategic planning using a Prerequisite Tree (Theory of Constraints),

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Predictor Agent
**Role**: Abstract adversary (the "goblin"). Runs a 2-step formula

View file

@ -59,23 +59,6 @@ check_pipeline_stall = false
# compact_pct = 60
# poll_interval = 60
# Edge routing mode (default: subpath)
#
# Controls how services are exposed through the edge proxy.
# subpath — all services under <project>.disinto.ai/{forge,ci,chat,staging}
# subdomain — per-service subdomains: forge.<project>, ci.<project>, chat.<project>
#
# Set to "subdomain" if subpath routing causes unfixable issues (redirect loops,
# OAuth callback mismatches, cookie collisions). See docs/edge-routing-fallback.md.
#
# Set in .env (not TOML) since it's consumed by docker-compose and shell scripts:
# EDGE_ROUTING_MODE=subdomain
#
# In subdomain mode, `disinto edge register` also writes:
# EDGE_TUNNEL_FQDN_FORGE=forge.<project>.disinto.ai
# EDGE_TUNNEL_FQDN_CI=ci.<project>.disinto.ai
# EDGE_TUNNEL_FQDN_CHAT=chat.<project>.disinto.ai
# [mirrors]
# github = "git@github.com:johba/disinto.git"
# codeberg = "git@codeberg.org:johba/disinto.git"

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Review Agent
**Role**: AI-powered PR review — post structured findings and formal

View file

@ -52,35 +52,8 @@ REVIEW_TMPDIR=$(mktemp -d)
log() { printf '[%s] PR#%s %s\n' "$(date -u '+%Y-%m-%d %H:%M:%S UTC')" "$PR_NUMBER" "$*" >> "$LOGFILE"; }
status() { printf '[%s] PR #%s: %s\n' "$(date -u '+%Y-%m-%d %H:%M:%S UTC')" "$PR_NUMBER" "$*" > "$STATUSFILE"; log "$*"; }
# cleanup — remove temp files (NOT lockfile — cleanup_on_exit handles that)
cleanup() {
rm -rf "$REVIEW_TMPDIR" "$STATUSFILE" "/tmp/${PROJECT_NAME}-review-graph-${PR_NUMBER}.json"
}
# cleanup_on_exit — defensive cleanup: remove lockfile if we own it, kill residual children
# This handles the case where review-pr.sh is terminated unexpectedly (e.g., watchdog SIGTERM)
cleanup_on_exit() {
local ec=$?
# Remove lockfile only if we own it (PID matches $$)
if [ -f "$LOCKFILE" ] && [ -n "$(cat "$LOCKFILE" 2>/dev/null)" ]; then
if [ "$(cat "$LOCKFILE" 2>/dev/null)" = "$$" ]; then
rm -f "$LOCKFILE"
log "cleanup_on_exit: removed lockfile (we owned it)"
fi
fi
# Kill any direct children that may have been spawned by this process
# (e.g., bash -c commands from Claude's Bash tool that didn't get reaped)
pkill -P $$ 2>/dev/null || true
# Call the main cleanup function to remove temp files
cleanup
exit "$ec"
}
trap cleanup_on_exit EXIT INT TERM
# Note: EXIT trap is already set above. The cleanup function is still available for
# non-error exits (e.g., normal completion via exit 0 after verdict posted).
# When review succeeds, we want to skip lockfile removal since the verdict was posted.
cleanup() { rm -rf "$REVIEW_TMPDIR" "$LOCKFILE" "$STATUSFILE" "/tmp/${PROJECT_NAME}-review-graph-${PR_NUMBER}.json"; }
trap cleanup EXIT
# =============================================================================
# LOG ROTATION
@ -131,7 +104,6 @@ if [ "$PR_STATE" != "open" ]; then
log "SKIP: state=${PR_STATE}"
worktree_cleanup "$WORKTREE"
rm -f "$OUTPUT_FILE" "$SID_FILE" 2>/dev/null || true
rm -f "$LOCKFILE"
exit 0
fi
@ -141,7 +113,7 @@ fi
CI_STATE=$(ci_commit_status "$PR_SHA")
CI_NOTE=""
if ! ci_passed "$CI_STATE"; then
ci_required_for_pr "$PR_NUMBER" && { log "SKIP: CI=${CI_STATE}"; rm -f "$LOCKFILE"; exit 0; }
ci_required_for_pr "$PR_NUMBER" && { log "SKIP: CI=${CI_STATE}"; exit 0; }
CI_NOTE=" (not required — non-code PR)"
fi
@ -151,10 +123,10 @@ fi
ALL_COMMENTS=$(forge_api_all "/issues/${PR_NUMBER}/comments")
HAS_CMT=$(printf '%s' "$ALL_COMMENTS" | jq --arg s "$PR_SHA" \
'[.[]|select(.body|contains("<!-- reviewed: "+$s+" -->"))]|length')
[ "${HAS_CMT:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: reviewed ${PR_SHA:0:7}"; rm -f "$LOCKFILE"; exit 0; }
[ "${HAS_CMT:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: reviewed ${PR_SHA:0:7}"; exit 0; }
HAS_FML=$(forge_api_all "/pulls/${PR_NUMBER}/reviews" | jq --arg s "$PR_SHA" \
'[.[]|select(.commit_id==$s)|select(.state!="COMMENT")]|length')
[ "${HAS_FML:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: formal review"; rm -f "$LOCKFILE"; exit 0; }
[ "${HAS_FML:-0}" -gt 0 ] && [ "$FORCE" != "--force" ] && { log "SKIP: formal review"; exit 0; }
# =============================================================================
# RE-REVIEW DETECTION
@ -352,7 +324,3 @@ esac
profile_write_journal "review-${PR_NUMBER}" "Review PR #${PR_NUMBER} (${VERDICT})" "${VERDICT,,}" "" || true
log "DONE: ${VERDICT} (re-review: ${IS_RE_REVIEW})"
# Remove lockfile on successful completion (cleanup_on_exit will also do this,
# but we do it here to avoid the trap running twice)
rm -f "$LOCKFILE"

View file

@ -209,72 +209,3 @@ jq -nc \
log "Engagement report written to ${OUTPUT}: ${UNIQUE_VISITORS} visitors, ${PAGE_VIEWS} page views"
echo "Engagement report: ${UNIQUE_VISITORS} unique visitors, ${PAGE_VIEWS} page views → ${OUTPUT}"
# ── Commit evidence to ops repo via Forgejo API ─────────────────────────────
commit_evidence_via_forgejo() {
local evidence_file="$1"
local report_date
report_date=$(basename "$evidence_file" .json)
local file_path="evidence/engagement/${report_date}.json"
# Check if ops repo is available
if [ -z "${OPS_REPO_ROOT:-}" ] || [ ! -d "${OPS_REPO_ROOT}/.git" ]; then
log "SKIP: OPS_REPO_ROOT not set or not a git repo — evidence file not committed"
return 0
fi
# Check if Forgejo credentials are available
if [ -z "${FORGE_TOKEN:-}" ] || [ -z "${FORGE_URL:-}" ] || [ -z "${FORGE_OPS_REPO:-}" ]; then
log "SKIP: Forgejo credentials not available (FORGE_TOKEN/FORGE_URL/FORGE_OPS_REPO) — evidence file not committed"
return 0
fi
# Read and encode the file content
local content
content=$(base64 < "$evidence_file")
local ops_owner="${OPS_FORGE_OWNER:-${FORGE_REPO%%/*}}"
local ops_repo="${OPS_FORGE_REPO:-${PROJECT_NAME:-disinto}-ops}"
# Check if file already exists in the ops repo
local existing
existing=$(curl -sf \
-H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_URL}/api/v1/repos/${ops_owner}/${ops_repo}/contents/${file_path}" \
2>/dev/null || echo "")
if [ -n "$existing" ] && printf '%s' "$existing" | jq -e '.sha' >/dev/null 2>&1; then
# Update existing file
local sha
sha=$(printf '%s' "$existing" | jq -r '.sha')
if curl -sf -X PUT \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/repos/${ops_owner}/${ops_repo}/contents/${file_path}" \
-d "$(jq -nc --arg content "$content" --arg sha "$sha" --arg msg "evidence: engagement ${report_date}" \
'{message: $msg, content: $content, sha: $sha}')" >/dev/null 2>&1; then
log "Updated evidence file in ops repo: ${file_path}"
return 0
else
log "ERROR: failed to update evidence file in ops repo"
return 1
fi
else
# Create new file
if curl -sf -X POST \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/repos/${ops_owner}/${ops_repo}/contents/${file_path}" \
-d "$(jq -nc --arg content "$content" --arg msg "evidence: engagement ${report_date}" \
'{message: $msg, content: $content}')" >/dev/null 2>&1; then
log "Created evidence file in ops repo: ${file_path}"
return 0
else
log "ERROR: failed to create evidence file in ops repo"
return 1
fi
fi
}
# Attempt to commit evidence (non-fatal — data collection succeeded even if commit fails)
commit_evidence_via_forgejo "$OUTPUT" || log "WARNING: evidence commit skipped or failed — file exists locally at ${OUTPUT}"

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# Supervisor Agent
**Role**: Health monitoring and auto-remediation, executed as a formula-driven
@ -24,18 +24,10 @@ Both invoke the same `supervisor-run.sh`. Sources `lib/guard.sh` and calls `chec
files for `PHASE:escalate` entries and auto-removes any whose linked issue
is confirmed closed (24h grace period after closure to avoid races). Reports
**stale crashed worktrees** (worktrees preserved after crash) — supervisor
housekeeping removes them after 24h. Collects **Woodpecker agent health**
(added #933): container `disinto-woodpecker-agent` health/running status,
gRPC error count in last 20 min, fast-failure pipeline count (<60s, last 15 min),
and overall health verdict (healthy/unhealthy). Unhealthy verdict triggers
automatic container restart + `blocked:ci_exhausted` issue recovery in
`supervisor-run.sh` before the Claude session starts.
housekeeping removes them after 24h
- `formulas/run-supervisor.toml` — Execution spec: five steps (preflight review,
health-assessment, decide-actions, report, journal) with `needs` dependencies.
Claude evaluates all metrics and takes actions in a single interactive session.
Health-assessment now includes P2 **Woodpecker agent unhealthy** classification
(container not running, ≥3 gRPC errors/20m, or ≥3 fast-failure pipelines/15m);
decide-actions documents the pre-session auto-recovery path
Claude evaluates all metrics and takes actions in a single interactive session
- `$OPS_REPO_ROOT/knowledge/*.md` — Domain-specific remediation guides (memory,
disk, CI, git, dev-agent, review-agent, forge)
@ -55,6 +47,5 @@ P3 (degraded PRs, circular deps, stale deps), P4 (housekeeping).
- Logs a WARNING message at startup indicating degraded mode
**Lifecycle**: supervisor-run.sh (invoked by polling loop every 20min, `check_active supervisor`)
→ lock + memory guard → run preflight.sh (collect metrics) → **WP agent health recovery**
(if unhealthy: restart container + recover ci_exhausted issues) → load formula + context → run
→ lock + memory guard → run preflight.sh (collect metrics) → load formula + context → run
claude -p via agent-sdk.sh → Claude assesses health, auto-fixes, writes journal → `PHASE:done`.

View file

@ -224,108 +224,3 @@ for _vf in "${_va_root}"/*.md; do
done
[ "$_found_vault" = false ] && echo " None"
echo ""
# ── Woodpecker Agent Health ────────────────────────────────────────────────
echo "## Woodpecker Agent Health"
# Check WP agent container health status
_wp_container="disinto-woodpecker-agent"
_wp_health_status="unknown"
_wp_health_start=""
if command -v docker &>/dev/null; then
# Get health status via docker inspect
_wp_health_status=$(docker inspect "$_wp_container" --format '{{.State.Health.Status}}' 2>/dev/null || echo "not_found")
if [ "$_wp_health_status" = "not_found" ] || [ -z "$_wp_health_status" ]; then
# Container may not exist or not have health check configured
_wp_health_status=$(docker inspect "$_wp_container" --format '{{.State.Status}}' 2>/dev/null || echo "not_found")
fi
# Get container start time for age calculation
_wp_start_time=$(docker inspect "$_wp_container" --format '{{.State.StartedAt}}' 2>/dev/null || echo "")
if [ -n "$_wp_start_time" ] && [ "$_wp_start_time" != "0001-01-01T00:00:00Z" ]; then
_wp_health_start=$(date -d "$_wp_start_time" '+%Y-%m-%d %H:%M UTC' 2>/dev/null || echo "$_wp_start_time")
fi
fi
echo "Container: $_wp_container"
echo "Status: $_wp_health_status"
[ -n "$_wp_health_start" ] && echo "Started: $_wp_health_start"
# Check for gRPC errors in agent logs (last 20 minutes)
_wp_grpc_errors=0
if [ "$_wp_health_status" != "not_found" ] && [ -n "$_wp_health_status" ]; then
# grep -c prints 0 itself when nothing matches; || true avoids appending a second "0"
_wp_grpc_errors=$(docker logs --since 20m "$_wp_container" 2>&1 | grep -c 'grpc error' || true)
echo "gRPC errors (last 20m): $_wp_grpc_errors"
fi
# Fast-failure heuristic: check for pipelines completing in <60s
_wp_fast_failures=0
_wp_recent_failures=""
if [ -n "${WOODPECKER_REPO_ID:-}" ] && [ "${WOODPECKER_REPO_ID}" != "0" ]; then
_now=$(date +%s)
_pipelines=$(woodpecker_api "/repos/${WOODPECKER_REPO_ID}/pipelines?perPage=100" 2>/dev/null || echo '[]')
# Count failures with duration < 60s in last 15 minutes
_wp_fast_failures=$(echo "$_pipelines" | jq --argjson now "$_now" '
[.[] | select(.status == "failure") | select((.finished - .started) < 60) | select(($now - .finished) < 900)]
| length' 2>/dev/null || echo "0")
if [ "$_wp_fast_failures" -gt 0 ]; then
_wp_recent_failures=$(echo "$_pipelines" | jq -r --argjson now "$_now" '
[.[] | select(.status == "failure") | select((.finished - .started) < 60) | select(($now - .finished) < 900)]
| .[] | "\(.number)\t\((.finished - .started))s"' 2>/dev/null || echo "")
fi
fi
echo "Fast-fail pipelines (<60s, last 15m): $_wp_fast_failures"
if [ -n "$_wp_recent_failures" ] && [ "$_wp_fast_failures" -gt 0 ]; then
echo "Recent failures:"
echo "$_wp_recent_failures" | while IFS=$'\t' read -r _num _dur; do
echo " #$_num: ${_dur}"
done
fi
# Determine overall WP agent health
_wp_agent_healthy=true
_wp_health_reason=""
if [ "$_wp_health_status" = "not_found" ]; then
_wp_agent_healthy=false
_wp_health_reason="Container not running"
elif [ "$_wp_health_status" = "unhealthy" ]; then
_wp_agent_healthy=false
_wp_health_reason="Container health check failed"
elif [ "$_wp_health_status" != "running" ]; then
_wp_agent_healthy=false
_wp_health_reason="Container not in running state: $_wp_health_status"
elif [ "$_wp_grpc_errors" -ge 3 ]; then
_wp_agent_healthy=false
_wp_health_reason="High gRPC error count (>=3 in 20m)"
elif [ "$_wp_fast_failures" -ge 3 ]; then
_wp_agent_healthy=false
_wp_health_reason="High fast-failure count (>=3 in 15m)"
fi
echo ""
echo "WP Agent Health: $([ "$_wp_agent_healthy" = true ] && echo "healthy" || echo "UNHEALTHY")"
[ -n "$_wp_health_reason" ] && echo "Reason: $_wp_health_reason"
echo ""
# ── WP Agent Health History (for idempotency) ──────────────────────────────
echo "## WP Agent Health History"
# Track last restart timestamp to avoid duplicate restarts in same run
_WP_HEALTH_HISTORY_FILE="${DISINTO_LOG_DIR}/supervisor/wp-agent-health.history"
_wp_last_restart="never"
_wp_last_restart_ts=0
if [ -f "$_WP_HEALTH_HISTORY_FILE" ]; then
_wp_last_restart_ts=$(grep -m1 '^LAST_RESTART_TS=' "$_WP_HEALTH_HISTORY_FILE" 2>/dev/null | cut -d= -f2 || echo "0")
if [ -n "$_wp_last_restart_ts" ] && [ "$_wp_last_restart_ts" -gt 0 ] 2>/dev/null; then
_wp_last_restart=$(date -d "@$_wp_last_restart_ts" '+%Y-%m-%d %H:%M UTC' 2>/dev/null || echo "$_wp_last_restart_ts")
fi
fi
echo "Last restart: $_wp_last_restart"
echo ""

View file

@ -47,9 +47,6 @@ SID_FILE="/tmp/supervisor-session-${PROJECT_NAME}.sid"
SCRATCH_FILE="/tmp/supervisor-${PROJECT_NAME}-scratch.md"
WORKTREE="/tmp/${PROJECT_NAME}-supervisor-run"
# WP agent container name (configurable via env var)
export WP_AGENT_CONTAINER_NAME="${WP_AGENT_CONTAINER_NAME:-disinto-woodpecker-agent}"
# Override LOG_AGENT for consistent agent identification
# shellcheck disable=SC2034 # consumed by agent-sdk.sh and env.sh log()
LOG_AGENT="supervisor"
@ -169,160 +166,6 @@ ${FORMULA_CONTENT}
${SCRATCH_INSTRUCTION}
${PROMPT_FOOTER}"
# ── WP Agent Health Recovery ──────────────────────────────────────────────
# Check preflight output for WP agent health issues and trigger recovery if needed
_WP_HEALTH_CHECK_FILE="${DISINTO_LOG_DIR}/supervisor/wp-agent-health-check.md"
echo "$PREFLIGHT_OUTPUT" > "$_WP_HEALTH_CHECK_FILE"
# Extract WP agent health status from preflight output
# Note: match exact "healthy" not "UNHEALTHY" (substring issue)
_wp_agent_healthy=$(grep -q "^WP Agent Health: healthy$" "$_WP_HEALTH_CHECK_FILE" 2>/dev/null && echo "true" || echo "false")
_wp_health_reason=$(grep "^Reason:" "$_WP_HEALTH_CHECK_FILE" 2>/dev/null | sed 's/^Reason: //' || echo "")
if [ "$_wp_agent_healthy" = "false" ] && [ -n "$_wp_health_reason" ]; then
log "WP agent detected as UNHEALTHY: $_wp_health_reason"
# Check for idempotency guard - have we already restarted in this run?
_WP_HEALTH_HISTORY_FILE="${DISINTO_LOG_DIR}/supervisor/wp-agent-health.history"
_wp_last_restart_ts=0
_wp_last_restart="never"
if [ -f "$_WP_HEALTH_HISTORY_FILE" ]; then
_wp_last_restart_ts=$(grep -m1 '^LAST_RESTART_TS=' "$_WP_HEALTH_HISTORY_FILE" 2>/dev/null | cut -d= -f2 || echo "0")
if [ -n "$_wp_last_restart_ts" ] && [ "$_wp_last_restart_ts" != "0" ] 2>/dev/null; then
_wp_last_restart=$(date -d "@$_wp_last_restart_ts" '+%Y-%m-%d %H:%M UTC' 2>/dev/null || echo "$_wp_last_restart_ts")
fi
fi
_current_ts=$(date +%s)
_restart_threshold=300 # 5 minutes between restarts
if [ -z "$_wp_last_restart_ts" ] || [ "$_wp_last_restart_ts" = "0" ] || [ $((_current_ts - _wp_last_restart_ts)) -gt $_restart_threshold ]; then
log "Triggering WP agent restart..."
# Restart the WP agent container
if docker restart "$WP_AGENT_CONTAINER_NAME" >/dev/null 2>&1; then
_restart_time=$(date -u '+%Y-%m-%d %H:%M UTC')
log "Successfully restarted WP agent container: $WP_AGENT_CONTAINER_NAME"
# Update history file
echo "LAST_RESTART_TS=$_current_ts" > "$_WP_HEALTH_HISTORY_FILE"
echo "LAST_RESTART_TIME=$_restart_time" >> "$_WP_HEALTH_HISTORY_FILE"
# Post recovery notice to journal
_journal_file="${OPS_JOURNAL_ROOT}/$(date -u +%Y-%m-%d).md"
if [ -f "$_journal_file" ]; then
{
echo ""
echo "### WP Agent Recovery - $_restart_time"
echo ""
echo "WP agent was unhealthy: $_wp_health_reason"
echo "Container restarted automatically."
} >> "$_journal_file"
fi
# Scan for issues updated in the last 30 minutes with blocked: ci_exhausted label
log "Scanning for ci_exhausted issues updated in last 30 minutes..."
_now_epoch=$(date +%s)
_thirty_min_ago=$(( _now_epoch - 1800 ))
# Fetch open issues with blocked label
_blocked_issues=$(forge_api GET "/issues?state=open&labels=blocked&type=issues&limit=100" 2>/dev/null || echo "[]")
_blocked_count=$(echo "$_blocked_issues" | jq 'length' 2>/dev/null || echo "0")
_issues_processed=0
_issues_recovered=0
if [ "$_blocked_count" -gt 0 ]; then
# Process each blocked issue
echo "$_blocked_issues" | jq -c '.[]' 2>/dev/null | while IFS= read -r issue_json; do
[ -z "$issue_json" ] && continue
_issue_num=$(echo "$issue_json" | jq -r '.number // empty')
_issue_updated=$(echo "$issue_json" | jq -r '.updated_at // empty')
_issue_labels=$(echo "$issue_json" | jq -r '.labels | map(.name) | join(",")' 2>/dev/null || echo "")
# Check if issue has ci_exhausted label
if ! echo "$_issue_labels" | grep -q "ci_exhausted"; then
continue
fi
# Parse updated_at timestamp
_issue_updated_epoch=$(date -d "$_issue_updated" +%s 2>/dev/null || echo "0")
_time_since_update=$(( _now_epoch - _issue_updated_epoch ))
# Check if updated in last 30 minutes
if [ "$_time_since_update" -lt 1800 ] && [ "$_time_since_update" -ge 0 ]; then
_issues_processed=$(( _issues_processed + 1 ))
# Check for idempotency guard - already swept by supervisor?
_issue_body=$(echo "$issue_json" | jq -r '.body // ""' 2>/dev/null || echo "")
if echo "$_issue_body" | grep -q "<!-- supervisor-swept -->"; then
log "Issue #$_issue_num already swept by supervisor, skipping"
continue
fi
log "Processing ci_exhausted issue #$_issue_num (updated $_time_since_update seconds ago)"
# Get issue assignee
_issue_assignee=$(echo "$issue_json" | jq -r '.assignee.login // empty' 2>/dev/null || echo "")
# Unassign the issue
if [ -n "$_issue_assignee" ]; then
log "Unassigning issue #$_issue_num from $_issue_assignee"
curl -sf -X PATCH \
-H "Authorization: token ${FORGE_SUPERVISOR_TOKEN:-$FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_API}/issues/$_issue_num" \
-d '{"assignees":[]}' >/dev/null 2>&1 || true
fi
# Remove blocked label
_blocked_label_id=$(forge_api GET "/labels" 2>/dev/null | jq -r '.[] | select(.name == "blocked") | .id' 2>/dev/null || echo "")
if [ -n "$_blocked_label_id" ]; then
log "Removing blocked label from issue #$_issue_num"
curl -sf -X DELETE \
-H "Authorization: token ${FORGE_SUPERVISOR_TOKEN:-$FORGE_TOKEN}" \
"${FORGE_API}/issues/$_issue_num/labels/$_blocked_label_id" >/dev/null 2>&1 || true
fi
# Add comment about infra-flake recovery
_recovery_comment=$(cat <<EOF
<!-- supervisor-swept -->
**Automated Recovery — $(date -u '+%Y-%m-%d %H:%M UTC')**
CI agent was unhealthy between $_restart_time and now. The prior retry budget may have been spent on infra flake, not real failures.
**Recovery Actions:**
- Unassigned from pool and returned for fresh attempt
- CI agent container restarted
- Related pipelines will be retriggered automatically
**Next Steps:**
Please re-attempt this issue. The CI environment has been refreshed.
EOF
)
curl -sf -X POST \
-H "Authorization: token ${FORGE_SUPERVISOR_TOKEN:-$FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${FORGE_API}/issues/$_issue_num/comments" \
-d "$(jq -n --arg body "$_recovery_comment" '{body: $body}')" >/dev/null 2>&1 || true
log "Recovered issue #$_issue_num - returned to pool"
fi
done
fi
log "WP agent restart and issue recovery complete"
else
log "ERROR: Failed to restart WP agent container"
fi
else
log "WP agent restart already performed in this run (since $_wp_last_restart), skipping"
fi
fi
# ── Run agent ─────────────────────────────────────────────────────────────
agent_run --worktree "$WORKTREE" "$PROMPT"
log "agent_run complete"

View file

@ -155,44 +155,6 @@ setup_file() {
[[ "$output" == *"[deploy] dry-run complete"* ]]
}
# S2.6 / #928 — every --with <svc> that ships tools/vault-seed-<svc>.sh
# must auto-invoke the seeder before deploy.sh runs. forgejo is the
# only service with a seeder today, so the dry-run plan must include
# its seed line when --with forgejo is set. The seed block must also
# appear BEFORE the deploy block (seeded secrets must exist before
# nomad reads the template stanza) — pinned here by scanning output
# order. Services without a seeder (e.g. unknown hypothetical future
# ones) are silently skipped by the loop convention.
@test "disinto init --backend=nomad --with forgejo --dry-run prints seed plan before deploy plan" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"Vault seed dry-run"* ]]
[[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]]
# Order: seed header must appear before deploy header.
local seed_line deploy_line
seed_line=$(echo "$output" | grep -n "Vault seed dry-run" | head -1 | cut -d: -f1)
deploy_line=$(echo "$output" | grep -n "Deploy services dry-run" | head -1 | cut -d: -f1)
[ -n "$seed_line" ]
[ -n "$deploy_line" ]
[ "$seed_line" -lt "$deploy_line" ]
}
# Regression guard (PR #929 review): `sudo -n VAR=val -- cmd` is subject
# to sudoers env_reset policy and silently drops VAULT_ADDR unless it's
# in env_keep (it isn't in default configs). vault-seed-forgejo.sh
# requires VAULT_ADDR and dies at its own precondition check if unset,
# so the non-root branch MUST invoke `sudo -n -- env VAR=val cmd` so
# that `env` sets the variable in the child process regardless of
# sudoers policy. This grep-level guard catches a revert to the unsafe
# form that silently broke non-root seed runs on a fresh LXC.
@test "seed loop invokes sudo via 'env VAR=val' (bypasses sudoers env_reset)" {
run grep -F 'sudo -n -- env "VAULT_ADDR=' "$DISINTO_BIN"
[ "$status" -eq 0 ]
# Negative: no bare `sudo -n "VAR=val" --` form anywhere in the file.
run grep -F 'sudo -n "VAULT_ADDR=' "$DISINTO_BIN"
[ "$status" -ne 0 ]
}
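# For reference, the two sudo invocation shapes the greps above distinguish
# (illustrative command lines, not executed by this test):
#   sudo -n -- env "VAULT_ADDR=$VAULT_ADDR" tools/vault-seed-forgejo.sh   # env sets the var in the child
#   sudo -n "VAULT_ADDR=$VAULT_ADDR" -- tools/vault-seed-forgejo.sh       # dropped by sudoers env_reset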
@test "disinto init --backend=nomad --with forgejo,forgejo --dry-run handles comma-separated services" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,forgejo --dry-run
[ "$status" -eq 0 ]
@ -215,44 +177,7 @@ setup_file() {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with unknown-service --dry-run
[ "$status" -ne 0 ]
[[ "$output" == *"unknown service"* ]]
[[ "$output" == *"known: forgejo, woodpecker-server, woodpecker-agent, agents, staging, chat, edge"* ]]
}
# S3.4: woodpecker auto-expansion and forgejo auto-inclusion
@test "disinto init --backend=nomad --with woodpecker auto-expands to server+agent" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent"* ]]
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent"* ]]
}
@test "disinto init --backend=nomad --with woodpecker auto-includes forgejo with note" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"Note: --with woodpecker implies --with forgejo"* ]]
}
@test "disinto init --backend=nomad --with forgejo,woodpecker expands woodpecker" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,woodpecker --dry-run
[ "$status" -eq 0 ]
# Order follows input: forgejo first, then woodpecker expanded
[[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent"* ]]
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent"* ]]
}
@test "disinto init --backend=nomad --with woodpecker seeds both forgejo and woodpecker" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]]
[[ "$output" == *"tools/vault-seed-woodpecker.sh --dry-run"* ]]
}
@test "disinto init --backend=nomad --with forgejo,woodpecker deploys all three services" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,woodpecker --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"forgejo.hcl"* ]]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-server.hcl"* ]]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-agent.hcl"* ]]
[[ "$output" == *"known: forgejo"* ]]
}
@test "disinto init --backend=nomad --with forgejo (flag=value syntax) works" {
@ -385,60 +310,3 @@ setup_file() {
[ "$status" -ne 0 ]
[[ "$output" == *"--empty and --import-env/--import-sops/--age-key are mutually exclusive"* ]]
}
# S4.2: agents service auto-expansion and dependencies
@test "disinto init --backend=nomad --with agents auto-includes forgejo and woodpecker" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"services to deploy: forgejo,agents,woodpecker-server,woodpecker-agent"* ]]
[[ "$output" == *"Note: --with agents implies --with forgejo"* ]]
[[ "$output" == *"Note: --with agents implies --with woodpecker"* ]]
}
@test "disinto init --backend=nomad --with agents deploys in correct order" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents"* ]]
}
@test "disinto init --backend=nomad --with agents seeds agents service" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]]
[[ "$output" == *"tools/vault-seed-woodpecker.sh --dry-run"* ]]
[[ "$output" == *"tools/vault-seed-agents.sh --dry-run"* ]]
}
@test "disinto init --backend=nomad --with agents deploys all four services" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"forgejo.hcl"* ]]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-server.hcl"* ]]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-agent.hcl"* ]]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"agents.hcl"* ]]
}
@test "disinto init --backend=nomad --with woodpecker,agents expands correctly" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker,agents --dry-run
[ "$status" -eq 0 ]
# woodpecker expands to server+agent, agents is already explicit
# forgejo is auto-included by agents
[[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent,agents"* ]]
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents"* ]]
}
# S5.1 / #1035 — edge service seeds ops-repo (dispatcher FORGE_TOKEN)
@test "disinto init --backend=nomad --with edge deploys edge" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with edge --dry-run
[ "$status" -eq 0 ]
# edge depends on all backend services, so all are included
[[ "$output" == *"services to deploy: edge,forgejo"* ]]
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents staging chat edge"* ]]
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"edge.hcl"* ]]
}
@test "disinto init --backend=nomad --with edge seeds ops-repo" {
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with edge --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"tools/vault-seed-ops-repo.sh --dry-run"* ]]
}

View file

@ -1,310 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# smoke-edge-subpath.sh — End-to-end subpath routing smoke test
#
# Verifies Forgejo, Woodpecker, and chat function correctly under subpaths:
# - Forgejo at /forge/
# - Woodpecker at /ci/
# - Chat at /chat/
# - Staging at /staging/
#
# Usage:
# smoke-edge-subpath.sh [--base-url BASE_URL]
#
# Environment variables:
# BASE_URL — Edge proxy URL (default: http://localhost)
# EDGE_TIMEOUT — Request timeout in seconds (default: 30)
# EDGE_MAX_RETRIES — Max retries per request (default: 3)
#
# Exit codes:
# 0 — All checks passed
# 1 — One or more checks failed
# =============================================================================
set -euo pipefail
# Script directory for relative paths
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Source common helpers if available
source "${SCRIPT_DIR}/../lib/env.sh" 2>/dev/null || true
# ─────────────────────────────────────────────────────────────────────────────
# Configuration
# ─────────────────────────────────────────────────────────────────────────────
BASE_URL="${BASE_URL:-http://localhost}"
EDGE_TIMEOUT="${EDGE_TIMEOUT:-30}"
EDGE_MAX_RETRIES="${EDGE_MAX_RETRIES:-3}"
# Subpaths to test
FORGE_PATH="/forge/"
CI_PATH="/ci/"
CHAT_PATH="/chat/"
STAGING_PATH="/staging/"
# Track overall test status
FAILED=0
PASSED=0
SKIPPED=0
# ─────────────────────────────────────────────────────────────────────────────
# Logging helpers
# ─────────────────────────────────────────────────────────────────────────────
log_info() {
echo "[INFO] $*"
}
log_pass() {
echo "[PASS] $*"
((PASSED++)) || true
}
log_fail() {
echo "[FAIL] $*"
((FAILED++)) || true
}
log_skip() {
echo "[SKIP] $*"
((SKIPPED++)) || true
}
log_section() {
echo ""
echo "=== $* ==="
echo ""
}
# ─────────────────────────────────────────────────────────────────────────────
# HTTP helpers
# ─────────────────────────────────────────────────────────────────────────────
# Make an HTTP request with retry logic
# Usage: http_request <method> <url> [options...]
# Returns: HTTP status code on stdout
http_request() {
local method="$1"
local url="$2"
shift 2
local retries=0
local response status
while [ "$retries" -lt "$EDGE_MAX_RETRIES" ]; do
# The request URL must be passed to curl explicitly - it is not part of "$@".
response=$(curl -sS -w '\n%{http_code}' -X "$method" \
--max-time "$EDGE_TIMEOUT" \
-o /tmp/edge-response-$$ \
"$@" "$url" 2>&1) || {
retries=$((retries + 1))
log_info "Retry $retries/$EDGE_MAX_RETRIES for $url"
sleep 1
continue
}
status=$(echo "$response" | tail -n1)
echo "$status"
return 0
done
log_fail "Max retries exceeded for $url"
return 1
}
# Make a GET request and return status code
http_get() {
local url="$1"
shift || true
http_request "GET" "$url" "$@"
}
# Make a HEAD request (no body)
http_head() {
local url="$1"
shift || true
http_request "HEAD" "$url" "$@"
}
# Make a GET request and return the response body
http_get_body() {
local url="$1"
shift || true
curl -sS --max-time "$EDGE_TIMEOUT" "$@" "$url"
}
# ─────────────────────────────────────────────────────────────────────────────
# Test functions
# ─────────────────────────────────────────────────────────────────────────────
test_root_redirect() {
log_section "Test 1: Root redirect to /forge/"
local status
status=$(http_head "$BASE_URL/")
if [ "$status" = "302" ]; then
log_pass "Root / redirects with 302"
else
log_fail "Expected 302 redirect from /, got status $status"
fi
}
test_forgejo_subpath() {
log_section "Test 2: Forgejo at /forge/"
local status
status=$(http_head "$BASE_URL${FORGE_PATH}")
if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then
log_pass "Forgejo at ${BASE_URL}${FORGE_PATH} returns status $status"
else
log_fail "Forgejo at ${BASE_URL}${FORGE_PATH} returned unexpected status $status"
fi
}
test_woodpecker_subpath() {
log_section "Test 3: Woodpecker at /ci/"
local status
status=$(http_head "$BASE_URL${CI_PATH}")
if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then
log_pass "Woodpecker at ${BASE_URL}${CI_PATH} returns status $status"
else
log_fail "Woodpecker at ${BASE_URL}${CI_PATH} returned unexpected status $status"
fi
}
test_chat_subpath() {
log_section "Test 4: Chat at /chat/"
# Test chat login endpoint
local status
status=$(http_head "$BASE_URL${CHAT_PATH}login")
if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then
log_pass "Chat login at ${BASE_URL}${CHAT_PATH}login returns status $status"
else
log_fail "Chat login at ${BASE_URL}${CHAT_PATH}login returned unexpected status $status"
fi
# Test chat OAuth callback endpoint
status=$(http_head "$BASE_URL${CHAT_PATH}oauth/callback")
if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then
log_pass "Chat OAuth callback at ${BASE_URL}${CHAT_PATH}oauth/callback returns status $status"
else
log_fail "Chat OAuth callback at ${BASE_URL}${CHAT_PATH}oauth/callback returned unexpected status $status"
fi
}
test_staging_subpath() {
log_section "Test 5: Staging at /staging/"
local status
status=$(http_head "$BASE_URL${STAGING_PATH}")
if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then
log_pass "Staging at ${BASE_URL}${STAGING_PATH} returns status $status"
else
log_fail "Staging at ${BASE_URL}${STAGING_PATH} returned unexpected status $status"
fi
}
test_forward_auth_rejection() {
log_section "Test 6: Forward auth on /chat/* rejects unauthenticated requests"
# Request a protected chat endpoint without auth header
# Should return 401 (Unauthorized) due to forward_auth
local status
status=$(http_head "$BASE_URL${CHAT_PATH}auth/verify")
if [ "$status" = "401" ]; then
log_pass "Unauthenticated /chat/auth/verify returns 401 (forward_auth working)"
elif [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then
log_skip "Unauthenticated /chat/auth/verify returns $status (forward_auth may be disabled)"
else
log_fail "Expected 401 for unauthenticated /chat/auth/verify, got status $status"
fi
}
test_forgejo_oauth_callback() {
log_section "Test 7: Forgejo OAuth callback for Woodpecker under subpath"
# Test that Forgejo OAuth callback path works (Woodpecker OAuth integration)
local status
status=$(http_head "$BASE_URL${FORGE_PATH}login/oauth/callback")
if [ "$status" -ge 200 ] && [ "$status" -lt 400 ]; then
log_pass "Forgejo OAuth callback at ${BASE_URL}${FORGE_PATH}login/oauth/callback works"
else
log_fail "Forgejo OAuth callback returned unexpected status $status"
fi
}
# ─────────────────────────────────────────────────────────────────────────────
# Main
# ─────────────────────────────────────────────────────────────────────────────
main() {
log_info "Starting subpath routing smoke test"
log_info "Base URL: $BASE_URL"
log_info "Timeout: ${EDGE_TIMEOUT}s, Max retries: ${EDGE_MAX_RETRIES}"
# Run all tests
test_root_redirect
test_forgejo_subpath
test_woodpecker_subpath
test_chat_subpath
test_staging_subpath
test_forward_auth_rejection
test_forgejo_oauth_callback
# Summary
log_section "Test Summary"
log_info "Passed: $PASSED"
log_info "Failed: $FAILED"
log_info "Skipped: $SKIPPED"
if [ "$FAILED" -gt 0 ]; then
log_fail "Some tests failed"
exit 1
fi
log_pass "All tests passed!"
exit 0
}
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--base-url)
BASE_URL="$2"
shift 2
;;
--base-url=*)
BASE_URL="${1#*=}"
shift
;;
--help)
echo "Usage: $0 [options]"
echo ""
echo "Options:"
echo " --base-url URL Set base URL (default: http://localhost)"
echo " --help Show this help message"
echo ""
echo "Environment variables:"
echo " BASE_URL Base URL for edge proxy (default: http://localhost)"
echo " EDGE_TIMEOUT Request timeout in seconds (default: 30)"
echo " EDGE_MAX_RETRIES Max retries per request (default: 3)"
exit 0
;;
*)
echo "Unknown option: $1" >&2
exit 1
;;
esac
done
main

View file

@ -15,7 +15,6 @@
set -euo pipefail
FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
export FACTORY_ROOT_REAL="$FACTORY_ROOT"
# Always use localhost for mock Forgejo (in case FORGE_URL is set from docker-compose)
export FORGE_URL="http://localhost:3000"
MOCK_BIN="/tmp/smoke-mock-bin"
@ -31,8 +30,7 @@ cleanup() {
rm -rf "$MOCK_BIN" /tmp/smoke-test-repo \
"${FACTORY_ROOT}/projects/smoke-repo.toml" \
/tmp/smoke-claude-shared /tmp/smoke-home-claude \
/tmp/smoke-env-before-rerun /tmp/smoke-env-before-dryrun \
"${FACTORY_ROOT}/docker-compose.yml"
/tmp/smoke-env-before-rerun /tmp/smoke-env-before-dryrun
# Restore .env only if we created the backup
if [ -f "${FACTORY_ROOT}/.env.smoke-backup" ]; then
mv "${FACTORY_ROOT}/.env.smoke-backup" "${FACTORY_ROOT}/.env"
@ -425,51 +423,6 @@ export CLAUDE_SHARED_DIR="$ORIG_CLAUDE_SHARED_DIR"
export CLAUDE_CONFIG_DIR="$ORIG_CLAUDE_CONFIG_DIR"
rm -rf /tmp/smoke-claude-shared /tmp/smoke-home-claude
# ── 8. Test duplicate service name detection ──────────────────────────────
echo "=== 8/8 Testing duplicate service name detection ==="
# Isolated factory root — do NOT touch the real ${FACTORY_ROOT}/projects/
SMOKE_DUP_ROOT=$(mktemp -d)
mkdir -p "$SMOKE_DUP_ROOT/projects"
cat > "$SMOKE_DUP_ROOT/projects/duplicate-test.toml" <<'TOMLEOF'
name = "duplicate-test"
description = "dup-detection smoke"
[ci]
woodpecker_repo_id = "999"
[agents.llama]
base_url = "http://localhost:8080"
model = "qwen:latest"
roles = ["dev"]
forge_user = "llama-bot"
TOMLEOF
# Call the generator directly — no `disinto init` to overwrite the TOML.
# FACTORY_ROOT tells generators.sh where projects/ + compose_file live.
(
export FACTORY_ROOT="$SMOKE_DUP_ROOT"
export ENABLE_LLAMA_AGENT=1
# shellcheck disable=SC1091
source "${FACTORY_ROOT_REAL:-$(cd "$(dirname "$0")/.." && pwd)}/lib/generators.sh"
# Use a temp file to capture output since pipefail will kill the pipeline
# when _generate_compose_impl returns non-zero
_generate_compose_impl > /tmp/smoke-dup-output.txt 2>&1 || true
if grep -q "Duplicate service name" /tmp/smoke-dup-output.txt; then
pass "Duplicate service detection: conflict between ENABLE_LLAMA_AGENT and [agents.llama] reported"
rm -f /tmp/smoke-dup-output.txt
exit 0
else
fail "Duplicate service detection: no error raised for ENABLE_LLAMA_AGENT + [agents.llama]"
cat /tmp/smoke-dup-output.txt >&2
rm -f /tmp/smoke-dup-output.txt
exit 1
fi
) || FAILED=1
rm -rf "$SMOKE_DUP_ROOT"
unset ENABLE_LLAMA_AGENT
# ── Summary ──────────────────────────────────────────────────────────────────
echo ""
if [ "$FAILED" -ne 0 ]; then

View file

@ -1,238 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# test-caddyfile-routing.sh — Caddyfile routing block unit test
#
# Extracts the Caddyfile template from nomad/jobs/edge.hcl and validates its
# structure without requiring a running Caddy instance.
#
# Checks:
# - Forgejo subpath (/forge/* -> :3000)
# - Woodpecker subpath (/ci/* -> :8000)
# - Staging subpath (/staging/* -> nomadService discovery)
# - Chat subpath (/chat/* with forward_auth and OAuth routes)
# - Root redirect to /forge/
#
# Usage:
# test-caddyfile-routing.sh
#
# Exit codes:
# 0 — All checks passed
# 1 — One or more checks failed
# =============================================================================
set -euo pipefail
# Script directory for relative paths
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
EDGE_TEMPLATE="${REPO_ROOT}/nomad/jobs/edge.hcl"
# Track test status
FAILED=0
PASSED=0
# ─────────────────────────────────────────────────────────────────────────────
# Logging helpers
# ─────────────────────────────────────────────────────────────────────────────
tr_info() {
echo "[INFO] $*"
}
tr_pass() {
echo "[PASS] $*"
((PASSED++)) || true
}
tr_fail() {
echo "[FAIL] $*"
((FAILED++)) || true
}
tr_section() {
echo ""
echo "=== $* ==="
echo ""
}
# ─────────────────────────────────────────────────────────────────────────────
# Caddyfile extraction
# ─────────────────────────────────────────────────────────────────────────────
extract_caddyfile() {
local template_file="$1"
# Extract the Caddyfile template (content between <<EOT and EOT markers
# within the template stanza)
local caddyfile
caddyfile=$(sed -n '/data[[:space:]]*=[[:space:]]*<<[Ee][Oo][Tt]/,/^EOT$/p' "$template_file" | sed '1s/.*/# Caddyfile extracted from Nomad template/; $d')
if [ -z "$caddyfile" ]; then
echo "ERROR: Could not extract Caddyfile template from $template_file" >&2
return 1
fi
echo "$caddyfile"
}
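# For reference, the template stanza this sed pipeline expects looks roughly like the
# sketch below (illustrative only; the authoritative template lives in nomad/jobs/edge.hcl):
#   template {
#     data = <<EOT
#     :80 {
#       handle /forge/* { reverse_proxy 127.0.0.1:3000 }
#     }
#     EOT
#   }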
# ─────────────────────────────────────────────────────────────────────────────
# Validation functions
# ─────────────────────────────────────────────────────────────────────────────
check_forgejo_routing() {
tr_section "Validating Forgejo routing"
# Check handle block for /forge/*
if echo "$CADDYFILE" | grep -q "handle /forge/\*"; then
tr_pass "Forgejo handle block (handle /forge/*)"
else
tr_fail "Missing Forgejo handle block (handle /forge/*)"
fi
# Check reverse_proxy to Forgejo on port 3000
if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:3000"; then
tr_pass "Forgejo reverse_proxy configured (127.0.0.1:3000)"
else
tr_fail "Missing Forgejo reverse_proxy (127.0.0.1:3000)"
fi
}
check_woodpecker_routing() {
tr_section "Validating Woodpecker routing"
# Check handle block for /ci/*
if echo "$CADDYFILE" | grep -q "handle /ci/\*"; then
tr_pass "Woodpecker handle block (handle /ci/*)"
else
tr_fail "Missing Woodpecker handle block (handle /ci/*)"
fi
# Check reverse_proxy to Woodpecker on port 8000
if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8000"; then
tr_pass "Woodpecker reverse_proxy configured (127.0.0.1:8000)"
else
tr_fail "Missing Woodpecker reverse_proxy (127.0.0.1:8000)"
fi
}
check_staging_routing() {
tr_section "Validating Staging routing"
# Check handle block for /staging/*
if echo "$CADDYFILE" | grep -q "handle /staging/\*"; then
tr_pass "Staging handle block (handle /staging/*)"
else
tr_fail "Missing Staging handle block (handle /staging/*)"
fi
# Check for uri strip_prefix /staging directive
if echo "$CADDYFILE" | grep -q "uri strip_prefix /staging"; then
tr_pass "Staging uri strip_prefix configured (/staging)"
else
tr_fail "Missing uri strip_prefix /staging for staging"
fi
# Check for nomadService discovery (dynamic port)
if echo "$CADDYFILE" | grep -q "nomadService"; then
tr_pass "Staging uses Nomad service discovery"
else
tr_fail "Missing Nomad service discovery for staging"
fi
}
check_chat_routing() {
tr_section "Validating Chat routing"
# Check login endpoint
if echo "$CADDYFILE" | grep -q "handle /chat/login"; then
tr_pass "Chat login handle block (handle /chat/login)"
else
tr_fail "Missing Chat login handle block (handle /chat/login)"
fi
# Check OAuth callback endpoint
if echo "$CADDYFILE" | grep -q "handle /chat/oauth/callback"; then
tr_pass "Chat OAuth callback handle block (handle /chat/oauth/callback)"
else
tr_fail "Missing Chat OAuth callback handle block (handle /chat/oauth/callback)"
fi
# Check catch-all for /chat/*
if echo "$CADDYFILE" | grep -q "handle /chat/\*"; then
tr_pass "Chat catch-all handle block (handle /chat/*)"
else
tr_fail "Missing Chat catch-all handle block (handle /chat/*)"
fi
# Check reverse_proxy to Chat on port 8080
if echo "$CADDYFILE" | grep -q "reverse_proxy 127.0.0.1:8080"; then
tr_pass "Chat reverse_proxy configured (127.0.0.1:8080)"
else
tr_fail "Missing Chat reverse_proxy (127.0.0.1:8080)"
fi
# Check forward_auth block for /chat/*
if echo "$CADDYFILE" | grep -A10 "handle /chat/\*" | grep -q "forward_auth"; then
tr_pass "forward_auth block configured for /chat/*"
else
tr_fail "Missing forward_auth block for /chat/*"
fi
# Check forward_auth URI
if echo "$CADDYFILE" | grep -q "uri /chat/auth/verify"; then
tr_pass "forward_auth URI configured (/chat/auth/verify)"
else
tr_fail "Missing forward_auth URI (/chat/auth/verify)"
fi
}
check_root_redirect() {
tr_section "Validating root redirect"
# Check root redirect to /forge/
if echo "$CADDYFILE" | grep -q "redir /forge/ 302"; then
tr_pass "Root redirect to /forge/ configured (302)"
else
tr_fail "Missing root redirect to /forge/"
fi
}
# ─────────────────────────────────────────────────────────────────────────────
# Main
# ─────────────────────────────────────────────────────────────────────────────
main() {
tr_info "Extracting Caddyfile template from $EDGE_TEMPLATE"
# Extract Caddyfile
CADDYFILE=$(extract_caddyfile "$EDGE_TEMPLATE")
if [ -z "$CADDYFILE" ]; then
tr_fail "Could not extract Caddyfile template"
exit 1
fi
tr_pass "Caddyfile template extracted successfully"
# Run all validation checks
check_forgejo_routing
check_woodpecker_routing
check_staging_routing
check_chat_routing
check_root_redirect
# Summary
tr_section "Test Summary"
tr_info "Passed: $PASSED"
tr_info "Failed: $FAILED"
if [ "$FAILED" -gt 0 ]; then
tr_fail "Some checks failed"
exit 1
fi
tr_pass "All routing blocks validated!"
exit 0
}
main

View file

@ -1,210 +0,0 @@
#!/usr/bin/env bash
# tests/test-duplicate-service-detection.sh — Unit test for duplicate service detection
#
# Tests that the compose generator correctly detects duplicate service names
# between ENABLE_LLAMA_AGENT=1 and [agents.llama] TOML configuration.
set -euo pipefail
# Get the absolute path to the disinto root
DISINTO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
TEST_DIR=$(mktemp -d)
trap "rm -rf \"\$TEST_DIR\"" EXIT
FAILED=0
fail() { printf 'FAIL: %s\n' "$*" >&2; FAILED=1; }
pass() { printf 'PASS: %s\n' "$*"; }
# Test 1: Duplicate between ENABLE_LLAMA_AGENT and [agents.llama]
echo "=== Test 1: Duplicate between ENABLE_LLAMA_AGENT and [agents.llama] ==="
# Create projects directory and test project TOML with an agent named "llama"
mkdir -p "${TEST_DIR}/projects"
cat > "${TEST_DIR}/projects/test-project.toml" <<'TOMLEOF'
name = "test-project"
description = "Test project for duplicate detection"
[ci]
woodpecker_repo_id = "123"
[agents.llama]
base_url = "http://localhost:8080"
model = "qwen:latest"
roles = ["dev"]
forge_user = "llama-bot"
TOMLEOF
# Create a minimal compose file
cat > "${TEST_DIR}/docker-compose.yml" <<'COMPOSEEOF'
# Test compose file
services:
agents:
image: test:latest
command: echo "hello"
volumes:
test-data:
networks:
test-net:
COMPOSEEOF
# Set up the test environment
export FACTORY_ROOT="${TEST_DIR}"
export PROJECT_NAME="test-project"
export ENABLE_LLAMA_AGENT="1"
export FORGE_TOKEN=""
export FORGE_PASS=""
export CLAUDE_TIMEOUT="7200"
export POLL_INTERVAL="300"
export GARDENER_INTERVAL="21600"
export ARCHITECT_INTERVAL="21600"
export PLANNER_INTERVAL="43200"
export SUPERVISOR_INTERVAL="1200"
# Source the generators module and run the compose generator directly
source "${DISINTO_ROOT}/lib/generators.sh"
# Delete the compose file to force regeneration
rm -f "${TEST_DIR}/docker-compose.yml"
# Run the compose generator directly
if _generate_compose_impl 3000 false 2>&1 | tee "${TEST_DIR}/output.txt"; then
# Check if the output contains the duplicate error message
if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output.txt"; then
pass "Duplicate detection: correctly detected conflict between ENABLE_LLAMA_AGENT and [agents.llama]"
else
fail "Duplicate detection: should have detected conflict between ENABLE_LLAMA_AGENT and [agents.llama]"
cat "${TEST_DIR}/output.txt" >&2
fi
else
# Generator should fail with non-zero exit code
if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output.txt"; then
pass "Duplicate detection: correctly detected conflict and returned non-zero exit code"
else
fail "Duplicate detection: should have failed with duplicate error"
cat "${TEST_DIR}/output.txt" >&2
fi
fi
# Test 2: No duplicate when only ENABLE_LLAMA_AGENT is set (no conflicting TOML)
echo ""
echo "=== Test 2: No duplicate when only ENABLE_LLAMA_AGENT is set ==="
# Remove the projects directory created in Test 1
rm -rf "${TEST_DIR}/projects"
# Create a fresh compose file
cat > "${TEST_DIR}/docker-compose.yml" <<'COMPOSEEOF'
# Test compose file
services:
agents:
image: test:latest
volumes:
test-data:
networks:
test-net:
COMPOSEEOF
# Set ENABLE_LLAMA_AGENT
export ENABLE_LLAMA_AGENT="1"
# Delete the compose file to force regeneration
rm -f "${TEST_DIR}/docker-compose.yml"
if _generate_compose_impl 3000 false 2>&1 | tee "${TEST_DIR}/output2.txt"; then
if grep -q "Duplicate" "${TEST_DIR}/output2.txt"; then
fail "No duplicate: should not detect duplicate when only ENABLE_LLAMA_AGENT is set"
else
pass "No duplicate: correctly generated compose without duplicates"
fi
else
# Non-zero exit is fine if there's a legitimate reason (e.g., missing files)
if grep -q "Duplicate" "${TEST_DIR}/output2.txt"; then
fail "No duplicate: should not detect duplicate when only ENABLE_LLAMA_AGENT is set"
else
pass "No duplicate: generator failed for other reason (acceptable)"
fi
fi
# Test 3: Duplicate between two TOML agents with same name
echo ""
echo "=== Test 3: Duplicate between two TOML agents with same name ==="
rm -f "${TEST_DIR}/docker-compose.yml"
# Create projects directory for Test 3
mkdir -p "${TEST_DIR}/projects"
cat > "${TEST_DIR}/projects/project1.toml" <<'TOMLEOF'
name = "project1"
description = "First project"
[ci]
woodpecker_repo_id = "1"
[agents.llama]
base_url = "http://localhost:8080"
model = "qwen:latest"
roles = ["dev"]
forge_user = "llama-bot1"
TOMLEOF
cat > "${TEST_DIR}/projects/project2.toml" <<'TOMLEOF'
name = "project2"
description = "Second project"
[ci]
woodpecker_repo_id = "2"
[agents.llama]
base_url = "http://localhost:8080"
model = "qwen:latest"
roles = ["dev"]
forge_user = "llama-bot2"
TOMLEOF
cat > "${TEST_DIR}/docker-compose.yml" <<'COMPOSEEOF'
# Test compose file
services:
agents:
image: test:latest
volumes:
test-data:
networks:
test-net:
COMPOSEEOF
unset ENABLE_LLAMA_AGENT
# Delete the compose file to force regeneration
rm -f "${TEST_DIR}/docker-compose.yml"
if _generate_compose_impl 3000 false 2>&1 | tee "${TEST_DIR}/output3.txt"; then
if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output3.txt"; then
pass "Duplicate detection: correctly detected conflict between two [agents.llama] blocks"
else
fail "Duplicate detection: should have detected conflict between two [agents.llama] blocks"
cat "${TEST_DIR}/output3.txt" >&2
fi
else
if grep -q "Duplicate service name 'agents-llama'" "${TEST_DIR}/output3.txt"; then
pass "Duplicate detection: correctly detected conflict and returned non-zero exit code"
else
fail "Duplicate detection: should have failed with duplicate error"
cat "${TEST_DIR}/output3.txt" >&2
fi
fi
# Summary
echo ""
if [ "$FAILED" -ne 0 ]; then
echo "=== TESTS FAILED ==="
exit 1
fi
echo "=== ALL TESTS PASSED ==="

View file

@ -1,129 +0,0 @@
#!/usr/bin/env bash
# test-watchdog-process-group.sh — Test that claude_run_with_watchdog kills orphan children
#
# This test verifies that when claude_run_with_watchdog terminates the Claude process,
# all child processes (including those spawned by Claude's Bash tool) are also killed.
#
# Reproducer scenario:
# 1. Create a fake "claude" stub that:
# a. Spawns a long-running child process (sleep 3600)
# b. Writes a result marker to stdout to trigger idle detection
# c. Stays running
# 2. Run claude_run_with_watchdog with the stub
# 3. Before the fix: sleep child survives (orphaned to PID 1)
# 4. After the fix: sleep child dies (killed as part of process group with -PID)
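# For orientation, the process-group kill under test boils down to the following
# illustrative sketch (the real logic lives in claude_run_with_watchdog in lib/agent-sdk.sh):
#   set -m                         # job control: background jobs get their own process group
#   long_running_command & pid=$!
#   kill -TERM -- "-$pid"          # negative PID signals the whole group, not just the leader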
#
# Usage: ./tests/test-watchdog-process-group.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
TEST_TMP="/tmp/test-watchdog-$$"
LOGFILE="${TEST_TMP}/log.txt"
PASS=true
# shellcheck disable=SC2317
cleanup_test() {
rm -rf "$TEST_TMP"
}
trap cleanup_test EXIT INT TERM
mkdir -p "$TEST_TMP"
log() {
printf '[TEST] %s\n' "$*" | tee -a "$LOGFILE"
}
fail() {
printf '[TEST] FAIL: %s\n' "$*" | tee -a "$LOGFILE"
PASS=false
}
pass() {
printf '[TEST] PASS: %s\n' "$*" | tee -a "$LOGFILE"
}
# Export required environment variables
export CLAUDE_TIMEOUT=10 # Short timeout for testing
export CLAUDE_IDLE_GRACE=2 # Short grace period for testing
export LOGFILE="${LOGFILE}" # Required by agent-sdk.sh
# Create a fake claude stub that:
# 1. Spawns a long-running child process (sleep 3600) that will become an orphan if parent is killed
# 2. Writes a result marker to stdout (to trigger the watchdog's idle-after-result path)
# 3. Stays running so the watchdog can kill it
cat > "${TEST_TMP}/fake-claude" << 'FAKE_CLAUDE_EOF'
#!/usr/bin/env bash
# Fake claude that spawns a child and stays running
# Simulates Claude's behavior when it spawns a Bash tool command
# Write result marker to stdout (triggers watchdog idle detection)
echo '{"type":"result","session_id":"test-session-123","verdict":"APPROVE"}'
# Spawn a child that simulates Claude's Bash tool hanging
# This is the process that should be killed when the parent is terminated
sleep 3600 &
CHILD_PID=$!
# Log the child PID for debugging
echo "FAKE_CLAUDE_CHILD_PID=$CHILD_PID" >&2
# Stay running - sleep in a loop so the watchdog can kill us
while true; do
sleep 3600 &
wait $! 2>/dev/null || true
done
FAKE_CLAUDE_EOF
chmod +x "${TEST_TMP}/fake-claude"
log "Testing claude_run_with_watchdog process group cleanup..."
# Source the library and run claude_run_with_watchdog
cd "$SCRIPT_DIR"
source lib/agent-sdk.sh
log "Starting claude_run_with_watchdog with fake claude..."
# Run the function directly (not as a script)
# We need to capture output and redirect stderr
OUTPUT_FILE="${TEST_TMP}/output.txt"
timeout 35 bash -c "
source '${SCRIPT_DIR}/lib/agent-sdk.sh'
CLAUDE_TIMEOUT=10 CLAUDE_IDLE_GRACE=2 LOGFILE='${LOGFILE}' claude_run_with_watchdog '${TEST_TMP}/fake-claude' > '${OUTPUT_FILE}' 2>&1
exit \$?
" || true
# Give the watchdog a moment to clean up
log "Waiting for cleanup..."
sleep 5
# More precise check: look for sleep 3600 processes
# These would be the orphans from our fake claude
ORPHAN_COUNT=$(pgrep -a sleep 2>/dev/null | grep -c "sleep 3600" 2>/dev/null || echo "0")
if [ "$ORPHAN_COUNT" -gt 0 ]; then
log "Found $ORPHAN_COUNT orphan sleep 3600 processes:"
pgrep -a sleep | grep "sleep 3600"
fail "Orphan children found - process group cleanup did not work"
else
pass "No orphan children found - process group cleanup worked"
fi
# Also verify that the fake claude itself is not running
FAKE_CLAUDE_COUNT=$(pgrep -c -f "fake-claude" 2>/dev/null || echo "0")
if [ "$FAKE_CLAUDE_COUNT" -gt 0 ]; then
log "Found $FAKE_CLAUDE_COUNT fake-claude processes still running"
fail "Fake claude process(es) still running"
else
pass "Fake claude process terminated"
fi
# Summary
echo ""
if [ "$PASS" = true ]; then
log "All tests passed!"
exit 0
else
log "Some tests failed. See log at $LOGFILE"
exit 1
fi

View file

@ -34,13 +34,6 @@ setup_file() {
return 1
fi
done
# Enable kv-v2 at path=kv (production mount per S2 migration). Dev-mode
# vault only auto-mounts kv-v2 at secret/; tests must mirror the real
# cluster layout so vault-import.sh writes land where we read them.
curl -sf -H "X-Vault-Token: test-root-token" \
-X POST -d '{"type":"kv","options":{"version":"2"}}' \
"${VAULT_ADDR}/v1/sys/mounts/kv" >/dev/null
}
teardown_file() {
@ -97,7 +90,7 @@ setup() {
# Verify nothing was written to Vault
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/bots/review"
"${VAULT_ADDR}/v1/secret/data/disinto/bots/review"
[ "$status" -ne 0 ]
}
@ -112,21 +105,21 @@ setup() {
# Check bots/review
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/bots/review"
"${VAULT_ADDR}/v1/secret/data/disinto/bots/review"
[ "$status" -eq 0 ]
echo "$output" | grep -q "review-token"
echo "$output" | grep -q "review-pass"
# Check bots/dev-qwen
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/bots/dev-qwen"
"${VAULT_ADDR}/v1/secret/data/disinto/bots/dev-qwen"
[ "$status" -eq 0 ]
echo "$output" | grep -q "llama-token"
echo "$output" | grep -q "llama-pass"
# Check forge
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/shared/forge"
"${VAULT_ADDR}/v1/secret/data/disinto/shared/forge"
[ "$status" -eq 0 ]
echo "$output" | grep -q "generic-forge-token"
echo "$output" | grep -q "generic-forge-pass"
@ -134,17 +127,16 @@ setup() {
# Check woodpecker
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/shared/woodpecker"
"${VAULT_ADDR}/v1/secret/data/disinto/shared/woodpecker"
[ "$status" -eq 0 ]
echo "$output" | grep -q "wp-agent-secret"
# Forgejo keys are normalized: WP_FORGEJO_* → forgejo_* (no wp_ prefix in key name)
echo "$output" | grep -q "wp-forgejo-client"
echo "$output" | grep -q "wp-forgejo-secret"
echo "$output" | grep -q "wp-token"
# Check chat
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/shared/chat"
"${VAULT_ADDR}/v1/secret/data/disinto/shared/chat"
[ "$status" -eq 0 ]
echo "$output" | grep -q "forward-auth-secret"
echo "$output" | grep -q "chat-client-id"
@ -152,7 +144,7 @@ setup() {
# Check runner tokens from sops
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/runner/GITHUB_TOKEN"
"${VAULT_ADDR}/v1/secret/data/disinto/runner/GITHUB_TOKEN"
[ "$status" -eq 0 ]
echo "$output" | jq -e '.data.data.value == "github-test-token-abc123"'
}
@ -202,7 +194,7 @@ setup() {
# Verify the new value was written (path is disinto/bots/dev-qwen, key is token)
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/bots/dev-qwen"
"${VAULT_ADDR}/v1/secret/data/disinto/bots/dev-qwen"
[ "$status" -eq 0 ]
echo "$output" | jq -e '.data.data.token == "MODIFIED-LLAMA-TOKEN"'
}
@ -236,13 +228,13 @@ setup() {
# Verify each value round-trips intact.
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/bots/review"
"${VAULT_ADDR}/v1/secret/data/disinto/bots/review"
[ "$status" -eq 0 ]
echo "$output" | jq -e '.data.data.token == "abc|xyz"'
echo "$output" | jq -e '.data.data.pass == "p1|p2|p3"'
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
"${VAULT_ADDR}/v1/kv/data/disinto/shared/forge"
"${VAULT_ADDR}/v1/secret/data/disinto/shared/forge"
[ "$status" -eq 0 ]
echo "$output" | jq -e '.data.data.admin_token == "admin|with|pipes"'
}
@ -295,8 +287,6 @@ setup() {
"deploy-key-test"
"npm-test-token"
"dockerhub-test-token"
# Note: forgejo-client and forgejo-secret are NOT in the output
# because they are read from Vault, not logged
)
for pattern in "${secret_patterns[@]}"; do

View file

@ -21,7 +21,6 @@ This control plane runs on the public edge host (Debian DO box) and provides:
│ │ disinto-register│ │ /var/lib/disinto/ │ │
│ │ (authorized_keys│ │ ├── registry.json (source of truth) │ │
│ │ forced cmd) │ │ ├── registry.lock (flock) │ │
│ │ │ │ └── allowlist.json (admin-approved names) │ │
│ │ │ │ └── authorized_keys (rebuildable) │ │
│ └────────┬─────────┘ └───────────────────────────────────────────────┘ │
│ │ │
@ -80,7 +79,7 @@ curl -sL https://raw.githubusercontent.com/disinto-admin/disinto/fix/issue-621/t
- `disinto-tunnel` — no password, no shell, only receives reverse tunnels
2. **Creates data directory**:
- `/var/lib/disinto/` with `registry.json`, `registry.lock`, `allowlist.json`
- `/var/lib/disinto/` with `registry.json`, `registry.lock`
- Permissions: `root:disinto-register 0750`
3. **Installs Caddy**:
@ -181,43 +180,6 @@ Shows all registered tunnels with their ports and FQDNs.
}
```
## Allowlist
The allowlist prevents project name squatting by requiring admin approval before a name can be registered. It is **opt-in**: when `allowlist.json` is empty (no project entries), registration works as before. Once the admin adds entries, only approved names are accepted.
### Setup
Edit `/var/lib/disinto/allowlist.json` as root:
```json
{
"version": 1,
"allowed": {
"myproject": {
"pubkey_fingerprint": "SHA256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
},
"open-project": {
"pubkey_fingerprint": ""
}
}
}
```
- **With `pubkey_fingerprint`**: Only the specified SSH key can register this project name. The fingerprint is the SHA256 output of `ssh-keygen -lf <keyfile>` (see the example after this list).
- **With empty `pubkey_fingerprint`**: Any caller may register this project name (name reservation without key binding).
- **Not listed**: Registration is refused with `{"error":"name not approved"}`.
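To compute the fingerprint for a key you want to bind, run `ssh-keygen -lf` against the caller's public key (illustrative commands; the key path is an example):

```bash
ssh-keygen -lf ~/.ssh/id_ed25519.pub | awk '{print $2}'
# => SHA256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  (value for pubkey_fingerprint)
```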
### Workflow
1. Admin edits `/var/lib/disinto/allowlist.json` (via ops repo PR, or direct `ssh root@edge`).
2. File is `root:root 0644`; `disinto-register` only reads it; `register.sh` never mutates it.
3. Callers run `register` as usual. The allowlist is checked transparently.
### Security
- The allowlist is a **first-come, first-served defense**: once a name is approved for a key, no one else can claim it.
- It does **not** replace per-operation ownership checks (sibling issue #1094) — it only prevents the initial race.
## Recovery
### After State Loss
@ -312,7 +274,6 @@ ssh disinto-register@edge.disinto.ai "register myproject $(cat ~/.ssh/id_ed25519
- `lib/ports.sh` — Port allocator over `20000-29999`, jq-based, flockd
- `lib/authorized_keys.sh` — Deterministic rebuild of `disinto-tunnel` authorized_keys
- `lib/caddy.sh` — POST to Caddy admin API for route mapping
- `/var/lib/disinto/allowlist.json` — Admin-approved project name allowlist (root-owned, read-only by register.sh)
## Dependencies

View file

@ -7,7 +7,7 @@
#
# What it does:
# 1. Creates users: disinto-register, disinto-tunnel
# 2. Creates /var/lib/disinto/ with registry.json, registry.lock, allowlist.json
# 2. Creates /var/lib/disinto/ with registry.json, registry.lock
# 3. Installs Caddy with Gandi DNS plugin
# 4. Sets up SSH authorized_keys for both users
# 5. Installs control plane scripts to /opt/disinto-edge/
@ -44,7 +44,6 @@ REGISTRY_DIR="/var/lib/disinto"
CADDY_VERSION="2.8.4"
DOMAIN_SUFFIX="disinto.ai"
EXTRA_CADDYFILE="/etc/caddy/extra.d/*.caddy"
ADMIN_TAG="admin"
usage() {
cat <<EOF
@ -58,7 +57,6 @@ Options:
--domain-suffix <suffix> Domain suffix for tunnels (default: disinto.ai)
--extra-caddyfile <path> Import path for operator-owned Caddy config
(default: /etc/caddy/extra.d/*.caddy)
--admin-tag <name> Caller tag for the initial admin key (default: admin)
-h, --help Show this help
Example:
@ -93,10 +91,6 @@ while [[ $# -gt 0 ]]; do
EXTRA_CADDYFILE="$2"
shift 2
;;
--admin-tag)
ADMIN_TAG="$2"
shift 2
;;
-h|--help)
usage
;;
@ -158,53 +152,8 @@ LOCK_FILE="${REGISTRY_DIR}/registry.lock"
touch "$LOCK_FILE"
chmod 0644 "$LOCK_FILE"
# Initialize allowlist.json (empty = no restrictions until admin populates)
ALLOWLIST_FILE="${REGISTRY_DIR}/allowlist.json"
if [ ! -f "$ALLOWLIST_FILE" ]; then
echo '{"version":1,"allowed":{}}' > "$ALLOWLIST_FILE"
chmod 0644 "$ALLOWLIST_FILE"
chown root:root "$ALLOWLIST_FILE"
log_info "Initialized allowlist: ${ALLOWLIST_FILE}"
fi
# =============================================================================
# Step 3: Create audit log directory and logrotate config
# =============================================================================
log_info "Setting up audit log..."
LOG_DIR="/var/log/disinto"
LOG_FILE="${LOG_DIR}/edge-register.log"
mkdir -p "$LOG_DIR"
chown root:disinto-register "$LOG_DIR"
chmod 0750 "$LOG_DIR"
# Touch the log file so it exists from day one
touch "$LOG_FILE"
chmod 0640 "$LOG_FILE"
chown root:disinto-register "$LOG_FILE"
# Install logrotate config (daily rotation, 30 days retention)
LOGROTATE_CONF="/etc/logrotate.d/disinto-edge"
cat > "$LOGROTATE_CONF" <<EOF
${LOG_FILE} {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 0640 root disinto-register
copytruncate
}
EOF
chmod 0644 "$LOGROTATE_CONF"
log_info "Audit log: ${LOG_FILE}"
log_info "Logrotate config: ${LOGROTATE_CONF}"
# =============================================================================
# Step 4: Install Caddy with Gandi DNS plugin
# Step 3: Install Caddy with Gandi DNS plugin
# =============================================================================
log_info "Installing Caddy ${CADDY_VERSION} with Gandi DNS plugin..."
@ -335,7 +284,7 @@ systemctl restart caddy 2>/dev/null || {
log_info "Caddy configured with admin API on 127.0.0.1:2019"
# =============================================================================
# Step 5: Install control plane scripts
# Step 4: Install control plane scripts
# =============================================================================
log_info "Installing control plane scripts to ${INSTALL_DIR}..."
@ -357,7 +306,7 @@ chmod 750 "${INSTALL_DIR}/lib"
log_info "Control plane scripts installed"
# =============================================================================
# Step 6: Set up SSH authorized_keys
# Step 5: Set up SSH authorized_keys
# =============================================================================
log_info "Setting up SSH authorized_keys..."
@ -399,7 +348,7 @@ source "${INSTALL_DIR}/lib/authorized_keys.sh"
rebuild_authorized_keys
# =============================================================================
# Step 7: Configure forced command for disinto-register
# Step 6: Configure forced command for disinto-register
# =============================================================================
log_info "Configuring forced command for disinto-register..."
@ -410,8 +359,8 @@ if [ -n "$ADMIN_PUBKEY" ]; then
KEY_TYPE="${ADMIN_PUBKEY%% *}"
KEY_DATA="${ADMIN_PUBKEY#* }"
# Create forced command entry with caller attribution tag
FORCED_CMD="restrict,command=\"${INSTALL_DIR}/register.sh --as ${ADMIN_TAG}\" ${KEY_TYPE} ${KEY_DATA}"
# Create forced command entry
FORCED_CMD="restrict,command=\"${INSTALL_DIR}/register.sh\" ${KEY_TYPE} ${KEY_DATA}"
# Replace the pubkey line
echo "$FORCED_CMD" > /home/disinto-register/.ssh/authorized_keys
@ -422,7 +371,7 @@ if [ -n "$ADMIN_PUBKEY" ]; then
fi
# =============================================================================
# Step 8: Final configuration
# Step 7: Final configuration
# =============================================================================
log_info "Configuring domain suffix: ${DOMAIN_SUFFIX}"

View file

@ -54,14 +54,13 @@ _registry_write() {
}
# Allocate a port for a project
# Usage: allocate_port <project> <pubkey> <fqdn> [<registered_by>]
# Usage: allocate_port <project> <pubkey> <fqdn>
# Returns: port number on stdout
# Writes: registry.json with project entry
allocate_port() {
local project="$1"
local pubkey="$2"
local fqdn="$3"
local registered_by="${4:-unknown}"
_ensure_registry_dir
@ -117,13 +116,11 @@ allocate_port() {
--arg pubkey "$pubkey" \
--arg fqdn "$fqdn" \
--arg timestamp "$timestamp" \
--arg registered_by "$registered_by" \
'.projects[$project] = {
"port": $port,
"fqdn": $fqdn,
"pubkey": $pubkey,
"registered_at": $timestamp,
"registered_by": $registered_by
"registered_at": $timestamp
}')
_registry_write "$new_registry"
@ -187,7 +184,7 @@ list_ports() {
local registry
registry=$(_registry_read)
echo "$registry" | jq -r '.projects | to_entries | map({name: .key, port: .value.port, fqdn: .value.fqdn, registered_by: (.value.registered_by // "unknown")}) | .[] | @json' 2>/dev/null
echo "$registry" | jq -r '.projects | to_entries | map({name: .key, port: .value.port, fqdn: .value.fqdn}) | .[] | @json' 2>/dev/null
}
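# Example output of list_ports (one JSON object per line; values are illustrative):
#   {"name":"myproject","port":20001,"fqdn":"myproject.disinto.ai"}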
# Get full project info from registry

View file

@ -5,13 +5,9 @@
# This script runs as a forced command for the disinto-register SSH user.
# It parses SSH_ORIGINAL_COMMAND and dispatches to register|deregister|list.
#
# Per-caller attribution: each admin key's forced-command passes --as <tag>,
# which is stored as registered_by in the registry. Missing --as defaults to
# "unknown" for backwards compatibility.
#
# Usage (via SSH):
# ssh disinto-register@edge "register <project> <pubkey>"
# ssh disinto-register@edge "deregister <project> <pubkey>"
# ssh disinto-register@edge "deregister <project>"
# ssh disinto-register@edge "list"
#
# Output: JSON on stdout
@ -29,68 +25,12 @@ source "${SCRIPT_DIR}/lib/authorized_keys.sh"
# Domain suffix
DOMAIN_SUFFIX="${DOMAIN_SUFFIX:-disinto.ai}"
# Reserved project names — operator-adjacent, internal roles, and subdomain-mode prefixes
RESERVED_NAMES=(www api admin root mail chat forge ci edge caddy disinto register tunnel)
# Allowlist path (root-owned, never mutated by this script)
ALLOWLIST_FILE="${ALLOWLIST_FILE:-/var/lib/disinto/allowlist.json}"
# Audit log path
AUDIT_LOG="${AUDIT_LOG:-/var/log/disinto/edge-register.log}"
# Captured error from check_allowlist (used for JSON response)
_ALLOWLIST_ERROR=""
# Caller tag (set via --as <tag> in forced command)
CALLER="unknown"
# Parse script arguments (from forced command, not SSH_ORIGINAL_COMMAND)
while [[ $# -gt 0 ]]; do
case $1 in
--as)
CALLER="$2"
shift 2
;;
*)
shift
;;
esac
done
# Append one line to the audit log.
# Usage: audit_log <action> <project> <port> <pubkey_fp>
# Fails silently — write errors are warned but never abort.
audit_log() {
local action="$1" project="$2" port="$3" pubkey_fp="$4"
local timestamp
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
local line="${timestamp} ${action} project=${project} port=${port} pubkey_fp=${pubkey_fp} caller=${CALLER}"
# Ensure log directory exists
local log_dir
log_dir=$(dirname "$AUDIT_LOG")
if [ ! -d "$log_dir" ]; then
mkdir -p "$log_dir" 2>/dev/null || {
echo "[WARN] audit log: cannot create ${log_dir}" >&2
return 0
}
chown root:disinto-register "$log_dir" 2>/dev/null || true
chmod 0750 "$log_dir"
fi
# Append — write failure is non-fatal
if ! printf '%s\n' "$line" >> "$AUDIT_LOG" 2>/dev/null; then
echo "[WARN] audit log: failed to write to ${AUDIT_LOG}" >&2
fi
}
# Print usage
usage() {
cat <<EOF
Usage:
register <project> <pubkey> Register a new tunnel
deregister <project> <pubkey> Remove a tunnel (requires owner pubkey)
deregister <project> Remove a tunnel
list List all registered tunnels
Example:
@ -99,75 +39,23 @@ EOF
exit 1
}
# Check whether the project/pubkey pair is allowed by the allowlist.
# Usage: check_allowlist <project> <pubkey>
# Returns: 0 if allowed, 1 if denied (prints error JSON to stderr)
check_allowlist() {
local project="$1"
local pubkey="$2"
# If allowlist file does not exist, allow all (opt-in policy)
if [ ! -f "$ALLOWLIST_FILE" ]; then
return 0
fi
# Look up the project in the allowlist
local entry
entry=$(jq -c --arg p "$project" '.allowed[$p] // empty' "$ALLOWLIST_FILE" 2>/dev/null) || entry=""
if [ -z "$entry" ]; then
# Project not in allowlist at all
_ALLOWLIST_ERROR="name not approved"
return 1
fi
# Project found — check pubkey fingerprint binding
local bound_fingerprint
bound_fingerprint=$(echo "$entry" | jq -r '.pubkey_fingerprint // ""' 2>/dev/null)
if [ -n "$bound_fingerprint" ]; then
# Fingerprint is bound — verify caller's pubkey matches
local caller_fingerprint
caller_fingerprint=$(ssh-keygen -lf /dev/stdin <<<"$pubkey" 2>/dev/null | awk '{print $2}') || caller_fingerprint=""
if [ -z "$caller_fingerprint" ]; then
_ALLOWLIST_ERROR="invalid pubkey for fingerprint check"
return 1
fi
if [ "$caller_fingerprint" != "$bound_fingerprint" ]; then
_ALLOWLIST_ERROR="pubkey does not match allowed key for this project"
return 1
fi
fi
return 0
}
# TODO(#713): Subdomain fallback — if subpath routing (#704/#708) fails, this
# function would need to register additional routes for forge.<project>,
# ci.<project>, chat.<project> subdomains (or accept a --subdomain parameter).
# See docs/edge-routing-fallback.md for the full pivot plan.
# Register a new tunnel
# Usage: do_register <project> <pubkey>
# When EDGE_ROUTING_MODE=subdomain, also registers forge.<project>, ci.<project>,
# and chat.<project> subdomain routes (see docs/edge-routing-fallback.md).
do_register() {
local project="$1"
local pubkey="$2"
# Validate project name — strict DNS label: lowercase alphanumeric, inner hyphens,
# 3-63 chars, no leading/trailing hyphen, no underscore (RFC 1035)
if ! [[ "$project" =~ ^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$ ]]; then
# Validate project name (alphanumeric, hyphens, underscores)
if ! [[ "$project" =~ ^[a-zA-Z0-9_-]+$ ]]; then
echo '{"error":"invalid project name"}'
exit 1
fi
# Check against reserved names
local reserved
for reserved in "${RESERVED_NAMES[@]}"; do
if [[ "$project" = "$reserved" ]]; then
echo '{"error":"name reserved"}'
exit 1
fi
done
# Extract key type and key from pubkey (format: "ssh-ed25519 AAAAC3...")
local key_type key
key_type=$(echo "$pubkey" | awk '{print $1}')
@ -187,65 +75,30 @@ do_register() {
# Full pubkey for registry
local full_pubkey="${key_type} ${key}"
# Check allowlist (opt-in: no file = allow all)
if ! check_allowlist "$project" "$full_pubkey"; then
echo "{\"error\":\"${_ALLOWLIST_ERROR}\"}"
exit 1
fi
# Allocate port (idempotent - returns existing if already registered)
local port
port=$(allocate_port "$project" "$full_pubkey" "${project}.${DOMAIN_SUFFIX}" "$CALLER")
port=$(allocate_port "$project" "$full_pubkey" "${project}.${DOMAIN_SUFFIX}")
# Add Caddy route for main project domain
# Add Caddy route
add_route "$project" "$port"
# Subdomain mode: register additional routes for per-service subdomains
local routing_mode="${EDGE_ROUTING_MODE:-subpath}"
if [ "$routing_mode" = "subdomain" ]; then
local subdomain
for subdomain in forge ci chat; do
add_route "${subdomain}.${project}" "$port"
done
fi
# Rebuild authorized_keys for tunnel user
rebuild_authorized_keys
# Reload Caddy
reload_caddy
# Audit log
local pubkey_fp
pubkey_fp=$(ssh-keygen -lf /dev/stdin <<<"$full_pubkey" 2>/dev/null | awk '{print $2}') || pubkey_fp="unknown"
audit_log "register" "$project" "$port" "$pubkey_fp"
# Build JSON response
local response="{\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\""
if [ "$routing_mode" = "subdomain" ]; then
response="${response},\"routing_mode\":\"subdomain\""
response="${response},\"subdomains\":{\"forge\":\"forge.${project}.${DOMAIN_SUFFIX}\",\"ci\":\"ci.${project}.${DOMAIN_SUFFIX}\",\"chat\":\"chat.${project}.${DOMAIN_SUFFIX}\"}"
fi
response="${response}}"
echo "$response"
# Return JSON response
echo "{\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\"}"
}
# Deregister a tunnel
# Usage: do_deregister <project> <pubkey>
# Usage: do_deregister <project>
do_deregister() {
local project="$1"
local caller_pubkey="$2"
if [ -z "$caller_pubkey" ]; then
echo '{"error":"deregister requires <project> <pubkey>"}'
exit 1
fi
# Record who is deregistering before removal
local deregistered_by="$CALLER"
# Get current port and pubkey before removing
local port pubkey_fp
# Get current port before removing
local port
port=$(get_port "$project")
if [ -z "$port" ]; then
@ -253,42 +106,20 @@ do_deregister() {
exit 1
fi
# Verify caller owns this project — pubkey must match stored value
local stored_pubkey
stored_pubkey=$(get_project_info "$project" | jq -r '.pubkey // empty' 2>/dev/null) || stored_pubkey=""
if [ "$caller_pubkey" != "$stored_pubkey" ]; then
echo '{"error":"pubkey mismatch"}'
exit 1
fi
pubkey_fp=$(ssh-keygen -lf /dev/stdin <<<"$stored_pubkey" 2>/dev/null | awk '{print $2}') || pubkey_fp="unknown"
# Remove from registry
free_port "$project" >/dev/null
# Remove Caddy route for main project domain
# Remove Caddy route
remove_route "$project"
# Subdomain mode: also remove per-service subdomain routes
local routing_mode="${EDGE_ROUTING_MODE:-subpath}"
if [ "$routing_mode" = "subdomain" ]; then
local subdomain
for subdomain in forge ci chat; do
remove_route "${subdomain}.${project}"
done
fi
# Rebuild authorized_keys for tunnel user
rebuild_authorized_keys
# Reload Caddy
reload_caddy
# Audit log
audit_log "deregister" "$project" "$port" "$pubkey_fp"
# Return JSON response
echo "{\"removed\":true,\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\",\"deregistered_by\":\"${deregistered_by}\"}"
echo "{\"removed\":true,\"port\":${port},\"fqdn\":\"${project}.${DOMAIN_SUFFIX}\"}"
}
# List all registered tunnels
@ -344,17 +175,13 @@ main() {
do_register "$project" "$pubkey"
;;
deregister)
# deregister <project> <pubkey>
local project="${args%% *}"
local pubkey="${args#* }"
if [ "$pubkey" = "$args" ]; then
pubkey=""
fi
if [ -z "$project" ] || [ -z "$pubkey" ]; then
echo '{"error":"deregister requires <project> <pubkey>"}'
# deregister <project>
local project="$args"
if [ -z "$project" ]; then
echo '{"error":"deregister requires <project>"}'
exit 1
fi
do_deregister "$project" "$pubkey"
do_deregister "$project"
;;
list)
do_list

View file

@ -0,0 +1,113 @@
#!/usr/bin/env bash
set -euo pipefail
# verify-chat-sandbox.sh — One-shot sandbox verification for disinto-chat (#706)
#
# Runs against a live compose project and asserts hardening constraints.
# Exit 0 if all pass, non-zero otherwise.
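# Typical invocation (illustrative; run from the repo checkout with the compose stack up):
#   ./verify-chat-sandbox.sh && echo "sandbox hardening holds"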
CONTAINER="disinto-chat"
PASS=0
FAIL=0
pass() { printf ' ✓ %s\n' "$1"; PASS=$((PASS + 1)); }
fail() { printf ' ✗ %s\n' "$1"; FAIL=$((FAIL + 1)); }
echo "=== disinto-chat sandbox verification ==="
echo
# --- docker inspect checks ---
inspect_json=$(docker inspect "$CONTAINER" 2>/dev/null) || {
echo "ERROR: container '$CONTAINER' not found or not running"
exit 1
}
# ReadonlyRootfs
readonly_rootfs=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['ReadonlyRootfs'])")
if [ "$readonly_rootfs" = "True" ]; then
pass "ReadonlyRootfs=true"
else
fail "ReadonlyRootfs expected true, got $readonly_rootfs"
fi
# CapAdd — should be null or empty
cap_add=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['CapAdd'])")
if [ "$cap_add" = "None" ] || [ "$cap_add" = "[]" ]; then
pass "CapAdd=null (no extra capabilities)"
else
fail "CapAdd expected null, got $cap_add"
fi
# CapDrop — should contain ALL
cap_drop=$(echo "$inspect_json" | python3 -c "import sys,json; caps=json.load(sys.stdin)[0]['HostConfig']['CapDrop'] or []; print(' '.join(caps))")
if echo "$cap_drop" | grep -q "ALL"; then
pass "CapDrop contains ALL"
else
fail "CapDrop expected ALL, got: $cap_drop"
fi
# PidsLimit
pids_limit=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['PidsLimit'])")
if [ "$pids_limit" = "128" ]; then
pass "PidsLimit=128"
else
fail "PidsLimit expected 128, got $pids_limit"
fi
# Memory limit (512MB = 536870912 bytes)
mem_limit=$(echo "$inspect_json" | python3 -c "import sys,json; print(json.load(sys.stdin)[0]['HostConfig']['Memory'])")
if [ "$mem_limit" = "536870912" ]; then
pass "Memory=512m"
else
fail "Memory expected 536870912, got $mem_limit"
fi
# SecurityOpt — must contain no-new-privileges
sec_opt=$(echo "$inspect_json" | python3 -c "import sys,json; opts=json.load(sys.stdin)[0]['HostConfig']['SecurityOpt'] or []; print(' '.join(opts))")
if echo "$sec_opt" | grep -q "no-new-privileges"; then
pass "SecurityOpt contains no-new-privileges"
else
fail "SecurityOpt missing no-new-privileges (got: $sec_opt)"
fi
# No docker.sock bind mount
binds=$(echo "$inspect_json" | python3 -c "import sys,json; binds=json.load(sys.stdin)[0]['HostConfig']['Binds'] or []; print(' '.join(binds))")
if echo "$binds" | grep -q "docker.sock"; then
fail "docker.sock is bind-mounted"
else
pass "No docker.sock mount"
fi
echo
# --- runtime exec checks ---
# touch /root/x should fail (read-only rootfs + unprivileged user)
if docker exec "$CONTAINER" touch /root/x 2>/dev/null; then
fail "touch /root/x succeeded (should fail)"
else
pass "touch /root/x correctly denied"
fi
# /var/run/docker.sock must not exist
if docker exec "$CONTAINER" ls /var/run/docker.sock 2>/dev/null; then
fail "/var/run/docker.sock is accessible"
else
pass "/var/run/docker.sock not accessible"
fi
# /etc/shadow should not be readable
if docker exec "$CONTAINER" cat /etc/shadow 2>/dev/null; then
fail "cat /etc/shadow succeeded (should fail)"
else
pass "cat /etc/shadow correctly denied"
fi
echo
echo "=== Results: $PASS passed, $FAIL failed ==="
if [ "$FAIL" -gt 0 ]; then
exit 1
fi
exit 0

View file

@ -40,6 +40,7 @@ POLICIES_DIR="${REPO_ROOT}/vault/policies"
# shellcheck source=../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
_hvault_default_env
log() { printf '[vault-apply] %s\n' "$*"; }
die() { printf '[vault-apply] ERROR: %s\n' "$*" >&2; exit 1; }
@ -94,12 +95,6 @@ if [ "$dry_run" = true ]; then
fi
# ── Live run: Vault connectivity check ───────────────────────────────────────
# Default the local-cluster Vault env (see lib/hvault.sh::_hvault_default_env).
# `disinto init` does not export VAULT_ADDR before calling this script — the
# server is reachable on 127.0.0.1:8200 and the root token lives at
# /etc/vault.d/root.token in the common fresh-LXC case (issue #912).
_hvault_default_env
# hvault_token_lookup both resolves the token (env or /etc/vault.d/root.token)
# and confirms the server is reachable with a valid token. Fail fast here so
# the per-file loop below doesn't emit N identical "HTTP 403" errors.

View file

@ -49,6 +49,7 @@ ROLES_FILE="${REPO_ROOT}/vault/roles.yaml"
# shellcheck source=../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
_hvault_default_env
# Constants shared across every role — the issue's AC names these as the
# invariant token shape for Nomad workload identity. Bumping any of these
@ -219,10 +220,6 @@ if [ "$dry_run" = true ]; then
fi
# ── Live run: Vault connectivity check ───────────────────────────────────────
# Default the local-cluster Vault env (see lib/hvault.sh::_hvault_default_env).
# Called transitively from vault-nomad-auth.sh during `disinto init`, which
# does not export VAULT_ADDR in the common fresh-LXC case (issue #912).
_hvault_default_env
if ! hvault_token_lookup >/dev/null; then
die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
fi

View file

@ -8,13 +8,8 @@
# Usage:
# vault-import.sh \
# --env /path/to/.env \
# [--sops /path/to/.env.vault.enc] \
# [--age-key /path/to/age/keys.txt]
#
# Flag validation (S2.5, issue #883):
#   --sops without --age-key → error.
#   --age-key without --sops → error.
# --env alone (no sops) → OK; imports only the plaintext half.
# --sops /path/to/.env.vault.enc \
# --age-key /path/to/age/keys.txt
#
# Mapping:
# From .env:
@ -151,9 +146,9 @@ _kv_put_secret() {
-X POST \
-d "$payload" \
-o "$tmpfile" \
"${VAULT_ADDR}/v1/${VAULT_KV_MOUNT:-kv}/data/${path}")" || {
"${VAULT_ADDR}/v1/secret/data/${path}")" || {
rm -f "$tmpfile"
_err "Failed to write to Vault at ${VAULT_KV_MOUNT:-kv}/data/${path}: curl error"
_err "Failed to write to Vault at secret/data/${path}: curl error"
return 1
}
rm -f "$tmpfile"
@ -164,15 +159,15 @@ _kv_put_secret() {
return 0
;;
404)
_err "KV path not found: ${VAULT_KV_MOUNT:-kv}/data/${path}"
_err "KV path not found: secret/data/${path}"
return 1
;;
403)
_err "Permission denied writing to ${VAULT_KV_MOUNT:-kv}/data/${path}"
_err "Permission denied writing to secret/data/${path}"
return 1
;;
*)
_err "Failed to write to Vault at ${VAULT_KV_MOUNT:-kv}/data/${path}: HTTP $http_code"
_err "Failed to write to Vault at secret/data/${path}: HTTP $http_code"
return 1
;;
esac
@ -241,15 +236,14 @@ vault-import.sh — Import .env and sops-decrypted secrets into Vault KV
Usage:
vault-import.sh \
--env /path/to/.env \
[--sops /path/to/.env.vault.enc] \
[--age-key /path/to/age/keys.txt] \
--sops /path/to/.env.vault.enc \
--age-key /path/to/age/keys.txt \
[--dry-run]
Options:
--env Path to .env file (required)
--sops Path to sops-encrypted .env.vault.enc file (optional;
requires --age-key when set)
--age-key Path to age keys file (required when --sops is set)
--sops Path to sops-encrypted .env.vault.enc file (required)
--age-key Path to age keys file (required)
--dry-run Print import plan without writing to Vault (optional)
--help Show this help message
@ -278,62 +272,48 @@ EOF
esac
done
# Validate required arguments. --sops and --age-key are paired: if one
# is set, the other must be too. --env alone (no sops half) is valid —
# imports only the plaintext dotenv. Spec: S2.5 / issue #883 / #912.
# Validate required arguments
if [ -z "$env_file" ]; then
_die "Missing required argument: --env"
fi
if [ -n "$sops_file" ] && [ -z "$age_key_file" ]; then
_die "--sops requires --age-key"
if [ -z "$sops_file" ]; then
_die "Missing required argument: --sops"
fi
if [ -n "$age_key_file" ] && [ -z "$sops_file" ]; then
_die "--age-key requires --sops"
if [ -z "$age_key_file" ]; then
_die "Missing required argument: --age-key"
fi
# Validate files exist
if [ ! -f "$env_file" ]; then
_die "Environment file not found: $env_file"
fi
if [ -n "$sops_file" ] && [ ! -f "$sops_file" ]; then
if [ ! -f "$sops_file" ]; then
_die "Sops file not found: $sops_file"
fi
if [ -n "$age_key_file" ] && [ ! -f "$age_key_file" ]; then
if [ ! -f "$age_key_file" ]; then
_die "Age key file not found: $age_key_file"
fi
# Security check: age key permissions (only when an age key is provided —
# --env-only imports never touch the age key).
if [ -n "$age_key_file" ]; then
# Security check: age key permissions
_validate_age_key_perms "$age_key_file"
fi
# Source the Vault helpers and default the local-cluster VAULT_ADDR +
# VAULT_TOKEN before the localhost safety check runs. `disinto init`
# does not export these in the common fresh-LXC case (issue #912).
source "$(dirname "$0")/../lib/hvault.sh"
_hvault_default_env
# Security check: VAULT_ADDR must be localhost
_check_vault_addr
# Source the Vault helpers
source "$(dirname "$0")/../lib/hvault.sh"
_hvault_default_env
# Load .env file
_log "Loading environment from: $env_file"
_load_env_file "$env_file"
# Decrypt sops file when --sops was provided. On the --env-only path
# (empty $sops_file) the sops_env stays empty and the per-token loop
# below silently skips runner-token imports — exactly the "only
# plaintext half" spec from S2.5.
local sops_env=""
if [ -n "$sops_file" ]; then
# Decrypt sops file
_log "Decrypting sops file: $sops_file"
local sops_env
sops_env="$(_decrypt_sops "$sops_file" "$age_key_file")"
# shellcheck disable=SC2086
eval "$sops_env"
else
_log "No --sops flag — skipping sops decryption (importing plaintext .env only)"
fi
# Collect all import operations
declare -a operations=()
@ -391,13 +371,7 @@ EOF
local val="${!key}"
if [ -n "$val" ]; then
local lowercase_key="${key,,}"
# Normalize WP_FORGEJO_* → forgejo_* (strip wp_ prefix to match template)
if [[ "$lowercase_key" =~ ^wp_(.+)$ ]]; then
vault_key="${BASH_REMATCH[1]}"
else
vault_key="$lowercase_key"
fi
operations+=("woodpecker|$vault_key|$env_file|$key")
operations+=("woodpecker|$lowercase_key|$env_file|$key")
fi
done
@ -424,12 +398,8 @@ EOF
if $dry_run; then
_log "=== DRY-RUN: Import plan ==="
_log "Environment file: $env_file"
if [ -n "$sops_file" ]; then
_log "Sops file: $sops_file"
_log "Age key: $age_key_file"
else
_log "Sops file: (none — --env-only import)"
fi
_log ""
_log "Planned operations:"
for op in "${operations[@]}"; do
@ -444,12 +414,8 @@ EOF
_log "=== Starting Vault import ==="
_log "Environment file: $env_file"
if [ -n "$sops_file" ]; then
_log "Sops file: $sops_file"
_log "Age key: $age_key_file"
else
_log "Sops file: (none — --env-only import)"
fi
_log ""
local created=0

View file

@ -1,176 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# tools/vault-seed-agents.sh — Idempotent seed for all bot KV paths
#
# Part of the Nomad+Vault migration (S4.1, issue #955). Populates
# kv/disinto/bots/<role> with token + pass for each of the 7 agent roles
# plus the vault bot. Handles the "fresh factory, no .env import" case.
#
# Companion to tools/vault-import.sh — when that runs against a box with
# an existing stack, it overwrites seeded values with real ones.
#
# Idempotency contract (per bot):
# - Both token and pass present → skip, log "<role> unchanged".
# - Either missing → generate random values for missing keys, preserve
# existing keys, write back atomically.
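#   Worked example (illustrative): a path holding {token: "abc", pass: ""} keeps
#   token "abc", gains a freshly generated pass, and a second run is a no-op.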
#
# Preconditions:
# - Vault reachable + unsealed at $VAULT_ADDR.
# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable.
# - curl, jq, openssl
#
# Usage:
# tools/vault-seed-agents.sh
# tools/vault-seed-agents.sh --dry-run
#
# Exit codes:
# 0 success (seed applied, or already applied)
# 1 precondition / API / mount-mismatch failure
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
KV_MOUNT="kv"
TOKEN_BYTES=32 # 32 bytes → 64 hex chars
PASS_BYTES=16 # 16 bytes → 32 hex chars
# All bot roles seeded by this script.
BOT_ROLES=(dev review gardener architect planner predictor supervisor vault)
LOG_TAG="[vault-seed-agents]"
log() { printf '%s %s\n' "$LOG_TAG" "$*"; }
die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; }
# ── Flag parsing ─────────────────────────────────────────────────────────────
# while/shift shape — distinct from forgejo (arity:value case) and
# woodpecker (for-loop).
DRY_RUN=0
while [ $# -gt 0 ]; do
case "$1" in
--dry-run) DRY_RUN=1 ;;
-h|--help)
printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")"
printf 'Seed kv/disinto/bots/<role> with token + pass for all agent\n'
printf 'roles. Idempotent: existing non-empty values are preserved.\n\n'
printf ' --dry-run Print planned actions without writing.\n'
exit 0
;;
*) die "invalid argument: ${1} (try --help)" ;;
esac
shift
done
# ── Preconditions ────────────────────────────────────────────────────────────
for bin in curl jq openssl; do
command -v "$bin" >/dev/null 2>&1 \
|| die "required binary not found: ${bin}"
done
[ -n "${VAULT_ADDR:-}" ] \
|| die "VAULT_ADDR unset — e.g. export VAULT_ADDR=http://127.0.0.1:8200"
hvault_token_lookup >/dev/null \
|| die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
# ── Step 1: ensure kv/ mount exists and is KV v2 ────────────────────────────
log "── Step 1: ensure ${KV_MOUNT}/ is KV v2 ──"
export DRY_RUN
hvault_ensure_kv_v2 "$KV_MOUNT" "${LOG_TAG}" \
|| die "KV mount check failed"
# ── Step 2: seed each bot role ───────────────────────────────────────────────
total_generated=0
# Check if shared forge credentials exist for dev role fallback
shared_forge_exists=0
shared_forge_raw="$(hvault_get_or_empty "${KV_MOUNT}/data/disinto/shared/forge")" \
|| true
if [ -n "$shared_forge_raw" ]; then
shared_forge_token="$(printf '%s' "$shared_forge_raw" | jq -r '.data.data.token // ""')"
shared_forge_pass="$(printf '%s' "$shared_forge_raw" | jq -r '.data.data.pass // ""')"
if [ -n "$shared_forge_token" ] && [ -n "$shared_forge_pass" ]; then
shared_forge_exists=1
fi
fi
for role in "${BOT_ROLES[@]}"; do
kv_logical="disinto/bots/${role}"
kv_api="${KV_MOUNT}/data/${kv_logical}"
log "── seed ${kv_logical} ──"
existing_raw="$(hvault_get_or_empty "${kv_api}")" \
|| die "failed to read ${kv_api}"
existing_token=""
existing_pass=""
existing_data="{}"
if [ -n "$existing_raw" ]; then
existing_data="$(printf '%s' "$existing_raw" | jq '.data.data // {}')"
existing_token="$(printf '%s' "$existing_raw" | jq -r '.data.data.token // ""')"
existing_pass="$(printf '%s' "$existing_raw" | jq -r '.data.data.pass // ""')"
fi
generated=()
desired_token="$existing_token"
desired_pass="$existing_pass"
# Special case: dev role uses shared forge credentials if available
if [ "$role" = "dev" ] && [ "$shared_forge_exists" -eq 1 ]; then
# Use shared FORGE_TOKEN + FORGE_PASS for dev role
if [ -z "$existing_token" ]; then
desired_token="$shared_forge_token"
generated+=("token")
fi
if [ -z "$existing_pass" ]; then
desired_pass="$shared_forge_pass"
generated+=("pass")
fi
else
# Generate random values for missing keys
if [ -z "$existing_token" ]; then
generated+=("token")
fi
if [ -z "$existing_pass" ]; then
generated+=("pass")
fi
for key in "${generated[@]}"; do
case "$key" in
token) desired_token="$(openssl rand -hex "$TOKEN_BYTES")" ;;
pass) desired_pass="$(openssl rand -hex "$PASS_BYTES")" ;;
esac
done
fi
if [ "${#generated[@]}" -eq 0 ]; then
log "${role}: unchanged"
continue
fi
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] ${role}: would generate ${generated[*]}"
total_generated=$(( total_generated + ${#generated[@]} ))
continue
fi
# Merge new keys into existing data to preserve any keys we don't own.
payload="$(printf '%s' "$existing_data" \
| jq --arg t "$desired_token" --arg p "$desired_pass" \
'{data: (. + {token: $t, pass: $p})}')"
_hvault_request POST "${kv_api}" "$payload" >/dev/null \
|| die "failed to write ${kv_api}"
log "${role}: generated ${generated[*]}"
total_generated=$(( total_generated + ${#generated[@]} ))
done
if [ "$total_generated" -eq 0 ]; then
log "all bot paths already seeded — no-op"
else
log "done — ${total_generated} key(s) seeded across ${#BOT_ROLES[@]} bot paths"
fi
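
For orientation, a dry run of this script against a box where only the dev role is already seeded would log roughly the following (illustrative output; lines emitted by hvault_ensure_kv_v2 are elided):

  $ export VAULT_ADDR=http://127.0.0.1:8200
  $ tools/vault-seed-agents.sh --dry-run
  [vault-seed-agents] ── Step 1: ensure kv/ is KV v2 ──
  [vault-seed-agents] ── seed disinto/bots/dev ──
  [vault-seed-agents] dev: unchanged
  [vault-seed-agents] ── seed disinto/bots/review ──
  [vault-seed-agents] [dry-run] review: would generate token pass
  ...
  [vault-seed-agents] done — 14 key(s) seeded across 8 bot paths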

View file

@ -1,115 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# tools/vault-seed-chat.sh — Idempotent seed for kv/disinto/shared/chat
#
# Part of the Nomad+Vault migration (S5.2, issue #989). Populates the KV v2
# path that nomad/jobs/chat.hcl reads from, so a clean-install factory
# (no old-stack secrets to import) still has per-key values for
# CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, and FORWARD_AUTH_SECRET.
#
# Companion to tools/vault-import.sh (S2.2) — when that import runs against
# a box with an existing stack, it overwrites these seeded values with the
# real ones. Order doesn't matter: whichever runs last wins, and both
# scripts are idempotent in the sense that re-running never rotates an
# existing non-empty key.
#
# Uses _hvault_seed_key (lib/hvault.sh) for each key — the helper reads
# existing data and merges to preserve sibling keys (KV v2 replaces .data
# atomically).
#
# Preconditions:
# - Vault reachable + unsealed at $VAULT_ADDR.
# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable.
# - The `kv/` mount is enabled as KV v2.
#
# Requires: VAULT_ADDR, VAULT_TOKEN, curl, jq, openssl
#
# Usage:
# tools/vault-seed-chat.sh
# tools/vault-seed-chat.sh --dry-run
#
# Exit codes:
# 0 success (seed applied, or already applied)
# 1 precondition / API / mount-mismatch failure
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
KV_MOUNT="kv"
KV_LOGICAL_PATH="disinto/shared/chat"
# Keys to seed — array-driven loop (structurally distinct from forgejo's
# sequential if-blocks and agents' role loop).
SEED_KEYS=(chat_oauth_client_id chat_oauth_client_secret forward_auth_secret)
LOG_TAG="[vault-seed-chat]"
log() { printf '%s %s\n' "$LOG_TAG" "$*"; }
die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; }
# ── Flag parsing — [[ ]] guard + case: shape distinct from forgejo
# (arity:value case), woodpecker (for-loop), agents (while/shift).
DRY_RUN=0
if [[ $# -gt 0 ]]; then
case "$1" in
--dry-run) DRY_RUN=1 ;;
-h|--help)
printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")"
printf 'Seed kv/disinto/shared/chat with random OAuth client\n'
printf 'credentials and forward auth secret if missing.\n'
printf 'Idempotent: existing non-empty values are preserved.\n\n'
printf ' --dry-run Show what would be seeded without writing.\n'
exit 0
;;
*) die "invalid argument: ${1} (try --help)" ;;
esac
fi
# ── Preconditions — inline check-or-die (shape distinct from agents' array
# loop and forgejo's continuation-line style) ─────────────────────────────
command -v curl >/dev/null 2>&1 || die "curl not found"
command -v jq >/dev/null 2>&1 || die "jq not found"
command -v openssl >/dev/null 2>&1 || die "openssl not found"
[ -n "${VAULT_ADDR:-}" ] || die "VAULT_ADDR unset — export VAULT_ADDR=http://127.0.0.1:8200"
hvault_token_lookup >/dev/null || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
# ── Step 1/2: ensure kv/ mount exists and is KV v2 ───────────────────────────
log "── Step 1/2: ensure ${KV_MOUNT}/ is KV v2 ──"
export DRY_RUN
hvault_ensure_kv_v2 "$KV_MOUNT" "${LOG_TAG}" \
|| die "KV mount check failed"
# ── Step 2/2: seed missing keys via _hvault_seed_key helper ──────────────────
log "── Step 2/2: seed ${KV_LOGICAL_PATH} ──"
generated=()
for key in "${SEED_KEYS[@]}"; do
if [ "$DRY_RUN" -eq 1 ]; then
# Check existence without writing
existing=$(hvault_kv_get "$KV_LOGICAL_PATH" "$key" 2>/dev/null) || true
if [ -z "$existing" ]; then
generated+=("$key")
log "[dry-run] ${key} would be generated"
else
log "[dry-run] ${key} unchanged"
fi
else
rc=0
_hvault_seed_key "$KV_LOGICAL_PATH" "$key" || rc=$?
case "$rc" in
0) generated+=("$key"); log "${key} generated" ;;
1) log "${key} unchanged" ;;
*) die "API error seeding ${key} (rc=${rc})" ;;
esac
fi
done
if [ "${#generated[@]}" -eq 0 ]; then
log "all keys present — no-op"
else
log "done — ${#generated[@]} key(s) seeded at kv/${KV_LOGICAL_PATH}"
fi
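
The _hvault_seed_key helper itself lives in lib/hvault.sh and is not part of this diff. Based on the contract described in the header above and the rc handling in the loop (0 = generated, 1 = unchanged, anything else = API error), it plausibly looks something like the sketch below — an assumption for orientation only, not the actual implementation, which may differ in value length and error reporting:

  _hvault_seed_key() {
    # Seed one key at kv/data/<logical>; rc 0 = generated, 1 = unchanged, 2 = API error.
    local logical="$1" key="$2"
    local api="kv/data/${logical}" raw existing_data="{}" current="" payload
    raw="$(hvault_get_or_empty "$api")" || return 2
    if [ -n "$raw" ]; then
      existing_data="$(printf '%s' "$raw" | jq '.data.data // {}')"
      current="$(printf '%s' "$raw" | jq -r --arg k "$key" '.data.data[$k] // ""')"
    fi
    if [ -n "$current" ]; then
      return 1
    fi
    # Merge the new key into existing data — KV v2 replaces .data atomically,
    # so writing only the new key would drop sibling keys.
    payload="$(printf '%s' "$existing_data" \
      | jq --arg k "$key" --arg v "$(openssl rand -hex 32)" '{data: (. + {($k): $v})}')"
    _hvault_request POST "$api" "$payload" >/dev/null || return 2
    return 0
  }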

View file

@ -118,9 +118,36 @@ hvault_token_lookup >/dev/null \
# wrong version or a different backend, fail loudly — silently
# re-enabling would destroy existing secrets.
log "── Step 1/2: ensure ${KV_MOUNT}/ is KV v2 ──"
export DRY_RUN
hvault_ensure_kv_v2 "$KV_MOUNT" "[vault-seed-forgejo]" \
|| die "KV mount check failed"
mounts_json="$(hvault_get_or_empty "sys/mounts")" \
|| die "failed to list Vault mounts"
mount_exists=false
if printf '%s' "$mounts_json" | jq -e --arg m "${KV_MOUNT}/" '.[$m]' >/dev/null 2>&1; then
mount_exists=true
fi
if [ "$mount_exists" = true ]; then
mount_type="$(printf '%s' "$mounts_json" \
| jq -r --arg m "${KV_MOUNT}/" '.[$m].type // ""')"
mount_version="$(printf '%s' "$mounts_json" \
| jq -r --arg m "${KV_MOUNT}/" '.[$m].options.version // "1"')"
if [ "$mount_type" != "kv" ]; then
die "${KV_MOUNT}/ is mounted as type='${mount_type}', expected 'kv' — refuse to re-mount"
fi
if [ "$mount_version" != "2" ]; then
die "${KV_MOUNT}/ is KV v${mount_version}, expected v2 — refuse to upgrade in place (manual fix required)"
fi
log "${KV_MOUNT}/ already mounted (kv v2) — skipping enable"
else
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] would enable ${KV_MOUNT}/ as kv v2"
else
payload="$(jq -n '{type:"kv",options:{version:"2"},description:"disinto shared KV v2 (S2.4)"}')"
_hvault_request POST "sys/mounts/${KV_MOUNT}" "$payload" >/dev/null \
|| die "failed to enable ${KV_MOUNT}/ as kv v2"
log "${KV_MOUNT}/ enabled as kv v2"
fi
fi
# ── Step 2/2: seed missing keys at kv/data/disinto/shared/forgejo ────────────
log "── Step 2/2: seed ${KV_API_PATH} ──"

View file

@ -1,149 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# tools/vault-seed-ops-repo.sh — Idempotent seed for kv/disinto/shared/ops-repo
#
# Part of the Nomad+Vault migration (S5.1, issue #1035). Populates the KV v2
# path that nomad/jobs/edge.hcl dispatcher task reads from, so the edge
# proxy has FORGE_TOKEN for ops repo access.
#
# Seeds from kv/disinto/bots/vault (the vault bot credentials) — copies the
# token field to kv/disinto/shared/ops-repo. This is the "service" path that
# dispatcher uses, distinct from the "agent" path (bots/vault) used by
# agent tasks under the service-agents policy.
#
# Idempotency contract:
# - Key present with non-empty value → leave untouched, log "token unchanged".
# - Key missing or empty → copy from bots/vault, log "token copied".
# - If bots/vault is also empty → generate a random value, log "token generated".
#
# Preconditions:
# - Vault reachable + unsealed at $VAULT_ADDR.
# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable.
# - The `kv/` mount is enabled as KV v2.
#
# Requires:
# - VAULT_ADDR (e.g. http://127.0.0.1:8200)
# - VAULT_TOKEN (env OR /etc/vault.d/root.token, resolved by lib/hvault.sh)
# - curl, jq, openssl
#
# Usage:
# tools/vault-seed-ops-repo.sh
# tools/vault-seed-ops-repo.sh --dry-run
#
# Exit codes:
# 0 success (seed applied, or already applied)
# 1 precondition / API / mount-mismatch failure
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
# KV v2 mount + logical paths
KV_MOUNT="kv"
OPS_REPO_PATH="disinto/shared/ops-repo"
VAULT_BOT_PATH="disinto/bots/vault"
OPS_REPO_API="${KV_MOUNT}/data/${OPS_REPO_PATH}"
VAULT_BOT_API="${KV_MOUNT}/data/${VAULT_BOT_PATH}"
log() { printf '[vault-seed-ops-repo] %s\n' "$*"; }
die() { printf '[vault-seed-ops-repo] ERROR: %s\n' "$*" >&2; exit 1; }
# ── Flag parsing ─────────────────────────────────────────────────────────────
DRY_RUN=0
case "$#:${1-}" in
0:)
;;
1:--dry-run)
DRY_RUN=1
;;
1:-h|1:--help)
printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")"
printf 'Seed kv/disinto/shared/ops-repo with FORGE_TOKEN.\n\n'
printf 'Copies token from kv/disinto/bots/vault if present;\n'
printf 'otherwise generates a random value. Idempotent:\n'
printf 'existing non-empty values are left untouched.\n\n'
printf ' --dry-run Print planned actions without writing.\n'
exit 0
;;
*)
die "invalid arguments: $* (try --help)"
;;
esac
# ── Preconditions ────────────────────────────────────────────────────────────
for bin in curl jq openssl; do
command -v "$bin" >/dev/null 2>&1 \
|| die "required binary not found: ${bin}"
done
[ -n "${VAULT_ADDR:-}" ] \
|| die "VAULT_ADDR unset — e.g. export VAULT_ADDR=http://127.0.0.1:8200"
hvault_token_lookup >/dev/null \
|| die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
# ── Step 1/2: ensure kv/ mount exists and is KV v2 ───────────────────────────
log "── Step 1/2: ensure ${KV_MOUNT}/ is KV v2 ──"
export DRY_RUN
hvault_ensure_kv_v2 "$KV_MOUNT" "[vault-seed-ops-repo]" \
|| die "KV mount check failed"
# ── Step 2/2: seed ops-repo from vault bot ───────────────────────────────────
log "── Step 2/2: seed ${OPS_REPO_API} ──"
# Read existing ops-repo value
existing_raw="$(hvault_get_or_empty "${OPS_REPO_API}")" \
|| die "failed to read ${OPS_REPO_API}"
existing_token=""
if [ -n "$existing_raw" ]; then
existing_token="$(printf '%s' "$existing_raw" | jq -r '.data.data.token // ""')"
fi
desired_token="$existing_token"
action=""
if [ -z "$existing_token" ]; then
# Token missing — try to copy from vault bot
bot_raw="$(hvault_get_or_empty "${VAULT_BOT_API}")" || true
if [ -n "$bot_raw" ]; then
bot_token="$(printf '%s' "$bot_raw" | jq -r '.data.data.token // ""')"
if [ -n "$bot_token" ]; then
desired_token="$bot_token"
action="copied"
fi
fi
# If still no token, generate one
if [ -z "$desired_token" ]; then
if [ "$DRY_RUN" -eq 1 ]; then
action="generated (dry-run)"
else
desired_token="$(openssl rand -hex 32)"
action="generated"
fi
fi
fi
if [ -z "$action" ]; then
log "all keys present at ${OPS_REPO_API} — no-op"
log "token unchanged"
exit 0
fi
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] ${OPS_REPO_PATH}: would ${action} token"
exit 0
fi
# Write the token
payload="$(jq -n --arg t "$desired_token" '{data: {token: $t}}')"
_hvault_request POST "${OPS_REPO_API}" "$payload" >/dev/null \
|| die "failed to write ${OPS_REPO_API}"
log "${OPS_REPO_PATH}: ${action} token"
log "done — ${OPS_REPO_API} seeded"

View file

@ -1,145 +0,0 @@
#!/usr/bin/env bash
# =============================================================================
# tools/vault-seed-woodpecker.sh — Idempotent seed for kv/disinto/shared/woodpecker
#
# Part of the Nomad+Vault migration (S3.1 + S3.3, issues #934 + #936). Populates
# the KV v2 path read by nomad/jobs/woodpecker-server.hcl:
# - agent_secret: pre-shared secret for woodpecker-server ↔ agent communication
# - forgejo_client + forgejo_secret: OAuth2 client credentials from Forgejo
#
# This script handles BOTH:
# 1. S3.1: seeds `agent_secret` if missing
# 2. S3.3: calls wp-oauth-register.sh to create Forgejo OAuth app + store
# forgejo_client/forgejo_secret in Vault
#
# Idempotency contract:
#   - agent_secret: missing or empty in Vault → generate a random value,
#     write it, log "agent_secret generated"; present with a non-empty
#     value → leave untouched, log "agent_secret unchanged".
#   - OAuth app + forgejo_client/forgejo_secret: handled by wp-oauth-register.sh
#     (itself idempotent).
#   - Any existing keys this script doesn't own are preserved on write.
#
# Preconditions:
# - Vault reachable + unsealed at $VAULT_ADDR.
# - VAULT_TOKEN set (env) or /etc/vault.d/root.token readable.
# - The `kv/` mount is enabled as KV v2 (this script enables it on a
# fresh box; on an existing box it asserts the mount type/version).
#
# Requires:
# - VAULT_ADDR (e.g. http://127.0.0.1:8200)
# - VAULT_TOKEN (env OR /etc/vault.d/root.token, resolved by lib/hvault.sh)
# - curl, jq, openssl
#
# Usage:
# tools/vault-seed-woodpecker.sh
# tools/vault-seed-woodpecker.sh --dry-run
#
# Exit codes:
# 0 success (seed applied, or already applied)
# 1 precondition / API / mount-mismatch failure
# =============================================================================
set -euo pipefail
SEED_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SEED_DIR}/.." && pwd)"
LIB_DIR="${REPO_ROOT}/lib/init/nomad"
# shellcheck source=../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
KV_MOUNT="kv"
KV_LOGICAL_PATH="disinto/shared/woodpecker"
KV_API_PATH="${KV_MOUNT}/data/${KV_LOGICAL_PATH}"
AGENT_SECRET_BYTES=32 # 32 bytes → 64 hex chars
LOG_TAG="[vault-seed-woodpecker]"
log() { printf '%s %s\n' "$LOG_TAG" "$*"; }
die() { printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2; exit 1; }
# ── Flag parsing ─────────────────────────────────────────────────────────────
# for-over-"$@" loop — shape distinct from vault-seed-forgejo.sh (arity:value
# case) and vault-apply-roles.sh (if/elif).
DRY_RUN=0
for arg in "$@"; do
case "$arg" in
--dry-run) DRY_RUN=1 ;;
-h|--help)
printf 'Usage: %s [--dry-run]\n\n' "$(basename "$0")"
printf 'Seed kv/disinto/shared/woodpecker with secrets.\n\n'
printf 'Handles both S3.1 (agent_secret) and S3.3 (OAuth app + credentials):\n'
printf ' - agent_secret: generated if missing\n'
printf ' - forgejo_client/forgejo_secret: created via Forgejo API if missing\n\n'
printf ' --dry-run Print planned actions without writing.\n'
exit 0
;;
*) die "invalid argument: ${arg} (try --help)" ;;
esac
done
# ── Preconditions — binary + Vault connectivity checks ───────────────────────
required_bins=(curl jq openssl)
for bin in "${required_bins[@]}"; do
command -v "$bin" >/dev/null 2>&1 || die "required binary not found: ${bin}"
done
[ -n "${VAULT_ADDR:-}" ] || die "VAULT_ADDR unset — export VAULT_ADDR=http://127.0.0.1:8200"
hvault_token_lookup >/dev/null || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
# ── Step 1/3: ensure kv/ mount exists and is KV v2 ───────────────────────────
log "── Step 1/3: ensure ${KV_MOUNT}/ is KV v2 ──"
export DRY_RUN
hvault_ensure_kv_v2 "$KV_MOUNT" "[vault-seed-woodpecker]" \
|| die "KV mount check failed"
# ── Step 2/3: seed agent_secret at kv/data/disinto/shared/woodpecker ─────────
log "── Step 2/3: seed agent_secret ──"
existing_raw="$(hvault_get_or_empty "${KV_API_PATH}")" \
|| die "failed to read ${KV_API_PATH}"
# Read all existing keys so we can preserve them on write (KV v2 replaces
# `.data` atomically). Missing path → empty object.
existing_data="{}"
existing_agent_secret=""
if [ -n "$existing_raw" ]; then
existing_data="$(printf '%s' "$existing_raw" | jq '.data.data // {}')"
existing_agent_secret="$(printf '%s' "$existing_raw" | jq -r '.data.data.agent_secret // ""')"
fi
if [ -n "$existing_agent_secret" ]; then
log "agent_secret unchanged"
else
# agent_secret is missing — generate it.
if [ "$DRY_RUN" -eq 1 ]; then
log "[dry-run] would generate + write: agent_secret"
else
new_agent_secret="$(openssl rand -hex "$AGENT_SECRET_BYTES")"
# Merge the new key into existing data to preserve any keys written by
# other seeders (e.g. S3.3's forgejo_client/forgejo_secret).
payload="$(printf '%s' "$existing_data" \
| jq --arg as "$new_agent_secret" '{data: (. + {agent_secret: $as})}')"
_hvault_request POST "${KV_API_PATH}" "$payload" >/dev/null \
|| die "failed to write ${KV_API_PATH}"
log "agent_secret generated"
fi
fi
# ── Step 3/3: register Forgejo OAuth app and store credentials ───────────────
log "── Step 3/3: register Forgejo OAuth app ──"
# Export DRY_RUN for the OAuth script and call it
export DRY_RUN
if "${LIB_DIR}/wp-oauth-register.sh" || [ "$DRY_RUN" -eq 1 ]; then
:
elif [ -n "${FORGE_URL:-}" ]; then
# Forgejo was configured but unavailable
log "OAuth registration check failed (Forgejo may not be running)"
log "This is expected if Forgejo is not available"
fi
log "done — agent_secret + OAuth credentials seeded"

View file

@ -1,4 +1,4 @@
<!-- last-reviewed: 19ead14edecbc4e05e7bfe3d43f573ca8189e953 -->
<!-- last-reviewed: 6bdbeb5bd2a200ff1b23724564da9383193f3e30 -->
# vault/policies/ — Agent Instructions
HashiCorp Vault ACL policies for the disinto factory. One `.hcl` file per
@ -30,9 +30,6 @@ KV v2). Vault addresses KV v2 data at `kv/data/<path>` and metadata at
|---|---|
| `service-forgejo` | `kv/data/disinto/shared/forgejo/*` |
| `service-woodpecker` | `kv/data/disinto/shared/woodpecker/*` |
| `service-agents` | All 7 `kv/data/disinto/bots/<role>/*` namespaces + `kv/data/disinto/shared/forge/*`; composite policy for the `agents` Nomad job (S4.1) |
| `service-chat` | `kv/data/disinto/shared/chat/*`; read-only OAuth client config + forward-auth secret for the chat Nomad job (S5.2, #989) |
| `service-dispatcher` | `kv/data/disinto/runner/*` (list+read) + `kv/data/disinto/shared/ops-repo/*` (read); used by edge dispatcher sidecar (S5.1, #988) |
| `bot-<role>` (dev, review, gardener, architect, planner, predictor, supervisor, vault, dev-qwen) | `kv/data/disinto/bots/<role>/*` + `kv/data/disinto/shared/forge/*` |
| `runner-<TOKEN>` (GITHUB\_TOKEN, CODEBERG\_TOKEN, CLAWHUB\_TOKEN, DEPLOY\_KEY, NPM\_TOKEN, DOCKER\_HUB\_TOKEN) | `kv/data/disinto/runner/<TOKEN>` (exactly one) |
| `dispatcher` | `kv/data/disinto/runner/*` + `kv/data/disinto/shared/ops-repo/*` |
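
For orientation, any policy in this table can be loaded and spot-checked by hand with the vault CLI — a manual sketch only, since the repo's own apply tooling is the normal path, and the file name here is assumed to match the policy name:

  $ vault policy write bot-architect vault/policies/bot-architect.hcl
  Success! Uploaded policy: bot-architect
  $ vault policy read bot-architect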

View file

@ -3,14 +3,14 @@
# Architect agent: reads its own bot KV namespace + the shared forge URL.
# Attached to the architect-agent Nomad job via workload identity (S2.4).
path "kv/data/disinto/bots/architect" {
path "kv/data/disinto/bots/architect/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/architect" {
path "kv/metadata/disinto/bots/architect/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}
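
One detail worth keeping in mind when reading these path changes: in Vault ACLs a trailing /* glob matches sub-paths only, not the bare path itself, so a grant on kv/data/disinto/bots/architect/* does not by itself cover a secret stored exactly at kv/data/disinto/bots/architect. The difference is easy to check with a token holding the policy ($BOT_TOKEN and the child path below are placeholders):

  $ vault token capabilities "$BOT_TOKEN" kv/data/disinto/bots/architect
  deny
  $ vault token capabilities "$BOT_TOKEN" kv/data/disinto/bots/architect/credentials
  read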

View file

@ -5,14 +5,14 @@
# via workload identity (S2.4). KV path mirrors the bot basename:
# kv/disinto/bots/dev-qwen/*.
path "kv/data/disinto/bots/dev-qwen" {
path "kv/data/disinto/bots/dev-qwen/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/dev-qwen" {
path "kv/metadata/disinto/bots/dev-qwen/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -3,14 +3,14 @@
# Dev agent: reads its own bot KV namespace + the shared forge URL.
# Attached to the dev-agent Nomad job via workload identity (S2.4).
path "kv/data/disinto/bots/dev" {
path "kv/data/disinto/bots/dev/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/dev" {
path "kv/metadata/disinto/bots/dev/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -3,14 +3,14 @@
# Gardener agent: reads its own bot KV namespace + the shared forge URL.
# Attached to the gardener-agent Nomad job via workload identity (S2.4).
path "kv/data/disinto/bots/gardener" {
path "kv/data/disinto/bots/gardener/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/gardener" {
path "kv/metadata/disinto/bots/gardener/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -3,14 +3,14 @@
# Planner agent: reads its own bot KV namespace + the shared forge URL.
# Attached to the planner-agent Nomad job via workload identity (S2.4).
path "kv/data/disinto/bots/planner" {
path "kv/data/disinto/bots/planner/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/planner" {
path "kv/metadata/disinto/bots/planner/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -3,14 +3,14 @@
# Predictor agent: reads its own bot KV namespace + the shared forge URL.
# Attached to the predictor-agent Nomad job via workload identity (S2.4).
path "kv/data/disinto/bots/predictor" {
path "kv/data/disinto/bots/predictor/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/predictor" {
path "kv/metadata/disinto/bots/predictor/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -3,14 +3,14 @@
# Review agent: reads its own bot KV namespace + the shared forge URL.
# Attached to the review-agent Nomad job via workload identity (S2.4).
path "kv/data/disinto/bots/review" {
path "kv/data/disinto/bots/review/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/review" {
path "kv/metadata/disinto/bots/review/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -3,14 +3,14 @@
# Supervisor agent: reads its own bot KV namespace + the shared forge URL.
# Attached to the supervisor-agent Nomad job via workload identity (S2.4).
path "kv/data/disinto/bots/supervisor" {
path "kv/data/disinto/bots/supervisor/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/supervisor" {
path "kv/metadata/disinto/bots/supervisor/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -7,14 +7,14 @@
# NOTE: distinct from the runner-* policies, which gate per-secret access
# for vault-runner ephemeral dispatches (Step 5).
path "kv/data/disinto/bots/vault" {
path "kv/data/disinto/bots/vault/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/vault" {
path "kv/metadata/disinto/bots/vault/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/forge" {
path "kv/data/disinto/shared/forge/*" {
capabilities = ["read"]
}

View file

@ -20,10 +20,10 @@ path "kv/metadata/disinto/runner/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/ops-repo" {
path "kv/data/disinto/shared/ops-repo/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/shared/ops-repo" {
path "kv/metadata/disinto/shared/ops-repo/*" {
capabilities = ["list", "read"]
}

View file

@ -1,76 +0,0 @@
# vault/policies/service-agents.hcl
#
# Composite policy for the `agents` Nomad job (S4.1, issue #955).
# Grants read access to all 7 bot KV namespaces + shared forge config,
# so a single job running all agent roles can pull per-bot tokens from
# Vault via workload identity.
# Per-bot KV paths (token + pass per role)
path "kv/data/disinto/bots/dev" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/dev" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/bots/review" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/review" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/bots/gardener" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/gardener" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/bots/architect" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/architect" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/bots/planner" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/planner" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/bots/predictor" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/predictor" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/bots/supervisor" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/supervisor" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/bots/vault" {
capabilities = ["read"]
}
path "kv/metadata/disinto/bots/vault" {
capabilities = ["list", "read"]
}
# Shared forge config (URL, bot usernames)
path "kv/data/disinto/shared/forge" {
capabilities = ["read"]
}
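
Since this composite policy enumerates every role explicitly, a quick loop with a token holding it confirms nothing was missed when a role is added ($AGENTS_TOKEN is a placeholder for such a token; each path should report read):

  $ for role in dev review gardener architect planner predictor supervisor vault; do
  >   vault token capabilities "$AGENTS_TOKEN" "kv/data/disinto/bots/${role}"
  > done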

View file

@ -1,15 +0,0 @@
# vault/policies/service-chat.hcl
#
# Read-only access to shared Chat secrets (OAuth client config, forward auth
# secret). Attached to the Chat Nomad job via workload identity (S5.2).
#
# Scope: kv/disinto/shared/chat entries owned by the operator and
# shared between the chat service and edge proxy.
path "kv/data/disinto/shared/chat" {
capabilities = ["read"]
}
path "kv/metadata/disinto/shared/chat" {
capabilities = ["list", "read"]
}

View file

@ -1,29 +0,0 @@
# vault/policies/service-dispatcher.hcl
#
# Edge dispatcher policy: needs to enumerate the runner secret namespace
# (to check secret presence before dispatching) and read the shared
# ops-repo credentials (token + clone URL) it uses to fetch action TOMLs.
#
# Scope:
# - kv/disinto/runner/* read all per-secret values + list keys
# - kv/disinto/shared/ops-repo/* read the ops-repo creds bundle
#
# The actual ephemeral runner container created per dispatch gets the
# narrow runner-<NAME> policies, NOT this one. This policy stays bound
# to the long-running dispatcher only.
path "kv/data/disinto/runner/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/runner/*" {
capabilities = ["list", "read"]
}
path "kv/data/disinto/shared/ops-repo" {
capabilities = ["read"]
}
path "kv/metadata/disinto/shared/ops-repo" {
capabilities = ["list", "read"]
}
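
With a token under this policy, the two grants it describes can be exercised directly from the CLI (illustrative; $DISPATCHER_TOKEN is a placeholder, and GITHUB_TOKEN is one of the runner secrets listed in the policy table above):

  $ VAULT_TOKEN="$DISPATCHER_TOKEN" vault kv get -mount=kv disinto/runner/GITHUB_TOKEN
  $ VAULT_TOKEN="$DISPATCHER_TOKEN" vault kv get -mount=kv disinto/shared/ops-repo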

View file

@ -6,10 +6,10 @@
# Scope: kv/disinto/shared/woodpecker/* entries owned by the operator
# and consumed by woodpecker-server + woodpecker-agent.
path "kv/data/disinto/shared/woodpecker" {
path "kv/data/disinto/shared/woodpecker/*" {
capabilities = ["read"]
}
path "kv/metadata/disinto/shared/woodpecker" {
path "kv/metadata/disinto/shared/woodpecker/*" {
capabilities = ["list", "read"]
}

View file

@ -55,27 +55,7 @@ roles:
- name: service-woodpecker
policy: service-woodpecker
namespace: default
job_id: woodpecker-server
- name: service-woodpecker-agent
policy: service-woodpecker
namespace: default
job_id: woodpecker-agent
# ── Agents composite (nomad/jobs/agents.hcl — S4.1) ──────────────────────
# Single job running all 7 agent roles. Uses a composite policy
# (vault/policies/service-agents.hcl) that unions all bot KV paths.
- name: service-agents
policy: service-agents
namespace: default
job_id: agents
# ── Chat UI (nomad/jobs/chat.hcl — S5.2) ─────────────────────────────────
# Claude chat UI service with OAuth secrets. Uses vault/policies/service-chat.hcl.
- name: service-chat
policy: service-chat
namespace: default
job_id: chat
job_id: woodpecker
# ── Per-agent bots (nomad/jobs/bot-<role>.hcl — land in later steps) ───────
# job_id placeholders match the policy name 1:1 until each bot's jobspec
@ -128,10 +108,10 @@ roles:
job_id: bot-vault
# ── Edge dispatcher ────────────────────────────────────────────────────────
- name: service-dispatcher
policy: service-dispatcher
- name: dispatcher
policy: dispatcher
namespace: default
job_id: edge
job_id: dispatcher
# ── Per-secret runner roles ────────────────────────────────────────────────
# vault-runner (Step 5) composes runner-<NAME> policies onto each