Compare commits
2 commits
main
...
fix/issue-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f398b32952 | ||
|
|
2605d8afba |
145 changed files with 1681 additions and 13367 deletions
|
|
@ -1,7 +1,8 @@
|
||||||
# Secrets — prevent .env files and encrypted secrets from being baked into the image
|
# Secrets — prevent .env files from being baked into the image
|
||||||
.env
|
.env
|
||||||
.env.enc
|
.env.enc
|
||||||
secrets/
|
.env.vault
|
||||||
|
.env.vault.enc
|
||||||
|
|
||||||
# Version control — .git is huge and not needed in image
|
# Version control — .git is huge and not needed in image
|
||||||
.git
|
.git
|
||||||
|
|
|
||||||
42
.env.example
42
.env.example
|
|
@ -25,17 +25,12 @@ FORGE_URL=http://localhost:3000 # [CONFIG] local Forgejo instance
|
||||||
# - FORGE_TOKEN_<BOT> = API token for REST calls (user identity via /api/v1/user)
|
# - FORGE_TOKEN_<BOT> = API token for REST calls (user identity via /api/v1/user)
|
||||||
# - FORGE_PASS_<BOT> = password for git HTTP push (#361, Forgejo 11.x limitation)
|
# - FORGE_PASS_<BOT> = password for git HTTP push (#361, Forgejo 11.x limitation)
|
||||||
#
|
#
|
||||||
# Local-model agents hired with `disinto hire-an-agent` are keyed by *agent
|
# Local-model agents (agents-llama) use FORGE_TOKEN_LLAMA / FORGE_PASS_LLAMA
|
||||||
# name* (not role), so multiple local-model dev agents can coexist without
|
# with FORGE_BOT_USER_LLAMA=dev-qwen to ensure correct attribution (#563).
|
||||||
# colliding on credentials (#834). For an agent named `dev-qwen2` the vars are:
|
|
||||||
# - FORGE_TOKEN_DEV_QWEN2
|
|
||||||
# - FORGE_PASS_DEV_QWEN2
|
|
||||||
# Name conversion: tr 'a-z-' 'A-Z_' (lowercase→UPPER, hyphens→underscores).
|
|
||||||
# The compose generator looks these up via the agent's `forge_user` field in
|
|
||||||
# the project TOML. Configure local-model agents via [agents.X] sections in
|
|
||||||
# projects/*.toml — this is the canonical activation path.
|
|
||||||
FORGE_TOKEN= # [SECRET] dev-bot API token (default for all agents)
|
FORGE_TOKEN= # [SECRET] dev-bot API token (default for all agents)
|
||||||
FORGE_PASS= # [SECRET] dev-bot password for git HTTP push (#361)
|
FORGE_PASS= # [SECRET] dev-bot password for git HTTP push (#361)
|
||||||
|
FORGE_TOKEN_LLAMA= # [SECRET] dev-qwen API token (for agents-llama)
|
||||||
|
FORGE_PASS_LLAMA= # [SECRET] dev-qwen password for git HTTP push
|
||||||
FORGE_REVIEW_TOKEN= # [SECRET] review-bot API token
|
FORGE_REVIEW_TOKEN= # [SECRET] review-bot API token
|
||||||
FORGE_REVIEW_PASS= # [SECRET] review-bot password for git HTTP push
|
FORGE_REVIEW_PASS= # [SECRET] review-bot password for git HTTP push
|
||||||
FORGE_PLANNER_TOKEN= # [SECRET] planner-bot API token
|
FORGE_PLANNER_TOKEN= # [SECRET] planner-bot API token
|
||||||
|
|
@ -50,9 +45,7 @@ FORGE_PREDICTOR_TOKEN= # [SECRET] predictor-bot API token
|
||||||
FORGE_PREDICTOR_PASS= # [SECRET] predictor-bot password for git HTTP push
|
FORGE_PREDICTOR_PASS= # [SECRET] predictor-bot password for git HTTP push
|
||||||
FORGE_ARCHITECT_TOKEN= # [SECRET] architect-bot API token
|
FORGE_ARCHITECT_TOKEN= # [SECRET] architect-bot API token
|
||||||
FORGE_ARCHITECT_PASS= # [SECRET] architect-bot password for git HTTP push
|
FORGE_ARCHITECT_PASS= # [SECRET] architect-bot password for git HTTP push
|
||||||
FORGE_FILER_TOKEN= # [SECRET] filer-bot API token (issues:write on project repo only)
|
FORGE_BOT_USERNAMES=dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot
|
||||||
FORGE_FILER_PASS= # [SECRET] filer-bot password for git HTTP push
|
|
||||||
FORGE_BOT_USERNAMES=dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot,filer-bot
|
|
||||||
|
|
||||||
# ── Backwards compatibility ───────────────────────────────────────────────
|
# ── Backwards compatibility ───────────────────────────────────────────────
|
||||||
# If CODEBERG_TOKEN is set but FORGE_TOKEN is not, env.sh falls back to
|
# If CODEBERG_TOKEN is set but FORGE_TOKEN is not, env.sh falls back to
|
||||||
|
|
@ -68,10 +61,6 @@ FORGE_BOT_USERNAMES=dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,superv
|
||||||
WOODPECKER_TOKEN= # [SECRET] Woodpecker API token
|
WOODPECKER_TOKEN= # [SECRET] Woodpecker API token
|
||||||
WOODPECKER_SERVER=http://localhost:8000 # [CONFIG] Woodpecker server URL
|
WOODPECKER_SERVER=http://localhost:8000 # [CONFIG] Woodpecker server URL
|
||||||
WOODPECKER_AGENT_SECRET= # [SECRET] shared secret for server↔agent auth (auto-generated)
|
WOODPECKER_AGENT_SECRET= # [SECRET] shared secret for server↔agent auth (auto-generated)
|
||||||
# Woodpecker privileged-plugin allowlist — comma-separated image names
|
|
||||||
# Add plugins/docker (and others) here to allow privileged execution
|
|
||||||
WOODPECKER_PLUGINS_PRIVILEGED=plugins/docker
|
|
||||||
|
|
||||||
# WOODPECKER_REPO_ID — now per-project, set in projects/*.toml [ci] section
|
# WOODPECKER_REPO_ID — now per-project, set in projects/*.toml [ci] section
|
||||||
|
|
||||||
# Woodpecker Postgres (for direct DB queries)
|
# Woodpecker Postgres (for direct DB queries)
|
||||||
|
|
@ -88,17 +77,16 @@ FORWARD_AUTH_SECRET= # [SECRET] Shared secret for Caddy ↔
|
||||||
|
|
||||||
# ── Vault-only secrets (DO NOT put these in .env) ────────────────────────
|
# ── Vault-only secrets (DO NOT put these in .env) ────────────────────────
|
||||||
# These tokens grant access to external systems (GitHub, ClawHub, deploy targets).
|
# These tokens grant access to external systems (GitHub, ClawHub, deploy targets).
|
||||||
# They live ONLY in secrets/<NAME>.enc (age-encrypted, one file per key) and are
|
# They live ONLY in .env.vault.enc and are injected into the ephemeral runner
|
||||||
# decrypted into the ephemeral runner container at fire time (#745, #777).
|
# container at fire time (#745). lib/env.sh explicitly unsets them so agents
|
||||||
# lib/env.sh explicitly unsets them so agents can never hold them directly —
|
# can never hold them directly — all external actions go through vault dispatch.
|
||||||
# all external actions go through vault dispatch.
|
|
||||||
#
|
#
|
||||||
# GITHUB_TOKEN — GitHub API access (publish, deploy, post)
|
# GITHUB_TOKEN — GitHub API access (publish, deploy, post)
|
||||||
# CLAWHUB_TOKEN — ClawHub registry credentials (publish)
|
# CLAWHUB_TOKEN — ClawHub registry credentials (publish)
|
||||||
# CADDY_SSH_KEY — SSH key for Caddy log collection
|
|
||||||
# (deploy keys) — SSH keys for deployment targets
|
# (deploy keys) — SSH keys for deployment targets
|
||||||
#
|
#
|
||||||
# To manage secrets: disinto secrets add/show/remove/list
|
# To manage vault secrets: disinto secrets edit-vault
|
||||||
|
# (vault redesign in progress: PR-based approval, see #73-#77)
|
||||||
|
|
||||||
# ── Project-specific secrets ──────────────────────────────────────────────
|
# ── Project-specific secrets ──────────────────────────────────────────────
|
||||||
# Store all project secrets here so formulas reference env vars, never hardcode.
|
# Store all project secrets here so formulas reference env vars, never hardcode.
|
||||||
|
|
@ -107,16 +95,6 @@ BASE_RPC_URL= # [SECRET] on-chain RPC endpoint
|
||||||
# ── Tuning ────────────────────────────────────────────────────────────────
|
# ── Tuning ────────────────────────────────────────────────────────────────
|
||||||
CLAUDE_TIMEOUT=7200 # [CONFIG] max seconds per Claude invocation
|
CLAUDE_TIMEOUT=7200 # [CONFIG] max seconds per Claude invocation
|
||||||
|
|
||||||
# ── Host paths (Nomad-portable) ────────────────────────────────────────────
|
|
||||||
# These env vars externalize host-side bind-mount paths from docker-compose.yml.
|
|
||||||
# At cutover, Nomad jobspecs reference the same vars — no path translation.
|
|
||||||
# Defaults point at current paths so an empty .env override still works.
|
|
||||||
CLAUDE_BIN_DIR=/usr/local/bin/claude # [CONFIG] host path to claude CLI binary (resolved by `disinto init`)
|
|
||||||
CLAUDE_CONFIG_FILE=${HOME}/.claude.json # [CONFIG] host path to claude config JSON file
|
|
||||||
CLAUDE_DIR=${HOME}/.claude # [CONFIG] host path to .claude directory (reproduce/edge)
|
|
||||||
AGENT_SSH_DIR=${HOME}/.ssh # [CONFIG] host path to SSH keys directory
|
|
||||||
SOPS_AGE_DIR=${HOME}/.config/sops/age # [CONFIG] host path to SOPS age key directory
|
|
||||||
|
|
||||||
# ── Claude Code shared OAuth state ─────────────────────────────────────────
|
# ── Claude Code shared OAuth state ─────────────────────────────────────────
|
||||||
# Shared directory used by every factory container so Claude Code's internal
|
# Shared directory used by every factory container so Claude Code's internal
|
||||||
# proper-lockfile-based OAuth refresh lock works across containers. Both
|
# proper-lockfile-based OAuth refresh lock works across containers. Both
|
||||||
|
|
|
||||||
5
.gitignore
vendored
5
.gitignore
vendored
|
|
@ -3,6 +3,7 @@
|
||||||
|
|
||||||
# Encrypted secrets — safe to commit (SOPS-encrypted with age)
|
# Encrypted secrets — safe to commit (SOPS-encrypted with age)
|
||||||
!.env.enc
|
!.env.enc
|
||||||
|
!.env.vault.enc
|
||||||
!.sops.yaml
|
!.sops.yaml
|
||||||
|
|
||||||
# Per-box project config (generated by disinto init)
|
# Per-box project config (generated by disinto init)
|
||||||
|
|
@ -20,6 +21,7 @@ metrics/supervisor-metrics.jsonl
|
||||||
# OS
|
# OS
|
||||||
.DS_Store
|
.DS_Store
|
||||||
dev/ci-fixes-*.json
|
dev/ci-fixes-*.json
|
||||||
|
gardener/dust.jsonl
|
||||||
|
|
||||||
# Individual encrypted secrets (managed by disinto secrets add)
|
# Individual encrypted secrets (managed by disinto secrets add)
|
||||||
secrets/
|
secrets/
|
||||||
|
|
@ -31,9 +33,6 @@ docker/agents/bin/
|
||||||
# Note: This file is now committed to track volume mount configuration
|
# Note: This file is now committed to track volume mount configuration
|
||||||
# docker-compose.yml
|
# docker-compose.yml
|
||||||
|
|
||||||
# Generated Caddyfile — single source of truth is generate_caddyfile in lib/generators.sh
|
|
||||||
docker/Caddyfile
|
|
||||||
|
|
||||||
# Python bytecode
|
# Python bytecode
|
||||||
__pycache__/
|
__pycache__/
|
||||||
*.pyc
|
*.pyc
|
||||||
|
|
|
||||||
|
|
@ -98,38 +98,50 @@ echo "syntax check done"
|
||||||
|
|
||||||
echo "=== 2/2 Function resolution ==="
|
echo "=== 2/2 Function resolution ==="
|
||||||
|
|
||||||
# Enumerate ALL lib/*.sh files in stable lexicographic order (#742).
|
# Required lib files for LIB_FUNS construction. Missing any of these means the
|
||||||
# Previous approach used a hand-maintained REQUIRED_LIBS list, which silently
|
# checkout is incomplete or the test is misconfigured — fail loudly, do NOT
|
||||||
# became incomplete as new libs were added, producing partial LIB_FUNS that
|
# silently produce a partial LIB_FUNS list (that masquerades as "undef" errors
|
||||||
# caused non-deterministic "undef" failures.
|
# in unrelated scripts; see #600).
|
||||||
|
REQUIRED_LIBS=(
|
||||||
|
lib/agent-sdk.sh lib/env.sh lib/ci-helpers.sh lib/load-project.sh
|
||||||
|
lib/secret-scan.sh lib/formula-session.sh lib/mirrors.sh lib/guard.sh
|
||||||
|
lib/pr-lifecycle.sh lib/issue-lifecycle.sh lib/worktree.sh
|
||||||
|
)
|
||||||
|
|
||||||
|
for f in "${REQUIRED_LIBS[@]}"; do
|
||||||
|
if [ ! -f "$f" ]; then
|
||||||
|
printf 'FAIL [missing-lib] expected %s but it is not present at smoke time\n' "$f" >&2
|
||||||
|
printf ' pwd=%s\n' "$(pwd)" >&2
|
||||||
|
printf ' ls lib/=%s\n' "$(ls lib/ 2>&1 | tr '\n' ' ')" >&2
|
||||||
|
echo '=== SMOKE TEST FAILED (precondition) ===' >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Functions provided by shared lib files (available to all agent scripts via source).
|
||||||
#
|
#
|
||||||
# Excluded from LIB_FUNS (not sourced inline by agents):
|
# Included — these are inline-sourced by agent scripts:
|
||||||
|
# lib/env.sh — sourced by every agent (log, forge_api, etc.)
|
||||||
|
# lib/agent-sdk.sh — sourced by SDK agents (agent_run, agent_recover_session)
|
||||||
|
# lib/ci-helpers.sh — sourced by pollers and review (ci_passed, classify_pipeline_failure, etc.)
|
||||||
|
# lib/load-project.sh — sourced by env.sh when PROJECT_TOML is set
|
||||||
|
# lib/secret-scan.sh — standalone CLI tool, run directly (not sourced)
|
||||||
|
# lib/formula-session.sh — sourced by formula-driven agents (acquire_run_lock, check_memory, etc.)
|
||||||
|
# lib/mirrors.sh — sourced by merge sites (mirror_push)
|
||||||
|
# lib/guard.sh — sourced by all polling-loop entry points (check_active)
|
||||||
|
# lib/issue-lifecycle.sh — sourced by agents for issue claim/release/block/deps
|
||||||
|
# lib/worktree.sh — sourced by agents for worktree create/recover/cleanup/preserve
|
||||||
|
#
|
||||||
|
# Excluded — not sourced inline by agents:
|
||||||
|
# lib/tea-helpers.sh — sourced conditionally by env.sh (tea_file_issue, etc.); checked standalone below
|
||||||
# lib/ci-debug.sh — standalone CLI tool, run directly (not sourced)
|
# lib/ci-debug.sh — standalone CLI tool, run directly (not sourced)
|
||||||
# lib/parse-deps.sh — executed via `bash lib/parse-deps.sh` (not sourced)
|
# lib/parse-deps.sh — executed via `bash lib/parse-deps.sh` (not sourced)
|
||||||
# lib/hooks/*.sh — Claude Code hook scripts, executed by the harness (not sourced)
|
# lib/hooks/*.sh — Claude Code hook scripts, executed by the harness (not sourced)
|
||||||
EXCLUDED_LIBS="lib/ci-debug.sh lib/parse-deps.sh"
|
#
|
||||||
|
# If a new lib file is added and sourced by agents, add it to LIB_FUNS below
|
||||||
# Build the list of lib files in deterministic order (LC_ALL=C sort).
|
# and add a check_script call for it in the lib files section further down.
|
||||||
# Fail loudly if no lib files are found — checkout is broken.
|
|
||||||
mapfile -t ALL_LIBS < <(LC_ALL=C find lib -maxdepth 1 -name '*.sh' -print | LC_ALL=C sort)
|
|
||||||
if [ "${#ALL_LIBS[@]}" -eq 0 ]; then
|
|
||||||
echo 'FAIL [no-libs] no lib/*.sh files found at smoke time' >&2
|
|
||||||
printf ' pwd=%s\n' "$(pwd)" >&2
|
|
||||||
echo '=== SMOKE TEST FAILED (precondition) ===' >&2
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Build LIB_FUNS from all non-excluded lib files.
|
|
||||||
# Use set -e inside the subshell so a failed get_fns aborts loudly
|
|
||||||
# instead of silently shrinking the function list.
|
|
||||||
LIB_FUNS=$(
|
LIB_FUNS=$(
|
||||||
set -e
|
for f in "${REQUIRED_LIBS[@]}"; do get_fns "$f"; done | sort -u
|
||||||
for f in "${ALL_LIBS[@]}"; do
|
|
||||||
# shellcheck disable=SC2086
|
|
||||||
skip=0; for ex in $EXCLUDED_LIBS; do [ "$f" = "$ex" ] && skip=1; done
|
|
||||||
[ "$skip" -eq 1 ] && continue
|
|
||||||
get_fns "$f"
|
|
||||||
done | sort -u
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Known external commands and shell builtins — never flag these
|
# Known external commands and shell builtins — never flag these
|
||||||
|
|
@ -180,14 +192,13 @@ check_script() {
|
||||||
while IFS= read -r fn; do
|
while IFS= read -r fn; do
|
||||||
[ -z "$fn" ] && continue
|
[ -z "$fn" ] && continue
|
||||||
is_known_cmd "$fn" && continue
|
is_known_cmd "$fn" && continue
|
||||||
# Use here-string (<<<) instead of pipe to avoid SIGPIPE race (#742):
|
if ! printf '%s\n' "$all_fns" | grep -qxF "$fn"; then
|
||||||
# with pipefail, `printf | grep -q` can fail when grep closes the pipe
|
|
||||||
# early after finding a match, causing printf to get SIGPIPE (exit 141).
|
|
||||||
# This produced non-deterministic false "undef" failures.
|
|
||||||
if ! grep -qxF "$fn" <<< "$all_fns"; then
|
|
||||||
printf 'FAIL [undef] %s: %s\n' "$script" "$fn"
|
printf 'FAIL [undef] %s: %s\n' "$script" "$fn"
|
||||||
printf ' all_fns count: %d\n' "$(grep -c . <<< "$all_fns")"
|
# Diagnostic dump (#600): if the function is expected to be in a known lib,
|
||||||
printf ' LIB_FUNS contains "%s": %s\n' "$fn" "$(grep -cxF "$fn" <<< "$LIB_FUNS")"
|
# print what the actual all_fns set looks like so we can tell whether the
|
||||||
|
# function is genuinely missing or whether the resolution loop is broken.
|
||||||
|
printf ' all_fns count: %d\n' "$(printf '%s\n' "$all_fns" | wc -l)"
|
||||||
|
printf ' LIB_FUNS contains "%s": %s\n' "$fn" "$(printf '%s\n' "$LIB_FUNS" | grep -cxF "$fn")"
|
||||||
printf ' defining lib (if any): %s\n' "$(grep -l "^[[:space:]]*${fn}[[:space:]]*()" lib/*.sh 2>/dev/null | tr '\n' ' ')"
|
printf ' defining lib (if any): %s\n' "$(grep -l "^[[:space:]]*${fn}[[:space:]]*()" lib/*.sh 2>/dev/null | tr '\n' ' ')"
|
||||||
FAILED=1
|
FAILED=1
|
||||||
fi
|
fi
|
||||||
|
|
@ -211,9 +222,8 @@ check_script lib/issue-lifecycle.sh lib/secret-scan.sh
|
||||||
|
|
||||||
# Standalone lib scripts (not sourced by agents; run directly or as services).
|
# Standalone lib scripts (not sourced by agents; run directly or as services).
|
||||||
# Still checked for function resolution against LIB_FUNS + own definitions.
|
# Still checked for function resolution against LIB_FUNS + own definitions.
|
||||||
check_script lib/ci-debug.sh
|
check_script lib/ci-debug.sh lib/env.sh
|
||||||
check_script lib/parse-deps.sh
|
check_script lib/parse-deps.sh
|
||||||
check_script lib/sprint-filer.sh
|
|
||||||
|
|
||||||
# Agent scripts — list cross-sourced files where function scope flows across files.
|
# Agent scripts — list cross-sourced files where function scope flows across files.
|
||||||
check_script dev/dev-agent.sh
|
check_script dev/dev-agent.sh
|
||||||
|
|
|
||||||
|
|
@ -292,22 +292,6 @@ def main() -> int:
|
||||||
"21aec56a99d5252b23fb9a38b895e8e8": "Verification helper: check body for Decomposed from pattern",
|
"21aec56a99d5252b23fb9a38b895e8e8": "Verification helper: check body for Decomposed from pattern",
|
||||||
"60ea98b3604557d539193b2a6624e232": "Verification helper: append sub-issue number",
|
"60ea98b3604557d539193b2a6624e232": "Verification helper: append sub-issue number",
|
||||||
"9f6ae8e7811575b964279d8820494eb0": "Verification helper: for loop done pattern",
|
"9f6ae8e7811575b964279d8820494eb0": "Verification helper: for loop done pattern",
|
||||||
# Standard lib source block shared across formula-driven agent run scripts
|
|
||||||
"330e5809a00b95ade1a5fce2d749b94b": "Standard lib source block (env.sh, formula-session.sh, worktree.sh, guard.sh, agent-sdk.sh)",
|
|
||||||
# Common vault-seed script patterns: logging helpers + flag parsing
|
|
||||||
# Used in tools/vault-seed-woodpecker.sh + lib/init/nomad/wp-oauth-register.sh
|
|
||||||
"843a1cbf987952697d4e05e96ed2b2d5": "Logging helpers + DRY_RUN init (vault-seed-woodpecker + wp-oauth-register)",
|
|
||||||
"ee51df9642f2ef37af73b0c15f4d8406": "Logging helpers + DRY_RUN loop start (vault-seed-woodpecker + wp-oauth-register)",
|
|
||||||
"9a57368f3c1dfd29ec328596b86962a0": "Flag parsing loop + case start (vault-seed-woodpecker + wp-oauth-register)",
|
|
||||||
"9d72d40ff303cbed0b7e628fc15381c3": "Case loop + dry-run handler (vault-seed-woodpecker + wp-oauth-register)",
|
|
||||||
"5b52ddbbf47948e3cbc1b383f0909588": "Help + invalid arg handler end (vault-seed-woodpecker + wp-oauth-register)",
|
|
||||||
# Common vault-seed script preamble + precondition patterns
|
|
||||||
# Shared across tools/vault-seed-{forgejo,agents,woodpecker}.sh
|
|
||||||
"dff3675c151fcdbd2fef798826ae919b": "Vault-seed preamble: set -euo + path setup + source hvault.sh + KV_MOUNT",
|
|
||||||
"1cd9f0d083e24e6e6b2071db9b6dae09": "Vault-seed preconditions: binary check loop + VAULT_ADDR guard",
|
|
||||||
"63bfa88d71764c95c65a9a248f3e40ab": "Vault-seed preconditions: binary check end + VAULT_ADDR die",
|
|
||||||
"34873ad3570b211ce1d90468ab6ac94c": "Vault-seed preconditions: VAULT_ADDR die + hvault_token_lookup",
|
|
||||||
"71a52270f249e843cda48ad896d9f781": "Vault-seed preconditions: VAULT_ADDR + hvault_token_lookup + die",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if not sh_files:
|
if not sh_files:
|
||||||
|
|
|
||||||
|
|
@ -1,334 +0,0 @@
|
||||||
# =============================================================================
|
|
||||||
# .woodpecker/nomad-validate.yml — Static validation for Nomad+Vault artifacts
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S0.5, issue #825; extended in S2.6,
|
|
||||||
# issue #884). Locks in the "no-ad-hoc-steps" principle: every HCL/shell
|
|
||||||
# artifact under nomad/, lib/init/nomad/, vault/policies/, plus the
|
|
||||||
# `disinto init` dispatcher and vault/roles.yaml, gets checked before it
|
|
||||||
# can land.
|
|
||||||
#
|
|
||||||
# Triggers on PRs (and pushes) that touch any of:
|
|
||||||
# nomad/** — HCL configs (server, client, vault)
|
|
||||||
# lib/init/nomad/** — cluster-up / install / systemd / vault-init /
|
|
||||||
# vault-nomad-auth (S2.6 trigger: vault-*.sh
|
|
||||||
# is a subset of this glob)
|
|
||||||
# bin/disinto — `disinto init --backend=nomad` dispatcher
|
|
||||||
# tests/disinto-init-nomad.bats — the bats suite itself
|
|
||||||
# vault/policies/** — Vault ACL policy HCL files (S2.1, S2.6)
|
|
||||||
# vault/roles.yaml — JWT-auth role bindings (S2.3, S2.6)
|
|
||||||
# .woodpecker/nomad-validate.yml — the pipeline definition
|
|
||||||
#
|
|
||||||
# Steps (all fail-closed — any error blocks merge):
|
|
||||||
# 1. nomad-config-validate — `nomad config validate` on server + client HCL
|
|
||||||
# 2. nomad-job-validate — `nomad job validate` looped over every
|
|
||||||
# nomad/jobs/*.hcl (new jobspecs get
|
|
||||||
# CI coverage automatically)
|
|
||||||
# 3. vault-operator-diagnose — `vault operator diagnose` syntax check on vault.hcl
|
|
||||||
# 4. vault-policy-fmt — `vault policy fmt` idempotence check on
|
|
||||||
# every vault/policies/*.hcl (format drift =
|
|
||||||
# CI fail; non-destructive via cp+diff)
|
|
||||||
# 5. vault-policy-validate — HCL syntax + capability validation for every
|
|
||||||
# vault/policies/*.hcl via `vault policy write`
|
|
||||||
# against an inline dev-mode Vault server
|
|
||||||
# 6. vault-roles-validate — yamllint + role→policy reference check on
|
|
||||||
# vault/roles.yaml (every referenced policy
|
|
||||||
# must exist as vault/policies/<name>.hcl)
|
|
||||||
# 7. shellcheck-nomad — shellcheck the cluster-up + install scripts + disinto
|
|
||||||
# 8. bats-init-nomad — `disinto init --backend=nomad --dry-run` smoke tests
|
|
||||||
#
|
|
||||||
# Secret-scan coverage: vault/policies/*.hcl is already scanned by the
|
|
||||||
# P11 gate (.woodpecker/secret-scan.yml, issue #798) — its trigger path
|
|
||||||
# `vault/**/*` covers everything under this directory. We intentionally
|
|
||||||
# do NOT duplicate that gate here; one scanner, one source of truth.
|
|
||||||
#
|
|
||||||
# Pinned image versions match lib/init/nomad/install.sh (nomad 1.9.5 /
|
|
||||||
# vault 1.18.5). Bump there AND here together — drift = CI passing on
|
|
||||||
# syntax the runtime would reject.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
when:
|
|
||||||
- event: [push, pull_request]
|
|
||||||
path:
|
|
||||||
- "nomad/**"
|
|
||||||
- "lib/init/nomad/**"
|
|
||||||
- "bin/disinto"
|
|
||||||
- "tests/disinto-init-nomad.bats"
|
|
||||||
- "vault/policies/**"
|
|
||||||
- "vault/roles.yaml"
|
|
||||||
- ".woodpecker/nomad-validate.yml"
|
|
||||||
|
|
||||||
# Authenticated clone — same pattern as .woodpecker/ci.yml. Forgejo is
|
|
||||||
# configured with REQUIRE_SIGN_IN, so anonymous git clones fail (exit 128).
|
|
||||||
# FORGE_TOKEN is injected globally via WOODPECKER_ENVIRONMENT.
|
|
||||||
clone:
|
|
||||||
git:
|
|
||||||
image: alpine/git
|
|
||||||
commands:
|
|
||||||
- AUTH_URL=$(printf '%s' "$CI_REPO_CLONE_URL" | sed "s|://|://token:$FORGE_TOKEN@|")
|
|
||||||
- git clone --depth 1 "$AUTH_URL" .
|
|
||||||
- git fetch --depth 1 origin "$CI_COMMIT_REF"
|
|
||||||
- git checkout FETCH_HEAD
|
|
||||||
|
|
||||||
steps:
|
|
||||||
# ── 1. Nomad HCL syntax check ────────────────────────────────────────────
|
|
||||||
# `nomad config validate` parses server.hcl + client.hcl and fails on any
|
|
||||||
# HCL/semantic error (unknown block, invalid port range, bad driver cfg).
|
|
||||||
# vault.hcl is excluded — it's a Vault config, not Nomad, so it goes
|
|
||||||
# through the vault-operator-diagnose step instead.
|
|
||||||
- name: nomad-config-validate
|
|
||||||
image: hashicorp/nomad:1.9.5
|
|
||||||
commands:
|
|
||||||
- nomad version
|
|
||||||
- nomad config validate nomad/server.hcl nomad/client.hcl
|
|
||||||
|
|
||||||
# ── 2. Nomad jobspec HCL syntax check ────────────────────────────────────
|
|
||||||
# `nomad job validate` is a *different* tool from `nomad config validate` —
|
|
||||||
# the former parses jobspec HCL (job/group/task blocks, driver config,
|
|
||||||
# volume refs, network ports), the latter parses agent config HCL
|
|
||||||
# (server/client blocks). Running step 1 on a jobspec would reject it
|
|
||||||
# with "unknown block 'job'", and vice versa. Hence two separate steps.
|
|
||||||
#
|
|
||||||
# Validation is offline: no running Nomad server is required (exit 0 on
|
|
||||||
# valid HCL, 1 on syntax/semantic error). The CLI takes a single path
|
|
||||||
# argument so we loop over every `*.hcl` file under nomad/jobs/ —
|
|
||||||
# that way a new jobspec PR gets CI coverage automatically (no separate
|
|
||||||
# "edit the pipeline" step to forget). The `.hcl` suffix is the naming
|
|
||||||
# convention: anything else in nomad/jobs/ is deliberately not validated
|
|
||||||
# by this step.
|
|
||||||
#
|
|
||||||
# `[ -f "$f" ]` guards against the no-match case: POSIX sh does not
|
|
||||||
# nullglob, so an empty jobs/ directory would leave the literal glob in
|
|
||||||
# "$f" and fail. Today forgejo.hcl exists, but the guard keeps the
|
|
||||||
# step safe during any future transient empty state.
|
|
||||||
#
|
|
||||||
# Scope note: offline validate catches jobspec-level errors (unknown
|
|
||||||
# stanzas, missing required fields, wrong value types, invalid driver
|
|
||||||
# config). It does NOT resolve cross-file references like host_volume
|
|
||||||
# source names against nomad/client.hcl — that mismatch surfaces at
|
|
||||||
# scheduling time on the live cluster, not here. The paired-write rule
|
|
||||||
# in nomad/AGENTS.md ("add to both client.hcl and cluster-up.sh") is the
|
|
||||||
# primary guardrail for that class of drift.
|
|
||||||
- name: nomad-job-validate
|
|
||||||
image: hashicorp/nomad:1.9.5
|
|
||||||
commands:
|
|
||||||
- |
|
|
||||||
set -e
|
|
||||||
for f in nomad/jobs/*.hcl; do
|
|
||||||
[ -f "$f" ] || continue
|
|
||||||
echo "validating jobspec: $f"
|
|
||||||
nomad job validate "$f"
|
|
||||||
done
|
|
||||||
|
|
||||||
# ── 3. Vault HCL syntax check ────────────────────────────────────────────
|
|
||||||
# `vault operator diagnose` loads the config and runs a suite of checks.
|
|
||||||
# Exit codes:
|
|
||||||
# 0 — all checks green
|
|
||||||
# 1 — at least one hard failure (bad HCL, bad schema, unreachable storage)
|
|
||||||
# 2 — advisory warnings only (no hard failure)
|
|
||||||
# Our factory dev-box vault.hcl deliberately runs TLS-disabled on a
|
|
||||||
# localhost-only listener (documented in nomad/vault.hcl), which triggers
|
|
||||||
# an advisory "Check Listener TLS" warning → exit 2. The config still
|
|
||||||
# parses, so we tolerate exit 2 and fail only on exit 1 or crashes.
|
|
||||||
# -skip=storage/-skip=listener disables the runtime-only checks (vault's
|
|
||||||
# container has /vault/file so storage is fine, but explicit skip is cheap
|
|
||||||
# insurance against future container-image drift).
|
|
||||||
- name: vault-operator-diagnose
|
|
||||||
image: hashicorp/vault:1.18.5
|
|
||||||
commands:
|
|
||||||
- |
|
|
||||||
rc=0
|
|
||||||
vault operator diagnose -config=nomad/vault.hcl -skip=storage -skip=listener || rc=$?
|
|
||||||
case "$rc" in
|
|
||||||
0) echo "vault config: all checks green" ;;
|
|
||||||
2) echo "vault config: parse OK (rc=2 — advisory warnings only; TLS-disabled on localhost listener is by design)" ;;
|
|
||||||
*) echo "vault config: hard failure (rc=$rc)" >&2; exit "$rc" ;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# ── 4. Vault policy fmt idempotence check ────────────────────────────────
|
|
||||||
# `vault policy fmt <file>` formats a local HCL policy file in place.
|
|
||||||
# There's no `-check`/dry-run flag (vault 1.18.5), so we implement a
|
|
||||||
# non-destructive check as cp → fmt-on-copy → diff against original.
|
|
||||||
# Any diff means the committed file would be rewritten by `vault policy
|
|
||||||
# fmt` — failure steers the author to run `vault policy fmt <file>`
|
|
||||||
# locally before pushing.
|
|
||||||
#
|
|
||||||
# Scope: vault/policies/*.hcl only. The `[ -f "$f" ]` guard handles the
|
|
||||||
# no-match case (POSIX sh does not nullglob) so an empty policies/
|
|
||||||
# directory does not fail this step.
|
|
||||||
#
|
|
||||||
# Note: `vault policy fmt` is purely local (HCL text transform) and does
|
|
||||||
# not require a running Vault server, which is why this step can run
|
|
||||||
# without starting one.
|
|
||||||
- name: vault-policy-fmt
|
|
||||||
image: hashicorp/vault:1.18.5
|
|
||||||
commands:
|
|
||||||
- |
|
|
||||||
set -e
|
|
||||||
failed=0
|
|
||||||
for f in vault/policies/*.hcl; do
|
|
||||||
[ -f "$f" ] || continue
|
|
||||||
tmp="/tmp/$(basename "$f").fmt"
|
|
||||||
cp "$f" "$tmp"
|
|
||||||
vault policy fmt "$tmp" >/dev/null 2>&1
|
|
||||||
if ! diff -u "$f" "$tmp"; then
|
|
||||||
echo "ERROR: $f is not formatted — run 'vault policy fmt $f' locally" >&2
|
|
||||||
failed=1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
if [ "$failed" -gt 0 ]; then
|
|
||||||
echo "vault-policy-fmt: formatting drift detected" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo "vault-policy-fmt: all policies formatted correctly"
|
|
||||||
|
|
||||||
# ── 5. Vault policy HCL syntax + capability validation ───────────────────
|
|
||||||
# Vault has no offline `vault policy validate` subcommand — the closest
|
|
||||||
# in-CLI validator is `vault policy write`, which sends the HCL to a
|
|
||||||
# running server which parses it, checks capability names against the
|
|
||||||
# known set (read, list, create, update, delete, patch, sudo, deny),
|
|
||||||
# and rejects unknown stanzas / malformed path blocks. We start an
|
|
||||||
# inline dev-mode Vault (in-memory, no persistence, root token = "root")
|
|
||||||
# for the duration of this step and loop `vault policy write` over every
|
|
||||||
# vault/policies/*.hcl; the policies never leave the ephemeral dev
|
|
||||||
# server, so this is strictly a validator — not a deploy.
|
|
||||||
#
|
|
||||||
# Exit-code handling:
|
|
||||||
# - `vault policy write` exits 0 on success, non-zero on any parse /
|
|
||||||
# semantic error. We aggregate failures across all files so a single
|
|
||||||
# CI run surfaces every broken policy (not just the first).
|
|
||||||
# - The dev server is killed on any step exit via EXIT trap so the
|
|
||||||
# step tears down cleanly even on failure.
|
|
||||||
#
|
|
||||||
# Why dev-mode is sufficient: we're not persisting secrets, only asking
|
|
||||||
# Vault to parse policy text. The factory's production Vault is NOT
|
|
||||||
# contacted.
|
|
||||||
- name: vault-policy-validate
|
|
||||||
image: hashicorp/vault:1.18.5
|
|
||||||
commands:
|
|
||||||
- |
|
|
||||||
set -e
|
|
||||||
vault server -dev -dev-root-token-id=root -dev-listen-address=127.0.0.1:8200 >/tmp/vault-dev.log 2>&1 &
|
|
||||||
VAULT_PID=$!
|
|
||||||
trap 'kill "$VAULT_PID" 2>/dev/null || true' EXIT INT TERM
|
|
||||||
export VAULT_ADDR=http://127.0.0.1:8200
|
|
||||||
export VAULT_TOKEN=root
|
|
||||||
ready=0
|
|
||||||
i=0
|
|
||||||
while [ "$i" -lt 30 ]; do
|
|
||||||
if vault status >/dev/null 2>&1; then
|
|
||||||
ready=1
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
i=$((i + 1))
|
|
||||||
sleep 0.5
|
|
||||||
done
|
|
||||||
if [ "$ready" -ne 1 ]; then
|
|
||||||
echo "vault-policy-validate: dev server failed to start after 15s" >&2
|
|
||||||
cat /tmp/vault-dev.log >&2 || true
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
failed=0
|
|
||||||
for f in vault/policies/*.hcl; do
|
|
||||||
[ -f "$f" ] || continue
|
|
||||||
name=$(basename "$f" .hcl)
|
|
||||||
echo "validate: $f"
|
|
||||||
if ! vault policy write "$name" "$f"; then
|
|
||||||
echo " ERROR: $f failed validation" >&2
|
|
||||||
failed=1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
if [ "$failed" -gt 0 ]; then
|
|
||||||
echo "vault-policy-validate: validation errors found" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo "vault-policy-validate: all policies valid"
|
|
||||||
|
|
||||||
# ── 6. vault/roles.yaml validator ────────────────────────────────────────
|
|
||||||
# Validates the JWT-auth role bindings file (S2.3). Two checks:
|
|
||||||
#
|
|
||||||
# a. `yamllint` — catches YAML syntax errors and indentation drift.
|
|
||||||
# Uses a relaxed config (line length bumped to 200) because
|
|
||||||
# roles.yaml's comments are wide by design.
|
|
||||||
# b. role → policy reference check — every role's `policy:` field
|
|
||||||
# must match a basename in vault/policies/*.hcl. A role pointing
|
|
||||||
# at a non-existent policy = runtime "permission denied" at job
|
|
||||||
# placement; catching the drift here turns it into a CI failure.
|
|
||||||
# Also verifies each role entry has the four required fields
|
|
||||||
# (name, policy, namespace, job_id) per the file's documented
|
|
||||||
# format.
|
|
||||||
#
|
|
||||||
# Parsing is done with PyYAML (the roles.yaml format is a strict
|
|
||||||
# subset that awk-level parsing in tools/vault-apply-roles.sh handles
|
|
||||||
# too, but PyYAML in CI gives us structural validation for free). If
|
|
||||||
# roles.yaml is ever absent (e.g. reverted), the step skips rather
|
|
||||||
# than fails — presence is enforced by S2.3's own tooling, not here.
|
|
||||||
- name: vault-roles-validate
|
|
||||||
image: python:3.12-alpine
|
|
||||||
commands:
|
|
||||||
- pip install --quiet --disable-pip-version-check pyyaml yamllint
|
|
||||||
- |
|
|
||||||
set -e
|
|
||||||
if [ ! -f vault/roles.yaml ]; then
|
|
||||||
echo "vault-roles-validate: vault/roles.yaml not present, skipping"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
yamllint -d '{extends: relaxed, rules: {line-length: {max: 200}}}' vault/roles.yaml
|
|
||||||
echo "vault-roles-validate: yamllint OK"
|
|
||||||
python3 - <<'PY'
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
with open('vault/roles.yaml') as f:
|
|
||||||
data = yaml.safe_load(f) or {}
|
|
||||||
roles = data.get('roles') or []
|
|
||||||
if not roles:
|
|
||||||
print("vault-roles-validate: no roles defined in vault/roles.yaml", file=sys.stderr)
|
|
||||||
sys.exit(1)
|
|
||||||
existing = {
|
|
||||||
os.path.splitext(e)[0]
|
|
||||||
for e in os.listdir('vault/policies')
|
|
||||||
if e.endswith('.hcl')
|
|
||||||
}
|
|
||||||
required = ('name', 'policy', 'namespace', 'job_id')
|
|
||||||
failed = 0
|
|
||||||
for r in roles:
|
|
||||||
if not isinstance(r, dict):
|
|
||||||
print(f"ERROR: role entry is not a mapping: {r!r}", file=sys.stderr)
|
|
||||||
failed = 1
|
|
||||||
continue
|
|
||||||
for field in required:
|
|
||||||
if r.get(field) in (None, ''):
|
|
||||||
print(f"ERROR: role entry missing required field '{field}': {r}", file=sys.stderr)
|
|
||||||
failed = 1
|
|
||||||
policy = r.get('policy')
|
|
||||||
if policy and policy not in existing:
|
|
||||||
print(
|
|
||||||
f"ERROR: role '{r.get('name')}' references policy '{policy}' "
|
|
||||||
f"but vault/policies/{policy}.hcl does not exist",
|
|
||||||
file=sys.stderr,
|
|
||||||
)
|
|
||||||
failed = 1
|
|
||||||
sys.exit(failed)
|
|
||||||
PY
|
|
||||||
echo "vault-roles-validate: all role→policy references valid"
|
|
||||||
|
|
||||||
  # ── 7. Shellcheck ────────────────────────────────────────────────────────
  # Covers the new lib/init/nomad/*.sh scripts plus bin/disinto (which owns
  # the backend dispatcher). bin/disinto has no .sh extension so the
  # repo-wide shellcheck in .woodpecker/ci.yml skips it — this step is the
  # one place it gets checked.
  # NOTE(review): if lib/init/nomad/ ever contains no .sh files, the
  # unexpanded glob is passed to shellcheck literally and the step fails —
  # presumably an acceptable fail-loud default; confirm.
  - name: shellcheck-nomad
    image: koalaman/shellcheck-alpine:stable
    commands:
      - shellcheck --severity=warning lib/init/nomad/*.sh bin/disinto
|
|
||||||
  # ── 8. bats: `disinto init --backend=nomad --dry-run` ────────────────────
  # Smoke-tests the CLI dispatcher: both --backend=nomad variants exit 0
  # with the expected step list, and --backend=docker stays on the docker
  # path (regression guard). Pure dry-run — no sudo, no network.
  - name: bats-init-nomad
    image: alpine:3.19
    commands:
      # bash is installed alongside bats: alpine's default shell is busybox
      # ash, and bin/disinto is a bash script (repo convention).
      - apk add --no-cache bash bats
      - bats tests/disinto-init-nomad.bats
|
|
@ -1,64 +0,0 @@
|
||||||
# .woodpecker/publish-images.yml — Build and push versioned container images
# Triggered on tag pushes (e.g. v1.2.3). Builds and pushes:
#   - ghcr.io/disinto/agents:<tag>
#   - ghcr.io/disinto/reproduce:<tag>
#   - ghcr.io/disinto/edge:<tag>
#
# Requires GHCR_TOKEN secret configured in Woodpecker with push access
# to ghcr.io/disinto.

when:
  event: tag
  ref: refs/tags/v*

# Custom authenticated clone: inject FORGE_TOKEN into the clone URL
# (presumably the default Woodpecker clone lacks credentials for the
# private Forgejo instance — confirm against the other pipelines).
clone:
  git:
    image: alpine/git
    commands:
      - AUTH_URL=$(printf '%s' "$CI_REPO_CLONE_URL" | sed "s|://|://token:$FORGE_TOKEN@|")
      - git clone --depth 1 "$AUTH_URL" .
      - git fetch --depth 1 origin "$CI_COMMIT_REF"
      - git checkout FETCH_HEAD

steps:
  # NOTE(review): `latest` is pushed for every v* tag — re-tagging or
  # backporting an older version would move `latest` backwards. Confirm
  # this is intended.
  - name: build-and-push-agents
    image: plugins/docker
    settings:
      repo: ghcr.io/disinto/agents
      registry: ghcr.io
      dockerfile: docker/agents/Dockerfile
      context: .
      tags:
        - ${CI_COMMIT_TAG}
        - latest
      username: disinto
      password:
        from_secret: GHCR_TOKEN

  - name: build-and-push-reproduce
    image: plugins/docker
    settings:
      repo: ghcr.io/disinto/reproduce
      registry: ghcr.io
      dockerfile: docker/reproduce/Dockerfile
      context: .
      tags:
        - ${CI_COMMIT_TAG}
        - latest
      username: disinto
      password:
        from_secret: GHCR_TOKEN

  # Edge uses a narrower build context (docker/edge) than the other two
  # images, which build from the repo root.
  - name: build-and-push-edge
    image: plugins/docker
    settings:
      repo: ghcr.io/disinto/edge
      registry: ghcr.io
      dockerfile: docker/edge/Dockerfile
      context: docker/edge
      tags:
        - ${CI_COMMIT_TAG}
        - latest
      username: disinto
      password:
        from_secret: GHCR_TOKEN
|
|
@ -1,68 +0,0 @@
|
||||||
#!/usr/bin/env bash
set -euo pipefail
# run-secret-scan.sh — CI wrapper for lib/secret-scan.sh
#
# Scans files changed in this PR (vs the PR target branch) for plaintext
# secrets, restricted to secret-adjacent paths. Exits non-zero if any
# secret is detected; exits 0 (skip) when nothing relevant changed.
#
# Required env: CI_COMMIT_TARGET_BRANCH (set by Woodpecker on PR events).

# shellcheck source=../lib/secret-scan.sh
source lib/secret-scan.sh   # provides scan_for_secrets()

# Path patterns considered secret-adjacent (ERE fragments, OR-joined below).
SECRET_PATH_PATTERNS=(
  '\.env'
  'tools/vault-.*\.sh'
  'nomad/'
  'vault/'
  'action-vault/'
  'lib/hvault\.sh'
  'lib/action-vault\.sh'
)

# Join the patterns into one alternation regex; trim the trailing '|'.
path_regex=$(printf '%s|' "${SECRET_PATH_PATTERNS[@]}")
path_regex="${path_regex%|}"

# Files changed in this PR vs the target branch.
# A shallow clone (depth 50) may lack the merge base for very large PRs.
# Previously that failure was swallowed with `|| true`, silently skipping
# the gate; now it is logged as a loud warning so the skip is visible in
# CI output (still non-blocking, so unrelated work isn't held hostage).
if ! changed_files=$(git diff --name-only --diff-filter=ACMR "origin/${CI_COMMIT_TARGET_BRANCH}...HEAD"); then
  echo "secret-scan: WARNING: git diff vs origin/${CI_COMMIT_TARGET_BRANCH} failed (missing merge base in shallow clone?) — skipping scan" >&2
  changed_files=""
fi

if [ -z "$changed_files" ]; then
  echo "secret-scan: no changed files found, skipping"
  exit 0
fi

# Keep only secret-adjacent paths.
target_files=$(printf '%s\n' "$changed_files" | grep -E "$path_regex" || true)

if [ -z "$target_files" ]; then
  echo "secret-scan: no secret-adjacent files changed, skipping"
  exit 0
fi

echo "secret-scan: scanning $(printf '%s\n' "$target_files" | wc -l) file(s):"
# Indent every listed path. The previous `printf ' %s\n' "$target_files"`
# passed the whole multi-line string as one argument, so only the first
# line got the leading space.
printf '%s\n' "$target_files" | sed 's/^/  /'

failures=0
while IFS= read -r file; do
  # Skip deleted / non-existent files.
  [ -f "$file" ] || continue
  # Skip binary files — the scanner expects text content.
  file -b --mime-encoding -- "$file" 2>/dev/null | grep -q binary && continue

  content=$(cat -- "$file")
  # scan_for_secrets returns non-zero when a secret is detected (hence the
  # negation) — presumed from its use here; confirm in lib/secret-scan.sh.
  if ! scan_for_secrets "$content"; then
    echo "FAIL: secret detected in $file"
    failures=$((failures + 1))
  fi
done <<< "$target_files"

if [ "$failures" -gt 0 ]; then
  echo ""
  echo "secret-scan: $failures file(s) contain potential secrets — merge blocked"
  echo "If these are false positives, verify patterns in lib/secret-scan.sh"
  exit 1
fi

echo "secret-scan: all files clean"
|
|
@ -1,32 +0,0 @@
|
||||||
# .woodpecker/secret-scan.yml — Block PRs that leak plaintext secrets
#
# Triggers on pull requests touching secret-adjacent paths.
# Sources lib/secret-scan.sh and scans each changed file's content.
# Exits non-zero if any potential secret is detected.

when:
  - event: pull_request
    path:
      - ".env*"
      - "tools/vault-*.sh"
      - "nomad/**/*"
      - "vault/**/*"
      - "action-vault/**/*"
      - "lib/hvault.sh"
      - "lib/action-vault.sh"

clone:
  git:
    image: alpine/git
    commands:
      - AUTH_URL=$(printf '%s' "$CI_REPO_CLONE_URL" | sed "s|://|://token:$FORGE_TOKEN@|")
      - git clone --depth 50 "$AUTH_URL" .
      # Map the target branch into refs/remotes/origin/ explicitly: a bare
      # `git fetch origin <branch>` only updates FETCH_HEAD, so
      # origin/<target> would not exist when the PR targets a non-default
      # branch — and run-secret-scan.sh's
      # `git diff origin/<target>...HEAD` would fail, silently skipping
      # the gate. The leading '+' forces the update if the tracking ref
      # already exists (e.g. for the default branch created by the clone).
      - git fetch --depth 50 origin "$CI_COMMIT_REF" "+$CI_COMMIT_TARGET_BRANCH:refs/remotes/origin/$CI_COMMIT_TARGET_BRANCH"
      - git checkout FETCH_HEAD

steps:
  - name: secret-scan
    image: alpine:3
    commands:
      # bash for the wrapper script, git for the diff, file for the
      # binary-detection skip, grep for pattern matching.
      - apk add --no-cache bash git grep file
      - bash .woodpecker/run-secret-scan.sh
||||||
40
AGENTS.md
40
AGENTS.md
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Disinto — Agent Instructions
|
# Disinto — Agent Instructions
|
||||||
|
|
||||||
## What this repo is
|
## What this repo is
|
||||||
|
|
@ -31,26 +31,21 @@ disinto/ (code repo)
|
||||||
├── supervisor/ supervisor-run.sh — formula-driven health monitoring (polling-loop executor)
|
├── supervisor/ supervisor-run.sh — formula-driven health monitoring (polling-loop executor)
|
||||||
│ preflight.sh — pre-flight data collection for supervisor formula
|
│ preflight.sh — pre-flight data collection for supervisor formula
|
||||||
├── architect/ architect-run.sh — strategic decomposition of vision into sprints
|
├── architect/ architect-run.sh — strategic decomposition of vision into sprints
|
||||||
├── action-vault/ vault-env.sh — shared env setup (vault redesign in progress, see #73-#77)
|
├── vault/ vault-env.sh — shared env setup (vault redesign in progress, see #73-#77)
|
||||||
│ SCHEMA.md — vault item schema documentation
|
│ SCHEMA.md — vault item schema documentation
|
||||||
│ validate.sh — vault item validator
|
│ validate.sh — vault item validator
|
||||||
│ examples/ — example vault action TOMLs (promote, publish, release, webhook-call)
|
│ examples/ — example vault action TOMLs (promote, publish, release, webhook-call)
|
||||||
├── lib/ env.sh, agent-sdk.sh, ci-helpers.sh, ci-debug.sh, load-project.sh, parse-deps.sh, guard.sh, mirrors.sh, pr-lifecycle.sh, issue-lifecycle.sh, worktree.sh, formula-session.sh, stack-lock.sh, forge-setup.sh, forge-push.sh, ops-setup.sh, ci-setup.sh, generators.sh, hire-agent.sh, release.sh, build-graph.py, branch-protection.sh, secret-scan.sh, tea-helpers.sh, action-vault.sh, ci-log-reader.py, git-creds.sh, sprint-filer.sh, hvault.sh
|
├── lib/ env.sh, agent-sdk.sh, ci-helpers.sh, ci-debug.sh, load-project.sh, parse-deps.sh, guard.sh, mirrors.sh, pr-lifecycle.sh, issue-lifecycle.sh, worktree.sh, formula-session.sh, stack-lock.sh, forge-setup.sh, forge-push.sh, ops-setup.sh, ci-setup.sh, generators.sh, hire-agent.sh, release.sh, build-graph.py, branch-protection.sh, secret-scan.sh, tea-helpers.sh, vault.sh, ci-log-reader.py, git-creds.sh
|
||||||
│ hooks/ — Claude Code session hooks (on-compact-reinject, on-idle-stop, on-phase-change, on-pretooluse-guard, on-session-end, on-stop-failure)
|
│ hooks/ — Claude Code session hooks (on-compact-reinject, on-idle-stop, on-phase-change, on-pretooluse-guard, on-session-end, on-stop-failure)
|
||||||
│ init/nomad/ — cluster-up.sh, install.sh, vault-init.sh, lib-systemd.sh (Nomad+Vault Step 0 installers, #821-#825); wp-oauth-register.sh (Forgejo OAuth2 app + Vault KV seeder for Woodpecker, S3.3); deploy.sh (dependency-ordered Nomad job deploy + health-wait, S4)
|
|
||||||
├── nomad/ server.hcl, client.hcl (allow_privileged for woodpecker-agent, S3-fix-5), vault.hcl — HCL configs deployed to /etc/nomad.d/ and /etc/vault.d/ by lib/init/nomad/cluster-up.sh
|
|
||||||
│ jobs/ — Nomad jobspecs: forgejo.hcl (Vault secrets via template, S2.4); woodpecker-server.hcl + woodpecker-agent.hcl (host-net, docker.sock, Vault KV, S3.1-S3.2); agents.hcl (7 roles, llama, Vault-templated bot tokens, S4.1); vault-runner.hcl (parameterized batch dispatch, S5.3)
|
|
||||||
├── projects/ *.toml.example — templates; *.toml — local per-box config (gitignored)
|
├── projects/ *.toml.example — templates; *.toml — local per-box config (gitignored)
|
||||||
├── formulas/ Issue templates (TOML specs for multi-step agent tasks)
|
├── formulas/ Issue templates (TOML specs for multi-step agent tasks)
|
||||||
├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge dispatcher, chat (server.py, entrypoint-chat.sh, Dockerfile, ui/)
|
├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge dispatcher, chat (server.py, entrypoint-chat.sh, Dockerfile, ui/)
|
||||||
├── tools/ Operational tools: edge-control/ (register.sh, install.sh, verify-chat-sandbox.sh)
|
├── tools/ Operational tools: edge-control/ (register.sh, install.sh, verify-chat-sandbox.sh)
|
||||||
│ vault-apply-policies.sh, vault-apply-roles.sh, vault-import.sh — Vault provisioning (S2.1/S2.2)
|
|
||||||
│ vault-seed-<svc>.sh — per-service Vault secret seeders; auto-invoked by `bin/disinto --with <svc>` (add a new file to support a new service)
|
|
||||||
├── docs/ Protocol docs (PHASE-PROTOCOL.md, EVIDENCE-ARCHITECTURE.md)
|
├── docs/ Protocol docs (PHASE-PROTOCOL.md, EVIDENCE-ARCHITECTURE.md)
|
||||||
├── site/ disinto.ai website content
|
├── site/ disinto.ai website content
|
||||||
├── tests/ Test files (mock-forgejo.py, smoke-init.sh, lib-hvault.bats, lib-generators.bats, vault-import.bats, disinto-init-nomad.bats)
|
├── tests/ Test files (mock-forgejo.py, smoke-init.sh)
|
||||||
├── templates/ Issue templates
|
├── templates/ Issue templates
|
||||||
├── bin/ The `disinto` CLI script (`--with <svc>` deploys services + runs their Vault seeders)
|
├── bin/ The `disinto` CLI script
|
||||||
├── disinto-factory/ Setup documentation and skill
|
├── disinto-factory/ Setup documentation and skill
|
||||||
├── state/ Runtime state
|
├── state/ Runtime state
|
||||||
├── .woodpecker/ Woodpecker CI pipeline configs
|
├── .woodpecker/ Woodpecker CI pipeline configs
|
||||||
|
|
@ -91,7 +86,7 @@ Each agent has a `.profile` repository on Forgejo storing `knowledge/lessons-lea
|
||||||
- All scripts start with `#!/usr/bin/env bash` and `set -euo pipefail`
|
- All scripts start with `#!/usr/bin/env bash` and `set -euo pipefail`
|
||||||
- Source shared environment: `source "$(dirname "$0")/../lib/env.sh"`
|
- Source shared environment: `source "$(dirname "$0")/../lib/env.sh"`
|
||||||
- Log to `$LOGFILE` using the `log()` function from env.sh or defined locally
|
- Log to `$LOGFILE` using the `log()` function from env.sh or defined locally
|
||||||
- Never hardcode secrets — agent secrets come from `.env.enc`, vault secrets from `secrets/<NAME>.enc` (age-encrypted, one file per key)
|
- Never hardcode secrets — agent secrets come from `.env.enc`, vault secrets from `.env.vault.enc` (or `.env`/`.env.vault` fallback)
|
||||||
- Never embed secrets in issue bodies, PR descriptions, or comments — use env var references (e.g. `$BASE_RPC_URL`)
|
- Never embed secrets in issue bodies, PR descriptions, or comments — use env var references (e.g. `$BASE_RPC_URL`)
|
||||||
- ShellCheck must pass (CI runs `shellcheck` on all `.sh` files)
|
- ShellCheck must pass (CI runs `shellcheck` on all `.sh` files)
|
||||||
- Avoid duplicate code — shared helpers go in `lib/`
|
- Avoid duplicate code — shared helpers go in `lib/`
|
||||||
|
|
@ -118,12 +113,10 @@ bash dev/phase-test.sh
|
||||||
| Supervisor | `supervisor/` | Health monitoring | [supervisor/AGENTS.md](supervisor/AGENTS.md) |
|
| Supervisor | `supervisor/` | Health monitoring | [supervisor/AGENTS.md](supervisor/AGENTS.md) |
|
||||||
| Planner | `planner/` | Strategic planning | [planner/AGENTS.md](planner/AGENTS.md) |
|
| Planner | `planner/` | Strategic planning | [planner/AGENTS.md](planner/AGENTS.md) |
|
||||||
| Predictor | `predictor/` | Infrastructure pattern detection | [predictor/AGENTS.md](predictor/AGENTS.md) |
|
| Predictor | `predictor/` | Infrastructure pattern detection | [predictor/AGENTS.md](predictor/AGENTS.md) |
|
||||||
| Architect | `architect/` | Strategic decomposition (read-only on project repo) | [architect/AGENTS.md](architect/AGENTS.md) |
|
| Architect | `architect/` | Strategic decomposition | [architect/AGENTS.md](architect/AGENTS.md) |
|
||||||
| Filer | `lib/sprint-filer.sh` | Sub-issue filing from merged sprint PRs | ops repo pipeline (deferred, see #779) |
|
|
||||||
| Reproduce | `docker/reproduce/` | Bug reproduction using Playwright MCP | `formulas/reproduce.toml` |
|
| Reproduce | `docker/reproduce/` | Bug reproduction using Playwright MCP | `formulas/reproduce.toml` |
|
||||||
| Triage | `docker/reproduce/` | Deep root cause analysis | `formulas/triage.toml` |
|
| Triage | `docker/reproduce/` | Deep root cause analysis | `formulas/triage.toml` |
|
||||||
| Edge dispatcher | `docker/edge/` | Polls ops repo for vault actions, executes via Claude sessions | `docker/edge/dispatcher.sh` |
|
| Edge dispatcher | `docker/edge/` | Polls ops repo for vault actions, executes via Claude sessions | `docker/edge/dispatcher.sh` |
|
||||||
| Local-model agents | `docker/agents/` (same image) | Local llama-server agents configured via `[agents.X]` sections in project TOML | [docs/agents-llama.md](docs/agents-llama.md) |
|
|
||||||
|
|
||||||
> **Vault:** Being redesigned as a PR-based approval workflow (issues #73-#77).
|
> **Vault:** Being redesigned as a PR-based approval workflow (issues #73-#77).
|
||||||
> See [docs/VAULT.md](docs/VAULT.md) for the vault PR workflow details.
|
> See [docs/VAULT.md](docs/VAULT.md) for the vault PR workflow details.
|
||||||
|
|
@ -142,7 +135,7 @@ Issues flow: `backlog` → `in-progress` → PR → CI → review → merge →
|
||||||
|---|---|---|
|
|---|---|---|
|
||||||
| `backlog` | Issue is queued for implementation. Dev-poll picks the first ready one. | Planner, gardener, humans |
|
| `backlog` | Issue is queued for implementation. Dev-poll picks the first ready one. | Planner, gardener, humans |
|
||||||
| `priority` | Queue tier above plain backlog. Issues with both `priority` and `backlog` are picked before plain `backlog` issues. FIFO within each tier. | Planner, humans |
|
| `priority` | Queue tier above plain backlog. Issues with both `priority` and `backlog` are picked before plain `backlog` issues. FIFO within each tier. | Planner, humans |
|
||||||
| `in-progress` | Dev-agent is actively working on this issue. Only one issue per project is in-progress at a time. Also set on vision issues by filer-bot when sub-issues are filed (#764). | dev-agent.sh (claims issue), filer-bot (vision issues) |
|
| `in-progress` | Dev-agent is actively working on this issue. Only one issue per project is in-progress at a time. | dev-agent.sh (claims issue) |
|
||||||
| `blocked` | Issue is stuck — agent session failed, crashed, timed out, or CI exhausted. Diagnostic comment on the issue has details. Also used for unmet dependencies. | dev-agent.sh, dev-poll.sh (on failure) |
|
| `blocked` | Issue is stuck — agent session failed, crashed, timed out, or CI exhausted. Diagnostic comment on the issue has details. Also used for unmet dependencies. | dev-agent.sh, dev-poll.sh (on failure) |
|
||||||
| `tech-debt` | Pre-existing issue flagged by AI reviewer, not introduced by a PR. | review-pr.sh (auto-created follow-ups) |
|
| `tech-debt` | Pre-existing issue flagged by AI reviewer, not introduced by a PR. | review-pr.sh (auto-created follow-ups) |
|
||||||
| `underspecified` | Dev-agent refused the issue as too large or vague. | dev-poll.sh (on preflight `too_large`), dev-agent.sh (on mid-run `too_large` refusal) |
|
| `underspecified` | Dev-agent refused the issue as too large or vague. | dev-poll.sh (on preflight `too_large`), dev-agent.sh (on mid-run `too_large` refusal) |
|
||||||
|
|
@ -184,17 +177,24 @@ Humans write these. Agents read and enforce them.
|
||||||
| AD-002 | **Concurrency is bounded per LLM backend, not per project.** One concurrent Claude session per OAuth credential pool; one concurrent session per llama-server instance. Containers with disjoint backends may run in parallel. | The single-thread invariant is about *backends*, not pipelines. **(a) Anthropic OAuth credentials race on token refresh** — each container uses a per-session `CLAUDE_CONFIG_DIR`, so Claude Code's native lockfile-based OAuth refresh handles contention automatically without external serialization. (Legacy: set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old `flock session.lock` wrapper for rollback.) **(b) llama-server has finite VRAM and one KV cache** — parallel inference thrashes the cache and risks OOM. All llama-backed agents serialize on the same lock. **(c) Disjoint backends are free to parallelize.** Today `disinto-agents` (Anthropic OAuth, runs `review,gardener`) runs concurrently with `disinto-agents-llama` (llama, runs `dev`) on the same project — they share neither OAuth state nor llama VRAM. **(d) Per-project work-conflict safety** (no duplicate dev work, no merge conflicts on the same branch) is enforced by `issue_claim` (assignee + `in-progress` label) and per-issue worktrees — that's a separate guard that does NOT depend on this AD. |
|
| AD-002 | **Concurrency is bounded per LLM backend, not per project.** One concurrent Claude session per OAuth credential pool; one concurrent session per llama-server instance. Containers with disjoint backends may run in parallel. | The single-thread invariant is about *backends*, not pipelines. **(a) Anthropic OAuth credentials race on token refresh** — each container uses a per-session `CLAUDE_CONFIG_DIR`, so Claude Code's native lockfile-based OAuth refresh handles contention automatically without external serialization. (Legacy: set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old `flock session.lock` wrapper for rollback.) **(b) llama-server has finite VRAM and one KV cache** — parallel inference thrashes the cache and risks OOM. All llama-backed agents serialize on the same lock. **(c) Disjoint backends are free to parallelize.** Today `disinto-agents` (Anthropic OAuth, runs `review,gardener`) runs concurrently with `disinto-agents-llama` (llama, runs `dev`) on the same project — they share neither OAuth state nor llama VRAM. **(d) Per-project work-conflict safety** (no duplicate dev work, no merge conflicts on the same branch) is enforced by `issue_claim` (assignee + `in-progress` label) and per-issue worktrees — that's a separate guard that does NOT depend on this AD. |
|
||||||
| AD-003 | The runtime creates and destroys, the formula preserves. | Runtime manages worktrees/sessions/temp. Formulas commit knowledge to git before signaling done. |
|
| AD-003 | The runtime creates and destroys, the formula preserves. | Runtime manages worktrees/sessions/temp. Formulas commit knowledge to git before signaling done. |
|
||||||
| AD-004 | Event-driven > polling > fixed delays. | Never `waitForTimeout` or hardcoded sleep. Use phase files, webhooks, or poll loops with backoff. |
|
| AD-004 | Event-driven > polling > fixed delays. | Never `waitForTimeout` or hardcoded sleep. Use phase files, webhooks, or poll loops with backoff. |
|
||||||
| AD-005 | Secrets via env var indirection, never in issue bodies. | Issue bodies become code. Agent secrets go in `.env.enc` (SOPS-encrypted), vault secrets in `secrets/<NAME>.enc` (age-encrypted, one file per key). Referenced as `$VAR_NAME`. Runner gets only vault secrets; agents get only agent secrets. |
|
| AD-005 | Secrets via env var indirection, never in issue bodies. | Issue bodies become code. Agent secrets go in `.env.enc`, vault secrets in `.env.vault.enc` (SOPS-encrypted when available; plaintext `.env`/`.env.vault` fallback supported). Referenced as `$VAR_NAME`. Runner gets only vault secrets; agents get only agent secrets. |
|
||||||
| AD-006 | External actions go through vault dispatch, never direct. | Agents build addressables; only the vault exercises them (publishes, deploys, posts). Tokens for external systems (`GITHUB_TOKEN`, `CLAWHUB_TOKEN`, deploy keys) live only in `secrets/<NAME>.enc` and are decrypted into the ephemeral runner container. `lib/env.sh` unsets them so agents never hold them. PRs with direct external actions without vault dispatch get REQUEST_CHANGES. (Vault redesign in progress: PR-based approval on ops repo, see #73-#77) |
|
| AD-006 | External actions go through vault dispatch, never direct. | Agents build addressables; only the vault exercises them (publishes, deploys, posts). Tokens for external systems (`GITHUB_TOKEN`, `CLAWHUB_TOKEN`, deploy keys) live only in `.env.vault.enc` and are injected into the ephemeral runner container. `lib/env.sh` unsets them so agents never hold them. PRs with direct external actions without vault dispatch get REQUEST_CHANGES. (Vault redesign in progress: PR-based approval on ops repo, see #73-#77) |
|
||||||
|
|
||||||
**Who enforces what:**
|
**Who enforces what:**
|
||||||
- **Gardener** checks open backlog issues against ADs during grooming; closes violations with a comment. **Planner** plans within the architecture; does not create issues that violate ADs.
|
- **Gardener** checks open backlog issues against ADs during grooming; closes violations with a comment referencing the AD number.
|
||||||
|
- **Planner** plans within the architecture; does not create issues that violate ADs.
|
||||||
- **Dev-agent** reads AGENTS.md before implementing; refuses work that violates ADs.
|
- **Dev-agent** reads AGENTS.md before implementing; refuses work that violates ADs.
|
||||||
- **AD-002 is a runtime invariant; nothing for the gardener to check at issue-groom time.** OAuth concurrency is handled by per-session `CLAUDE_CONFIG_DIR` isolation (with `CLAUDE_EXTERNAL_LOCK` as a rollback flag). Per-issue work is enforced by `issue_claim`. A violation manifests as a 401 or VRAM OOM in agent logs, not as a malformed issue.
|
- **AD-002 is a runtime invariant; nothing for the gardener to check at issue-groom time.** OAuth concurrency is handled by per-session `CLAUDE_CONFIG_DIR` isolation (with `CLAUDE_EXTERNAL_LOCK` as a rollback flag). Per-issue work is enforced by `issue_claim`. A violation manifests as a 401 or VRAM OOM in agent logs, not as a malformed issue.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Phase-Signaling Protocol
|
## Phase-Signaling Protocol
|
||||||
|
|
||||||
When running as a persistent tmux session, Claude must signal the orchestrator at each phase boundary by writing to a phase file (e.g. `/tmp/dev-session-{project}-{issue}.phase`).
|
When running as a persistent tmux session, Claude must signal the orchestrator
|
||||||
|
at each phase boundary by writing to a phase file (e.g.
|
||||||
|
`/tmp/dev-session-{project}-{issue}.phase`).
|
||||||
|
|
||||||
|
Key phases: `PHASE:awaiting_ci` → `PHASE:awaiting_review` → `PHASE:done`.
|
||||||
|
Also: `PHASE:escalate` (needs human input), `PHASE:failed`.
|
||||||
|
|
||||||
Key phases: `PHASE:awaiting_ci` → `PHASE:awaiting_review` → `PHASE:done`. Also: `PHASE:escalate` (needs human input), `PHASE:failed`.
|
|
||||||
See [docs/PHASE-PROTOCOL.md](docs/PHASE-PROTOCOL.md) for the complete spec, orchestrator reaction matrix, sequence diagram, and crash recovery.
|
See [docs/PHASE-PROTOCOL.md](docs/PHASE-PROTOCOL.md) for the complete spec, orchestrator reaction matrix, sequence diagram, and crash recovery.
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Architect — Agent Instructions
|
# Architect — Agent Instructions
|
||||||
|
|
||||||
## What this agent is
|
## What this agent is
|
||||||
|
|
@ -10,9 +10,9 @@ converses with humans through PR comments.
|
||||||
## Role
|
## Role
|
||||||
|
|
||||||
- **Input**: Vision issues from VISION.md, prerequisite tree from ops repo
|
- **Input**: Vision issues from VISION.md, prerequisite tree from ops repo
|
||||||
- **Output**: Sprint proposals as PRs on the ops repo (with embedded `## Sub-issues` blocks)
|
- **Output**: Sprint proposals as PRs on the ops repo, sub-issue files
|
||||||
- **Mechanism**: Bash-driven orchestration in `architect-run.sh`, pitching formula via `formulas/run-architect.toml`
|
- **Mechanism**: Bash-driven orchestration in `architect-run.sh`, pitching formula via `formulas/run-architect.toml`
|
||||||
- **Identity**: `architect-bot` on Forgejo (READ-ONLY on project repo, write on ops repo only — #764)
|
- **Identity**: `architect-bot` on Forgejo
|
||||||
|
|
||||||
## Responsibilities
|
## Responsibilities
|
||||||
|
|
||||||
|
|
@ -24,17 +24,16 @@ converses with humans through PR comments.
|
||||||
acceptance criteria and dependencies
|
acceptance criteria and dependencies
|
||||||
4. **Human conversation**: Respond to PR comments, refine sprint proposals based
|
4. **Human conversation**: Respond to PR comments, refine sprint proposals based
|
||||||
on human feedback
|
on human feedback
|
||||||
5. **Sub-issue definition**: Define concrete sub-issues in the `## Sub-issues`
|
5. **Sub-issue filing**: After design forks are resolved, file concrete sub-issues
|
||||||
block of the sprint spec. Filing is handled by `filer-bot` after sprint PR
|
for implementation
|
||||||
merge (#764)
|
|
||||||
|
|
||||||
## Formula
|
## Formula
|
||||||
|
|
||||||
The architect pitching is driven by `formulas/run-architect.toml`. This formula defines
|
The architect pitching is driven by `formulas/run-architect.toml`. This formula defines
|
||||||
the steps for:
|
the steps for:
|
||||||
- Research: analyzing vision items and prerequisite tree
|
- Research: analyzing vision items and prerequisite tree
|
||||||
- Pitch: creating structured sprint PRs with embedded `## Sub-issues` blocks
|
- Pitch: creating structured sprint PRs
|
||||||
- Design Q&A: refining the sprint via PR comments after human ACCEPT
|
- Sub-issue filing: creating concrete implementation issues
|
||||||
|
|
||||||
## Bash-driven orchestration
|
## Bash-driven orchestration
|
||||||
|
|
||||||
|
|
@ -58,31 +57,22 @@ APPROVED review → start design questions (model posts Q1:, adds Design forks s
|
||||||
↓
|
↓
|
||||||
Answers received → continue Q&A (model processes answers, posts follow-ups)
|
Answers received → continue Q&A (model processes answers, posts follow-ups)
|
||||||
↓
|
↓
|
||||||
All forks resolved → finalize ## Sub-issues section in sprint spec
|
All forks resolved → sub-issue filing (model files implementation issues)
|
||||||
↓
|
|
||||||
Sprint PR merged → filer-bot files sub-issues on project repo (#764)
|
|
||||||
↓
|
↓
|
||||||
REJECT review → close PR + journal (model processes rejection, bash merges PR)
|
REJECT review → close PR + journal (model processes rejection, bash merges PR)
|
||||||
```
|
```
|
||||||
|
|
||||||
### Vision issue lifecycle
|
### Vision issue lifecycle
|
||||||
|
|
||||||
Vision issues decompose into sprint sub-issues. Sub-issues are defined in the
|
Vision issues decompose into sprint sub-issues tracked via "Decomposed from #N" in sub-issue bodies. The architect automatically closes vision issues when all sub-issues are closed:
|
||||||
`## Sub-issues` block of the sprint spec (between `<!-- filer:begin -->` and
|
|
||||||
`<!-- filer:end -->` markers) and filed by `filer-bot` after the sprint PR merges
|
|
||||||
on the ops repo (#764).
|
|
||||||
|
|
||||||
Each filer-created sub-issue carries a `<!-- decomposed-from: #<vision>, sprint: <slug>, id: <id> -->`
|
1. Before picking new vision issues, the architect checks each open vision issue
|
||||||
marker in its body for idempotency and traceability.
|
2. For each, it queries merged sprint PRs — **only PRs whose title or body reference the specific vision issue** (matched via `#N` pattern, filtering out unrelated PRs that happen to close unrelated issues) (#735/#736)
|
||||||
|
3. Extracts sub-issue numbers from those PRs, excluding the vision issue itself
|
||||||
|
4. If all sub-issues are closed, posts a summary comment listing completed sub-issues (with an idempotency guard: skips if a "Vision Issue Completed" comment already exists)
|
||||||
|
5. The vision issue is then closed automatically
|
||||||
|
|
||||||
The filer-bot (via `lib/sprint-filer.sh`) handles vision lifecycle:
|
This ensures vision issues transition from `open` → `closed` once their work is complete, without manual intervention. The #N-scoped matching prevents false positives where unrelated sub-issues would incorrectly trigger vision issue closure.
|
||||||
1. After filing sub-issues, adds `in-progress` label to the vision issue
|
|
||||||
2. On each run, checks if all sub-issues for a vision are closed
|
|
||||||
3. If all closed, posts a summary comment and closes the vision issue
|
|
||||||
|
|
||||||
The architect no longer writes to the project repo — it is read-only (#764).
|
|
||||||
All project-repo writes (issue filing, label management, vision closure) are
|
|
||||||
handled by filer-bot with its narrowly-scoped `FORGE_FILER_TOKEN`.
|
|
||||||
|
|
||||||
### Session management
|
### Session management
|
||||||
|
|
||||||
|
|
@ -96,7 +86,6 @@ Run via `architect/architect-run.sh`, which:
|
||||||
- Acquires a poll-loop lock (via `acquire_lock`) and checks available memory
|
- Acquires a poll-loop lock (via `acquire_lock`) and checks available memory
|
||||||
- Cleans up per-issue scratch files from previous runs (`/tmp/architect-{project}-scratch-*.md`)
|
- Cleans up per-issue scratch files from previous runs (`/tmp/architect-{project}-scratch-*.md`)
|
||||||
- Sources shared libraries (env.sh, formula-session.sh)
|
- Sources shared libraries (env.sh, formula-session.sh)
|
||||||
- Exports `FORGE_TOKEN_OVERRIDE="${FORGE_ARCHITECT_TOKEN}"` BEFORE sourcing env.sh, ensuring architect-bot identity survives re-sourcing (#762)
|
|
||||||
- Uses FORGE_ARCHITECT_TOKEN for authentication
|
- Uses FORGE_ARCHITECT_TOKEN for authentication
|
||||||
- Processes existing architect PRs via bash-driven design phase
|
- Processes existing architect PRs via bash-driven design phase
|
||||||
- Loads the formula and builds context from VISION.md, AGENTS.md, and ops repo
|
- Loads the formula and builds context from VISION.md, AGENTS.md, and ops repo
|
||||||
|
|
@ -106,9 +95,7 @@ Run via `architect/architect-run.sh`, which:
|
||||||
- Selects up to `pitch_budget` (3 - open architect PRs) remaining vision issues
|
- Selects up to `pitch_budget` (3 - open architect PRs) remaining vision issues
|
||||||
- For each selected issue, invokes stateless `claude -p` with issue body + context
|
- For each selected issue, invokes stateless `claude -p` with issue body + context
|
||||||
- Creates PRs directly from pitch content (no scratch files)
|
- Creates PRs directly from pitch content (no scratch files)
|
||||||
- Agent is invoked for stateless pitch generation and response processing (ACCEPT/REJECT handling)
|
- Agent is invoked only for response processing (ACCEPT/REJECT handling)
|
||||||
- NOTE: architect-bot is read-only on the project repo (#764) — sub-issue filing
|
|
||||||
and in-progress label management are handled by filer-bot after sprint PR merge
|
|
||||||
|
|
||||||
**Multi-sprint pitching**: The architect pitches up to 3 sprints per run. Bash handles all state management:
|
**Multi-sprint pitching**: The architect pitches up to 3 sprints per run. Bash handles all state management:
|
||||||
- Fetches Forgejo API data (vision issues, open PRs, merged PRs)
|
- Fetches Forgejo API data (vision issues, open PRs, merged PRs)
|
||||||
|
|
@ -133,5 +120,4 @@ empty file not created, just document it).
|
||||||
- #100: Architect formula — research + design fork identification
|
- #100: Architect formula — research + design fork identification
|
||||||
- #101: Architect formula — sprint PR creation with questions
|
- #101: Architect formula — sprint PR creation with questions
|
||||||
- #102: Architect formula — answer parsing + sub-issue filing
|
- #102: Architect formula — answer parsing + sub-issue filing
|
||||||
- #764: Permission scoping — architect read-only on project repo, filer-bot files sub-issues
|
|
||||||
- #491: Refactor — bash-driven design phase with stateful session resumption
|
- #491: Refactor — bash-driven design phase with stateful session resumption
|
||||||
|
|
|
||||||
|
|
@ -34,11 +34,10 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
|
||||||
# env.sh from nested shells / claude -p tools (#762, #747)
|
|
||||||
export FORGE_TOKEN_OVERRIDE="${FORGE_ARCHITECT_TOKEN:-}"
|
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
|
# Override FORGE_TOKEN with architect-bot's token (#747)
|
||||||
|
FORGE_TOKEN="${FORGE_ARCHITECT_TOKEN:-${FORGE_TOKEN}}"
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
@ -117,8 +116,8 @@ build_architect_prompt() {
|
||||||
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
||||||
|
|
||||||
Your role: strategic decomposition of vision issues into development sprints.
|
Your role: strategic decomposition of vision issues into development sprints.
|
||||||
Propose sprints via PRs on the ops repo, converse with humans through PR comments.
|
Propose sprints via PRs on the ops repo, converse with humans through PR comments,
|
||||||
You are READ-ONLY on the project repo — sub-issues are filed by filer-bot after sprint PR merge (#764).
|
and file sub-issues after design forks are resolved.
|
||||||
|
|
||||||
## Project context
|
## Project context
|
||||||
${CONTEXT_BLOCK}
|
${CONTEXT_BLOCK}
|
||||||
|
|
@ -145,8 +144,8 @@ build_architect_prompt_for_mode() {
|
||||||
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
||||||
|
|
||||||
Your role: strategic decomposition of vision issues into development sprints.
|
Your role: strategic decomposition of vision issues into development sprints.
|
||||||
Propose sprints via PRs on the ops repo, converse with humans through PR comments.
|
Propose sprints via PRs on the ops repo, converse with humans through PR comments,
|
||||||
You are READ-ONLY on the project repo — sub-issues are filed by filer-bot after sprint PR merge (#764).
|
and file sub-issues after design forks are resolved.
|
||||||
|
|
||||||
## CURRENT STATE: Approved PR awaiting initial design questions
|
## CURRENT STATE: Approved PR awaiting initial design questions
|
||||||
|
|
||||||
|
|
@ -157,10 +156,10 @@ design conversation has not yet started. Your task is to:
|
||||||
2. Identify the key design decisions that need human input
|
2. Identify the key design decisions that need human input
|
||||||
3. Post initial design questions (Q1:, Q2:, etc.) as comments on the PR
|
3. Post initial design questions (Q1:, Q2:, etc.) as comments on the PR
|
||||||
4. Add a `## Design forks` section to the PR body documenting the design decisions
|
4. Add a `## Design forks` section to the PR body documenting the design decisions
|
||||||
5. Update the ## Sub-issues section in the sprint spec if design decisions affect decomposition
|
5. File sub-issues for each design fork path if applicable
|
||||||
|
|
||||||
This is NOT a pitch phase — the pitch is already approved. This is the START
|
This is NOT a pitch phase — the pitch is already approved. This is the START
|
||||||
of the design Q&A phase. Sub-issues are filed by filer-bot after sprint PR merge (#764).
|
of the design Q&A phase.
|
||||||
|
|
||||||
## Project context
|
## Project context
|
||||||
${CONTEXT_BLOCK}
|
${CONTEXT_BLOCK}
|
||||||
|
|
@ -179,8 +178,8 @@ _PROMPT_EOF_
|
||||||
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
||||||
|
|
||||||
Your role: strategic decomposition of vision issues into development sprints.
|
Your role: strategic decomposition of vision issues into development sprints.
|
||||||
Propose sprints via PRs on the ops repo, converse with humans through PR comments.
|
Propose sprints via PRs on the ops repo, converse with humans through PR comments,
|
||||||
You are READ-ONLY on the project repo — sub-issues are filed by filer-bot after sprint PR merge (#764).
|
and file sub-issues after design forks are resolved.
|
||||||
|
|
||||||
## CURRENT STATE: Design Q&A in progress
|
## CURRENT STATE: Design Q&A in progress
|
||||||
|
|
||||||
|
|
@ -194,7 +193,7 @@ Your task is to:
|
||||||
2. Read human answers from PR comments
|
2. Read human answers from PR comments
|
||||||
3. Parse the answers and determine next steps
|
3. Parse the answers and determine next steps
|
||||||
4. Post follow-up questions if needed (Q3:, Q4:, etc.)
|
4. Post follow-up questions if needed (Q3:, Q4:, etc.)
|
||||||
5. If all design forks are resolved, finalize the ## Sub-issues section in the sprint spec
|
5. If all design forks are resolved, file sub-issues for each path
|
||||||
6. Update the `## Design forks` section as you progress
|
6. Update the `## Design forks` section as you progress
|
||||||
|
|
||||||
## Project context
|
## Project context
|
||||||
|
|
@ -418,10 +417,224 @@ fetch_vision_issues() {
|
||||||
"${FORGE_API}/issues?labels=vision&state=open&limit=100" 2>/dev/null || echo '[]'
|
"${FORGE_API}/issues?labels=vision&state=open&limit=100" 2>/dev/null || echo '[]'
|
||||||
}
|
}
|
||||||
|
|
||||||
# NOTE: get_vision_subissues, all_subissues_closed, close_vision_issue,
|
# ── Helper: Fetch all sub-issues for a vision issue ───────────────────────
|
||||||
# check_and_close_completed_visions removed (#764) — architect-bot is read-only
|
# Sub-issues are identified by:
|
||||||
# on the project repo. Vision lifecycle (closing completed visions, adding
|
# 1. Issues whose body contains "Decomposed from #N" pattern
|
||||||
# in-progress labels) is now handled by filer-bot via lib/sprint-filer.sh.
|
# 2. Issues referenced in merged sprint PR bodies
|
||||||
|
# Returns: newline-separated list of sub-issue numbers (empty if none)
|
||||||
|
# Args: vision_issue_number
|
||||||
|
get_vision_subissues() {
|
||||||
|
local vision_issue="$1"
|
||||||
|
local subissues=()
|
||||||
|
|
||||||
|
# Method 1: Find issues with "Decomposed from #N" in body
|
||||||
|
local issues_json
|
||||||
|
issues_json=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_API}/issues?limit=100" 2>/dev/null) || true
|
||||||
|
|
||||||
|
if [ -n "$issues_json" ] && [ "$issues_json" != "null" ]; then
|
||||||
|
while IFS= read -r subissue_num; do
|
||||||
|
[ -z "$subissue_num" ] && continue
|
||||||
|
subissues+=("$subissue_num")
|
||||||
|
done <<< "$(printf '%s' "$issues_json" | jq -r --arg vid "$vision_issue" \
|
||||||
|
'[.[] | select(.number != ($vid | tonumber)) | select(.body // "" | contains("Decomposed from #" + $vid))] | .[].number' 2>/dev/null)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Method 2: Find issues referenced in merged sprint PR bodies
|
||||||
|
# Only consider PRs whose title or body references this specific vision issue
|
||||||
|
local prs_json
|
||||||
|
prs_json=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_API_BASE}/repos/${FORGE_OPS_REPO}/pulls?state=closed&limit=100" 2>/dev/null) || true
|
||||||
|
|
||||||
|
if [ -n "$prs_json" ] && [ "$prs_json" != "null" ]; then
|
||||||
|
while IFS= read -r pr_num; do
|
||||||
|
[ -z "$pr_num" ] && continue
|
||||||
|
|
||||||
|
local pr_details pr_body pr_title
|
||||||
|
pr_details=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_API_BASE}/repos/${FORGE_OPS_REPO}/pulls/${pr_num}" 2>/dev/null) || continue
|
||||||
|
|
||||||
|
local is_merged
|
||||||
|
is_merged=$(printf '%s' "$pr_details" | jq -r '.merged // false') || continue
|
||||||
|
|
||||||
|
if [ "$is_merged" != "true" ]; then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
pr_title=$(printf '%s' "$pr_details" | jq -r '.title // ""') || continue
|
||||||
|
pr_body=$(printf '%s' "$pr_details" | jq -r '.body // ""') || continue
|
||||||
|
|
||||||
|
# Only process PRs that reference this specific vision issue
|
||||||
|
if ! printf '%s\n%s' "$pr_title" "$pr_body" | grep -qE "#${vision_issue}([^0-9]|$)"; then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Extract issue numbers from PR body, excluding the vision issue itself
|
||||||
|
while IFS= read -r ref_issue; do
|
||||||
|
[ -z "$ref_issue" ] && continue
|
||||||
|
# Skip the vision issue itself
|
||||||
|
[ "$ref_issue" = "$vision_issue" ] && continue
|
||||||
|
# Skip if already in list
|
||||||
|
local found=false
|
||||||
|
for existing in "${subissues[@]+"${subissues[@]}"}"; do
|
||||||
|
[ "$existing" = "$ref_issue" ] && found=true && break
|
||||||
|
done
|
||||||
|
if [ "$found" = false ]; then
|
||||||
|
subissues+=("$ref_issue")
|
||||||
|
fi
|
||||||
|
done <<< "$(printf '%s' "$pr_body" | grep -oE '#[0-9]+' | tr -d '#' | sort -u)"
|
||||||
|
done <<< "$(printf '%s' "$prs_json" | jq -r '.[] | select(.title | contains("architect:")) | .number')"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Output unique sub-issues
|
||||||
|
printf '%s\n' "${subissues[@]}" | sort -u | grep -v '^$' || true
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Helper: Check if all sub-issues of a vision issue are closed ───────────
|
||||||
|
# Returns: 0 if all sub-issues are closed, 1 if any are still open
|
||||||
|
# Args: vision_issue_number
|
||||||
|
all_subissues_closed() {
|
||||||
|
local vision_issue="$1"
|
||||||
|
local subissues
|
||||||
|
subissues=$(get_vision_subissues "$vision_issue")
|
||||||
|
|
||||||
|
# If no sub-issues found, parent cannot be considered complete
|
||||||
|
if [ -z "$subissues" ]; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check each sub-issue state
|
||||||
|
while IFS= read -r subissue_num; do
|
||||||
|
[ -z "$subissue_num" ] && continue
|
||||||
|
|
||||||
|
local sub_state
|
||||||
|
sub_state=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_API}/issues/${subissue_num}" 2>/dev/null | jq -r '.state // "unknown"') || true
|
||||||
|
|
||||||
|
if [ "$sub_state" != "closed" ]; then
|
||||||
|
log "Sub-issue #${subissue_num} is ${sub_state} — vision issue #${vision_issue} not ready to close"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
done <<< "$subissues"
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Helper: Close vision issue with summary comment ────────────────────────
|
||||||
|
# Posts a comment listing all completed sub-issues before closing.
|
||||||
|
# Returns: 0 on success, 1 on failure
|
||||||
|
# Args: vision_issue_number
|
||||||
|
close_vision_issue() {
|
||||||
|
local vision_issue="$1"
|
||||||
|
|
||||||
|
# Idempotency guard: check if a completion comment already exists
|
||||||
|
local existing_comments
|
||||||
|
existing_comments=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_API}/issues/${vision_issue}/comments" 2>/dev/null) || existing_comments="[]"
|
||||||
|
|
||||||
|
if printf '%s' "$existing_comments" | jq -e '[.[] | select(.body | contains("Vision Issue Completed"))] | length > 0' >/dev/null 2>&1; then
|
||||||
|
log "Vision issue #${vision_issue} already has a completion comment — skipping"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
local subissues
|
||||||
|
subissues=$(get_vision_subissues "$vision_issue")
|
||||||
|
|
||||||
|
# Build summary comment
|
||||||
|
local summary=""
|
||||||
|
local count=0
|
||||||
|
while IFS= read -r subissue_num; do
|
||||||
|
[ -z "$subissue_num" ] && continue
|
||||||
|
local sub_title
|
||||||
|
sub_title=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_API}/issues/${subissue_num}" 2>/dev/null | jq -r '.title // "Untitled"') || sub_title="Untitled"
|
||||||
|
summary+="- #${subissue_num}: ${sub_title}"$'\n'
|
||||||
|
count=$((count + 1))
|
||||||
|
done <<< "$subissues"
|
||||||
|
|
||||||
|
local comment
|
||||||
|
comment=$(cat <<EOF
|
||||||
|
## Vision Issue Completed
|
||||||
|
|
||||||
|
All sub-issues have been implemented and merged. This vision issue is now closed.
|
||||||
|
|
||||||
|
### Completed sub-issues (${count}):
|
||||||
|
${summary}
|
||||||
|
---
|
||||||
|
*Automated closure by architect · $(date -u '+%Y-%m-%d %H:%M UTC')*
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
# Post comment before closing
|
||||||
|
local tmpfile tmpjson
|
||||||
|
tmpfile=$(mktemp /tmp/vision-close-XXXXXX.md)
|
||||||
|
tmpjson="${tmpfile}.json"
|
||||||
|
printf '%s' "$comment" > "$tmpfile"
|
||||||
|
jq -Rs '{body:.}' < "$tmpfile" > "$tmpjson"
|
||||||
|
|
||||||
|
if ! curl -sf -X POST \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${FORGE_API}/issues/${vision_issue}/comments" \
|
||||||
|
--data-binary @"$tmpjson" >/dev/null 2>&1; then
|
||||||
|
log "WARNING: failed to post closure comment on vision issue #${vision_issue}"
|
||||||
|
rm -f "$tmpfile" "$tmpjson"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
rm -f "$tmpfile" "$tmpjson"
|
||||||
|
|
||||||
|
# Clear assignee and close the issue
|
||||||
|
curl -sf -X PATCH \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${FORGE_API}/issues/${vision_issue}" \
|
||||||
|
-d '{"assignees":[]}' >/dev/null 2>&1 || true
|
||||||
|
|
||||||
|
curl -sf -X PATCH \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${FORGE_API}/issues/${vision_issue}" \
|
||||||
|
-d '{"state":"closed"}' >/dev/null 2>&1 || true
|
||||||
|
|
||||||
|
log "Closed vision issue #${vision_issue} — all ${count} sub-issue(s) complete"
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Lifecycle check: Close vision issues with all sub-issues complete ──────
|
||||||
|
# Runs before picking new vision issues for decomposition.
|
||||||
|
# Checks each open vision issue and closes it if all sub-issues are closed.
|
||||||
|
check_and_close_completed_visions() {
|
||||||
|
log "Checking for vision issues with all sub-issues complete..."
|
||||||
|
|
||||||
|
local vision_issues_json
|
||||||
|
vision_issues_json=$(fetch_vision_issues)
|
||||||
|
|
||||||
|
if [ -z "$vision_issues_json" ] || [ "$vision_issues_json" = "null" ]; then
|
||||||
|
log "No open vision issues found"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Get all vision issue numbers
|
||||||
|
local vision_issue_nums
|
||||||
|
vision_issue_nums=$(printf '%s' "$vision_issues_json" | jq -r '.[].number' 2>/dev/null) || vision_issue_nums=""
|
||||||
|
|
||||||
|
local closed_count=0
|
||||||
|
while IFS= read -r vision_issue; do
|
||||||
|
[ -z "$vision_issue" ] && continue
|
||||||
|
|
||||||
|
if all_subissues_closed "$vision_issue"; then
|
||||||
|
if close_vision_issue "$vision_issue"; then
|
||||||
|
closed_count=$((closed_count + 1))
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done <<< "$vision_issue_nums"
|
||||||
|
|
||||||
|
if [ "$closed_count" -gt 0 ]; then
|
||||||
|
log "Closed ${closed_count} vision issue(s) with all sub-issues complete"
|
||||||
|
else
|
||||||
|
log "No vision issues ready for closure"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
# ── Helper: Fetch open architect PRs from ops repo Forgejo API ───────────
|
# ── Helper: Fetch open architect PRs from ops repo Forgejo API ───────────
|
||||||
# Returns: JSON array of architect PR objects
|
# Returns: JSON array of architect PR objects
|
||||||
|
|
@ -513,23 +726,7 @@ Instructions:
|
||||||
## Recommendation
|
## Recommendation
|
||||||
<architect's assessment: worth it / defer / alternative approach>
|
<architect's assessment: worth it / defer / alternative approach>
|
||||||
|
|
||||||
## Sub-issues
|
|
||||||
|
|
||||||
<!-- filer:begin -->
|
|
||||||
- id: <kebab-case-id>
|
|
||||||
title: \"vision(#${issue_num}): <concise sub-issue title>\"
|
|
||||||
labels: [backlog]
|
|
||||||
depends_on: []
|
|
||||||
body: |
|
|
||||||
## Goal
|
|
||||||
<what this sub-issue accomplishes>
|
|
||||||
## Acceptance criteria
|
|
||||||
- [ ] <criterion>
|
|
||||||
<!-- filer:end -->
|
|
||||||
|
|
||||||
IMPORTANT: Do NOT include design forks or questions. This is a go/no-go pitch.
|
IMPORTANT: Do NOT include design forks or questions. This is a go/no-go pitch.
|
||||||
The ## Sub-issues block is parsed by the filer-bot pipeline after sprint PR merge.
|
|
||||||
Each sub-issue between filer:begin/end markers becomes a Forgejo issue.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
@ -638,8 +835,37 @@ post_pr_footer() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# NOTE: add_inprogress_label removed (#764) — architect-bot is read-only on
|
# ── Helper: Add in-progress label to vision issue ────────────────────────
|
||||||
# project repo. in-progress label is now added by filer-bot via sprint-filer.sh.
|
# Args: vision_issue_number
|
||||||
|
add_inprogress_label() {
|
||||||
|
local issue_num="$1"
|
||||||
|
|
||||||
|
# Get label ID for 'in-progress'
|
||||||
|
local labels_json
|
||||||
|
labels_json=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_API}/labels" 2>/dev/null) || return 1
|
||||||
|
|
||||||
|
local inprogress_label_id
|
||||||
|
inprogress_label_id=$(printf '%s' "$labels_json" | jq -r --arg label "in-progress" '.[] | select(.name == $label) | .id' 2>/dev/null) || true
|
||||||
|
|
||||||
|
if [ -z "$inprogress_label_id" ]; then
|
||||||
|
log "WARNING: in-progress label not found"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Add label to issue
|
||||||
|
if curl -sf -X POST \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${FORGE_API}/issues/${issue_num}/labels" \
|
||||||
|
-d "{\"labels\": [${inprogress_label_id}]}" >/dev/null 2>&1; then
|
||||||
|
log "Added in-progress label to vision issue #${issue_num}"
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
log "WARNING: failed to add in-progress label to vision issue #${issue_num}"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
# ── Precondition checks in bash before invoking the model ─────────────────
|
# ── Precondition checks in bash before invoking the model ─────────────────
|
||||||
|
|
||||||
|
|
@ -689,7 +915,9 @@ if [ "${open_arch_prs:-0}" -ge 3 ]; then
|
||||||
log "3 open architect PRs found but responses detected — processing"
|
log "3 open architect PRs found but responses detected — processing"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# NOTE: Vision lifecycle check (close completed visions) moved to filer-bot (#764)
|
# ── Lifecycle check: Close vision issues with all sub-issues complete ──────
|
||||||
|
# Run before picking new vision issues for decomposition
|
||||||
|
check_and_close_completed_visions
|
||||||
|
|
||||||
# ── Bash-driven state management: Select vision issues for pitching ───────
|
# ── Bash-driven state management: Select vision issues for pitching ───────
|
||||||
# This logic is also documented in formulas/run-architect.toml preflight step
|
# This logic is also documented in formulas/run-architect.toml preflight step
|
||||||
|
|
@ -825,7 +1053,8 @@ for vision_issue in "${ARCHITECT_TARGET_ISSUES[@]}"; do
|
||||||
# Post footer comment
|
# Post footer comment
|
||||||
post_pr_footer "$pr_number"
|
post_pr_footer "$pr_number"
|
||||||
|
|
||||||
# NOTE: in-progress label is added by filer-bot after sprint PR merge (#764)
|
# Add in-progress label to vision issue
|
||||||
|
add_inprogress_label "$vision_issue"
|
||||||
|
|
||||||
pitch_count=$((pitch_count + 1))
|
pitch_count=$((pitch_count + 1))
|
||||||
log "Completed pitch for vision issue #${vision_issue} — PR #${pr_number}"
|
log "Completed pitch for vision issue #${vision_issue} — PR #${pr_number}"
|
||||||
|
|
|
||||||
1097
bin/disinto
1097
bin/disinto
File diff suppressed because it is too large
Load diff
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Dev Agent
|
# Dev Agent
|
||||||
|
|
||||||
**Role**: Implement issues autonomously — write code, push branches, address
|
**Role**: Implement issues autonomously — write code, push branches, address
|
||||||
|
|
@ -29,11 +29,7 @@ stale checks (vision issues are managed by the architect). If the issue is assig
|
||||||
`REQUEST_CHANGES`, spawns the dev-agent to address it before setting `BLOCKED_BY_INPROGRESS=true`;
|
`REQUEST_CHANGES`, spawns the dev-agent to address it before setting `BLOCKED_BY_INPROGRESS=true`;
|
||||||
otherwise just sets blocked. If assigned to another agent, logs and falls through (does not
|
otherwise just sets blocked. If assigned to another agent, logs and falls through (does not
|
||||||
block). If no assignee, no open PR, and no agent lock file — removes `in-progress`, adds
|
block). If no assignee, no open PR, and no agent lock file — removes `in-progress`, adds
|
||||||
`blocked` with a human-triage comment. **Post-crash self-assigned recovery (#749)**: when the
|
`blocked` with a human-triage comment. **Per-agent open-PR gate**: before starting new work,
|
||||||
issue is self-assigned (this bot) but there is no open PR, dev-poll now checks for a lock
|
|
||||||
file (`/tmp/dev-impl-summary-$PROJECT_NAME-$ISSUE_NUM.txt`) AND a remote branch
|
|
||||||
(`fix/issue-$ISSUE_NUM`) before declaring "my thread is busy". If neither exists after a cold
|
|
||||||
boot, it spawns a fresh dev-agent for recovery instead of looping forever. **Per-agent open-PR gate**: before starting new work,
|
|
||||||
filters open waiting PRs to only those assigned to this agent (`$BOT_USER`). Other agents'
|
filters open waiting PRs to only those assigned to this agent (`$BOT_USER`). Other agents'
|
||||||
PRs do not block this agent's pipeline (#358, #369). **Pre-lock merge scan own-PRs only**:
|
PRs do not block this agent's pipeline (#358, #369). **Pre-lock merge scan own-PRs only**:
|
||||||
the direct-merge scan only merges PRs whose linked issue is assigned to this agent — skips
|
the direct-merge scan only merges PRs whose linked issue is assigned to this agent — skips
|
||||||
|
|
@ -55,12 +51,6 @@ PRs owned by other bot users (#374).
|
||||||
|
|
||||||
**Crash recovery**: on `PHASE:crashed` or non-zero exit, the worktree is **preserved** (not destroyed) for debugging. Location logged. Supervisor housekeeping removes stale crashed worktrees older than 24h.
|
**Crash recovery**: on `PHASE:crashed` or non-zero exit, the worktree is **preserved** (not destroyed) for debugging. Location logged. Supervisor housekeeping removes stale crashed worktrees older than 24h.
|
||||||
|
|
||||||
**Polling loop isolation (#753)**: `docker/agents/entrypoint.sh` now tracks fast-poll PIDs
|
|
||||||
(`FAST_PIDS`) and calls `wait "${FAST_PIDS[@]}"` instead of `wait` (no-args). This means
|
|
||||||
long-running dev-agent sessions no longer block the loop from launching the next iteration's
|
|
||||||
fast polls — the loop only waits for review-poll and dev-poll (the fast agents), never for
|
|
||||||
the dev-agent subprocess itself.
|
|
||||||
|
|
||||||
**Lifecycle**: dev-poll.sh (invoked by polling loop, `check_active dev`) → dev-agent.sh →
|
**Lifecycle**: dev-poll.sh (invoked by polling loop, `check_active dev`) → dev-agent.sh →
|
||||||
tmux session → phase file drives CI/review loop → merge + `mirror_push()` → close issue.
|
tmux session → phase file drives CI/review loop → merge + `mirror_push()` → close issue.
|
||||||
On respawn after `PHASE:escalate`, the stale phase file is cleared first so the session
|
On respawn after `PHASE:escalate`, the stale phase file is cleared first so the session
|
||||||
|
|
|
||||||
|
|
@ -254,11 +254,7 @@ agent_recover_session
|
||||||
# WORKTREE SETUP
|
# WORKTREE SETUP
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
status "setting up worktree"
|
status "setting up worktree"
|
||||||
if ! cd "$REPO_ROOT"; then
|
cd "$REPO_ROOT"
|
||||||
log "ERROR: REPO_ROOT=${REPO_ROOT} does not exist — cannot cd"
|
|
||||||
log "Check PROJECT_REPO_ROOT vs compose PROJECT_NAME vs TOML name mismatch"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Determine forge remote by matching FORGE_URL host against git remotes
|
# Determine forge remote by matching FORGE_URL host against git remotes
|
||||||
_forge_host=$(printf '%s' "$FORGE_URL" | sed 's|https\?://||; s|/.*||')
|
_forge_host=$(printf '%s' "$FORGE_URL" | sed 's|https\?://||; s|/.*||')
|
||||||
|
|
|
||||||
|
|
@ -476,19 +476,8 @@ if [ "$ORPHAN_COUNT" -gt 0 ]; then
|
||||||
BLOCKED_BY_INPROGRESS=true
|
BLOCKED_BY_INPROGRESS=true
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
# No open PR — check if a thread is actually alive (lock file or remote branch)
|
log "issue #${ISSUE_NUM} assigned to me — my thread is busy"
|
||||||
LOCK_FILE="/tmp/dev-impl-summary-${PROJECT_NAME}-${ISSUE_NUM}.txt"
|
|
||||||
REMOTE_BRANCH_EXISTS=$(git ls-remote --exit-code origin "fix/issue-${ISSUE_NUM}" >/dev/null 2>&1 && echo yes || echo no)
|
|
||||||
|
|
||||||
if [ -f "$LOCK_FILE" ] || [ "$REMOTE_BRANCH_EXISTS" = "yes" ]; then
|
|
||||||
log "issue #${ISSUE_NUM} assigned to me — my thread is busy (lock=$([ -f "$LOCK_FILE" ] && echo y || echo n) remote_branch=$REMOTE_BRANCH_EXISTS)"
|
|
||||||
BLOCKED_BY_INPROGRESS=true
|
BLOCKED_BY_INPROGRESS=true
|
||||||
else
|
|
||||||
log "issue #${ISSUE_NUM} self-assigned but orphaned (no lock, no branch, no PR) — recovering"
|
|
||||||
nohup "${SCRIPT_DIR}/dev-agent.sh" "$ISSUE_NUM" >> "$LOGFILE" 2>&1 &
|
|
||||||
log "started dev-agent PID $! for issue #${ISSUE_NUM} (post-crash recovery)"
|
|
||||||
BLOCKED_BY_INPROGRESS=true
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
log "issue #${ISSUE_NUM} assigned to ${assignee} — their thread, not blocking"
|
log "issue #${ISSUE_NUM} assigned to ${assignee} — their thread, not blocking"
|
||||||
|
|
|
||||||
|
|
@ -14,9 +14,10 @@ services:
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
||||||
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
||||||
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
||||||
|
- ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
|
||||||
- woodpecker-data:/woodpecker-data:ro
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
environment:
|
environment:
|
||||||
- FORGE_URL=http://forgejo:3000
|
- FORGE_URL=http://forgejo:3000
|
||||||
|
|
@ -29,7 +30,6 @@ services:
|
||||||
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
||||||
- FORGE_PREDICTOR_TOKEN=${FORGE_PREDICTOR_TOKEN:-}
|
- FORGE_PREDICTOR_TOKEN=${FORGE_PREDICTOR_TOKEN:-}
|
||||||
- FORGE_ARCHITECT_TOKEN=${FORGE_ARCHITECT_TOKEN:-}
|
- FORGE_ARCHITECT_TOKEN=${FORGE_ARCHITECT_TOKEN:-}
|
||||||
- FORGE_FILER_TOKEN=${FORGE_FILER_TOKEN:-}
|
|
||||||
- FORGE_BOT_USERNAMES=${FORGE_BOT_USERNAMES:-}
|
- FORGE_BOT_USERNAMES=${FORGE_BOT_USERNAMES:-}
|
||||||
- WOODPECKER_TOKEN=${WOODPECKER_TOKEN:-}
|
- WOODPECKER_TOKEN=${WOODPECKER_TOKEN:-}
|
||||||
- CLAUDE_TIMEOUT=${CLAUDE_TIMEOUT:-7200}
|
- CLAUDE_TIMEOUT=${CLAUDE_TIMEOUT:-7200}
|
||||||
|
|
@ -48,18 +48,9 @@ services:
|
||||||
- GARDENER_INTERVAL=${GARDENER_INTERVAL:-21600}
|
- GARDENER_INTERVAL=${GARDENER_INTERVAL:-21600}
|
||||||
- ARCHITECT_INTERVAL=${ARCHITECT_INTERVAL:-21600}
|
- ARCHITECT_INTERVAL=${ARCHITECT_INTERVAL:-21600}
|
||||||
- PLANNER_INTERVAL=${PLANNER_INTERVAL:-43200}
|
- PLANNER_INTERVAL=${PLANNER_INTERVAL:-43200}
|
||||||
- SUPERVISOR_INTERVAL=${SUPERVISOR_INTERVAL:-1200}
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
|
||||||
interval: 60s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 30s
|
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
- forgejo
|
||||||
condition: service_healthy
|
- woodpecker
|
||||||
woodpecker:
|
|
||||||
condition: service_started
|
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
|
|
@ -76,9 +67,10 @@ services:
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
||||||
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
||||||
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
||||||
|
- ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
|
||||||
- woodpecker-data:/woodpecker-data:ro
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
environment:
|
environment:
|
||||||
- FORGE_URL=http://forgejo:3000
|
- FORGE_URL=http://forgejo:3000
|
||||||
|
|
@ -108,84 +100,9 @@ services:
|
||||||
- CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
- CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
||||||
- POLL_INTERVAL=${POLL_INTERVAL:-300}
|
- POLL_INTERVAL=${POLL_INTERVAL:-300}
|
||||||
- AGENT_ROLES=dev
|
- AGENT_ROLES=dev
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
|
||||||
interval: 60s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 30s
|
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
- forgejo
|
||||||
condition: service_healthy
|
- woodpecker
|
||||||
woodpecker:
|
|
||||||
condition: service_started
|
|
||||||
networks:
|
|
||||||
- disinto-net
|
|
||||||
|
|
||||||
agents-llama-all:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
dockerfile: docker/agents/Dockerfile
|
|
||||||
image: disinto/agents-llama:latest
|
|
||||||
container_name: disinto-agents-llama-all
|
|
||||||
restart: unless-stopped
|
|
||||||
profiles: ["agents-llama-all"]
|
|
||||||
security_opt:
|
|
||||||
- apparmor=unconfined
|
|
||||||
volumes:
|
|
||||||
- agent-data:/home/agent/data
|
|
||||||
- project-repos:/home/agent/repos
|
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
|
||||||
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
|
||||||
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
|
||||||
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
|
||||||
- woodpecker-data:/woodpecker-data:ro
|
|
||||||
environment:
|
|
||||||
- FORGE_URL=http://forgejo:3000
|
|
||||||
- FORGE_REPO=${FORGE_REPO:-disinto-admin/disinto}
|
|
||||||
- FORGE_TOKEN=${FORGE_TOKEN_LLAMA:-}
|
|
||||||
- FORGE_PASS=${FORGE_PASS_LLAMA:-}
|
|
||||||
- FORGE_REVIEW_TOKEN=${FORGE_REVIEW_TOKEN:-}
|
|
||||||
- FORGE_PLANNER_TOKEN=${FORGE_PLANNER_TOKEN:-}
|
|
||||||
- FORGE_GARDENER_TOKEN=${FORGE_GARDENER_TOKEN:-}
|
|
||||||
- FORGE_VAULT_TOKEN=${FORGE_VAULT_TOKEN:-}
|
|
||||||
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
|
||||||
- FORGE_PREDICTOR_TOKEN=${FORGE_PREDICTOR_TOKEN:-}
|
|
||||||
- FORGE_ARCHITECT_TOKEN=${FORGE_ARCHITECT_TOKEN:-}
|
|
||||||
- FORGE_FILER_TOKEN=${FORGE_FILER_TOKEN:-}
|
|
||||||
- FORGE_BOT_USERNAMES=${FORGE_BOT_USERNAMES:-}
|
|
||||||
- WOODPECKER_TOKEN=${WOODPECKER_TOKEN:-}
|
|
||||||
- CLAUDE_TIMEOUT=${CLAUDE_TIMEOUT:-7200}
|
|
||||||
- CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1}
|
|
||||||
- CLAUDE_AUTOCOMPACT_PCT_OVERRIDE=60
|
|
||||||
- CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS=1
|
|
||||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
|
||||||
- ANTHROPIC_BASE_URL=${ANTHROPIC_BASE_URL:-}
|
|
||||||
- FORGE_ADMIN_PASS=${FORGE_ADMIN_PASS:-}
|
|
||||||
- DISINTO_CONTAINER=1
|
|
||||||
- PROJECT_TOML=projects/disinto.toml
|
|
||||||
- PROJECT_NAME=${PROJECT_NAME:-project}
|
|
||||||
- PROJECT_REPO_ROOT=/home/agent/repos/${PROJECT_NAME:-project}
|
|
||||||
- WOODPECKER_DATA_DIR=/woodpecker-data
|
|
||||||
- WOODPECKER_REPO_ID=${WOODPECKER_REPO_ID:-}
|
|
||||||
- CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
|
||||||
- POLL_INTERVAL=${POLL_INTERVAL:-300}
|
|
||||||
- GARDENER_INTERVAL=${GARDENER_INTERVAL:-21600}
|
|
||||||
- ARCHITECT_INTERVAL=${ARCHITECT_INTERVAL:-21600}
|
|
||||||
- PLANNER_INTERVAL=${PLANNER_INTERVAL:-43200}
|
|
||||||
- SUPERVISOR_INTERVAL=${SUPERVISOR_INTERVAL:-1200}
|
|
||||||
- AGENT_ROLES=review,dev,gardener,architect,planner,predictor,supervisor
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
|
||||||
interval: 60s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 30s
|
|
||||||
depends_on:
|
|
||||||
forgejo:
|
|
||||||
condition: service_healthy
|
|
||||||
woodpecker:
|
|
||||||
condition: service_started
|
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
|
|
@ -200,9 +117,9 @@ services:
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${CLAUDE_DIR:-${HOME}/.claude}:/home/agent/.claude
|
- ${HOME}/.claude:/home/agent/.claude
|
||||||
- ${CLAUDE_BIN_DIR:-/usr/local/bin/claude}:/usr/local/bin/claude:ro
|
- /usr/local/bin/claude:/usr/local/bin/claude:ro
|
||||||
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
||||||
env_file:
|
env_file:
|
||||||
- .env
|
- .env
|
||||||
|
|
||||||
|
|
@ -216,9 +133,9 @@ services:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
volumes:
|
volumes:
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
- ${CLAUDE_BIN_DIR:-/usr/local/bin/claude}:/usr/local/bin/claude:ro
|
- /usr/local/bin/claude:/usr/local/bin/claude:ro
|
||||||
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/root/.claude.json:ro
|
- ${HOME}/.claude.json:/root/.claude.json:ro
|
||||||
- ${CLAUDE_DIR:-${HOME}/.claude}:/root/.claude:ro
|
- ${HOME}/.claude:/root/.claude:ro
|
||||||
- disinto-logs:/opt/disinto-logs
|
- disinto-logs:/opt/disinto-logs
|
||||||
environment:
|
environment:
|
||||||
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
||||||
|
|
@ -234,12 +151,6 @@ services:
|
||||||
ports:
|
ports:
|
||||||
- "80:80"
|
- "80:80"
|
||||||
- "443:443"
|
- "443:443"
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "curl", "-fsS", "http://localhost:2019/config/"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 15s
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- forgejo
|
- forgejo
|
||||||
networks:
|
networks:
|
||||||
|
|
@ -260,12 +171,6 @@ services:
|
||||||
- FORGEJO__security__INSTALL_LOCK=true
|
- FORGEJO__security__INSTALL_LOCK=true
|
||||||
- FORGEJO__service__DISABLE_REGISTRATION=true
|
- FORGEJO__service__DISABLE_REGISTRATION=true
|
||||||
- FORGEJO__webhook__ALLOWED_HOST_LIST=private
|
- FORGEJO__webhook__ALLOWED_HOST_LIST=private
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "curl", "-sf", "http://localhost:3000/api/v1/version"]
|
|
||||||
interval: 5s
|
|
||||||
timeout: 3s
|
|
||||||
retries: 30
|
|
||||||
start_period: 30s
|
|
||||||
ports:
|
ports:
|
||||||
- "3000:3000"
|
- "3000:3000"
|
||||||
networks:
|
networks:
|
||||||
|
|
|
||||||
|
|
@ -1,26 +1,21 @@
|
||||||
FROM debian:bookworm-slim
|
FROM debian:bookworm-slim
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
bash curl git jq tmux nodejs npm python3 python3-pip openssh-client ca-certificates age shellcheck procps gosu \
|
bash curl git jq tmux python3 python3-pip openssh-client ca-certificates age shellcheck procps gosu \
|
||||||
&& pip3 install --break-system-packages networkx tomlkit \
|
&& pip3 install --break-system-packages networkx \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Pre-built binaries (copied from docker/agents/bin/)
|
# Pre-built binaries (copied from docker/agents/bin/)
|
||||||
# SOPS — encrypted data decryption tool
|
# SOPS — encrypted data decryption tool
|
||||||
# Download sops binary (replaces manual COPY of vendored binary)
|
COPY docker/agents/bin/sops /usr/local/bin/sops
|
||||||
ARG SOPS_VERSION=3.9.4
|
RUN chmod +x /usr/local/bin/sops
|
||||||
RUN curl -fsSL "https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.linux.amd64" \
|
|
||||||
-o /usr/local/bin/sops && chmod +x /usr/local/bin/sops
|
|
||||||
|
|
||||||
# tea CLI — official Gitea/Forgejo CLI for issue/label/comment operations
|
# tea CLI — official Gitea/Forgejo CLI for issue/label/comment operations
|
||||||
# Download tea binary (replaces manual COPY of vendored binary)
|
COPY docker/agents/bin/tea /usr/local/bin/tea
|
||||||
ARG TEA_VERSION=0.9.2
|
RUN chmod +x /usr/local/bin/tea
|
||||||
RUN curl -fsSL "https://dl.gitea.com/tea/${TEA_VERSION}/tea-${TEA_VERSION}-linux-amd64" \
|
|
||||||
-o /usr/local/bin/tea && chmod +x /usr/local/bin/tea
|
|
||||||
|
|
||||||
# Install Claude Code CLI — agent runtime for all LLM backends (llama, Claude API).
|
# Claude CLI is mounted from the host via docker-compose volume.
|
||||||
# The CLI is the execution environment; ANTHROPIC_BASE_URL selects the model provider.
|
# No internet access to cli.anthropic.com required at build time.
|
||||||
RUN npm install -g @anthropic-ai/claude-code@2.1.84
|
|
||||||
|
|
||||||
# Non-root user
|
# Non-root user
|
||||||
RUN useradd -m -u 1000 -s /bin/bash agent
|
RUN useradd -m -u 1000 -s /bin/bash agent
|
||||||
|
|
@ -33,9 +28,6 @@ RUN chmod +x /entrypoint.sh
|
||||||
|
|
||||||
# Entrypoint runs polling loop directly, dropping to agent user via gosu.
|
# Entrypoint runs polling loop directly, dropping to agent user via gosu.
|
||||||
# All scripts execute as the agent user (UID 1000) while preserving env vars.
|
# All scripts execute as the agent user (UID 1000) while preserving env vars.
|
||||||
VOLUME /home/agent/data
|
|
||||||
VOLUME /home/agent/repos
|
|
||||||
|
|
||||||
WORKDIR /home/agent/disinto
|
WORKDIR /home/agent/disinto
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
ENTRYPOINT ["/entrypoint.sh"]
|
||||||
|
|
|
||||||
|
|
@ -7,47 +7,14 @@ set -euo pipefail
|
||||||
# poll scripts. All Docker Compose env vars are inherited (PATH, FORGE_TOKEN,
|
# poll scripts. All Docker Compose env vars are inherited (PATH, FORGE_TOKEN,
|
||||||
# ANTHROPIC_API_KEY, etc.).
|
# ANTHROPIC_API_KEY, etc.).
|
||||||
#
|
#
|
||||||
# AGENT_ROLES env var controls which scripts run: "review,dev,gardener,architect,planner,predictor,supervisor"
|
# AGENT_ROLES env var controls which scripts run: "review,dev,gardener,architect,planner,predictor"
|
||||||
# (default: all seven). Uses while-true loop with staggered intervals:
|
# (default: all six). Uses while-true loop with staggered intervals:
|
||||||
# - review-poll: every 5 minutes (offset by 0s)
|
# - review-poll: every 5 minutes (offset by 0s)
|
||||||
# - dev-poll: every 5 minutes (offset by 2 minutes)
|
# - dev-poll: every 5 minutes (offset by 2 minutes)
|
||||||
# - gardener: every GARDENER_INTERVAL seconds (default: 21600 = 6 hours)
|
# - gardener: every GARDENER_INTERVAL seconds (default: 21600 = 6 hours)
|
||||||
# - architect: every ARCHITECT_INTERVAL seconds (default: 21600 = 6 hours)
|
# - architect: every ARCHITECT_INTERVAL seconds (default: 21600 = 6 hours)
|
||||||
# - planner: every PLANNER_INTERVAL seconds (default: 43200 = 12 hours)
|
# - planner: every PLANNER_INTERVAL seconds (default: 43200 = 12 hours)
|
||||||
# - predictor: every 24 hours (288 iterations * 5 min)
|
# - predictor: every 24 hours (288 iterations * 5 min)
|
||||||
# - supervisor: every SUPERVISOR_INTERVAL seconds (default: 1200 = 20 min)
|
|
||||||
|
|
||||||
# ── Migration check: reject ENABLE_LLAMA_AGENT ───────────────────────────────
|
|
||||||
# #846: The legacy ENABLE_LLAMA_AGENT env flag is no longer supported.
|
|
||||||
# Activation is now done exclusively via [agents.X] sections in project TOML.
|
|
||||||
# If this legacy flag is detected, fail immediately with a migration message.
|
|
||||||
if [ "${ENABLE_LLAMA_AGENT:-}" = "1" ]; then
|
|
||||||
cat <<'MIGRATION_ERR'
|
|
||||||
FATAL: ENABLE_LLAMA_AGENT is no longer supported.
|
|
||||||
|
|
||||||
The legacy ENABLE_LLAMA_AGENT=1 flag has been removed (#846).
|
|
||||||
Activation is now done exclusively via [agents.X] sections in projects/*.toml.
|
|
||||||
|
|
||||||
To migrate:
|
|
||||||
1. Remove ENABLE_LLAMA_AGENT from your .env or .env.enc file
|
|
||||||
2. Add an [agents.<name>] section to your project TOML:
|
|
||||||
|
|
||||||
[agents.dev-qwen]
|
|
||||||
base_url = "http://your-llama-server:8081"
|
|
||||||
model = "unsloth/Qwen3.5-35B-A3B"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "dev-qwen"
|
|
||||||
compact_pct = 60
|
|
||||||
poll_interval = 60
|
|
||||||
|
|
||||||
3. Run: disinto init
|
|
||||||
4. Start the agent: docker compose up -d agents-dev-qwen
|
|
||||||
|
|
||||||
See docs/agents-llama.md for full details.
|
|
||||||
MIGRATION_ERR
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
DISINTO_BAKED="/home/agent/disinto"
|
DISINTO_BAKED="/home/agent/disinto"
|
||||||
DISINTO_LIVE="/home/agent/repos/_factory"
|
DISINTO_LIVE="/home/agent/repos/_factory"
|
||||||
|
|
@ -82,7 +49,7 @@ source "${DISINTO_BAKED}/lib/git-creds.sh"
|
||||||
# Wrapper that calls the shared configure_git_creds with agent-specific paths,
|
# Wrapper that calls the shared configure_git_creds with agent-specific paths,
|
||||||
# then repairs any legacy baked-credential URLs in existing clones.
|
# then repairs any legacy baked-credential URLs in existing clones.
|
||||||
_setup_git_creds() {
|
_setup_git_creds() {
|
||||||
_GIT_CREDS_LOG_FN=log configure_git_creds "/home/agent" "gosu agent"
|
configure_git_creds "/home/agent" "gosu agent"
|
||||||
if [ -n "${FORGE_PASS:-}" ] && [ -n "${FORGE_URL:-}" ]; then
|
if [ -n "${FORGE_PASS:-}" ] && [ -n "${FORGE_URL:-}" ]; then
|
||||||
log "Git credential helper configured (password auth)"
|
log "Git credential helper configured (password auth)"
|
||||||
fi
|
fi
|
||||||
|
|
@ -94,21 +61,16 @@ _setup_git_creds() {
|
||||||
# Configure git author identity for commits made by this container.
|
# Configure git author identity for commits made by this container.
|
||||||
# Derives identity from the resolved bot user (BOT_USER) to ensure commits
|
# Derives identity from the resolved bot user (BOT_USER) to ensure commits
|
||||||
# are visibly attributable to the correct bot in the forge timeline.
|
# are visibly attributable to the correct bot in the forge timeline.
|
||||||
# BOT_USER is normally set by configure_git_creds() (#741); this function
|
|
||||||
# only falls back to its own API call if BOT_USER was not already resolved.
|
|
||||||
configure_git_identity() {
|
configure_git_identity() {
|
||||||
# Resolve BOT_USER from FORGE_TOKEN if not already set (configure_git_creds
|
# Resolve BOT_USER from FORGE_TOKEN if not already set
|
||||||
# exports BOT_USER on success, so this is a fallback for edge cases only).
|
|
||||||
if [ -z "${BOT_USER:-}" ] && [ -n "${FORGE_TOKEN:-}" ]; then
|
if [ -z "${BOT_USER:-}" ] && [ -n "${FORGE_TOKEN:-}" ]; then
|
||||||
BOT_USER=$(curl -sf --max-time 10 \
|
BOT_USER=$(curl -sf --max-time 10 \
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
"${FORGE_URL:-http://localhost:3000}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || true
|
"${FORGE_URL:-http://localhost:3000}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || true
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z "${BOT_USER:-}" ]; then
|
# Default to dev-bot if resolution fails
|
||||||
log "WARNING: Could not resolve bot username for git identity — commits will use fallback"
|
BOT_USER="${BOT_USER:-dev-bot}"
|
||||||
BOT_USER="agent"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Configure git identity for all repositories
|
# Configure git identity for all repositories
|
||||||
gosu agent git config --global user.name "${BOT_USER}"
|
gosu agent git config --global user.name "${BOT_USER}"
|
||||||
|
|
@ -347,24 +309,6 @@ _setup_git_creds
|
||||||
configure_git_identity
|
configure_git_identity
|
||||||
configure_tea_login
|
configure_tea_login
|
||||||
|
|
||||||
# Parse first available project TOML to get the project name for cloning.
|
|
||||||
# This ensures PROJECT_NAME matches the TOML 'name' field, not the compose
|
|
||||||
# default of 'project'. The clone will land at /home/agent/repos/<toml_name>
|
|
||||||
# and subsequent env exports in the main loop will be consistent.
|
|
||||||
if compgen -G "${DISINTO_DIR}/projects/*.toml" >/dev/null 2>&1; then
|
|
||||||
_first_toml=$(compgen -G "${DISINTO_DIR}/projects/*.toml" | head -1)
|
|
||||||
_pname=$(python3 -c "
|
|
||||||
import sys, tomllib
|
|
||||||
with open(sys.argv[1], 'rb') as f:
|
|
||||||
print(tomllib.load(f).get('name', ''))
|
|
||||||
" "$_first_toml" 2>/dev/null) || _pname=""
|
|
||||||
if [ -n "$_pname" ]; then
|
|
||||||
export PROJECT_NAME="$_pname"
|
|
||||||
export PROJECT_REPO_ROOT="/home/agent/repos/${_pname}"
|
|
||||||
log "Parsed PROJECT_NAME=${PROJECT_NAME} from ${_first_toml}"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Clone project repo on first run (makes agents self-healing, #589)
|
# Clone project repo on first run (makes agents self-healing, #589)
|
||||||
ensure_project_clone
|
ensure_project_clone
|
||||||
|
|
||||||
|
|
@ -374,35 +318,12 @@ bootstrap_ops_repos
|
||||||
# Bootstrap factory repo — switch DISINTO_DIR to live checkout (#593)
|
# Bootstrap factory repo — switch DISINTO_DIR to live checkout (#593)
|
||||||
bootstrap_factory_repo
|
bootstrap_factory_repo
|
||||||
|
|
||||||
# Validate that projects directory has at least one real .toml file (not .example)
|
|
||||||
# This prevents the silent-zombie mode where the polling loop matches zero files
|
|
||||||
# and does nothing forever.
|
|
||||||
validate_projects_dir() {
|
|
||||||
# NOTE: compgen -G exits non-zero when no matches exist, so piping it through
|
|
||||||
# `wc -l` under `set -eo pipefail` aborts the script before the FATAL branch
|
|
||||||
# can log a diagnostic (#877). Use the conditional form already adopted at
|
|
||||||
# lines above (see bootstrap_factory_repo, PROJECT_NAME parsing).
|
|
||||||
if ! compgen -G "${DISINTO_DIR}/projects/*.toml" >/dev/null 2>&1; then
|
|
||||||
log "FATAL: No real .toml files found in ${DISINTO_DIR}/projects/"
|
|
||||||
log "Expected at least one project config file (e.g., disinto.toml)"
|
|
||||||
log "The directory only contains *.toml.example template files."
|
|
||||||
log "Mount the host ./projects volume or copy real .toml files into the container."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
local toml_count
|
|
||||||
toml_count=$(compgen -G "${DISINTO_DIR}/projects/*.toml" | wc -l)
|
|
||||||
log "Projects directory validated: ${toml_count} real .toml file(s) found"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Initialize state directory for check_active guards
|
# Initialize state directory for check_active guards
|
||||||
init_state_dir
|
init_state_dir
|
||||||
|
|
||||||
# Validate projects directory before entering polling loop
|
|
||||||
validate_projects_dir
|
|
||||||
|
|
||||||
# Parse AGENT_ROLES env var (default: all agents)
|
# Parse AGENT_ROLES env var (default: all agents)
|
||||||
# Expected format: comma-separated list like "review,dev,gardener"
|
# Expected format: comma-separated list like "review,dev,gardener"
|
||||||
AGENT_ROLES="${AGENT_ROLES:-review,dev,gardener,architect,planner,predictor,supervisor}"
|
AGENT_ROLES="${AGENT_ROLES:-review,dev,gardener,architect,planner,predictor}"
|
||||||
log "Agent roles configured: ${AGENT_ROLES}"
|
log "Agent roles configured: ${AGENT_ROLES}"
|
||||||
|
|
||||||
# Poll interval in seconds (5 minutes default)
|
# Poll interval in seconds (5 minutes default)
|
||||||
|
|
@ -412,10 +333,9 @@ POLL_INTERVAL="${POLL_INTERVAL:-300}"
|
||||||
GARDENER_INTERVAL="${GARDENER_INTERVAL:-21600}"
|
GARDENER_INTERVAL="${GARDENER_INTERVAL:-21600}"
|
||||||
ARCHITECT_INTERVAL="${ARCHITECT_INTERVAL:-21600}"
|
ARCHITECT_INTERVAL="${ARCHITECT_INTERVAL:-21600}"
|
||||||
PLANNER_INTERVAL="${PLANNER_INTERVAL:-43200}"
|
PLANNER_INTERVAL="${PLANNER_INTERVAL:-43200}"
|
||||||
SUPERVISOR_INTERVAL="${SUPERVISOR_INTERVAL:-1200}"
|
|
||||||
|
|
||||||
log "Entering polling loop (interval: ${POLL_INTERVAL}s, roles: ${AGENT_ROLES})"
|
log "Entering polling loop (interval: ${POLL_INTERVAL}s, roles: ${AGENT_ROLES})"
|
||||||
log "Gardener interval: ${GARDENER_INTERVAL}s, Architect interval: ${ARCHITECT_INTERVAL}s, Planner interval: ${PLANNER_INTERVAL}s, Supervisor interval: ${SUPERVISOR_INTERVAL}s"
|
log "Gardener interval: ${GARDENER_INTERVAL}s, Architect interval: ${ARCHITECT_INTERVAL}s, Planner interval: ${PLANNER_INTERVAL}s"
|
||||||
|
|
||||||
# Main polling loop using iteration counter for gardener scheduling
|
# Main polling loop using iteration counter for gardener scheduling
|
||||||
iteration=0
|
iteration=0
|
||||||
|
|
@ -460,13 +380,11 @@ print(cfg.get('primary_branch', 'main'))
|
||||||
log "Processing project TOML: ${toml}"
|
log "Processing project TOML: ${toml}"
|
||||||
|
|
||||||
# --- Fast agents: run in background, wait before slow agents ---
|
# --- Fast agents: run in background, wait before slow agents ---
|
||||||
FAST_PIDS=()
|
|
||||||
|
|
||||||
# Review poll (every iteration)
|
# Review poll (every iteration)
|
||||||
if [[ ",${AGENT_ROLES}," == *",review,"* ]]; then
|
if [[ ",${AGENT_ROLES}," == *",review,"* ]]; then
|
||||||
log "Running review-poll (iteration ${iteration}) for ${toml}"
|
log "Running review-poll (iteration ${iteration}) for ${toml}"
|
||||||
gosu agent bash -c "cd ${DISINTO_DIR} && bash review/review-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/review-poll.log" 2>&1 &
|
gosu agent bash -c "cd ${DISINTO_DIR} && bash review/review-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/review-poll.log" 2>&1 &
|
||||||
FAST_PIDS+=($!)
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
sleep 2 # stagger fast polls
|
sleep 2 # stagger fast polls
|
||||||
|
|
@ -475,14 +393,10 @@ print(cfg.get('primary_branch', 'main'))
|
||||||
if [[ ",${AGENT_ROLES}," == *",dev,"* ]]; then
|
if [[ ",${AGENT_ROLES}," == *",dev,"* ]]; then
|
||||||
log "Running dev-poll (iteration ${iteration}) for ${toml}"
|
log "Running dev-poll (iteration ${iteration}) for ${toml}"
|
||||||
gosu agent bash -c "cd ${DISINTO_DIR} && bash dev/dev-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/dev-poll.log" 2>&1 &
|
gosu agent bash -c "cd ${DISINTO_DIR} && bash dev/dev-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/dev-poll.log" 2>&1 &
|
||||||
FAST_PIDS+=($!)
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Wait only for THIS iteration's fast polls — long-running gardener/dev-agent
|
# Wait for fast polls to finish before launching slow agents
|
||||||
# from prior iterations must not block us.
|
wait
|
||||||
if [ ${#FAST_PIDS[@]} -gt 0 ]; then
|
|
||||||
wait "${FAST_PIDS[@]}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# --- Slow agents: run in background with pgrep guard ---
|
# --- Slow agents: run in background with pgrep guard ---
|
||||||
|
|
||||||
|
|
@ -538,19 +452,6 @@ print(cfg.get('primary_branch', 'main'))
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Supervisor (interval configurable via SUPERVISOR_INTERVAL env var, default 20 min)
|
|
||||||
if [[ ",${AGENT_ROLES}," == *",supervisor,"* ]]; then
|
|
||||||
supervisor_iteration=$((iteration * POLL_INTERVAL))
|
|
||||||
if [ $((supervisor_iteration % SUPERVISOR_INTERVAL)) -eq 0 ] && [ "$now" -ge "$supervisor_iteration" ]; then
|
|
||||||
if ! pgrep -f "supervisor-run.sh" >/dev/null; then
|
|
||||||
log "Running supervisor (iteration ${iteration}, ${SUPERVISOR_INTERVAL}s interval) for ${toml}"
|
|
||||||
gosu agent bash -c "cd ${DISINTO_DIR} && bash supervisor/supervisor-run.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/supervisor.log" 2>&1 &
|
|
||||||
else
|
|
||||||
log "Skipping supervisor — already running"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
done
|
||||||
|
|
||||||
sleep "${POLL_INTERVAL}"
|
sleep "${POLL_INTERVAL}"
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,6 @@ WORKDIR /var/chat
|
||||||
|
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
|
||||||
CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')" || exit 1
|
CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8080/')" || exit 1
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint-chat.sh"]
|
ENTRYPOINT ["/entrypoint-chat.sh"]
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,8 @@ import secrets
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
|
import urllib.request
|
||||||
|
import urllib.error
|
||||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||||
from urllib.parse import urlparse, parse_qs, urlencode
|
from urllib.parse import urlparse, parse_qs, urlencode
|
||||||
|
|
||||||
|
|
@ -57,6 +59,14 @@ CHAT_MAX_REQUESTS_PER_HOUR = int(os.environ.get("CHAT_MAX_REQUESTS_PER_HOUR", 60
|
||||||
CHAT_MAX_REQUESTS_PER_DAY = int(os.environ.get("CHAT_MAX_REQUESTS_PER_DAY", 500))
|
CHAT_MAX_REQUESTS_PER_DAY = int(os.environ.get("CHAT_MAX_REQUESTS_PER_DAY", 500))
|
||||||
CHAT_MAX_TOKENS_PER_DAY = int(os.environ.get("CHAT_MAX_TOKENS_PER_DAY", 1000000))
|
CHAT_MAX_TOKENS_PER_DAY = int(os.environ.get("CHAT_MAX_TOKENS_PER_DAY", 1000000))
|
||||||
|
|
||||||
|
# Action endpoints configuration (#712)
|
||||||
|
WOODPECKER_TOKEN = os.environ.get("WOODPECKER_TOKEN", "")
|
||||||
|
WOODPECKER_URL = os.environ.get("WOODPECKER_URL", "http://woodpecker:8000")
|
||||||
|
FORGE_TOKEN = os.environ.get("FORGE_TOKEN", "")
|
||||||
|
FORGE_URL = os.environ.get("FORGE_URL", "http://forgejo:3000")
|
||||||
|
FORGE_OWNER = os.environ.get("FORGE_OWNER", "")
|
||||||
|
FORGE_REPO = os.environ.get("FORGE_REPO", "")
|
||||||
|
|
||||||
# Allowed users - disinto-admin always allowed; CSV allowlist extends it
|
# Allowed users - disinto-admin always allowed; CSV allowlist extends it
|
||||||
_allowed_csv = os.environ.get("DISINTO_CHAT_ALLOWED_USERS", "")
|
_allowed_csv = os.environ.get("DISINTO_CHAT_ALLOWED_USERS", "")
|
||||||
ALLOWED_USERS = {"disinto-admin"}
|
ALLOWED_USERS = {"disinto-admin"}
|
||||||
|
|
@ -423,6 +433,181 @@ def _delete_conversation(user, conv_id):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# Action Endpoints (#712)
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def _write_action_record(user, conv_id, action_type, payload, response_data):
|
||||||
|
"""Write an action record to the conversation history."""
|
||||||
|
record = {
|
||||||
|
"ts": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
|
||||||
|
"user": user,
|
||||||
|
"role": "action",
|
||||||
|
"action_type": action_type,
|
||||||
|
"payload": payload,
|
||||||
|
"response": response_data,
|
||||||
|
}
|
||||||
|
conv_path = _get_conversation_path(user, conv_id)
|
||||||
|
_ensure_user_dir(user)
|
||||||
|
with open(conv_path, "a", encoding="utf-8") as f:
|
||||||
|
f.write(json.dumps(record, ensure_ascii=False) + "\n")
|
||||||
|
|
||||||
|
|
||||||
|
def _trigger_woodpecker_pipeline(repo, branch):
|
||||||
|
"""Trigger a Woodpecker CI pipeline for the given repo and branch.
|
||||||
|
|
||||||
|
Woodpecker API: POST /api/v1/repos/{owner}/{repo}/pipeline
|
||||||
|
Returns dict with success status and response data.
|
||||||
|
"""
|
||||||
|
if not WOODPECKER_TOKEN:
|
||||||
|
return {"success": False, "error": "WOODPECKER_TOKEN not configured"}
|
||||||
|
|
||||||
|
if not FORGE_OWNER or not FORGE_REPO:
|
||||||
|
return {"success": False, "error": "FORGE_OWNER and FORGE_REPO not configured"}
|
||||||
|
|
||||||
|
try:
|
||||||
|
url = f"{WOODPECKER_URL}/api/v1/repos/{FORGE_OWNER}/{FORGE_REPO}/pipeline"
|
||||||
|
data = json.dumps({"branch": branch, "event": "push"}).encode("utf-8")
|
||||||
|
|
||||||
|
req = urllib.request.Request(
|
||||||
|
url,
|
||||||
|
data=data,
|
||||||
|
headers={
|
||||||
|
"Authorization": f"token {WOODPECKER_TOKEN}",
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Accept": "application/json",
|
||||||
|
},
|
||||||
|
method="POST",
|
||||||
|
)
|
||||||
|
|
||||||
|
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||||
|
result = json.loads(resp.read().decode())
|
||||||
|
return {"success": True, "data": result}
|
||||||
|
|
||||||
|
except urllib.error.HTTPError as e:
|
||||||
|
try:
|
||||||
|
body = json.loads(e.read().decode())
|
||||||
|
return {"success": False, "error": str(e.reason), "details": body}
|
||||||
|
except (json.JSONDecodeError, UnicodeDecodeError):
|
||||||
|
return {"success": False, "error": str(e.reason)}
|
||||||
|
except urllib.error.URLError as e:
|
||||||
|
return {"success": False, "error": f"Network error: {e.reason}"}
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
return {"success": False, "error": f"Invalid JSON response: {e}"}
|
||||||
|
except Exception as e:
|
||||||
|
return {"success": False, "error": str(e)}
|
||||||
|
|
||||||
|
|
||||||
|
def _create_forgejo_issue(title, body, labels=None):
|
||||||
|
"""Create a Forgejo issue.
|
||||||
|
|
||||||
|
Forgejo API: POST /api/v1/repos/{owner}/{repo}/issues
|
||||||
|
Returns dict with success status and response data.
|
||||||
|
"""
|
||||||
|
if not FORGE_TOKEN:
|
||||||
|
return {"success": False, "error": "FORGE_TOKEN not configured"}
|
||||||
|
|
||||||
|
if not FORGE_OWNER or not FORGE_REPO:
|
||||||
|
return {"success": False, "error": "FORGE_OWNER and FORGE_REPO not configured"}
|
||||||
|
|
||||||
|
if not title:
|
||||||
|
return {"success": False, "error": "Title is required"}
|
||||||
|
|
||||||
|
try:
|
||||||
|
url = f"{FORGE_URL}/api/v1/repos/{FORGE_OWNER}/{FORGE_REPO}/issues"
|
||||||
|
payload = {
|
||||||
|
"title": title,
|
||||||
|
"body": body or "",
|
||||||
|
}
|
||||||
|
if labels:
|
||||||
|
payload["labels"] = labels
|
||||||
|
|
||||||
|
data = json.dumps(payload).encode("utf-8")
|
||||||
|
|
||||||
|
req = urllib.request.Request(
|
||||||
|
url,
|
||||||
|
data=data,
|
||||||
|
headers={
|
||||||
|
"Authorization": f"token {FORGE_TOKEN}",
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Accept": "application/json",
|
||||||
|
},
|
||||||
|
method="POST",
|
||||||
|
)
|
||||||
|
|
||||||
|
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||||
|
result = json.loads(resp.read().decode())
|
||||||
|
return {"success": True, "data": result}
|
||||||
|
|
||||||
|
except urllib.error.HTTPError as e:
|
||||||
|
try:
|
||||||
|
body = json.loads(e.read().decode())
|
||||||
|
return {"success": False, "error": str(e.reason), "details": body}
|
||||||
|
except (json.JSONDecodeError, UnicodeDecodeError):
|
||||||
|
return {"success": False, "error": str(e.reason)}
|
||||||
|
except urllib.error.URLError as e:
|
||||||
|
return {"success": False, "error": f"Network error: {e.reason}"}
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
return {"success": False, "error": f"Invalid JSON response: {e}"}
|
||||||
|
except Exception as e:
|
||||||
|
return {"success": False, "error": str(e)}
|
||||||
|
|
||||||
|
|
||||||
|
def _create_forgejo_pull_request(head, base, title, body=None):
|
||||||
|
"""Create a Forgejo pull request.
|
||||||
|
|
||||||
|
Forgejo API: POST /api/v1/repos/{owner}/{repo}/pulls
|
||||||
|
Returns dict with success status and response data.
|
||||||
|
"""
|
||||||
|
if not FORGE_TOKEN:
|
||||||
|
return {"success": False, "error": "FORGE_TOKEN not configured"}
|
||||||
|
|
||||||
|
if not FORGE_OWNER or not FORGE_REPO:
|
||||||
|
return {"success": False, "error": "FORGE_OWNER and FORGE_REPO not configured"}
|
||||||
|
|
||||||
|
if not head or not base or not title:
|
||||||
|
return {"success": False, "error": "head, base, and title are required"}
|
||||||
|
|
||||||
|
try:
|
||||||
|
url = f"{FORGE_URL}/api/v1/repos/{FORGE_OWNER}/{FORGE_REPO}/pulls"
|
||||||
|
payload = {
|
||||||
|
"head": head,
|
||||||
|
"base": base,
|
||||||
|
"title": title,
|
||||||
|
"body": body or "",
|
||||||
|
}
|
||||||
|
|
||||||
|
data = json.dumps(payload).encode("utf-8")
|
||||||
|
|
||||||
|
req = urllib.request.Request(
|
||||||
|
url,
|
||||||
|
data=data,
|
||||||
|
headers={
|
||||||
|
"Authorization": f"token {FORGE_TOKEN}",
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Accept": "application/json",
|
||||||
|
},
|
||||||
|
method="POST",
|
||||||
|
)
|
||||||
|
|
||||||
|
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||||
|
result = json.loads(resp.read().decode())
|
||||||
|
return {"success": True, "data": result}
|
||||||
|
|
||||||
|
except urllib.error.HTTPError as e:
|
||||||
|
try:
|
||||||
|
body = json.loads(e.read().decode())
|
||||||
|
return {"success": False, "error": str(e.reason), "details": body}
|
||||||
|
except (json.JSONDecodeError, UnicodeDecodeError):
|
||||||
|
return {"success": False, "error": str(e.reason)}
|
||||||
|
except urllib.error.URLError as e:
|
||||||
|
return {"success": False, "error": f"Network error: {e.reason}"}
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
return {"success": False, "error": f"Invalid JSON response: {e}"}
|
||||||
|
except Exception as e:
|
||||||
|
return {"success": False, "error": str(e)}
|
||||||
|
|
||||||
|
|
||||||
class ChatHandler(BaseHTTPRequestHandler):
|
class ChatHandler(BaseHTTPRequestHandler):
|
||||||
"""HTTP request handler for disinto-chat with Forgejo OAuth."""
|
"""HTTP request handler for disinto-chat with Forgejo OAuth."""
|
||||||
|
|
||||||
|
|
@ -481,14 +666,6 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
parsed = urlparse(self.path)
|
parsed = urlparse(self.path)
|
||||||
path = parsed.path
|
path = parsed.path
|
||||||
|
|
||||||
# Health endpoint (no auth required) — used by Docker healthcheck
|
|
||||||
if path == "/health":
|
|
||||||
self.send_response(200)
|
|
||||||
self.send_header("Content-Type", "text/plain")
|
|
||||||
self.end_headers()
|
|
||||||
self.wfile.write(b"ok\n")
|
|
||||||
return
|
|
||||||
|
|
||||||
# Verify endpoint for Caddy forward_auth (#709)
|
# Verify endpoint for Caddy forward_auth (#709)
|
||||||
if path == "/chat/auth/verify":
|
if path == "/chat/auth/verify":
|
||||||
self.handle_auth_verify()
|
self.handle_auth_verify()
|
||||||
|
|
@ -557,6 +734,16 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
parsed = urlparse(self.path)
|
parsed = urlparse(self.path)
|
||||||
path = parsed.path
|
path = parsed.path
|
||||||
|
|
||||||
|
# Action endpoints (#712)
|
||||||
|
if path in ("/chat/action/ci-run", "/chat/action/issue-create", "/chat/action/pr-create"):
|
||||||
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
|
return
|
||||||
|
if not self._check_forwarded_user(user):
|
||||||
|
return
|
||||||
|
self.handle_action(user, path)
|
||||||
|
return
|
||||||
|
|
||||||
# New conversation endpoint (session required)
|
# New conversation endpoint (session required)
|
||||||
if path == "/chat/new":
|
if path == "/chat/new":
|
||||||
user = self._require_session()
|
user = self._require_session()
|
||||||
|
|
@ -909,6 +1096,75 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
self.end_headers()
|
self.end_headers()
|
||||||
self.wfile.write(json.dumps({"conversation_id": conv_id}, ensure_ascii=False).encode("utf-8"))
|
self.wfile.write(json.dumps({"conversation_id": conv_id}, ensure_ascii=False).encode("utf-8"))
|
||||||
|
|
||||||
|
def handle_action(self, user, path):
|
||||||
|
"""Handle action requests (ci-run, issue-create, pr-create)."""
|
||||||
|
# Determine action type from path
|
||||||
|
action_type = path.replace("/chat/action/", "")
|
||||||
|
|
||||||
|
# Read request body
|
||||||
|
content_length = int(self.headers.get("Content-Length", 0))
|
||||||
|
if content_length == 0:
|
||||||
|
self.send_error_page(400, "No request body provided")
|
||||||
|
return
|
||||||
|
|
||||||
|
body = self.rfile.read(content_length)
|
||||||
|
try:
|
||||||
|
request_data = json.loads(body.decode("utf-8"))
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
self.send_error_page(400, "Invalid JSON in request body")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Get conversation ID from request or session
|
||||||
|
conv_id = request_data.get("conversation_id")
|
||||||
|
if not conv_id or not _validate_conversation_id(conv_id):
|
||||||
|
# Fall back to session-based conversation if available
|
||||||
|
# For now, we'll use a default or generate one
|
||||||
|
conv_id = request_data.get("conversation_id")
|
||||||
|
if not conv_id:
|
||||||
|
self.send_error_page(400, "conversation_id is required")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Route to appropriate handler
|
||||||
|
if action_type == "ci-run":
|
||||||
|
repo = request_data.get("repo")
|
||||||
|
branch = request_data.get("branch")
|
||||||
|
if not repo or not branch:
|
||||||
|
self.send_error_page(400, "repo and branch are required for ci-run")
|
||||||
|
return
|
||||||
|
payload = {"repo": repo, "branch": branch}
|
||||||
|
result = _trigger_woodpecker_pipeline(repo, branch)
|
||||||
|
elif action_type == "issue-create":
|
||||||
|
title = request_data.get("title")
|
||||||
|
body_text = request_data.get("body", "")
|
||||||
|
labels = request_data.get("labels", [])
|
||||||
|
if not title:
|
||||||
|
self.send_error_page(400, "title is required for issue-create")
|
||||||
|
return
|
||||||
|
payload = {"title": title, "body": body_text, "labels": labels}
|
||||||
|
result = _create_forgejo_issue(title, body_text, labels)
|
||||||
|
elif action_type == "pr-create":
|
||||||
|
head = request_data.get("head")
|
||||||
|
base = request_data.get("base")
|
||||||
|
title = request_data.get("title")
|
||||||
|
body_text = request_data.get("body", "")
|
||||||
|
if not head or not base or not title:
|
||||||
|
self.send_error_page(400, "head, base, and title are required for pr-create")
|
||||||
|
return
|
||||||
|
payload = {"head": head, "base": base, "title": title, "body": body_text}
|
||||||
|
result = _create_forgejo_pull_request(head, base, title, body_text)
|
||||||
|
else:
|
||||||
|
self.send_error_page(404, f"Unknown action type: {action_type}")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Log the action to history
|
||||||
|
_write_action_record(user, conv_id, action_type, payload, result)
|
||||||
|
|
||||||
|
# Send response
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header("Content-Type", "application/json; charset=utf-8")
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(json.dumps(result, ensure_ascii=False).encode("utf-8"))
|
||||||
|
|
||||||
def do_DELETE(self):
|
def do_DELETE(self):
|
||||||
"""Handle DELETE requests."""
|
"""Handle DELETE requests."""
|
||||||
parsed = urlparse(self.path)
|
parsed = urlparse(self.path)
|
||||||
|
|
|
||||||
|
|
@ -161,6 +161,56 @@
|
||||||
white-space: pre-wrap;
|
white-space: pre-wrap;
|
||||||
word-wrap: break-word;
|
word-wrap: break-word;
|
||||||
}
|
}
|
||||||
|
/* Action button container */
|
||||||
|
.action-buttons {
|
||||||
|
margin-top: 0.75rem;
|
||||||
|
display: flex;
|
||||||
|
flex-wrap: wrap;
|
||||||
|
gap: 0.5rem;
|
||||||
|
}
|
||||||
|
.action-btn {
|
||||||
|
background: #0f3460;
|
||||||
|
border: 1px solid #e94560;
|
||||||
|
color: #e94560;
|
||||||
|
padding: 0.5rem 1rem;
|
||||||
|
border-radius: 6px;
|
||||||
|
font-size: 0.875rem;
|
||||||
|
font-weight: 600;
|
||||||
|
cursor: pointer;
|
||||||
|
transition: all 0.2s;
|
||||||
|
display: inline-flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 0.5rem;
|
||||||
|
}
|
||||||
|
.action-btn:hover {
|
||||||
|
background: #e94560;
|
||||||
|
color: white;
|
||||||
|
}
|
||||||
|
.action-btn:disabled {
|
||||||
|
opacity: 0.5;
|
||||||
|
cursor: not-allowed;
|
||||||
|
}
|
||||||
|
.action-btn .spinner {
|
||||||
|
width: 14px;
|
||||||
|
height: 14px;
|
||||||
|
border: 2px solid currentColor;
|
||||||
|
border-top-color: transparent;
|
||||||
|
border-radius: 50%;
|
||||||
|
animation: spin 0.8s linear infinite;
|
||||||
|
}
|
||||||
|
@keyframes spin {
|
||||||
|
to { transform: rotate(360deg); }
|
||||||
|
}
|
||||||
|
.action-btn.success {
|
||||||
|
background: #1a1a2e;
|
||||||
|
border-color: #4ade80;
|
||||||
|
color: #4ade80;
|
||||||
|
}
|
||||||
|
.action-btn.error {
|
||||||
|
background: #1a1a2e;
|
||||||
|
border-color: #f87171;
|
||||||
|
color: #f87171;
|
||||||
|
}
|
||||||
.input-area {
|
.input-area {
|
||||||
display: flex;
|
display: flex;
|
||||||
gap: 0.5rem;
|
gap: 0.5rem;
|
||||||
|
|
@ -404,11 +454,28 @@
|
||||||
function addMessage(role, content, streaming = false) {
|
function addMessage(role, content, streaming = false) {
|
||||||
const msgDiv = document.createElement('div');
|
const msgDiv = document.createElement('div');
|
||||||
msgDiv.className = `message ${role}`;
|
msgDiv.className = `message ${role}`;
|
||||||
|
|
||||||
|
// Parse action markers if this is an assistant message
|
||||||
|
let contentHtml = escapeHtml(content);
|
||||||
|
let actions = [];
|
||||||
|
|
||||||
|
if (role === 'assistant' && !streaming) {
|
||||||
|
const parsed = parseActionMarkers(content, messagesDiv.children.length);
|
||||||
|
contentHtml = parsed.html;
|
||||||
|
actions = parsed.actions;
|
||||||
|
}
|
||||||
|
|
||||||
msgDiv.innerHTML = `
|
msgDiv.innerHTML = `
|
||||||
<div class="role">${role}</div>
|
<div class="role">${role}</div>
|
||||||
<div class="content${streaming ? ' streaming' : ''}">${escapeHtml(content)}</div>
|
<div class="content${streaming ? ' streaming' : ''}">${contentHtml}</div>
|
||||||
`;
|
`;
|
||||||
messagesDiv.appendChild(msgDiv);
|
messagesDiv.appendChild(msgDiv);
|
||||||
|
|
||||||
|
// Render action buttons for assistant messages
|
||||||
|
if (actions.length > 0) {
|
||||||
|
renderActionButtons(msgDiv, actions, messagesDiv.children.length - 1);
|
||||||
|
}
|
||||||
|
|
||||||
messagesDiv.scrollTop = messagesDiv.scrollHeight;
|
messagesDiv.scrollTop = messagesDiv.scrollHeight;
|
||||||
return msgDiv.querySelector('.content');
|
return msgDiv.querySelector('.content');
|
||||||
}
|
}
|
||||||
|
|
@ -430,6 +497,121 @@
|
||||||
return div.innerHTML.replace(/\n/g, '<br>');
|
return div.innerHTML.replace(/\n/g, '<br>');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Action buttons state - track pending actions by message index
|
||||||
|
const pendingActions = new Map();
|
||||||
|
|
||||||
|
// Parse action markers from content and return HTML with action buttons
|
||||||
|
function parseActionMarkers(content, messageIndex) {
|
||||||
|
const actionPattern = /<action type="([^"]+)">(.*?)<\/action>/gs;
|
||||||
|
const hasActions = actionPattern.test(content);
|
||||||
|
|
||||||
|
if (!hasActions) {
|
||||||
|
return { html: escapeHtml(content), actions: [] };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset pending actions for this message
|
||||||
|
pendingActions.set(messageIndex, []);
|
||||||
|
|
||||||
|
let html = content;
|
||||||
|
const actions = [];
|
||||||
|
|
||||||
|
// Replace action markers with placeholders and collect actions
|
||||||
|
html = html.replace(actionPattern, (match, type, jsonStr) => {
|
||||||
|
try {
|
||||||
|
const action = JSON.parse(jsonStr);
|
||||||
|
actions.push({ type, payload: action, id: `${messageIndex}-${actions.length}` });
|
||||||
|
// Replace with placeholder that will be rendered as button
|
||||||
|
return `<div class="action-placeholder" data-action-id="${actions[actions.length - 1].id}"></div>`;
|
||||||
|
} catch (e) {
|
||||||
|
// If JSON parsing fails, keep the original marker
|
||||||
|
return match;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Convert newlines to <br> for HTML output
|
||||||
|
html = html.replace(/\n/g, '<br>');
|
||||||
|
|
||||||
|
return { html, actions };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render action buttons for a message
|
||||||
|
function renderActionButtons(msgDiv, actions, messageIndex) {
|
||||||
|
if (actions.length === 0) return;
|
||||||
|
|
||||||
|
const buttonsDiv = document.createElement('div');
|
||||||
|
buttonsDiv.className = 'action-buttons';
|
||||||
|
|
||||||
|
actions.forEach(action => {
|
||||||
|
const btn = document.createElement('button');
|
||||||
|
btn.className = 'action-btn';
|
||||||
|
btn.dataset.actionId = action.id;
|
||||||
|
btn.dataset.messageIndex = messageIndex;
|
||||||
|
|
||||||
|
let btnText = 'Execute';
|
||||||
|
let icon = '';
|
||||||
|
|
||||||
|
switch (action.type) {
|
||||||
|
case 'ci-run':
|
||||||
|
icon = '🚀';
|
||||||
|
btnText = `Run CI for ${action.payload.branch || 'default'}`;
|
||||||
|
break;
|
||||||
|
case 'issue-create':
|
||||||
|
icon = '📝';
|
||||||
|
btnText = `Create Issue: ${action.payload.title ? action.payload.title.substring(0, 30) + (action.payload.title.length > 30 ? '...' : '') : 'New Issue'}`;
|
||||||
|
break;
|
||||||
|
case 'pr-create':
|
||||||
|
icon = '🔀';
|
||||||
|
btnText = `Create PR: ${action.payload.title ? action.payload.title.substring(0, 30) + (action.payload.title.length > 30 ? '...' : '') : 'New PR'}`;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
btnText = `Execute ${action.type}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
btn.innerHTML = `<span>${icon}</span><span>${btnText}</span>`;
|
||||||
|
|
||||||
|
btn.addEventListener('click', () => executeAction(btn, action));
|
||||||
|
buttonsDiv.appendChild(btn);
|
||||||
|
});
|
||||||
|
|
||||||
|
msgDiv.appendChild(buttonsDiv);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute an action
|
||||||
|
async function executeAction(btn, action) {
|
||||||
|
const messageIndex = btn.dataset.messageIndex;
|
||||||
|
const actionId = btn.dataset.actionId;
|
||||||
|
|
||||||
|
// Disable button and show loading state
|
||||||
|
btn.disabled = true;
|
||||||
|
btn.innerHTML = '<span class="spinner"></span> Executing...';
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await fetch(`/chat/action/${action.type}`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
},
|
||||||
|
body: JSON.stringify({
|
||||||
|
...action.payload,
|
||||||
|
conversation_id: currentConversationId,
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
|
||||||
|
const result = await response.json();
|
||||||
|
|
||||||
|
if (result.success) {
|
||||||
|
btn.className = 'action-btn success';
|
||||||
|
btn.innerHTML = '<span>✓</span> Executed successfully';
|
||||||
|
} else {
|
||||||
|
btn.className = 'action-btn error';
|
||||||
|
btn.innerHTML = `<span>✗</span> Error: ${result.error || 'Unknown error'}`;
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
btn.className = 'action-btn error';
|
||||||
|
btn.innerHTML = `<span>✗</span> Error: ${error.message}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Send message handler
|
// Send message handler
|
||||||
async function sendMessage() {
|
async function sendMessage() {
|
||||||
const message = textarea.value.trim();
|
const message = textarea.value.trim();
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,4 @@
|
||||||
FROM caddy:latest
|
FROM caddy:latest
|
||||||
RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh
|
RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh
|
||||||
COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh
|
COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh
|
||||||
|
|
||||||
VOLUME /data
|
|
||||||
|
|
||||||
ENTRYPOINT ["bash", "/usr/local/bin/entrypoint-edge.sh"]
|
ENTRYPOINT ["bash", "/usr/local/bin/entrypoint-edge.sh"]
|
||||||
|
|
|
||||||
|
|
@ -8,8 +8,8 @@
|
||||||
# 2. Scan vault/actions/ for TOML files without .result.json
|
# 2. Scan vault/actions/ for TOML files without .result.json
|
||||||
# 3. Verify TOML arrived via merged PR with admin merger (Forgejo API)
|
# 3. Verify TOML arrived via merged PR with admin merger (Forgejo API)
|
||||||
# 4. Validate TOML using vault-env.sh validator
|
# 4. Validate TOML using vault-env.sh validator
|
||||||
# 5. Decrypt declared secrets via load_secret (lib/env.sh)
|
# 5. Decrypt .env.vault.enc and extract only declared secrets
|
||||||
# 6. Launch: delegate to _launch_runner_{docker,nomad} backend
|
# 6. Launch: docker run --rm disinto/agents:latest <action-id>
|
||||||
# 7. Write <action-id>.result.json with exit code, timestamp, logs summary
|
# 7. Write <action-id>.result.json with exit code, timestamp, logs summary
|
||||||
#
|
#
|
||||||
# Part of #76.
|
# Part of #76.
|
||||||
|
|
@ -19,7 +19,7 @@ set -euo pipefail
|
||||||
# Resolve script root (parent of lib/)
|
# Resolve script root (parent of lib/)
|
||||||
SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
|
||||||
# Source shared environment (provides load_secret, log helpers, etc.)
|
# Source shared environment
|
||||||
source "${SCRIPT_ROOT}/../lib/env.sh"
|
source "${SCRIPT_ROOT}/../lib/env.sh"
|
||||||
|
|
||||||
# Project TOML location: prefer mounted path, fall back to cloned path
|
# Project TOML location: prefer mounted path, fall back to cloned path
|
||||||
|
|
@ -27,18 +27,26 @@ source "${SCRIPT_ROOT}/../lib/env.sh"
|
||||||
# the shallow clone only has .toml.example files.
|
# the shallow clone only has .toml.example files.
|
||||||
PROJECTS_DIR="${PROJECTS_DIR:-${FACTORY_ROOT:-/opt/disinto}-projects}"
|
PROJECTS_DIR="${PROJECTS_DIR:-${FACTORY_ROOT:-/opt/disinto}-projects}"
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# Load vault secrets after env.sh (env.sh unsets them for agent security)
|
||||||
# Backend selection: DISPATCHER_BACKEND={docker,nomad}
|
# Vault secrets must be available to the dispatcher
|
||||||
# Default: docker. nomad lands as a pure addition during migration Step 5.
|
if [ -f "$FACTORY_ROOT/.env.vault.enc" ] && command -v sops &>/dev/null; then
|
||||||
# -----------------------------------------------------------------------------
|
set -a
|
||||||
DISPATCHER_BACKEND="${DISPATCHER_BACKEND:-docker}"
|
eval "$(sops -d --output-type dotenv "$FACTORY_ROOT/.env.vault.enc" 2>/dev/null)" \
|
||||||
|
|| echo "Warning: failed to decrypt .env.vault.enc — vault secrets not loaded" >&2
|
||||||
|
set +a
|
||||||
|
elif [ -f "$FACTORY_ROOT/.env.vault" ]; then
|
||||||
|
set -a
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
source "$FACTORY_ROOT/.env.vault"
|
||||||
|
set +a
|
||||||
|
fi
|
||||||
|
|
||||||
# Ops repo location (vault/actions directory)
|
# Ops repo location (vault/actions directory)
|
||||||
OPS_REPO_ROOT="${OPS_REPO_ROOT:-/home/debian/disinto-ops}"
|
OPS_REPO_ROOT="${OPS_REPO_ROOT:-/home/debian/disinto-ops}"
|
||||||
VAULT_ACTIONS_DIR="${OPS_REPO_ROOT}/vault/actions"
|
VAULT_ACTIONS_DIR="${OPS_REPO_ROOT}/vault/actions"
|
||||||
|
|
||||||
# Vault action validation
|
# Vault action validation
|
||||||
VAULT_ENV="${SCRIPT_ROOT}/../action-vault/vault-env.sh"
|
VAULT_ENV="${SCRIPT_ROOT}/../vault/vault-env.sh"
|
||||||
|
|
||||||
# Admin users who can merge vault PRs (from issue #77)
|
# Admin users who can merge vault PRs (from issue #77)
|
||||||
# Comma-separated list of Forgejo usernames with admin role
|
# Comma-separated list of Forgejo usernames with admin role
|
||||||
|
|
@ -342,113 +350,73 @@ get_dispatch_mode() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# Commit result.json to the ops repo via git push (portable, no bind-mount).
|
# Write result file for an action
|
||||||
#
|
|
||||||
# Clones the ops repo into a scratch directory, writes the result file,
|
|
||||||
# commits as vault-bot, and pushes to the primary branch.
|
|
||||||
# Idempotent: skips if result.json already exists upstream.
|
|
||||||
# Retries on push conflict with rebase-and-push (handles concurrent merges).
|
|
||||||
#
|
|
||||||
# Usage: commit_result_via_git <action_id> <exit_code> <logs>
|
|
||||||
commit_result_via_git() {
|
|
||||||
local action_id="$1"
|
|
||||||
local exit_code="$2"
|
|
||||||
local logs="$3"
|
|
||||||
|
|
||||||
local result_relpath="vault/actions/${action_id}.result.json"
|
|
||||||
local ops_clone_url="${FORGE_URL}/${FORGE_OPS_REPO}.git"
|
|
||||||
local branch="${PRIMARY_BRANCH:-main}"
|
|
||||||
local scratch_dir
|
|
||||||
scratch_dir=$(mktemp -d /tmp/dispatcher-result-XXXXXX)
|
|
||||||
# shellcheck disable=SC2064
|
|
||||||
trap "rm -rf '${scratch_dir}'" RETURN
|
|
||||||
|
|
||||||
# Shallow clone of the ops repo — only the primary branch
|
|
||||||
if ! git clone --depth 1 --branch "$branch" \
|
|
||||||
"$ops_clone_url" "$scratch_dir" 2>/dev/null; then
|
|
||||||
log "ERROR: Failed to clone ops repo for result commit (action ${action_id})"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Idempotency: skip if result.json already exists upstream
|
|
||||||
if [ -f "${scratch_dir}/${result_relpath}" ]; then
|
|
||||||
log "Result already exists upstream for ${action_id} — skipping commit"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Configure git identity as vault-bot
|
|
||||||
git -C "$scratch_dir" config user.name "vault-bot"
|
|
||||||
git -C "$scratch_dir" config user.email "vault-bot@disinto.local"
|
|
||||||
|
|
||||||
# Truncate logs if too long (keep last 1000 chars)
|
|
||||||
if [ ${#logs} -gt 1000 ]; then
|
|
||||||
logs="${logs: -1000}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Write result JSON via jq (never string-interpolate into JSON)
|
|
||||||
mkdir -p "$(dirname "${scratch_dir}/${result_relpath}")"
|
|
||||||
jq -n \
|
|
||||||
--arg id "$action_id" \
|
|
||||||
--argjson exit_code "$exit_code" \
|
|
||||||
--arg timestamp "$(date -u '+%Y-%m-%dT%H:%M:%SZ')" \
|
|
||||||
--arg logs "$logs" \
|
|
||||||
'{id: $id, exit_code: $exit_code, timestamp: $timestamp, logs: $logs}' \
|
|
||||||
> "${scratch_dir}/${result_relpath}"
|
|
||||||
|
|
||||||
git -C "$scratch_dir" add "$result_relpath"
|
|
||||||
git -C "$scratch_dir" commit -q -m "vault: result for ${action_id}"
|
|
||||||
|
|
||||||
# Push with retry on conflict (rebase-and-push pattern).
|
|
||||||
# Common case: admin merges another action PR between our clone and push.
|
|
||||||
local attempt
|
|
||||||
for attempt in 1 2 3; do
|
|
||||||
if git -C "$scratch_dir" push origin "$branch" 2>/dev/null; then
|
|
||||||
log "Result committed and pushed for ${action_id} (attempt ${attempt})"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
log "Push conflict for ${action_id} (attempt ${attempt}/3) — rebasing"
|
|
||||||
|
|
||||||
if ! git -C "$scratch_dir" pull --rebase origin "$branch" 2>/dev/null; then
|
|
||||||
# Rebase conflict — check if result was pushed by another process
|
|
||||||
git -C "$scratch_dir" rebase --abort 2>/dev/null || true
|
|
||||||
if git -C "$scratch_dir" fetch origin "$branch" 2>/dev/null && \
|
|
||||||
git -C "$scratch_dir" show "origin/${branch}:${result_relpath}" >/dev/null 2>&1; then
|
|
||||||
log "Result already exists upstream for ${action_id} (pushed by another process)"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
log "ERROR: Failed to push result for ${action_id} after 3 attempts"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Write result file for an action via git push to the ops repo.
|
|
||||||
# Usage: write_result <action_id> <exit_code> <logs>
|
# Usage: write_result <action_id> <exit_code> <logs>
|
||||||
write_result() {
|
write_result() {
|
||||||
local action_id="$1"
|
local action_id="$1"
|
||||||
local exit_code="$2"
|
local exit_code="$2"
|
||||||
local logs="$3"
|
local logs="$3"
|
||||||
|
|
||||||
commit_result_via_git "$action_id" "$exit_code" "$logs"
|
local result_file="${VAULT_ACTIONS_DIR}/${action_id}.result.json"
|
||||||
|
|
||||||
|
# Truncate logs if too long (keep last 1000 chars)
|
||||||
|
if [ ${#logs} -gt 1000 ]; then
|
||||||
|
logs="${logs: -1000}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Write result JSON
|
||||||
|
jq -n \
|
||||||
|
--arg id "$action_id" \
|
||||||
|
--argjson exit_code "$exit_code" \
|
||||||
|
--arg timestamp "$(date -u '+%Y-%m-%dT%H:%M:%SZ')" \
|
||||||
|
--arg logs "$logs" \
|
||||||
|
'{id: $id, exit_code: $exit_code, timestamp: $timestamp, logs: $logs}' \
|
||||||
|
> "$result_file"
|
||||||
|
|
||||||
|
log "Result written: ${result_file}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# Launch runner for the given action
|
||||||
# Pluggable launcher backends
|
# Usage: launch_runner <toml_file>
|
||||||
# -----------------------------------------------------------------------------
|
launch_runner() {
|
||||||
|
local toml_file="$1"
|
||||||
|
local action_id
|
||||||
|
action_id=$(basename "$toml_file" .toml)
|
||||||
|
|
||||||
# _launch_runner_docker ACTION_ID SECRETS_CSV MOUNTS_CSV
|
log "Launching runner for action: ${action_id}"
|
||||||
#
|
|
||||||
# Builds and executes a `docker run` command for the vault runner.
|
|
||||||
# Secrets are resolved via load_secret (lib/env.sh).
|
|
||||||
# Returns: exit code of the docker run. Stdout/stderr are captured to a temp
|
|
||||||
# log file whose path is printed to stdout (caller reads it).
|
|
||||||
_launch_runner_docker() {
|
|
||||||
local action_id="$1"
|
|
||||||
local secrets_csv="$2"
|
|
||||||
local mounts_csv="$3"
|
|
||||||
|
|
||||||
|
# Validate TOML
|
||||||
|
if ! validate_action "$toml_file"; then
|
||||||
|
log "ERROR: Action validation failed for ${action_id}"
|
||||||
|
write_result "$action_id" 1 "Validation failed: see logs above"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check dispatch mode to determine if admin verification is needed
|
||||||
|
local dispatch_mode
|
||||||
|
dispatch_mode=$(get_dispatch_mode "$toml_file")
|
||||||
|
|
||||||
|
if [ "$dispatch_mode" = "direct" ]; then
|
||||||
|
log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — skipping admin merge verification (direct commit)"
|
||||||
|
else
|
||||||
|
# Verify admin merge for PR-based actions
|
||||||
|
log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — verifying admin merge"
|
||||||
|
if ! verify_admin_merged "$toml_file"; then
|
||||||
|
log "ERROR: Admin merge verification failed for ${action_id}"
|
||||||
|
write_result "$action_id" 1 "Admin merge verification failed: see logs above"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
log "Action ${action_id}: admin merge verified"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Extract secrets from validated action
|
||||||
|
local secrets_array
|
||||||
|
secrets_array="${VAULT_ACTION_SECRETS:-}"
|
||||||
|
|
||||||
|
# Build docker run command (self-contained, no compose context needed).
|
||||||
|
# The edge container has the Docker socket but not the host's compose project,
|
||||||
|
# so docker compose run would fail with exit 125. docker run is self-contained:
|
||||||
|
# the dispatcher knows the image, network, env vars, and entrypoint.
|
||||||
local -a cmd=(docker run --rm
|
local -a cmd=(docker run --rm
|
||||||
--name "vault-runner-${action_id}"
|
--name "vault-runner-${action_id}"
|
||||||
--network host
|
--network host
|
||||||
|
|
@ -483,27 +451,29 @@ _launch_runner_docker() {
|
||||||
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Add environment variables for secrets (resolved via load_secret)
|
# Add environment variables for secrets (if any declared)
|
||||||
if [ -n "$secrets_csv" ]; then
|
if [ -n "$secrets_array" ]; then
|
||||||
local secret
|
for secret in $secrets_array; do
|
||||||
for secret in $(echo "$secrets_csv" | tr ',' ' '); do
|
|
||||||
secret=$(echo "$secret" | xargs)
|
secret=$(echo "$secret" | xargs)
|
||||||
[ -n "$secret" ] || continue
|
if [ -n "$secret" ]; then
|
||||||
local secret_val
|
# Verify secret exists in vault
|
||||||
secret_val=$(load_secret "$secret") || true
|
if [ -z "${!secret:-}" ]; then
|
||||||
if [ -z "$secret_val" ]; then
|
log "ERROR: Secret '${secret}' not found in vault for action ${action_id}"
|
||||||
log "ERROR: Secret '${secret}' could not be resolved for action ${action_id}"
|
write_result "$action_id" 1 "Secret not found in vault: ${secret}"
|
||||||
write_result "$action_id" 1 "Secret not found: ${secret}"
|
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
cmd+=(-e "${secret}=${secret_val}")
|
cmd+=(-e "${secret}=${!secret}")
|
||||||
|
fi
|
||||||
done
|
done
|
||||||
|
else
|
||||||
|
log "Action ${action_id} has no secrets declared — runner will execute without extra env vars"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Add volume mounts for file-based credentials
|
# Add volume mounts for file-based credentials (if any declared)
|
||||||
if [ -n "$mounts_csv" ]; then
|
local mounts_array
|
||||||
local mount_alias
|
mounts_array="${VAULT_ACTION_MOUNTS:-}"
|
||||||
for mount_alias in $(echo "$mounts_csv" | tr ',' ' '); do
|
if [ -n "$mounts_array" ]; then
|
||||||
|
for mount_alias in $mounts_array; do
|
||||||
mount_alias=$(echo "$mount_alias" | xargs)
|
mount_alias=$(echo "$mount_alias" | xargs)
|
||||||
[ -n "$mount_alias" ] || continue
|
[ -n "$mount_alias" ] || continue
|
||||||
case "$mount_alias" in
|
case "$mount_alias" in
|
||||||
|
|
@ -531,7 +501,7 @@ _launch_runner_docker() {
|
||||||
# Image and entrypoint arguments: runner entrypoint + action-id
|
# Image and entrypoint arguments: runner entrypoint + action-id
|
||||||
cmd+=(disinto/agents:latest /home/agent/disinto/docker/runner/entrypoint-runner.sh "$action_id")
|
cmd+=(disinto/agents:latest /home/agent/disinto/docker/runner/entrypoint-runner.sh "$action_id")
|
||||||
|
|
||||||
log "Running: docker run --rm vault-runner-${action_id} (secrets: ${secrets_csv:-none}, mounts: ${mounts_csv:-none})"
|
log "Running: docker run --rm vault-runner-${action_id} (secrets: ${secrets_array:-none}, mounts: ${mounts_array:-none})"
|
||||||
|
|
||||||
# Create temp file for logs
|
# Create temp file for logs
|
||||||
local log_file
|
local log_file
|
||||||
|
|
@ -539,6 +509,7 @@ _launch_runner_docker() {
|
||||||
trap 'rm -f "$log_file"' RETURN
|
trap 'rm -f "$log_file"' RETURN
|
||||||
|
|
||||||
# Execute with array expansion (safe from shell injection)
|
# Execute with array expansion (safe from shell injection)
|
||||||
|
# Capture stdout and stderr to log file
|
||||||
"${cmd[@]}" > "$log_file" 2>&1
|
"${cmd[@]}" > "$log_file" 2>&1
|
||||||
local exit_code=$?
|
local exit_code=$?
|
||||||
|
|
||||||
|
|
@ -558,295 +529,6 @@ _launch_runner_docker() {
|
||||||
return $exit_code
|
return $exit_code
|
||||||
}
|
}
|
||||||
|
|
||||||
# _launch_runner_nomad ACTION_ID SECRETS_CSV MOUNTS_CSV
|
|
||||||
#
|
|
||||||
# Dispatches a vault-runner batch job via `nomad job dispatch`.
|
|
||||||
# Polls `nomad job status` until terminal state (completed/failed).
|
|
||||||
# Reads exit code from allocation and writes <action-id>.result.json.
|
|
||||||
#
|
|
||||||
# Usage: _launch_runner_nomad <action_id> <secrets_csv> <mounts_csv>
|
|
||||||
# Returns: exit code of the nomad job (0=success, non-zero=failure)
|
|
||||||
_launch_runner_nomad() {
|
|
||||||
local action_id="$1"
|
|
||||||
local secrets_csv="$2"
|
|
||||||
local mounts_csv="$3"
|
|
||||||
|
|
||||||
log "Dispatching vault-runner batch job via Nomad for action: ${action_id}"
|
|
||||||
|
|
||||||
# Dispatch the parameterized batch job
|
|
||||||
# The vault-runner job expects meta: action_id, secrets_csv
|
|
||||||
# Note: mounts_csv is not passed as meta (not declared in vault-runner.hcl)
|
|
||||||
local dispatch_output
|
|
||||||
dispatch_output=$(nomad job dispatch \
|
|
||||||
-detach \
|
|
||||||
-meta action_id="$action_id" \
|
|
||||||
-meta secrets_csv="$secrets_csv" \
|
|
||||||
vault-runner 2>&1) || {
|
|
||||||
log "ERROR: Failed to dispatch vault-runner job for ${action_id}"
|
|
||||||
log "Dispatch output: ${dispatch_output}"
|
|
||||||
write_result "$action_id" 1 "Nomad dispatch failed: ${dispatch_output}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Extract dispatched job ID from output (format: "vault-runner/dispatch-<timestamp>-<uuid>")
|
|
||||||
local dispatched_job_id
|
|
||||||
dispatched_job_id=$(echo "$dispatch_output" | grep -oP '(?<=Dispatched Job ID = ).+' || true)
|
|
||||||
|
|
||||||
if [ -z "$dispatched_job_id" ]; then
|
|
||||||
log "ERROR: Could not extract dispatched job ID from nomad output"
|
|
||||||
log "Dispatch output: ${dispatch_output}"
|
|
||||||
write_result "$action_id" 1 "Could not extract dispatched job ID from nomad output"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
log "Dispatched vault-runner with job ID: ${dispatched_job_id}"
|
|
||||||
|
|
||||||
# Poll job status until terminal state
|
|
||||||
# Batch jobs transition: running -> completed/failed
|
|
||||||
local max_wait=300 # 5 minutes max wait
|
|
||||||
local elapsed=0
|
|
||||||
local poll_interval=5
|
|
||||||
local alloc_id=""
|
|
||||||
|
|
||||||
log "Polling nomad job status for ${dispatched_job_id}..."
|
|
||||||
|
|
||||||
while [ "$elapsed" -lt "$max_wait" ]; do
|
|
||||||
# Get job status with JSON output for the dispatched child job
|
|
||||||
local job_status_json
|
|
||||||
job_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || {
|
|
||||||
log "ERROR: Failed to get job status for ${dispatched_job_id}"
|
|
||||||
write_result "$action_id" 1 "Failed to get job status for ${dispatched_job_id}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check job status field (transitions to "dead" on completion)
|
|
||||||
local job_state
|
|
||||||
job_state=$(echo "$job_status_json" | jq -r '.Status // empty' 2>/dev/null) || job_state=""
|
|
||||||
|
|
||||||
# Check allocation state directly
|
|
||||||
alloc_id=$(echo "$job_status_json" | jq -r '.Allocations[0]?.ID // empty' 2>/dev/null) || alloc_id=""
|
|
||||||
|
|
||||||
if [ -n "$alloc_id" ]; then
|
|
||||||
local alloc_state
|
|
||||||
alloc_state=$(nomad alloc status -short "$alloc_id" 2>/dev/null || true)
|
|
||||||
|
|
||||||
case "$alloc_state" in
|
|
||||||
*completed*|*success*|*dead*)
|
|
||||||
log "Allocation ${alloc_id} reached terminal state: ${alloc_state}"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
*running*|*pending*|*starting*)
|
|
||||||
log "Allocation ${alloc_id} still running (state: ${alloc_state})..."
|
|
||||||
;;
|
|
||||||
*failed*|*crashed*)
|
|
||||||
log "Allocation ${alloc_id} failed (state: ${alloc_state})"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Also check job-level state
|
|
||||||
case "$job_state" in
|
|
||||||
dead)
|
|
||||||
log "Job ${dispatched_job_id} reached terminal state: ${job_state}"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
failed)
|
|
||||||
log "Job ${dispatched_job_id} failed"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
sleep "$poll_interval"
|
|
||||||
elapsed=$((elapsed + poll_interval))
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ "$elapsed" -ge "$max_wait" ]; then
|
|
||||||
log "ERROR: Timeout waiting for vault-runner job to complete"
|
|
||||||
write_result "$action_id" 1 "Timeout waiting for nomad job to complete"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Get final job status and exit code
|
|
||||||
local final_status_json
|
|
||||||
final_status_json=$(nomad job status -json "$dispatched_job_id" 2>/dev/null) || {
|
|
||||||
log "ERROR: Failed to get final job status"
|
|
||||||
write_result "$action_id" 1 "Failed to get final job status"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Get allocation exit code
|
|
||||||
local exit_code=0
|
|
||||||
local logs=""
|
|
||||||
|
|
||||||
if [ -n "$alloc_id" ]; then
|
|
||||||
# Get allocation logs
|
|
||||||
logs=$(nomad alloc logs -short "$alloc_id" 2>/dev/null || true)
|
|
||||||
|
|
||||||
# Try to get exit code from alloc status JSON
|
|
||||||
# Nomad alloc status -json has .TaskStates["<task_name>"].Events[].ExitCode
|
|
||||||
local alloc_exit_code
|
|
||||||
alloc_exit_code=$(nomad alloc status -json "$alloc_id" 2>/dev/null | jq -r '.TaskStates["runner"].Events[-1].ExitCode // empty' 2>/dev/null) || alloc_exit_code=""
|
|
||||||
|
|
||||||
if [ -n "$alloc_exit_code" ] && [ "$alloc_exit_code" != "null" ]; then
|
|
||||||
exit_code="$alloc_exit_code"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If we couldn't get exit code from alloc, check job state as fallback
|
|
||||||
# Note: "dead" = terminal state for batch jobs (includes successful completion)
|
|
||||||
# Only "failed" indicates actual failure
|
|
||||||
if [ "$exit_code" -eq 0 ]; then
|
|
||||||
local final_state
|
|
||||||
final_state=$(echo "$final_status_json" | jq -r '.Status // empty' 2>/dev/null) || final_state=""
|
|
||||||
|
|
||||||
case "$final_state" in
|
|
||||||
failed)
|
|
||||||
exit_code=1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Truncate logs if too long
|
|
||||||
if [ ${#logs} -gt 1000 ]; then
|
|
||||||
logs="${logs: -1000}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Write result file
|
|
||||||
write_result "$action_id" "$exit_code" "$logs"
|
|
||||||
|
|
||||||
if [ "$exit_code" -eq 0 ]; then
|
|
||||||
log "Vault-runner job completed successfully for action: ${action_id}"
|
|
||||||
else
|
|
||||||
log "Vault-runner job failed for action: ${action_id} (exit code: ${exit_code})"
|
|
||||||
fi
|
|
||||||
|
|
||||||
return "$exit_code"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Launch runner for the given action (backend-agnostic orchestrator)
|
|
||||||
# Usage: launch_runner <toml_file>
|
|
||||||
launch_runner() {
|
|
||||||
local toml_file="$1"
|
|
||||||
local action_id
|
|
||||||
action_id=$(basename "$toml_file" .toml)
|
|
||||||
|
|
||||||
log "Launching runner for action: ${action_id}"
|
|
||||||
|
|
||||||
# Validate TOML
|
|
||||||
if ! validate_action "$toml_file"; then
|
|
||||||
log "ERROR: Action validation failed for ${action_id}"
|
|
||||||
write_result "$action_id" 1 "Validation failed: see logs above"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check dispatch mode to determine if admin verification is needed
|
|
||||||
local dispatch_mode
|
|
||||||
dispatch_mode=$(get_dispatch_mode "$toml_file")
|
|
||||||
|
|
||||||
if [ "$dispatch_mode" = "direct" ]; then
|
|
||||||
log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — skipping admin merge verification (direct commit)"
|
|
||||||
else
|
|
||||||
# Verify admin merge for PR-based actions
|
|
||||||
log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — verifying admin merge"
|
|
||||||
if ! verify_admin_merged "$toml_file"; then
|
|
||||||
log "ERROR: Admin merge verification failed for ${action_id}"
|
|
||||||
write_result "$action_id" 1 "Admin merge verification failed: see logs above"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
log "Action ${action_id}: admin merge verified"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Build CSV lists from validated action metadata
|
|
||||||
local secrets_csv=""
|
|
||||||
if [ -n "${VAULT_ACTION_SECRETS:-}" ]; then
|
|
||||||
# Convert space-separated to comma-separated
|
|
||||||
secrets_csv=$(echo "${VAULT_ACTION_SECRETS}" | xargs | tr ' ' ',')
|
|
||||||
fi
|
|
||||||
|
|
||||||
local mounts_csv=""
|
|
||||||
if [ -n "${VAULT_ACTION_MOUNTS:-}" ]; then
|
|
||||||
mounts_csv=$(echo "${VAULT_ACTION_MOUNTS}" | xargs | tr ' ' ',')
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Delegate to the selected backend
|
|
||||||
"_launch_runner_${DISPATCHER_BACKEND}" "$action_id" "$secrets_csv" "$mounts_csv"
|
|
||||||
}
|
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
|
||||||
# Pluggable sidecar launcher (reproduce / triage / verify)
|
|
||||||
# -----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# _dispatch_sidecar_docker CONTAINER_NAME ISSUE_NUM PROJECT_TOML IMAGE [FORMULA]
|
|
||||||
#
|
|
||||||
# Launches a sidecar container via docker run (background, pid-tracked).
|
|
||||||
# Prints the background PID to stdout.
|
|
||||||
_dispatch_sidecar_docker() {
|
|
||||||
local container_name="$1"
|
|
||||||
local issue_number="$2"
|
|
||||||
local project_toml="$3"
|
|
||||||
local image="$4"
|
|
||||||
local formula="${5:-}"
|
|
||||||
|
|
||||||
local -a cmd=(docker run --rm
|
|
||||||
--name "${container_name}"
|
|
||||||
--network host
|
|
||||||
--security-opt apparmor=unconfined
|
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock
|
|
||||||
-v agent-data:/home/agent/data
|
|
||||||
-v project-repos:/home/agent/repos
|
|
||||||
-e "FORGE_URL=${FORGE_URL}"
|
|
||||||
-e "FORGE_TOKEN=${FORGE_TOKEN}"
|
|
||||||
-e "FORGE_REPO=${FORGE_REPO}"
|
|
||||||
-e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
|
|
||||||
-e DISINTO_CONTAINER=1
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set formula if provided
|
|
||||||
if [ -n "$formula" ]; then
|
|
||||||
cmd+=(-e "DISINTO_FORMULA=${formula}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Pass through ANTHROPIC_API_KEY if set
|
|
||||||
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
|
||||||
cmd+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount shared Claude config dir and ~/.ssh from the runtime user's home
|
|
||||||
local runtime_home="${HOME:-/home/debian}"
|
|
||||||
if [ -d "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}" ]; then
|
|
||||||
cmd+=(-v "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}")
|
|
||||||
cmd+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
|
|
||||||
fi
|
|
||||||
if [ -f "${runtime_home}/.claude.json" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
|
||||||
fi
|
|
||||||
if [ -d "${runtime_home}/.ssh" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
|
|
||||||
fi
|
|
||||||
if [ -f /usr/local/bin/claude ]; then
|
|
||||||
cmd+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount the project TOML into the container at a stable path
|
|
||||||
local container_toml="/home/agent/project.toml"
|
|
||||||
cmd+=(-v "${project_toml}:${container_toml}:ro")
|
|
||||||
|
|
||||||
cmd+=("${image}" "$container_toml" "$issue_number")
|
|
||||||
|
|
||||||
# Launch in background
|
|
||||||
"${cmd[@]}" &
|
|
||||||
echo $!
|
|
||||||
}
|
|
||||||
|
|
||||||
# _dispatch_sidecar_nomad CONTAINER_NAME ISSUE_NUM PROJECT_TOML IMAGE [FORMULA]
|
|
||||||
#
|
|
||||||
# Nomad sidecar backend stub — will be implemented in migration Step 5.
|
|
||||||
_dispatch_sidecar_nomad() {
|
|
||||||
echo "nomad backend not yet implemented" >&2
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
# Reproduce dispatch — launch sidecar for bug-report issues
|
# Reproduce dispatch — launch sidecar for bug-report issues
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
|
|
@ -925,13 +607,52 @@ dispatch_reproduce() {
|
||||||
|
|
||||||
log "Dispatching reproduce-agent for issue #${issue_number} (project: ${project_toml})"
|
log "Dispatching reproduce-agent for issue #${issue_number} (project: ${project_toml})"
|
||||||
|
|
||||||
local bg_pid
|
# Build docker run command using array (safe from injection)
|
||||||
bg_pid=$("_dispatch_sidecar_${DISPATCHER_BACKEND}" \
|
local -a cmd=(docker run --rm
|
||||||
"disinto-reproduce-${issue_number}" \
|
--name "disinto-reproduce-${issue_number}"
|
||||||
"$issue_number" \
|
--network host
|
||||||
"$project_toml" \
|
--security-opt apparmor=unconfined
|
||||||
"disinto-reproduce:latest")
|
-v /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
-v agent-data:/home/agent/data
|
||||||
|
-v project-repos:/home/agent/repos
|
||||||
|
-e "FORGE_URL=${FORGE_URL}"
|
||||||
|
-e "FORGE_TOKEN=${FORGE_TOKEN}"
|
||||||
|
-e "FORGE_REPO=${FORGE_REPO}"
|
||||||
|
-e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
|
||||||
|
-e DISINTO_CONTAINER=1
|
||||||
|
)
|
||||||
|
|
||||||
|
# Pass through ANTHROPIC_API_KEY if set
|
||||||
|
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
||||||
|
cmd+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount shared Claude config dir and ~/.ssh from the runtime user's home if available
|
||||||
|
local runtime_home="${HOME:-/home/debian}"
|
||||||
|
if [ -d "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}" ]; then
|
||||||
|
cmd+=(-v "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}")
|
||||||
|
cmd+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
|
||||||
|
fi
|
||||||
|
if [ -f "${runtime_home}/.claude.json" ]; then
|
||||||
|
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
||||||
|
fi
|
||||||
|
if [ -d "${runtime_home}/.ssh" ]; then
|
||||||
|
cmd+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
|
||||||
|
fi
|
||||||
|
# Mount claude CLI binary if present on host
|
||||||
|
if [ -f /usr/local/bin/claude ]; then
|
||||||
|
cmd+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount the project TOML into the container at a stable path
|
||||||
|
local container_toml="/home/agent/project.toml"
|
||||||
|
cmd+=(-v "${project_toml}:${container_toml}:ro")
|
||||||
|
|
||||||
|
cmd+=(disinto-reproduce:latest "$container_toml" "$issue_number")
|
||||||
|
|
||||||
|
# Launch in background; write pid-file so we don't double-launch
|
||||||
|
"${cmd[@]}" &
|
||||||
|
local bg_pid=$!
|
||||||
echo "$bg_pid" > "$(_reproduce_lockfile "$issue_number")"
|
echo "$bg_pid" > "$(_reproduce_lockfile "$issue_number")"
|
||||||
log "Reproduce container launched (pid ${bg_pid}) for issue #${issue_number}"
|
log "Reproduce container launched (pid ${bg_pid}) for issue #${issue_number}"
|
||||||
}
|
}
|
||||||
|
|
@ -1011,14 +732,53 @@ dispatch_triage() {
|
||||||
|
|
||||||
log "Dispatching triage-agent for issue #${issue_number} (project: ${project_toml})"
|
log "Dispatching triage-agent for issue #${issue_number} (project: ${project_toml})"
|
||||||
|
|
||||||
local bg_pid
|
# Build docker run command using array (safe from injection)
|
||||||
bg_pid=$("_dispatch_sidecar_${DISPATCHER_BACKEND}" \
|
local -a cmd=(docker run --rm
|
||||||
"disinto-triage-${issue_number}" \
|
--name "disinto-triage-${issue_number}"
|
||||||
"$issue_number" \
|
--network host
|
||||||
"$project_toml" \
|
--security-opt apparmor=unconfined
|
||||||
"disinto-reproduce:latest" \
|
-v /var/run/docker.sock:/var/run/docker.sock
|
||||||
"triage")
|
-v agent-data:/home/agent/data
|
||||||
|
-v project-repos:/home/agent/repos
|
||||||
|
-e "FORGE_URL=${FORGE_URL}"
|
||||||
|
-e "FORGE_TOKEN=${FORGE_TOKEN}"
|
||||||
|
-e "FORGE_REPO=${FORGE_REPO}"
|
||||||
|
-e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
|
||||||
|
-e DISINTO_CONTAINER=1
|
||||||
|
-e DISINTO_FORMULA=triage
|
||||||
|
)
|
||||||
|
|
||||||
|
# Pass through ANTHROPIC_API_KEY if set
|
||||||
|
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
||||||
|
cmd+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount shared Claude config dir and ~/.ssh from the runtime user's home if available
|
||||||
|
local runtime_home="${HOME:-/home/debian}"
|
||||||
|
if [ -d "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}" ]; then
|
||||||
|
cmd+=(-v "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}")
|
||||||
|
cmd+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
|
||||||
|
fi
|
||||||
|
if [ -f "${runtime_home}/.claude.json" ]; then
|
||||||
|
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
||||||
|
fi
|
||||||
|
if [ -d "${runtime_home}/.ssh" ]; then
|
||||||
|
cmd+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
|
||||||
|
fi
|
||||||
|
# Mount claude CLI binary if present on host
|
||||||
|
if [ -f /usr/local/bin/claude ]; then
|
||||||
|
cmd+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount the project TOML into the container at a stable path
|
||||||
|
local container_toml="/home/agent/project.toml"
|
||||||
|
cmd+=(-v "${project_toml}:${container_toml}:ro")
|
||||||
|
|
||||||
|
cmd+=(disinto-reproduce:latest "$container_toml" "$issue_number")
|
||||||
|
|
||||||
|
# Launch in background; write pid-file so we don't double-launch
|
||||||
|
"${cmd[@]}" &
|
||||||
|
local bg_pid=$!
|
||||||
echo "$bg_pid" > "$(_triage_lockfile "$issue_number")"
|
echo "$bg_pid" > "$(_triage_lockfile "$issue_number")"
|
||||||
log "Triage container launched (pid ${bg_pid}) for issue #${issue_number}"
|
log "Triage container launched (pid ${bg_pid}) for issue #${issue_number}"
|
||||||
}
|
}
|
||||||
|
|
@ -1174,14 +934,53 @@ dispatch_verify() {
|
||||||
|
|
||||||
log "Dispatching verification-agent for issue #${issue_number} (project: ${project_toml})"
|
log "Dispatching verification-agent for issue #${issue_number} (project: ${project_toml})"
|
||||||
|
|
||||||
local bg_pid
|
# Build docker run command using array (safe from injection)
|
||||||
bg_pid=$("_dispatch_sidecar_${DISPATCHER_BACKEND}" \
|
local -a cmd=(docker run --rm
|
||||||
"disinto-verify-${issue_number}" \
|
--name "disinto-verify-${issue_number}"
|
||||||
"$issue_number" \
|
--network host
|
||||||
"$project_toml" \
|
--security-opt apparmor=unconfined
|
||||||
"disinto-reproduce:latest" \
|
-v /var/run/docker.sock:/var/run/docker.sock
|
||||||
"verify")
|
-v agent-data:/home/agent/data
|
||||||
|
-v project-repos:/home/agent/repos
|
||||||
|
-e "FORGE_URL=${FORGE_URL}"
|
||||||
|
-e "FORGE_TOKEN=${FORGE_TOKEN}"
|
||||||
|
-e "FORGE_REPO=${FORGE_REPO}"
|
||||||
|
-e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
|
||||||
|
-e DISINTO_CONTAINER=1
|
||||||
|
-e DISINTO_FORMULA=verify
|
||||||
|
)
|
||||||
|
|
||||||
|
# Pass through ANTHROPIC_API_KEY if set
|
||||||
|
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
||||||
|
cmd+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount shared Claude config dir and ~/.ssh from the runtime user's home if available
|
||||||
|
local runtime_home="${HOME:-/home/debian}"
|
||||||
|
if [ -d "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}" ]; then
|
||||||
|
cmd+=(-v "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}")
|
||||||
|
cmd+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
|
||||||
|
fi
|
||||||
|
if [ -f "${runtime_home}/.claude.json" ]; then
|
||||||
|
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
||||||
|
fi
|
||||||
|
if [ -d "${runtime_home}/.ssh" ]; then
|
||||||
|
cmd+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
|
||||||
|
fi
|
||||||
|
# Mount claude CLI binary if present on host
|
||||||
|
if [ -f /usr/local/bin/claude ]; then
|
||||||
|
cmd+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount the project TOML into the container at a stable path
|
||||||
|
local container_toml="/home/agent/project.toml"
|
||||||
|
cmd+=(-v "${project_toml}:${container_toml}:ro")
|
||||||
|
|
||||||
|
cmd+=(disinto-reproduce:latest "$container_toml" "$issue_number")
|
||||||
|
|
||||||
|
# Launch in background; write pid-file so we don't double-launch
|
||||||
|
"${cmd[@]}" &
|
||||||
|
local bg_pid=$!
|
||||||
echo "$bg_pid" > "$(_verify_lockfile "$issue_number")"
|
echo "$bg_pid" > "$(_verify_lockfile "$issue_number")"
|
||||||
log "Verification container launched (pid ${bg_pid}) for issue #${issue_number}"
|
log "Verification container launched (pid ${bg_pid}) for issue #${issue_number}"
|
||||||
}
|
}
|
||||||
|
|
@ -1203,22 +1002,10 @@ ensure_ops_repo() {
|
||||||
|
|
||||||
# Main dispatcher loop
|
# Main dispatcher loop
|
||||||
main() {
|
main() {
|
||||||
log "Starting dispatcher (backend=${DISPATCHER_BACKEND})..."
|
log "Starting dispatcher..."
|
||||||
log "Polling ops repo: ${VAULT_ACTIONS_DIR}"
|
log "Polling ops repo: ${VAULT_ACTIONS_DIR}"
|
||||||
log "Admin users: ${ADMIN_USERS}"
|
log "Admin users: ${ADMIN_USERS}"
|
||||||
|
|
||||||
# Validate backend selection at startup
|
|
||||||
case "$DISPATCHER_BACKEND" in
|
|
||||||
docker|nomad)
|
|
||||||
log "Using ${DISPATCHER_BACKEND} backend for vault-runner dispatch"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
log "ERROR: unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND}"
|
|
||||||
echo "unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND} (expected: docker, nomad)" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
while true; do
|
while true; do
|
||||||
# Refresh ops repo at the start of each poll cycle
|
# Refresh ops repo at the start of each poll cycle
|
||||||
ensure_ops_repo
|
ensure_ops_repo
|
||||||
|
|
|
||||||
|
|
@ -173,67 +173,6 @@ PROJECT_TOML="${PROJECT_TOML:-projects/disinto.toml}"
|
||||||
sleep 1200 # 20 minutes
|
sleep 1200 # 20 minutes
|
||||||
done) &
|
done) &
|
||||||
|
|
||||||
# ── Load required secrets from secrets/*.enc (#777) ────────────────────
|
|
||||||
# Edge container declares its required secrets; missing ones cause a hard fail.
|
|
||||||
_AGE_KEY_FILE="${HOME}/.config/sops/age/keys.txt"
|
|
||||||
_SECRETS_DIR="/opt/disinto/secrets"
|
|
||||||
EDGE_REQUIRED_SECRETS="CADDY_SSH_KEY CADDY_SSH_HOST CADDY_SSH_USER CADDY_ACCESS_LOG"
|
|
||||||
|
|
||||||
_edge_decrypt_secret() {
|
|
||||||
local enc_path="${_SECRETS_DIR}/${1}.enc"
|
|
||||||
[ -f "$enc_path" ] || return 1
|
|
||||||
age -d -i "$_AGE_KEY_FILE" "$enc_path" 2>/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
if [ -f "$_AGE_KEY_FILE" ] && [ -d "$_SECRETS_DIR" ]; then
|
|
||||||
_missing=""
|
|
||||||
for _secret_name in $EDGE_REQUIRED_SECRETS; do
|
|
||||||
_val=$(_edge_decrypt_secret "$_secret_name") || { _missing="${_missing} ${_secret_name}"; continue; }
|
|
||||||
export "$_secret_name=$_val"
|
|
||||||
done
|
|
||||||
if [ -n "$_missing" ]; then
|
|
||||||
echo "FATAL: required secrets missing from secrets/*.enc:${_missing}" >&2
|
|
||||||
echo " Run 'disinto secrets add <NAME>' for each missing secret." >&2
|
|
||||||
echo " If migrating from .env.vault.enc, run 'disinto secrets migrate-from-vault' first." >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo "edge: loaded required secrets: ${EDGE_REQUIRED_SECRETS}" >&2
|
|
||||||
else
|
|
||||||
echo "FATAL: age key (${_AGE_KEY_FILE}) or secrets dir (${_SECRETS_DIR}) not found — cannot load required secrets" >&2
|
|
||||||
echo " Ensure age is installed and secrets/*.enc files are present." >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Start daily engagement collection cron loop in background (#745)
|
|
||||||
# Runs collect-engagement.sh daily at ~23:50 UTC via a sleep loop that
|
|
||||||
# calculates seconds until the next 23:50 window. SSH key from secrets/*.enc (#777).
|
|
||||||
(while true; do
|
|
||||||
# Calculate seconds until next 23:50 UTC
|
|
||||||
_now=$(date -u +%s)
|
|
||||||
_target=$(date -u -d "today 23:50" +%s 2>/dev/null || date -u -d "23:50" +%s 2>/dev/null || echo 0)
|
|
||||||
if [ "$_target" -le "$_now" ]; then
|
|
||||||
_target=$(( _target + 86400 ))
|
|
||||||
fi
|
|
||||||
_sleep_secs=$(( _target - _now ))
|
|
||||||
echo "edge: collect-engagement scheduled in ${_sleep_secs}s (next 23:50 UTC)" >&2
|
|
||||||
sleep "$_sleep_secs"
|
|
||||||
_fetch_log="/tmp/caddy-access-log-fetch.log"
|
|
||||||
_ssh_key_file=$(mktemp)
|
|
||||||
printf '%s\n' "$CADDY_SSH_KEY" > "$_ssh_key_file"
|
|
||||||
chmod 0600 "$_ssh_key_file"
|
|
||||||
scp -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new -o ConnectTimeout=10 -o BatchMode=yes \
|
|
||||||
"${CADDY_SSH_USER}@${CADDY_SSH_HOST}:${CADDY_ACCESS_LOG}" \
|
|
||||||
"$_fetch_log" 2>&1 | tee -a /opt/disinto-logs/collect-engagement.log || true
|
|
||||||
rm -f "$_ssh_key_file"
|
|
||||||
if [ -s "$_fetch_log" ]; then
|
|
||||||
CADDY_ACCESS_LOG="$_fetch_log" bash /opt/disinto/site/collect-engagement.sh 2>&1 \
|
|
||||||
| tee -a /opt/disinto-logs/collect-engagement.log || true
|
|
||||||
else
|
|
||||||
echo "edge: collect-engagement: fetched log is empty, skipping parse" >&2
|
|
||||||
fi
|
|
||||||
rm -f "$_fetch_log"
|
|
||||||
done) &
|
|
||||||
|
|
||||||
# Caddy as main process — run in foreground via wait so background jobs survive
|
# Caddy as main process — run in foreground via wait so background jobs survive
|
||||||
# (exec replaces the shell, which can orphan backgrounded subshells)
|
# (exec replaces the shell, which can orphan backgrounded subshells)
|
||||||
caddy run --config /etc/caddy/Caddyfile --adapter caddyfile &
|
caddy run --config /etc/caddy/Caddyfile --adapter caddyfile &
|
||||||
|
|
|
||||||
|
|
@ -7,8 +7,5 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
RUN useradd -m -u 1000 -s /bin/bash agent
|
RUN useradd -m -u 1000 -s /bin/bash agent
|
||||||
COPY docker/reproduce/entrypoint-reproduce.sh /entrypoint-reproduce.sh
|
COPY docker/reproduce/entrypoint-reproduce.sh /entrypoint-reproduce.sh
|
||||||
RUN chmod +x /entrypoint-reproduce.sh
|
RUN chmod +x /entrypoint-reproduce.sh
|
||||||
VOLUME /home/agent/data
|
|
||||||
VOLUME /home/agent/repos
|
|
||||||
|
|
||||||
WORKDIR /home/agent
|
WORKDIR /home/agent
|
||||||
ENTRYPOINT ["/entrypoint-reproduce.sh"]
|
ENTRYPOINT ["/entrypoint-reproduce.sh"]
|
||||||
|
|
|
||||||
|
|
@ -26,8 +26,8 @@ The `main` branch on the ops repo (`johba/disinto-ops`) is protected via Forgejo
|
||||||
|
|
||||||
## Vault PR Lifecycle
|
## Vault PR Lifecycle
|
||||||
|
|
||||||
1. **Request** — Agent calls `lib/action-vault.sh:vault_request()` with action TOML content
|
1. **Request** — Agent calls `lib/vault.sh:vault_request()` with action TOML content
|
||||||
2. **Validation** — TOML is validated against the schema in `action-vault/vault-env.sh`
|
2. **Validation** — TOML is validated against the schema in `vault/vault-env.sh`
|
||||||
3. **PR Creation** — A PR is created on `disinto-ops` with:
|
3. **PR Creation** — A PR is created on `disinto-ops` with:
|
||||||
- Branch: `vault/<action-id>`
|
- Branch: `vault/<action-id>`
|
||||||
- Title: `vault: <action-id>`
|
- Title: `vault: <action-id>`
|
||||||
|
|
@ -90,12 +90,12 @@ To verify the protection is working:
|
||||||
|
|
||||||
- #73 — Vault redesign proposal
|
- #73 — Vault redesign proposal
|
||||||
- #74 — Vault action TOML schema
|
- #74 — Vault action TOML schema
|
||||||
- #75 — Vault PR creation helper (`lib/action-vault.sh`)
|
- #75 — Vault PR creation helper (`lib/vault.sh`)
|
||||||
- #76 — Dispatcher rewrite (poll for merged vault PRs)
|
- #76 — Dispatcher rewrite (poll for merged vault PRs)
|
||||||
- #77 — Branch protection on ops repo (this issue)
|
- #77 — Branch protection on ops repo (this issue)
|
||||||
|
|
||||||
## See Also
|
## See Also
|
||||||
|
|
||||||
- [`lib/action-vault.sh`](../lib/action-vault.sh) — Vault PR creation helper
|
- [`lib/vault.sh`](../lib/vault.sh) — Vault PR creation helper
|
||||||
- [`action-vault/vault-env.sh`](../action-vault/vault-env.sh) — TOML validation
|
- [`vault/vault-env.sh`](../vault/vault-env.sh) — TOML validation
|
||||||
- [`lib/branch-protection.sh`](../lib/branch-protection.sh) — Branch protection helper
|
- [`lib/branch-protection.sh`](../lib/branch-protection.sh) — Branch protection helper
|
||||||
|
|
|
||||||
|
|
@ -1,194 +0,0 @@
|
||||||
# Local-Model Agents
|
|
||||||
|
|
||||||
Local-model agents run the same agent code as the Claude-backed agents, but
|
|
||||||
connect to a local llama-server (or compatible OpenAI-API endpoint) instead of
|
|
||||||
the Anthropic API. This document describes the canonical activation flow using
|
|
||||||
`disinto hire-an-agent` and `[agents.X]` TOML configuration.
|
|
||||||
|
|
||||||
> **Note:** The legacy `ENABLE_LLAMA_AGENT=1` env flag has been removed (#846).
|
|
||||||
> Activation is now done exclusively via `[agents.X]` sections in project TOML.
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
Local-model agents are configured via `[agents.<name>]` sections in
|
|
||||||
`projects/<project>.toml`. Each agent gets:
|
|
||||||
- Its own Forgejo bot user with dedicated API token and password
|
|
||||||
- A dedicated compose service `agents-<name>`
|
|
||||||
- Isolated credentials stored as `FORGE_TOKEN_<USER_UPPER>` and `FORGE_PASS_<USER_UPPER>` in `.env`
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
- **llama-server** (or compatible OpenAI-API endpoint) running on the host,
|
|
||||||
reachable from inside Docker at the URL you will configure.
|
|
||||||
- A disinto factory already initialized (`disinto init` completed).
|
|
||||||
|
|
||||||
## Hiring a local-model agent
|
|
||||||
|
|
||||||
Use `disinto hire-an-agent` with `--local-model` to create a bot user and
|
|
||||||
configure the agent:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Hire a local-model agent for the dev role
|
|
||||||
disinto hire-an-agent dev-qwen dev \
|
|
||||||
--local-model http://10.10.10.1:8081 \
|
|
||||||
--model unsloth/Qwen3.5-35B-A3B
|
|
||||||
```
|
|
||||||
|
|
||||||
The command performs these steps:
|
|
||||||
|
|
||||||
1. **Creates a Forgejo user** `dev-qwen` with a random password
|
|
||||||
2. **Generates an API token** for the user
|
|
||||||
3. **Writes credentials to `.env`**:
|
|
||||||
- `FORGE_TOKEN_DEV_QWEN` — the API token
|
|
||||||
- `FORGE_PASS_DEV_QWEN` — the password
|
|
||||||
- `ANTHROPIC_BASE_URL` — the llama endpoint (required by the agent)
|
|
||||||
4. **Writes `[agents.dev-qwen]` to `projects/<project>.toml`** with:
|
|
||||||
- `base_url`, `model`, `api_key`
|
|
||||||
- `roles = ["dev"]`
|
|
||||||
- `forge_user = "dev-qwen"`
|
|
||||||
- `compact_pct = 60`
|
|
||||||
- `poll_interval = 60`
|
|
||||||
5. **Regenerates `docker-compose.yml`** to include the `agents-dev-qwen` service
|
|
||||||
|
|
||||||
### Anthropic backend agents
|
|
||||||
|
|
||||||
For agents that use Anthropic API instead of a local model, omit `--local-model`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Anthropic backend agent (requires ANTHROPIC_API_KEY in environment)
|
|
||||||
export ANTHROPIC_API_KEY="sk-..."
|
|
||||||
disinto hire-an-agent dev-claude dev
|
|
||||||
```
|
|
||||||
|
|
||||||
This writes `ANTHROPIC_API_KEY` to `.env` instead of `ANTHROPIC_BASE_URL`.
|
|
||||||
|
|
||||||
## Activation and running
|
|
||||||
|
|
||||||
Once hired, the agent service is added to `docker-compose.yml`. Start the
|
|
||||||
service with `docker compose up -d`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start all agent services
|
|
||||||
docker compose up -d
|
|
||||||
|
|
||||||
# Start a single named agent service
|
|
||||||
docker compose up -d agents-dev-qwen
|
|
||||||
|
|
||||||
# Start multiple named agent services
|
|
||||||
docker compose up -d agents-dev-qwen agents-planner
|
|
||||||
```
|
|
||||||
|
|
||||||
### Stopping agents
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Stop a specific agent service
|
|
||||||
docker compose down agents-dev-qwen
|
|
||||||
|
|
||||||
# Stop all agent services
|
|
||||||
docker compose down
|
|
||||||
```
|
|
||||||
|
|
||||||
## Credential rotation
|
|
||||||
|
|
||||||
Re-running `disinto hire-an-agent <same-name>` with the same parameters rotates
|
|
||||||
credentials idempotently:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Re-hire the same agent to rotate token and password
|
|
||||||
disinto hire-an-agent dev-qwen dev \
|
|
||||||
--local-model http://10.10.10.1:8081 \
|
|
||||||
--model unsloth/Qwen3.5-35B-A3B
|
|
||||||
|
|
||||||
# The command will:
|
|
||||||
# 1. Detect the user already exists
|
|
||||||
# 2. Reset the password to a new random value
|
|
||||||
# 3. Create a new API token
|
|
||||||
# 4. Update .env with the new credentials
|
|
||||||
```
|
|
||||||
|
|
||||||
This is the recommended way to rotate agent credentials. The `.env` file is
|
|
||||||
updated in place, so no manual editing is required.
|
|
||||||
|
|
||||||
If you need to manually rotate credentials:
|
|
||||||
1. Generate a new token in Forgejo admin UI
|
|
||||||
2. Edit `.env` and replace `FORGE_TOKEN_<USER_UPPER>` and `FORGE_PASS_<USER_UPPER>`
|
|
||||||
3. Restart the agent service: `docker compose restart agents-<name>`
|
|
||||||
|
|
||||||
## Configuration reference
|
|
||||||
|
|
||||||
### Environment variables (`.env`)
|
|
||||||
|
|
||||||
| Variable | Description | Example |
|
|
||||||
|----------|-------------|---------|
|
|
||||||
| `FORGE_TOKEN_<USER_UPPER>` | Forgejo API token for the bot user | `FORGE_TOKEN_DEV_QWEN` |
|
|
||||||
| `FORGE_PASS_<USER_UPPER>` | Forgejo password for the bot user | `FORGE_PASS_DEV_QWEN` |
|
|
||||||
| `ANTHROPIC_BASE_URL` | Local llama endpoint (local model agents) | `http://host.docker.internal:8081` |
|
|
||||||
| `ANTHROPIC_API_KEY` | Anthropic API key (Anthropic backend agents) | `sk-...` |
|
|
||||||
|
|
||||||
### Project TOML (`[agents.<name>]` section)
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[agents.dev-qwen]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "unsloth/Qwen3.5-35B-A3B"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "dev-qwen"
|
|
||||||
compact_pct = 60
|
|
||||||
poll_interval = 60
|
|
||||||
```
|
|
||||||
|
|
||||||
| Field | Description |
|
|
||||||
|-------|-------------|
|
|
||||||
| `base_url` | llama-server endpoint |
|
|
||||||
| `model` | Model name (for logging/identification) |
|
|
||||||
| `api_key` | Required by API; set to placeholder for llama |
|
|
||||||
| `roles` | Agent roles this instance handles |
|
|
||||||
| `forge_user` | Forgejo bot username |
|
|
||||||
| `compact_pct` | Context compaction threshold (lower = more aggressive) |
|
|
||||||
| `poll_interval` | Seconds between polling cycles |
|
|
||||||
|
|
||||||
## Behaviour
|
|
||||||
|
|
||||||
- Each agent runs with `AGENT_ROLES` set to its configured roles
|
|
||||||
- `CLAUDE_AUTOCOMPACT_PCT_OVERRIDE=60` — more aggressive compaction for smaller
|
|
||||||
context windows
|
|
||||||
- Agents serialize on the llama-server's single KV cache (AD-002)
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Agent service not starting
|
|
||||||
|
|
||||||
Check that the service was created by `disinto hire-an-agent`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose config | grep -A5 "agents-dev-qwen"
|
|
||||||
```
|
|
||||||
|
|
||||||
If the service is missing, re-run `disinto hire-an-agent dev-qwen dev` to
|
|
||||||
regenerate `docker-compose.yml`.
|
|
||||||
|
|
||||||
### Model endpoint unreachable
|
|
||||||
|
|
||||||
Verify llama-server is accessible from inside Docker:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose -f docker-compose.yml exec agents curl -sf http://host.docker.internal:8081/health
|
|
||||||
```
|
|
||||||
|
|
||||||
If using a custom host IP, update `ANTHROPIC_BASE_URL` in `.env`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Update the base URL
|
|
||||||
sed -i 's|^ANTHROPIC_BASE_URL=.*|ANTHROPIC_BASE_URL=http://192.168.1.100:8081|' .env
|
|
||||||
|
|
||||||
# Restart the agent
|
|
||||||
docker compose restart agents-dev-qwen
|
|
||||||
```
|
|
||||||
|
|
||||||
### Invalid agent name
|
|
||||||
|
|
||||||
Agent names must match `^[a-z]([a-z0-9]|-[a-z0-9])*$` (lowercase letters, digits,
|
|
||||||
hyphens; starts with letter, ends with alphanumeric). Names like
`dev-qwen2` are valid (a trailing digit is fine); invalid names like
`dev--qwen` (consecutive hyphens) or `-dev` (leading hyphen) will
be rejected.
|
|
||||||
|
|
@ -1,59 +0,0 @@
|
||||||
# Mirror Bootstrap — Pull-Mirror Cutover Path
|
|
||||||
|
|
||||||
How to populate an empty Forgejo repo from an external source using
|
|
||||||
`lib/mirrors.sh`'s `mirror_pull_register()`.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
| Variable | Example | Purpose |
|
|
||||||
|---|---|---|
|
|
||||||
| `FORGE_URL` | `http://forgejo:3000` | Forgejo instance base URL |
|
|
||||||
| `FORGE_API_BASE` | `${FORGE_URL}/api/v1` | Global API base (set by `lib/env.sh`) |
|
|
||||||
| `FORGE_TOKEN` | (admin or org-owner token) | Must have `repo:create` scope |
|
|
||||||
|
|
||||||
The target org/user must already exist on the Forgejo instance.
|
|
||||||
|
|
||||||
## Command
|
|
||||||
|
|
||||||
```bash
|
|
||||||
source lib/env.sh
|
|
||||||
source lib/mirrors.sh
|
|
||||||
|
|
||||||
# Register a pull mirror — creates the repo and starts the first sync.
|
|
||||||
mirror_pull_register \
|
|
||||||
"https://codeberg.org/johba/disinto.git" \ # source URL
|
|
||||||
"disinto-admin" \ # target owner
|
|
||||||
"disinto" \ # target repo name
|
|
||||||
"8h0m0s" # sync interval (optional, default 8h)
|
|
||||||
```
|
|
||||||
|
|
||||||
The function calls `POST /api/v1/repos/migrate` with `mirror: true`.
|
|
||||||
Forgejo creates the repo and immediately queues the first sync.
|
|
||||||
|
|
||||||
## Verifying the sync
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check mirror status via API
|
|
||||||
forge_api GET "/repos/disinto-admin/disinto" | jq '.mirror, .mirror_interval'
|
|
||||||
|
|
||||||
# Confirm content arrived — should list branches
|
|
||||||
forge_api GET "/repos/disinto-admin/disinto/branches" | jq '.[].name'
|
|
||||||
```
|
|
||||||
|
|
||||||
The first sync typically completes within a few seconds for small-to-medium
|
|
||||||
repos. For large repos, poll the branches endpoint until content appears.
|
|
||||||
|
|
||||||
## Cutover scenario (Nomad migration)
|
|
||||||
|
|
||||||
At cutover to the Nomad box:
|
|
||||||
|
|
||||||
1. Stand up fresh Forgejo on the Nomad cluster (empty instance).
|
|
||||||
2. Create the `disinto-admin` org via `disinto init` or API.
|
|
||||||
3. Run `mirror_pull_register` pointing at the Codeberg source.
|
|
||||||
4. Wait for sync to complete (check branches endpoint).
|
|
||||||
5. Once content is confirmed, proceed with `disinto init` against the
|
|
||||||
now-populated repo — all subsequent `mirror_push` calls will push
|
|
||||||
to any additional mirrors configured in `projects/*.toml`.
|
|
||||||
|
|
||||||
No manual `git clone` + `git push` step is needed. The Forgejo pull-mirror
|
|
||||||
handles the entire transfer.
|
|
||||||
|
|
@ -1,124 +0,0 @@
|
||||||
<!-- last-reviewed: (new file, S2.5 #883) -->
|
|
||||||
# Nomad+Vault migration — cutover-day runbook
|
|
||||||
|
|
||||||
`disinto init --backend=nomad` is the single entry-point that turns a fresh
|
|
||||||
LXC (with the disinto repo cloned) into a running Nomad+Vault cluster with
|
|
||||||
policies applied, JWT workload-identity auth configured, secrets imported
|
|
||||||
from the old docker stack, and services deployed.
|
|
||||||
|
|
||||||
## Cutover-day invocation
|
|
||||||
|
|
||||||
On the new LXC, as root (or an operator with NOPASSWD sudo):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Copy the plaintext .env + sops-encrypted .env.vault.enc + age keyfile
|
|
||||||
# from the old box first (out of band — SSH, USB, whatever your ops
|
|
||||||
# procedure allows). Then:
|
|
||||||
|
|
||||||
sudo ./bin/disinto init \
|
|
||||||
--backend=nomad \
|
|
||||||
--import-env /tmp/.env \
|
|
||||||
--import-sops /tmp/.env.vault.enc \
|
|
||||||
--age-key /tmp/keys.txt \
|
|
||||||
--with forgejo
|
|
||||||
```
|
|
||||||
|
|
||||||
This runs, in order:
|
|
||||||
|
|
||||||
1. **`lib/init/nomad/cluster-up.sh`** (S0) — installs Nomad + Vault
|
|
||||||
binaries, writes `/etc/nomad.d/*`, initializes Vault, starts both
|
|
||||||
services, waits for the Nomad node to become ready.
|
|
||||||
2. **`tools/vault-apply-policies.sh`** (S2.1) — syncs every
|
|
||||||
`vault/policies/*.hcl` into Vault as an ACL policy. Idempotent.
|
|
||||||
3. **`lib/init/nomad/vault-nomad-auth.sh`** (S2.3) — enables Vault's
|
|
||||||
JWT auth method at `jwt-nomad`, points it at Nomad's JWKS, writes
|
|
||||||
one role per policy, reloads Nomad so jobs can exchange
|
|
||||||
workload-identity tokens for Vault tokens. Idempotent.
|
|
||||||
4. **`tools/vault-import.sh`** (S2.2) — reads `/tmp/.env` and the
|
|
||||||
sops-decrypted `/tmp/.env.vault.enc`, writes them to the KV paths
|
|
||||||
matching the S2.1 policy layout (`kv/disinto/bots/*`, `kv/disinto/shared/*`,
|
|
||||||
`kv/disinto/runner/*`). Idempotent (overwrites KV v2 data in place).
|
|
||||||
5. **`lib/init/nomad/deploy.sh forgejo`** (S1) — validates + runs the
|
|
||||||
`nomad/jobs/forgejo.hcl` jobspec. Forgejo reads its admin creds from
|
|
||||||
Vault via the `template` stanza (S2.4).
|
|
||||||
|
|
||||||
## Flag summary
|
|
||||||
|
|
||||||
| Flag | Meaning |
|
|
||||||
|---|---|
|
|
||||||
| `--backend=nomad` | Switch the init dispatcher to the Nomad+Vault path (instead of docker compose). |
|
|
||||||
| `--empty` | Bring the cluster up, skip policies/auth/import/deploy. Escape hatch for debugging. |
|
|
||||||
| `--with forgejo[,…]` | Deploy these services after the cluster is up. |
|
|
||||||
| `--import-env PATH` | Plaintext `.env` from the old stack. Optional. |
|
|
||||||
| `--import-sops PATH` | Sops-encrypted `.env.vault.enc` from the old stack. Requires `--age-key`. |
|
|
||||||
| `--age-key PATH` | Age keyfile used to decrypt `--import-sops`. Requires `--import-sops`. |
|
|
||||||
| `--dry-run` | Print the full plan (cluster-up + policies + auth + import + deploy) and exit. Touches nothing. |
|
|
||||||
|
|
||||||
### Flag validation
|
|
||||||
|
|
||||||
- `--import-sops` without `--age-key` → error.
|
|
||||||
- `--age-key` without `--import-sops` → error.
|
|
||||||
- `--import-env` alone (no sops) → OK (imports just the plaintext `.env`).
|
|
||||||
- `--backend=docker` with any `--import-*` flag → error.
|
|
||||||
- `--empty` with any `--import-*` flag → error (mutually exclusive: `--empty`
|
|
||||||
skips the import step, so pairing them silently discards the import
|
|
||||||
intent).
|
|
||||||
|
|
||||||
## Idempotency
|
|
||||||
|
|
||||||
Every layer is idempotent by design. Re-running the same command on an
|
|
||||||
already-provisioned box is a no-op at every step:
|
|
||||||
|
|
||||||
- **Cluster-up:** second run detects running `nomad`/`vault` systemd
|
|
||||||
units and state files, skips re-init.
|
|
||||||
- **Policies:** byte-for-byte compare against on-server policy text;
|
|
||||||
"unchanged" for every untouched file.
|
|
||||||
- **Auth:** skips auth-method create if `jwt-nomad/` already enabled,
|
|
||||||
skips config write if the JWKS + algs match, skips server.hcl write if
|
|
||||||
the file on disk is identical to the repo copy.
|
|
||||||
- **Import:** KV v2 writes overwrite in place (same path, same keys,
|
|
||||||
same values → no new version).
|
|
||||||
- **Deploy:** `nomad job run` is declarative; same jobspec → no new
|
|
||||||
allocation.
|
|
||||||
|
|
||||||
## Dry-run
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./bin/disinto init --backend=nomad \
|
|
||||||
--import-env /tmp/.env \
|
|
||||||
--import-sops /tmp/.env.vault.enc \
|
|
||||||
--age-key /tmp/keys.txt \
|
|
||||||
--with forgejo \
|
|
||||||
--dry-run
|
|
||||||
```
|
|
||||||
|
|
||||||
Prints the five-section plan — cluster-up, policies, auth, import,
|
|
||||||
deploy — with every path and every argv that would be executed. No
|
|
||||||
network, no sudo, no state mutation. See
|
|
||||||
`tests/disinto-init-nomad.bats` for the exact output shape.
|
|
||||||
|
|
||||||
## No-import path
|
|
||||||
|
|
||||||
If you already have `kv/disinto/*` seeded by other means (manual
|
|
||||||
`vault kv put`, a replica, etc.), omit all three `--import-*` flags.
|
|
||||||
`disinto init --backend=nomad --with forgejo` still applies policies,
|
|
||||||
configures auth, and deploys — but skips the import step with:
|
|
||||||
|
|
||||||
```
|
|
||||||
[import] no --import-env/--import-sops — skipping; set them or seed kv/disinto/* manually before deploying secret-dependent services
|
|
||||||
```
|
|
||||||
|
|
||||||
Forgejo's template stanza will fail to render (and thus the allocation
|
|
||||||
will stall) until those KV paths exist — so either import them or seed
|
|
||||||
them first.
|
|
||||||
|
|
||||||
## Secret hygiene
|
|
||||||
|
|
||||||
- Never log a secret value. The CLI only prints paths (`--import-env`,
|
|
||||||
`--age-key`) and KV *paths* (`kv/disinto/bots/review/token`), never
|
|
||||||
the values themselves. `tools/vault-import.sh` is the only thing that
|
|
||||||
reads the values, and it pipes them directly into Vault's HTTP API.
|
|
||||||
- The age keyfile must be mode 0400 — `vault-import.sh` refuses to
|
|
||||||
source a keyfile with looser permissions.
|
|
||||||
- `VAULT_ADDR` must be localhost during import — the import tool
|
|
||||||
refuses to run against a remote Vault, preventing accidental exposure.
|
|
||||||
|
|
@ -1,172 +0,0 @@
|
||||||
# formulas/collect-engagement.toml — Collect website engagement data
|
|
||||||
#
|
|
||||||
# Daily formula: SSH into Caddy host, fetch access log, parse locally,
|
|
||||||
# commit evidence JSON to ops repo via Forgejo API.
|
|
||||||
#
|
|
||||||
# Triggered by cron in the edge container entrypoint (daily at 23:50 UTC).
|
|
||||||
# Design choices from #426: Q1=A (fetch raw log, process locally),
|
|
||||||
# Q2=A (direct cron in edge container), Q3=B (dedicated purpose-limited SSH key).
|
|
||||||
#
|
|
||||||
# Steps: fetch-log → parse-engagement → commit-evidence
|
|
||||||
|
|
||||||
name = "collect-engagement"
|
|
||||||
description = "SSH-fetch Caddy access log, parse engagement metrics, commit evidence"
|
|
||||||
version = 1
|
|
||||||
|
|
||||||
[context]
|
|
||||||
files = ["AGENTS.md"]
|
|
||||||
|
|
||||||
[vars.caddy_host]
|
|
||||||
description = "SSH host for the Caddy server"
|
|
||||||
required = false
|
|
||||||
default = "${CADDY_SSH_HOST:-disinto.ai}"
|
|
||||||
|
|
||||||
[vars.caddy_user]
|
|
||||||
description = "SSH user on the Caddy host"
|
|
||||||
required = false
|
|
||||||
default = "${CADDY_SSH_USER:-debian}"
|
|
||||||
|
|
||||||
[vars.caddy_log_path]
|
|
||||||
description = "Path to Caddy access log on the remote host"
|
|
||||||
required = false
|
|
||||||
default = "${CADDY_ACCESS_LOG:-/var/log/caddy/access.log}"
|
|
||||||
|
|
||||||
[vars.local_log_path]
|
|
||||||
description = "Local path to store fetched access log"
|
|
||||||
required = false
|
|
||||||
default = "/tmp/caddy-access-log-fetch.log"
|
|
||||||
|
|
||||||
[vars.evidence_dir]
|
|
||||||
description = "Evidence output directory in the ops repo"
|
|
||||||
required = false
|
|
||||||
default = "evidence/engagement"
|
|
||||||
|
|
||||||
# ── Step 1: SSH fetch ────────────────────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "fetch-log"
|
|
||||||
title = "Fetch Caddy access log from remote host via SSH"
|
|
||||||
description = """
|
|
||||||
Fetch today's Caddy access log segment from the remote host using SCP.
|
|
||||||
|
|
||||||
The SSH key is read from the environment (CADDY_SSH_KEY), which is
|
|
||||||
decrypted from secrets/CADDY_SSH_KEY.enc by the edge entrypoint. It is NEVER hardcoded.
|
|
||||||
|
|
||||||
1. Write the SSH key to a temporary file with restricted permissions:
|
|
||||||
_ssh_key_file=$(mktemp)
|
|
||||||
trap 'rm -f "$_ssh_key_file"' EXIT
|
|
||||||
printf '%s\n' "$CADDY_SSH_KEY" > "$_ssh_key_file"
|
|
||||||
chmod 0600 "$_ssh_key_file"
|
|
||||||
|
|
||||||
2. Verify connectivity:
|
|
||||||
ssh -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new \
|
|
||||||
-o ConnectTimeout=10 -o BatchMode=yes \
|
|
||||||
{{caddy_user}}@{{caddy_host}} 'echo ok'
|
|
||||||
|
|
||||||
3. Fetch the access log via scp:
|
|
||||||
scp -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new \
|
|
||||||
-o ConnectTimeout=10 -o BatchMode=yes \
|
|
||||||
"{{caddy_user}}@{{caddy_host}}:{{caddy_log_path}}" \
|
|
||||||
"{{local_log_path}}"
|
|
||||||
|
|
||||||
4. Verify the fetched file is non-empty:
|
|
||||||
if [ ! -s "{{local_log_path}}" ]; then
|
|
||||||
echo "WARNING: fetched access log is empty — site may have no traffic"
|
|
||||||
else
|
|
||||||
echo "Fetched $(wc -l < "{{local_log_path}}") lines from {{caddy_host}}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
5. Clean up the temporary key file:
|
|
||||||
rm -f "$_ssh_key_file"
|
|
||||||
"""
|
|
||||||
|
|
||||||
# ── Step 2: Parse engagement ─────────────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "parse-engagement"
|
|
||||||
title = "Run collect-engagement.sh against the local log copy"
|
|
||||||
description = """
|
|
||||||
Run the engagement parser against the locally fetched access log.
|
|
||||||
|
|
||||||
1. Set CADDY_ACCESS_LOG to point at the local copy so collect-engagement.sh
|
|
||||||
reads from it instead of the default path:
|
|
||||||
export CADDY_ACCESS_LOG="{{local_log_path}}"
|
|
||||||
|
|
||||||
2. Run the parser:
|
|
||||||
bash "$FACTORY_ROOT/site/collect-engagement.sh"
|
|
||||||
|
|
||||||
3. Verify the evidence JSON was written:
|
|
||||||
REPORT_DATE=$(date -u +%Y-%m-%d)
|
|
||||||
EVIDENCE_FILE="${OPS_REPO_ROOT}/{{evidence_dir}}/${REPORT_DATE}.json"
|
|
||||||
if [ -f "$EVIDENCE_FILE" ]; then
|
|
||||||
echo "Evidence written: $EVIDENCE_FILE"
|
|
||||||
jq . "$EVIDENCE_FILE"
|
|
||||||
else
|
|
||||||
echo "ERROR: evidence file not found at $EVIDENCE_FILE"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
4. Clean up the fetched log:
|
|
||||||
rm -f "{{local_log_path}}"
|
|
||||||
"""
|
|
||||||
needs = ["fetch-log"]
|
|
||||||
|
|
||||||
# ── Step 3: Commit evidence ──────────────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "commit-evidence"
|
|
||||||
title = "Commit evidence JSON to ops repo via Forgejo API"
|
|
||||||
description = """
|
|
||||||
Commit the dated evidence JSON to the ops repo so the planner can
|
|
||||||
consume it during gap analysis.
|
|
||||||
|
|
||||||
1. Read the evidence file:
|
|
||||||
REPORT_DATE=$(date -u +%Y-%m-%d)
|
|
||||||
EVIDENCE_FILE="${OPS_REPO_ROOT}/{{evidence_dir}}/${REPORT_DATE}.json"
|
|
||||||
CONTENT=$(base64 -w0 < "$EVIDENCE_FILE")  # -w0: disable line wrapping (GNU base64 wraps at 76 cols by default, which can corrupt the JSON payload)
|
|
||||||
|
|
||||||
2. Check if the file already exists in the ops repo (update vs create):
|
|
||||||
OPS_OWNER="${OPS_FORGE_OWNER:-${FORGE_REPO%%/*}}"
|
|
||||||
OPS_REPO="${OPS_FORGE_REPO:-${PROJECT_NAME:-disinto}-ops}"
|
|
||||||
FILE_PATH="{{evidence_dir}}/${REPORT_DATE}.json"
|
|
||||||
|
|
||||||
EXISTING=$(curl -sf \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
|
||||||
2>/dev/null || echo "")
|
|
||||||
|
|
||||||
3. Create or update the file via Forgejo API:
|
|
||||||
if [ -n "$EXISTING" ] && printf '%s' "$EXISTING" | jq -e '.sha' >/dev/null 2>&1; then
|
|
||||||
# Update existing file
|
|
||||||
SHA=$(printf '%s' "$EXISTING" | jq -r '.sha')
|
|
||||||
curl -sf -X PUT \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
|
||||||
-d "$(jq -nc --arg content "$CONTENT" --arg sha "$SHA" --arg msg "evidence: engagement ${REPORT_DATE}" \
|
|
||||||
'{message: $msg, content: $content, sha: $sha}')"
|
|
||||||
echo "Updated existing evidence file in ops repo"
|
|
||||||
else
|
|
||||||
# Create new file
|
|
||||||
curl -sf -X POST \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
|
||||||
-d "$(jq -nc --arg content "$CONTENT" --arg msg "evidence: engagement ${REPORT_DATE}" \
|
|
||||||
'{message: $msg, content: $content}')"
|
|
||||||
echo "Created evidence file in ops repo"
|
|
||||||
fi
|
|
||||||
|
|
||||||
4. Verify the commit landed:
|
|
||||||
VERIFY=$(curl -sf \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
|
||||||
| jq -r '.name // empty')
|
|
||||||
if [ "$VERIFY" = "${REPORT_DATE}.json" ]; then
|
|
||||||
echo "Evidence committed: ${FILE_PATH}"
|
|
||||||
else
|
|
||||||
echo "ERROR: could not verify evidence commit"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
"""
|
|
||||||
needs = ["parse-engagement"]
|
|
||||||
|
|
@ -178,8 +178,8 @@ log "Tagged disinto/agents:${RELEASE_VERSION}"
|
||||||
|
|
||||||
log "Step 6/6: Restarting agent containers"
|
log "Step 6/6: Restarting agent containers"
|
||||||
|
|
||||||
docker compose stop agents 2>/dev/null || true
|
docker compose stop agents agents-llama 2>/dev/null || true
|
||||||
docker compose up -d agents
|
docker compose up -d agents agents-llama
|
||||||
log "Agent containers restarted"
|
log "Agent containers restarted"
|
||||||
|
|
||||||
# ── Done ─────────────────────────────────────────────────────────────────
|
# ── Done ─────────────────────────────────────────────────────────────────
|
||||||
|
|
|
||||||
|
|
@ -189,10 +189,10 @@ Restart agent containers to use the new image.
|
||||||
- docker compose pull agents
|
- docker compose pull agents
|
||||||
|
|
||||||
2. Stop and remove existing agent containers:
|
2. Stop and remove existing agent containers:
|
||||||
- docker compose down agents
|
- docker compose down agents agents-llama 2>/dev/null || true
|
||||||
|
|
||||||
3. Start agents with new image:
|
3. Start agents with new image:
|
||||||
- docker compose up -d agents
|
- docker compose up -d agents agents-llama
|
||||||
|
|
||||||
4. Wait for containers to be healthy:
|
4. Wait for containers to be healthy:
|
||||||
- for i in {1..30}; do
|
- for i in {1..30}; do
|
||||||
|
|
@ -203,7 +203,7 @@ Restart agent containers to use the new image.
|
||||||
- done
|
- done
|
||||||
|
|
||||||
5. Verify containers are running:
|
5. Verify containers are running:
|
||||||
- docker compose ps agents
|
- docker compose ps agents agents-llama
|
||||||
|
|
||||||
6. Log restart:
|
6. Log restart:
|
||||||
- echo "Restarted agents containers"
|
- echo "Restarted agents containers"
|
||||||
|
|
|
||||||
|
|
@ -1,161 +0,0 @@
|
||||||
# formulas/rent-a-human-caddy-ssh.toml — Provision SSH key for Caddy log collection
|
|
||||||
#
|
|
||||||
# "Rent a Human" — walk the operator through provisioning a purpose-limited
|
|
||||||
# SSH keypair so collect-engagement.sh can fetch Caddy access logs remotely.
|
|
||||||
#
|
|
||||||
# The key uses a `command=` restriction so it can ONLY cat the access log.
|
|
||||||
# No interactive shell, no port forwarding, no agent forwarding.
|
|
||||||
#
|
|
||||||
# Parent vision issue: #426
|
|
||||||
# Sprint: website-observability-wire-up (ops PR #10)
|
|
||||||
# Consumed by: site/collect-engagement.sh (issue #745)
|
|
||||||
|
|
||||||
name = "rent-a-human-caddy-ssh"
|
|
||||||
description = "Provision a purpose-limited SSH keypair for remote Caddy log collection"
|
|
||||||
version = 1
|
|
||||||
|
|
||||||
# ── Step 1: Generate keypair ─────────────────────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "generate-keypair"
|
|
||||||
title = "Generate a dedicated ed25519 keypair"
|
|
||||||
description = """
|
|
||||||
Generate a purpose-limited SSH keypair for Caddy log collection.
|
|
||||||
|
|
||||||
Run on your local machine (NOT the Caddy host):
|
|
||||||
|
|
||||||
```
|
|
||||||
ssh-keygen -t ed25519 -f caddy-collect -N '' -C 'disinto-collect-engagement'
|
|
||||||
```
|
|
||||||
|
|
||||||
This produces two files:
|
|
||||||
- caddy-collect (private key — goes into the vault)
|
|
||||||
- caddy-collect.pub (public key — goes onto the Caddy host)
|
|
||||||
|
|
||||||
Do NOT set a passphrase (-N '') — the factory runs unattended.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# ── Step 2: Install public key on Caddy host ─────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "install-public-key"
|
|
||||||
title = "Install the public key on the Caddy host with command= restriction"
|
|
||||||
needs = ["generate-keypair"]
|
|
||||||
description = """
|
|
||||||
Install the public key on the Caddy host with a strict command= restriction
|
|
||||||
so this key can ONLY read the access log.
|
|
||||||
|
|
||||||
1. SSH into the Caddy host as the user who owns /var/log/caddy/access.log.
|
|
||||||
|
|
||||||
2. Open (or create) ~/.ssh/authorized_keys:
|
|
||||||
mkdir -p ~/.ssh && chmod 700 ~/.ssh
|
|
||||||
nano ~/.ssh/authorized_keys
|
|
||||||
|
|
||||||
3. Add this line (all on ONE line — do not wrap):
|
|
||||||
|
|
||||||
command="cat /var/log/caddy/access.log",no-port-forwarding,no-X11-forwarding,no-agent-forwarding ssh-ed25519 AAAA... disinto-collect-engagement
|
|
||||||
|
|
||||||
Replace "AAAA..." with the contents of caddy-collect.pub.
|
|
||||||
|
|
||||||
To build the line automatically:
|
|
||||||
echo "command=\"cat /var/log/caddy/access.log\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding $(cat caddy-collect.pub)"
|
|
||||||
|
|
||||||
4. Set permissions:
|
|
||||||
chmod 600 ~/.ssh/authorized_keys
|
|
||||||
|
|
||||||
What the restrictions do:
|
|
||||||
- command="cat /var/log/caddy/access.log"
|
|
||||||
Forces this key to only execute `cat /var/log/caddy/access.log`,
|
|
||||||
regardless of what the client requests.
|
|
||||||
- no-port-forwarding — blocks SSH tunnels
|
|
||||||
- no-X11-forwarding — blocks X11
|
|
||||||
- no-agent-forwarding — blocks agent forwarding
|
|
||||||
|
|
||||||
If the access log is at a different path, update the command= restriction
|
|
||||||
AND set CADDY_ACCESS_LOG in the factory environment to match.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# ── Step 3: Add private key to vault secrets ─────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "store-private-key"
|
|
||||||
title = "Add the private key as CADDY_SSH_KEY secret"
|
|
||||||
needs = ["generate-keypair"]
|
|
||||||
description = """
|
|
||||||
Store the private key in the factory's encrypted secrets store.
|
|
||||||
|
|
||||||
1. Add the private key using `disinto secrets add`:
|
|
||||||
|
|
||||||
cat caddy-collect | disinto secrets add CADDY_SSH_KEY
|
|
||||||
|
|
||||||
This encrypts the key with age and stores it as secrets/CADDY_SSH_KEY.enc.
|
|
||||||
|
|
||||||
2. IMPORTANT: After storing, securely delete the local private key file:
|
|
||||||
shred -u caddy-collect 2>/dev/null || rm -f caddy-collect
|
|
||||||
rm -f caddy-collect.pub
|
|
||||||
|
|
||||||
The public key is already installed on the Caddy host; the private key
|
|
||||||
now lives only in secrets/CADDY_SSH_KEY.enc.
|
|
||||||
|
|
||||||
Never commit the private key to any git repository.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# ── Step 4: Configure Caddy host address ─────────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "store-caddy-host"
|
|
||||||
title = "Add the Caddy host details as secrets"
|
|
||||||
needs = ["install-public-key"]
|
|
||||||
description = """
|
|
||||||
Store the Caddy connection details so collect-engagement.sh knows
|
|
||||||
where to SSH.
|
|
||||||
|
|
||||||
1. Add each value using `disinto secrets add`:
|
|
||||||
|
|
||||||
echo 'disinto.ai' | disinto secrets add CADDY_SSH_HOST
|
|
||||||
echo 'debian' | disinto secrets add CADDY_SSH_USER
|
|
||||||
echo '/var/log/caddy/access.log' | disinto secrets add CADDY_ACCESS_LOG
|
|
||||||
|
|
||||||
Replace values with the actual SSH host, user, and log path for your setup.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# ── Step 5: Test the connection ──────────────────────────────────────────────
|
|
||||||
|
|
||||||
[[steps]]
|
|
||||||
id = "test-connection"
|
|
||||||
title = "Verify the SSH key works and returns the access log"
|
|
||||||
needs = ["install-public-key", "store-private-key", "store-caddy-host"]
|
|
||||||
description = """
|
|
||||||
Test the end-to-end connection before the factory tries to use it.
|
|
||||||
|
|
||||||
1. From the factory host (or anywhere with the private key), run:
|
|
||||||
|
|
||||||
ssh -i caddy-collect -o StrictHostKeyChecking=accept-new user@caddy-host
|
|
||||||
|
|
||||||
Expected behavior:
|
|
||||||
- Outputs the contents of /var/log/caddy/access.log
|
|
||||||
- Disconnects immediately (command= restriction forces this)
|
|
||||||
|
|
||||||
If you already shredded the local key, decode it from the vault:
|
|
||||||
echo "$CADDY_SSH_KEY" | base64 -d > /tmp/caddy-collect-test
|
|
||||||
chmod 600 /tmp/caddy-collect-test
|
|
||||||
ssh -i /tmp/caddy-collect-test -o StrictHostKeyChecking=accept-new user@caddy-host
|
|
||||||
rm -f /tmp/caddy-collect-test
|
|
||||||
|
|
||||||
2. Verify the output is Caddy structured JSON (one JSON object per line):
|
|
||||||
ssh -i /tmp/caddy-collect-test user@caddy-host | head -1 | jq .
|
|
||||||
|
|
||||||
You should see fields like: ts, request, status, duration.
|
|
||||||
|
|
||||||
3. If the connection fails:
|
|
||||||
- Permission denied → check authorized_keys format (must be one line)
|
|
||||||
- Connection refused → check sshd is running on the Caddy host
|
|
||||||
- Empty output → check /var/log/caddy/access.log exists and is readable
|
|
||||||
by the SSH user
|
|
||||||
- "jq: error" → Caddy may be using Combined Log Format instead of
|
|
||||||
structured JSON; check Caddy's log configuration
|
|
||||||
|
|
||||||
4. Once verified, the factory's collect-engagement.sh can use this key
|
|
||||||
to fetch logs remotely via:
|
|
||||||
ssh -i <decoded-key-path> "$CADDY_SSH_USER@$CADDY_SSH_HOST"
|
|
||||||
"""
|
|
||||||
|
|
@ -213,7 +213,7 @@ should file a vault item instead of executing directly.
|
||||||
**Exceptions** (do NOT flag these):
|
**Exceptions** (do NOT flag these):
|
||||||
- Code inside `vault/` — the vault system itself is allowed to handle secrets
|
- Code inside `vault/` — the vault system itself is allowed to handle secrets
|
||||||
- References in comments or documentation explaining the architecture
|
- References in comments or documentation explaining the architecture
|
||||||
- `bin/disinto` setup commands that manage `secrets/*.enc` and the `run` subcommand
|
- `bin/disinto` setup commands that manage `.env.vault.enc` and the `run` subcommand
|
||||||
- Local operations (git push to forge, forge API calls with `FORGE_TOKEN`)
|
- Local operations (git push to forge, forge API calls with `FORGE_TOKEN`)
|
||||||
|
|
||||||
## 6. Re-review (if previous review is provided)
|
## 6. Re-review (if previous review is provided)
|
||||||
|
|
|
||||||
|
|
@ -16,14 +16,7 @@
|
||||||
# - Bash creates the ops PR with pitch content
|
# - Bash creates the ops PR with pitch content
|
||||||
# - Bash posts the ACCEPT/REJECT footer comment
|
# - Bash posts the ACCEPT/REJECT footer comment
|
||||||
# Step 3: Sprint PR creation with questions (issue #101) (one PR per pitch)
|
# Step 3: Sprint PR creation with questions (issue #101) (one PR per pitch)
|
||||||
# Step 4: Post-merge sub-issue filing via filer-bot (#764)
|
# Step 4: Answer parsing + sub-issue filing (issue #102)
|
||||||
#
|
|
||||||
# Permission model (#764):
|
|
||||||
# architect-bot: READ-ONLY on project repo (GET issues/PRs/labels for context).
|
|
||||||
# Cannot POST/PUT/PATCH/DELETE any project-repo resource.
|
|
||||||
# Write access ONLY on ops repo (branches, PRs, comments).
|
|
||||||
# filer-bot: issues:write on project repo. Files sub-issues from merged sprint
|
|
||||||
# PRs via ops-filer pipeline. Adds in-progress label to vision issues.
|
|
||||||
#
|
#
|
||||||
# Architecture:
|
# Architecture:
|
||||||
# - Bash script (architect-run.sh) handles ALL state management
|
# - Bash script (architect-run.sh) handles ALL state management
|
||||||
|
|
@ -153,32 +146,15 @@ For each issue in ARCHITECT_TARGET_ISSUES, bash performs:
|
||||||
## Recommendation
|
## Recommendation
|
||||||
<architect's assessment: worth it / defer / alternative approach>
|
<architect's assessment: worth it / defer / alternative approach>
|
||||||
|
|
||||||
## Sub-issues
|
|
||||||
|
|
||||||
<!-- filer:begin -->
|
|
||||||
- id: <kebab-case-id>
|
|
||||||
title: "vision(#N): <concise sub-issue title>"
|
|
||||||
labels: [backlog]
|
|
||||||
depends_on: []
|
|
||||||
body: |
|
|
||||||
## Goal
|
|
||||||
<what this sub-issue accomplishes>
|
|
||||||
## Acceptance criteria
|
|
||||||
- [ ] <criterion>
|
|
||||||
<!-- filer:end -->
|
|
||||||
|
|
||||||
IMPORTANT: Do NOT include design forks or questions yet. The pitch is a go/no-go
|
IMPORTANT: Do NOT include design forks or questions yet. The pitch is a go/no-go
|
||||||
decision for the human. Questions come only after acceptance.
|
decision for the human. Questions come only after acceptance.
|
||||||
The ## Sub-issues block is parsed by the filer-bot pipeline after sprint PR merge.
|
|
||||||
Each sub-issue between filer:begin/end markers becomes a Forgejo issue on the
|
|
||||||
project repo. The filer appends a decomposed-from marker to each body automatically.
|
|
||||||
|
|
||||||
4. Bash creates PR:
|
4. Bash creates PR:
|
||||||
- Create branch: architect/sprint-{pitch-number}
|
- Create branch: architect/sprint-{pitch-number}
|
||||||
- Write sprint spec to sprints/{sprint-slug}.md
|
- Write sprint spec to sprints/{sprint-slug}.md
|
||||||
- Create PR with pitch content as body
|
- Create PR with pitch content as body
|
||||||
- Post footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
- Post footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
||||||
- NOTE: in-progress label is added by filer-bot after sprint PR merge (#764)
|
- Add in-progress label to vision issue
|
||||||
|
|
||||||
Output:
|
Output:
|
||||||
- One PR per vision issue (up to 3 per run)
|
- One PR per vision issue (up to 3 per run)
|
||||||
|
|
@ -209,9 +185,6 @@ This ensures approved PRs don't sit indefinitely without design conversation.
|
||||||
Architecture:
|
Architecture:
|
||||||
- Bash creates PRs during stateless pitch generation (step 2)
|
- Bash creates PRs during stateless pitch generation (step 2)
|
||||||
- Model has no role in PR creation — no Forgejo API access
|
- Model has no role in PR creation — no Forgejo API access
|
||||||
- architect-bot is READ-ONLY on the project repo (#764) — all project-repo
|
|
||||||
writes (sub-issue filing, in-progress label) are handled by filer-bot
|
|
||||||
via the ops-filer pipeline after sprint PR merge
|
|
||||||
- This step describes the PR format for reference
|
- This step describes the PR format for reference
|
||||||
|
|
||||||
PR Format (created by bash):
|
PR Format (created by bash):
|
||||||
|
|
@ -228,29 +201,64 @@ PR Format (created by bash):
|
||||||
- Head: architect/sprint-{pitch-number}
|
- Head: architect/sprint-{pitch-number}
|
||||||
- Footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
- Footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
||||||
|
|
||||||
|
4. Add in-progress label to vision issue:
|
||||||
|
- Look up label ID: GET /repos/{owner}/{repo}/labels
|
||||||
|
- Add label: POST /repos/{owner}/{repo}/issues/{issue_number}/labels
|
||||||
|
|
||||||
After creating all PRs, signal PHASE:done.
|
After creating all PRs, signal PHASE:done.
|
||||||
NOTE: in-progress label on the vision issue is added by filer-bot after sprint PR merge (#764).
|
|
||||||
|
|
||||||
## Forgejo API Reference (ops repo only)
|
## Forgejo API Reference
|
||||||
|
|
||||||
All operations use the ops repo Forgejo API with `Authorization: token ${FORGE_TOKEN}` header.
|
All operations use the Forgejo API with Authorization: token ${FORGE_TOKEN} header.
|
||||||
architect-bot is READ-ONLY on the project repo — cannot POST/PUT/PATCH/DELETE project-repo resources (#764).
|
|
||||||
|
|
||||||
### Create branch (ops repo)
|
### Create branch
|
||||||
```
|
```
|
||||||
POST /repos/{owner}/{repo-ops}/branches
|
POST /repos/{owner}/{repo}/branches
|
||||||
Body: {"new_branch_name": "architect/<sprint-slug>", "old_branch_name": "main"}
|
Body: {"new_branch_name": "architect/<sprint-slug>", "old_branch_name": "main"}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create/update file (ops repo)
|
### Create/update file
|
||||||
```
|
```
|
||||||
PUT /repos/{owner}/{repo-ops}/contents/<path>
|
PUT /repos/{owner}/{repo}/contents/<path>
|
||||||
Body: {"message": "sprint: add <sprint-slug>.md", "content": "<base64-encoded-content>", "branch": "architect/<sprint-slug>"}
|
Body: {"message": "sprint: add <sprint-slug>.md", "content": "<base64-encoded-content>", "branch": "architect/<sprint-slug>"}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create PR (ops repo)
|
### Create PR
|
||||||
```
|
```
|
||||||
POST /repos/{owner}/{repo-ops}/pulls
|
POST /repos/{owner}/{repo}/pulls
|
||||||
|
Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head": "architect/<sprint-slug>", "base": "main"}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important: PR body format**
|
||||||
|
- The body field must contain plain markdown text (the raw content from the model)
|
||||||
|
- Do NOT JSON-encode or escape the body — pass it as a JSON string value
|
||||||
|
- Newlines and markdown formatting (headings, lists, etc.) must be preserved as-is
|
||||||
|
|
||||||
|
### Add label to issue
|
||||||
|
```
|
||||||
|
POST /repos/{owner}/{repo}/issues/{index}/labels
|
||||||
|
Body: {"labels": [<label-id>]}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Forgejo API Reference
|
||||||
|
|
||||||
|
All operations use the Forgejo API with `Authorization: token ${FORGE_TOKEN}` header.
|
||||||
|
|
||||||
|
### Create branch
|
||||||
|
```
|
||||||
|
POST /repos/{owner}/{repo}/branches
|
||||||
|
Body: {"new_branch_name": "architect/<sprint-slug>", "old_branch_name": "main"}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create/update file
|
||||||
|
```
|
||||||
|
PUT /repos/{owner}/{repo}/contents/<path>
|
||||||
|
Body: {"message": "sprint: add <sprint-slug>.md", "content": "<base64-encoded-content>", "branch": "architect/<sprint-slug>"}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create PR
|
||||||
|
```
|
||||||
|
POST /repos/{owner}/{repo}/pulls
|
||||||
Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head": "architect/<sprint-slug>", "base": "main"}
|
Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head": "architect/<sprint-slug>", "base": "main"}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
@ -259,22 +267,30 @@ Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head"
|
||||||
- Do NOT JSON-encode or escape the body — pass it as a JSON string value
|
- Do NOT JSON-encode or escape the body — pass it as a JSON string value
|
||||||
- Newlines and markdown formatting (headings, lists, etc.) must be preserved as-is
|
- Newlines and markdown formatting (headings, lists, etc.) must be preserved as-is
|
||||||
|
|
||||||
### Close PR (ops repo)
|
### Close PR
|
||||||
```
|
```
|
||||||
PATCH /repos/{owner}/{repo-ops}/pulls/{index}
|
PATCH /repos/{owner}/{repo}/pulls/{index}
|
||||||
Body: {"state": "closed"}
|
Body: {"state": "closed"}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Delete branch (ops repo)
|
### Delete branch
|
||||||
```
|
```
|
||||||
DELETE /repos/{owner}/{repo-ops}/git/branches/<branch-name>
|
DELETE /repos/{owner}/{repo}/git/branches/<branch-name>
|
||||||
```
|
```
|
||||||
|
|
||||||
### Read-only on project repo (context gathering)
|
### Get labels (look up label IDs by name)
|
||||||
```
|
```
|
||||||
GET /repos/{owner}/{repo}/issues — list issues
|
GET /repos/{owner}/{repo}/labels
|
||||||
GET /repos/{owner}/{repo}/issues/{number} — read issue details
|
```
|
||||||
GET /repos/{owner}/{repo}/labels — list labels
|
|
||||||
GET /repos/{owner}/{repo}/pulls — list PRs
|
### Add label to issue (for in-progress on vision issue)
|
||||||
|
```
|
||||||
|
POST /repos/{owner}/{repo}/issues/{index}/labels
|
||||||
|
Body: {"labels": [<label-id>]}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Remove label from issue (for in-progress removal on REJECT)
|
||||||
|
```
|
||||||
|
DELETE /repos/{owner}/{repo}/issues/{index}/labels/{label-id}
|
||||||
```
|
```
|
||||||
"""
|
"""
|
||||||
|
|
|
||||||
|
|
@ -177,7 +177,7 @@ DUST (trivial — single-line edit, rename, comment, style, whitespace):
|
||||||
|
|
||||||
VAULT (needs human decision or external resource):
|
VAULT (needs human decision or external resource):
|
||||||
File a vault procurement item using vault_request():
|
File a vault procurement item using vault_request():
|
||||||
source "$(dirname "$0")/../lib/action-vault.sh"
|
source "$(dirname "$0")/../lib/vault.sh"
|
||||||
TOML_CONTENT="# Vault action: <action_id>
|
TOML_CONTENT="# Vault action: <action_id>
|
||||||
context = \"<description of what decision/resource is needed>\"
|
context = \"<description of what decision/resource is needed>\"
|
||||||
unblocks = [\"#NNN\"]
|
unblocks = [\"#NNN\"]
|
||||||
|
|
|
||||||
|
|
@ -243,7 +243,7 @@ needs = ["preflight"]
|
||||||
|
|
||||||
[[steps]]
|
[[steps]]
|
||||||
id = "commit-ops-changes"
|
id = "commit-ops-changes"
|
||||||
title = "Write tree, memory, and journal; commit and push branch"
|
title = "Write tree, memory, and journal; commit and push"
|
||||||
description = """
|
description = """
|
||||||
### 1. Write prerequisite tree
|
### 1. Write prerequisite tree
|
||||||
Write to: $OPS_REPO_ROOT/prerequisites.md
|
Write to: $OPS_REPO_ROOT/prerequisites.md
|
||||||
|
|
@ -256,16 +256,14 @@ If (count - N) >= 5 or planner-memory.md missing, write to:
|
||||||
Include: run counter marker, date, constraint focus, patterns, direction.
|
Include: run counter marker, date, constraint focus, patterns, direction.
|
||||||
Keep under 100 lines. Replace entire file.
|
Keep under 100 lines. Replace entire file.
|
||||||
|
|
||||||
### 3. Commit ops repo changes to the planner branch
|
### 3. Commit ops repo changes
|
||||||
Commit the ops repo changes (prerequisites, memory, vault items) and push the
|
Commit the ops repo changes (prerequisites, memory, vault items):
|
||||||
branch. Do NOT push directly to $PRIMARY_BRANCH — planner-run.sh will create a
|
|
||||||
PR and walk it to merge via review-bot.
|
|
||||||
cd "$OPS_REPO_ROOT"
|
cd "$OPS_REPO_ROOT"
|
||||||
git add prerequisites.md knowledge/planner-memory.md vault/pending/
|
git add prerequisites.md knowledge/planner-memory.md vault/pending/
|
||||||
git add -u
|
git add -u
|
||||||
if ! git diff --cached --quiet; then
|
if ! git diff --cached --quiet; then
|
||||||
git commit -m "chore: planner run $(date -u +%Y-%m-%d)"
|
git commit -m "chore: planner run $(date -u +%Y-%m-%d)"
|
||||||
git push origin HEAD
|
git push origin "$PRIMARY_BRANCH"
|
||||||
fi
|
fi
|
||||||
cd "$PROJECT_REPO_ROOT"
|
cd "$PROJECT_REPO_ROOT"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -125,8 +125,8 @@ For each weakness you identify, choose one:
|
||||||
The prediction explains the theory. The vault PR triggers the proof
|
The prediction explains the theory. The vault PR triggers the proof
|
||||||
after human approval. When the planner runs next, evidence is already there.
|
after human approval. When the planner runs next, evidence is already there.
|
||||||
|
|
||||||
Vault dispatch (requires lib/action-vault.sh):
|
Vault dispatch (requires lib/vault.sh):
|
||||||
source "$PROJECT_REPO_ROOT/lib/action-vault.sh"
|
source "$PROJECT_REPO_ROOT/lib/vault.sh"
|
||||||
|
|
||||||
TOML_CONTENT="id = \"predict-<prediction_number>-<formula>\"
|
TOML_CONTENT="id = \"predict-<prediction_number>-<formula>\"
|
||||||
context = \"Test prediction #<prediction_number>: <theory summary> — focus: <specific test>\"
|
context = \"Test prediction #<prediction_number>: <theory summary> — focus: <specific test>\"
|
||||||
|
|
@ -154,7 +154,7 @@ tea is pre-configured with login "$TEA_LOGIN" and repo "$FORGE_REPO".
|
||||||
--title "<title>" --body "<body>" --labels "prediction/unreviewed"
|
--title "<title>" --body "<body>" --labels "prediction/unreviewed"
|
||||||
|
|
||||||
2. Dispatch formula via vault (if exploiting):
|
2. Dispatch formula via vault (if exploiting):
|
||||||
source "$PROJECT_REPO_ROOT/lib/action-vault.sh"
|
source "$PROJECT_REPO_ROOT/lib/vault.sh"
|
||||||
PR_NUM=$(vault_request "predict-NNN-<formula>" "$TOML_CONTENT")
|
PR_NUM=$(vault_request "predict-NNN-<formula>" "$TOML_CONTENT")
|
||||||
# See EXPLOIT section above for TOML_CONTENT format
|
# See EXPLOIT section above for TOML_CONTENT format
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -29,7 +29,7 @@ and injected into your prompt above. Review them now.
|
||||||
|
|
||||||
1. Read the injected metrics data carefully (System Resources, Docker,
|
1. Read the injected metrics data carefully (System Resources, Docker,
|
||||||
Active Sessions, Phase Files, Stale Phase Cleanup, Lock Files, Agent Logs,
|
Active Sessions, Phase Files, Stale Phase Cleanup, Lock Files, Agent Logs,
|
||||||
CI Pipelines, Open PRs, Issue Status, Stale Worktrees, **Woodpecker Agent Health**).
|
CI Pipelines, Open PRs, Issue Status, Stale Worktrees).
|
||||||
Note: preflight.sh auto-removes PHASE:escalate files for closed issues
|
Note: preflight.sh auto-removes PHASE:escalate files for closed issues
|
||||||
(24h grace period). Check the "Stale Phase Cleanup" section for any
|
(24h grace period). Check the "Stale Phase Cleanup" section for any
|
||||||
files cleaned or in grace period this run.
|
files cleaned or in grace period this run.
|
||||||
|
|
@ -75,10 +75,6 @@ Categorize every finding from the metrics into priority levels.
|
||||||
- Dev/action sessions in PHASE:escalate for > 24h (session timeout)
|
- Dev/action sessions in PHASE:escalate for > 24h (session timeout)
|
||||||
(Note: PHASE:escalate files for closed issues are auto-cleaned by preflight;
|
(Note: PHASE:escalate files for closed issues are auto-cleaned by preflight;
|
||||||
this check covers sessions where the issue is still open)
|
this check covers sessions where the issue is still open)
|
||||||
- **Woodpecker agent unhealthy** — see "Woodpecker Agent Health" section in preflight:
|
|
||||||
- Container not running or in unhealthy state
|
|
||||||
- gRPC errors >= 3 in last 20 minutes
|
|
||||||
- Fast-failure pipelines (duration < 60s) >= 3 in last 15 minutes
|
|
||||||
|
|
||||||
### P3 — Factory degraded
|
### P3 — Factory degraded
|
||||||
- PRs stale: CI finished >20min ago AND no git push to the PR branch since CI completed
|
- PRs stale: CI finished >20min ago AND no git push to the PR branch since CI completed
|
||||||
|
|
@ -104,15 +100,6 @@ For each finding from the health assessment, decide and execute an action.
|
||||||
|
|
||||||
### Auto-fixable (execute these directly)
|
### Auto-fixable (execute these directly)
|
||||||
|
|
||||||
**P2 Woodpecker agent unhealthy:**
|
|
||||||
The supervisor-run.sh script automatically handles WP agent recovery:
|
|
||||||
- Detects unhealthy state via preflight.sh health checks
|
|
||||||
- Restarts container via `docker restart`
|
|
||||||
- Scans for `blocked: ci_exhausted` issues updated in last 30 minutes
|
|
||||||
- Unassigns and removes blocked label from affected issues
|
|
||||||
- Posts recovery comment with infra-flake context
|
|
||||||
- Avoids duplicate restarts via 5-minute cooldown in history file
|
|
||||||
|
|
||||||
**P0 Memory crisis:**
|
**P0 Memory crisis:**
|
||||||
# Kill stale one-shot claude processes (>3h old)
|
# Kill stale one-shot claude processes (>3h old)
|
||||||
pgrep -f "claude -p" --older 10800 2>/dev/null | xargs kill 2>/dev/null || true
|
pgrep -f "claude -p" --older 10800 2>/dev/null | xargs kill 2>/dev/null || true
|
||||||
|
|
@ -261,11 +248,6 @@ Format:
|
||||||
- <what was fixed>
|
- <what was fixed>
|
||||||
(or "No actions needed")
|
(or "No actions needed")
|
||||||
|
|
||||||
### WP Agent Recovery (if applicable)
|
|
||||||
- WP agent restart: <time of restart or "none">
|
|
||||||
- Issues recovered: <count>
|
|
||||||
- Reason: <health check reason or "healthy">
|
|
||||||
|
|
||||||
### Vault items filed
|
### Vault items filed
|
||||||
- vault/pending/<id>.md — <reason>
|
- vault/pending/<id>.md — <reason>
|
||||||
(or "None")
|
(or "None")
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Gardener Agent
|
# Gardener Agent
|
||||||
|
|
||||||
**Role**: Backlog grooming — detect duplicate issues, missing acceptance
|
**Role**: Backlog grooming — detect duplicate issues, missing acceptance
|
||||||
|
|
@ -32,7 +32,7 @@ the gardener runs as part of the polling loop alongside the planner, predictor,
|
||||||
PR, reviewed alongside AGENTS.md changes, executed by gardener-run.sh after merge.
|
PR, reviewed alongside AGENTS.md changes, executed by gardener-run.sh after merge.
|
||||||
|
|
||||||
**Environment variables consumed**:
|
**Environment variables consumed**:
|
||||||
- `FORGE_TOKEN`, `FORGE_GARDENER_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`. `FORGE_TOKEN_OVERRIDE` is exported to `$FORGE_GARDENER_TOKEN` before sourcing env.sh so the gardener-bot identity survives re-sourcing (#762).
|
- `FORGE_TOKEN`, `FORGE_GARDENER_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`
|
||||||
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by gardener-run.sh)
|
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by gardener-run.sh)
|
||||||
|
|
||||||
**Lifecycle**: gardener-run.sh (invoked by polling loop every 6h, `check_active gardener`) →
|
**Lifecycle**: gardener-run.sh (invoked by polling loop every 6h, `check_active gardener`) →
|
||||||
|
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
{"issue":915,"group":"lib/generators.sh","title":"remove no-op sed in generate_compose --build mode","reason":"sed replaces agents: with itself — no behavior change; single-line removal","ts":"2026-04-17T01:04:05Z"}
|
|
||||||
|
|
@ -26,11 +26,10 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
|
||||||
# env.sh from nested shells / claude -p tools (#762, #747)
|
|
||||||
export FORGE_TOKEN_OVERRIDE="${FORGE_GARDENER_TOKEN:-}"
|
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
|
# Use gardener-bot's own Forgejo identity (#747)
|
||||||
|
FORGE_TOKEN="${FORGE_GARDENER_TOKEN:-${FORGE_TOKEN}}"
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
|
||||||
|
|
@ -1 +1,42 @@
|
||||||
[]
|
[
|
||||||
|
{
|
||||||
|
"action": "edit_body",
|
||||||
|
"issue": 737,
|
||||||
|
"body": "Flagged by AI reviewer in PR #736.\n\n## Problem\n\n`close_vision_issue()` posts a completion comment and then closes the issue in two separate API calls. The close calls (lines 587-597 of `architect/architect-run.sh`) both use `|| true`, so a failed `state=closed` PATCH is silently ignored.\n\nThe idempotency guard added in PR #736 checks for the completion comment before proceeding. If the comment was posted successfully in a prior run but the close PATCH failed, the guard will now return 0 on all subsequent runs without retrying the close — the vision issue stays open indefinitely with a misleading \"Vision Issue Completed\" comment.\n\n## Fix\n\nAfter posting the comment, verify the issue is actually closed (check `.state` in the PATCH response or re-fetch). If the close fails, return 1 so the next polling cycle retries. The idempotency guard should also verify the issue state, not just the comment presence.\n\n---\n*Auto-created from AI review*\n\n## Acceptance criteria\n\n- [ ] After posting the completion comment, `close_vision_issue()` verifies `.state == \"closed\"` in the PATCH response or re-fetches the issue; if close fails, returns 1\n- [ ] The idempotency guard checks both comment presence AND `.state == \"closed\"` before returning 0; if the comment exists but the issue is still open, attempts the close again\n- [ ] Running `close_vision_issue` twice on a stuck-open issue (comment posted but state still open) retries the close and does not post a duplicate comment\n\n## Affected files\n\n- `architect/architect-run.sh` — `close_vision_issue()` function (~lines 527-600)\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "add_label",
|
||||||
|
"issue": 737,
|
||||||
|
"label": "backlog"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "edit_body",
|
||||||
|
"issue": 712,
|
||||||
|
"body": "## Goal\n\nLet `disinto-chat` perform scoped write actions against the factory — specifically: trigger a Woodpecker CI run, create a Forgejo issue, create a Forgejo PR — via explicit backend endpoints. The UI surfaces these as buttons the user clicks from a chat turn that proposes an action. The model never holds API tokens directly.\n\n## Why\n\n- #623 lists these escalations as the difference between \"chat that talks about the project\" and \"chat that moves the project forward\".\n- Routing through explicit backend endpoints (instead of giving the sandboxed claude process API tokens) keeps the trust model tight: the *user* authorises each action, not the model.\n\n## Scope\n\n### Files to touch\n\n- `docker/chat/server.{py,go}` — new authenticated endpoints (reuse #708 / #709 session check):\n - `POST /chat/action/ci-run` — body `{repo, branch}` → calls Woodpecker API with `WOODPECKER_TOKEN` (already in `.env` from existing factory setup) to trigger a pipeline.\n - `POST /chat/action/issue-create` — body `{title, body, labels}` → calls Forgejo API `/repos/<owner>/<repo>/issues` with `FORGE_TOKEN`.\n - `POST /chat/action/pr-create` — body `{head, base, title, body}` → calls `/repos/<owner>/<repo>/pulls`.\n - All actions record to #710's NDJSON history as `{role: \"action\", ...}` lines.\n- `docker/chat/ui/index.html` — small HTMX pattern: when claude's response contains a marker like `<action type=\"issue-create\">{...}</action>`, render a clickable button below the message; clicking POSTs to `/chat/action/<type>` with the payload.\n- `lib/generators.sh` chat env: pass `WOODPECKER_TOKEN`, `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OWNER`, `FORGE_REPO`.\n\n### Out of scope\n\n- Destructive actions (branch delete, force push, secret rotation) — deliberately excluded.\n- Multi-step workflows / approval chains.\n- Arbitrary code execution in the chat container (that is what the agents exist for).\n\n## Acceptance\n\n- [ ] A chat turn that emits an `<action 
type=\"issue-create\">{...}</action>` block renders a button; clicking it creates an issue on Forgejo, visible via the API.\n- [ ] CI-trigger action creates a Woodpecker pipeline that can be seen in the CI UI.\n- [ ] PR-create action produces a Forgejo PR with the specified head / base.\n- [ ] All three actions are logged into the #710 history file with role `action` and the response from the API call.\n- [ ] Unauthenticated requests to `/chat/action/*` return 401 (inherits #708 gate).\n\n## Depends on\n\n- #708 (OAuth gate — actions are authorised by the logged-in user).\n- #710 (history — actions need to be logged alongside chat turns).\n\n## Notes\n\n- Forgejo API auth: the factory's `FORGE_TOKEN` is a long-lived admin token. For MVP, reuse it; a follow-up issue can scope it down to per-user Forgejo tokens derived from the OAuth flow.\n- Woodpecker API is at `http://woodpecker:8000/api/...`, reachable via the compose network — no need to go through the edge container.\n- The `<action>` marker is deliberately simple markup the model can emit in its response text. Do not implement tool-calling protocol; do not spin up an MCP server.\n\n## Boundaries for dev-agent\n\n- Do not give the claude subprocess direct API tokens. The chat backend holds them; the model only emits action markers the user clicks.\n- Do not add destructive actions (delete, force-push). Additive only.\n- Do not invent a new markup format beyond `<action type=\"...\">{JSON}</action>`.\n- Parent vision: #623.\n\n## Affected files\n\n- `docker/chat/server.py` (or `server.go`) — new `/chat/action/ci-run`, `/chat/action/issue-create`, `/chat/action/pr-create` endpoints\n- `docker/chat/ui/index.html` — action button rendering from `<action type=\"...\">{...}</action>` markers\n- `lib/generators.sh` — chat service env block: pass `WOODPECKER_TOKEN`, `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OWNER`, `FORGE_REPO`\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "remove_label",
|
||||||
|
"issue": 712,
|
||||||
|
"label": "blocked"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "add_label",
|
||||||
|
"issue": 712,
|
||||||
|
"label": "backlog"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "edit_body",
|
||||||
|
"issue": 707,
|
||||||
|
"body": "## Goal\n\nGive `disinto-chat` its own Claude identity mount so its OAuth refresh races cannot corrupt the factory agents' shared `~/.claude` credentials. Default to a separate `~/.claude-chat/` on the host; support `ANTHROPIC_API_KEY` as a fallback that skips OAuth entirely.\n\n## Why\n\n- #623 root-caused this: Claude Code's internal refresh lock in `~/.claude.lock` operates outside bind-mounted directories, so two containers sharing `~/.claude` can race during token refresh and invalidate each other. The factory has already had OAuth expiry incidents traced to multiple agents sharing credentials.\n- Scoping chat to its own identity dir means chat can be logged in as a different Anthropic account, or pinned to an API key, without touching agent credentials.\n\n## Scope\n\n### Files to touch\n\n- `lib/generators.sh` chat service block (from #705):\n - Replace the throwaway named volume with `${CHAT_CLAUDE_DIR:-${HOME}/.claude-chat}:/home/chat/.claude-chat`.\n - Env: `CLAUDE_CONFIG_DIR=/home/chat/.claude-chat/config`, `CLAUDE_CREDENTIALS_DIR=/home/chat/.claude-chat/config/credentials`.\n - Conditional: if `ANTHROPIC_API_KEY` is set in `.env`, pass it through and **do not** mount `~/.claude-chat` at all (no credentials on disk in that mode).\n- `bin/disinto disinto_init()` — after #620's admin password prompt, add an optional prompt: `Use separate Anthropic identity for chat? (y/N)`. On yes, create `~/.claude-chat/` and invoke `claude login` in a subshell with `CLAUDE_CONFIG_DIR=~/.claude-chat/config`.\n- `lib/claude-config.sh` — factor out the existing `~/.claude` setup logic so a non-default `CLAUDE_CONFIG_DIR` is a first-class parameter. 
If it is already parameterised, just document it; if not, extract a helper `setup_claude_dir <dir>` and have the existing path call it with the default dir.\n- `docker/chat/Dockerfile` — declare `VOLUME /home/chat/.claude-chat`, set owner to the non-root chat user introduced in #706.\n\n### Out of scope\n\n- Cross-session lock coherence for multiple concurrent chat containers (single-chat-container assumption is fine for MVP).\n- Anthropic team / workspace support — single identity is enough.\n\n## Acceptance\n\n- [ ] Fresh `disinto init` with \"use separate chat identity\" answered yes creates `~/.claude-chat/` and logs in successfully.\n- [ ] With `ANTHROPIC_API_KEY=sk-ant-...` set in `.env`, chat starts without any `~/.claude-chat` mount (verified via `docker inspect disinto-chat`) and successfully completes a test prompt.\n- [ ] Running the factory agents AND chat simultaneously for 24h does not produce any OAuth refresh failures on either side (manual soak test — document result in PR).\n- [ ] `CLAUDE_CONFIG_DIR` and `CLAUDE_CREDENTIALS_DIR` inside the chat container resolve to `/home/chat/.claude-chat/config*`, not the shared factory path.\n\n## Depends on\n\n- #705 (chat scaffold).\n- #620 (admin password prompt — same init flow this adds a step to).\n\n## Notes\n\n- The factory's existing shared mount is `/var/lib/disinto/claude-shared` (see `lib/generators.sh:113,327,381,426`). Chat must NOT use this path.\n- `flock(\"${HOME}/.claude/session.lock\")` logic mentioned in #623 is load-bearing, not redundant — do not \"simplify\" it.\n- Prefer the API-key path for anyone running the factory on shared hardware; call this out in README updates.\n\n## Boundaries for dev-agent\n\n- Do not try to make chat share `~/.claude` with the agents \"just for convenience\". The whole point of this chunk is the opposite.\n- Do not add a third claude config dir. 
One for agents, one for chat, done.\n- Do not refactor `lib/claude-config.sh` beyond extracting a parameterised helper if needed.\n- Parent vision: #623.\n\n## Affected files\n\n- `lib/generators.sh` — chat service block: replace throwaway named volume with `${CHAT_CLAUDE_DIR:-${HOME}/.claude-chat}` bind mount; add `CLAUDE_CONFIG_DIR`/`CLAUDE_CREDENTIALS_DIR` env vars; skip mount when `ANTHROPIC_API_KEY` is set\n- `bin/disinto` — `disinto_init()`: add optional prompt for separate Anthropic identity for chat\n- `lib/claude-config.sh` — extract parameterized `setup_claude_dir <dir>` helper\n- `docker/chat/Dockerfile` — declare `VOLUME /home/chat/.claude-chat`, set owner to non-root chat user\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "remove_label",
|
||||||
|
"issue": 707,
|
||||||
|
"label": "blocked"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "add_label",
|
||||||
|
"issue": 707,
|
||||||
|
"label": "backlog"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Shared Helpers (`lib/`)
|
# Shared Helpers (`lib/`)
|
||||||
|
|
||||||
All agents source `lib/env.sh` as their first action. Additional helpers are
|
All agents source `lib/env.sh` as their first action. Additional helpers are
|
||||||
|
|
@ -6,7 +6,7 @@ sourced as needed.
|
||||||
|
|
||||||
| File | What it provides | Sourced by |
|
| File | What it provides | Sourced by |
|
||||||
|---|---|---|
|
|---|---|---|
|
||||||
| `lib/env.sh` | Loads `.env`, sets `FACTORY_ROOT`, exports project config (`FORGE_REPO`, `PROJECT_NAME`, etc.), defines `log()`, `forge_api()`, `forge_api_all()` (paginates all pages; accepts optional second TOKEN parameter, defaults to `$FORGE_TOKEN`; handles invalid/empty JSON responses gracefully — returns empty on parse error instead of crashing), `woodpecker_api()`, `wpdb()`, `memory_guard()` (skips agent if RAM < threshold), `load_secret()` (secret-source abstraction — see below). Auto-loads project TOML if `PROJECT_TOML` is set. Exports per-agent tokens (`FORGE_PLANNER_TOKEN`, `FORGE_GARDENER_TOKEN`, `FORGE_VAULT_TOKEN`, `FORGE_SUPERVISOR_TOKEN`, `FORGE_PREDICTOR_TOKEN`) — each falls back to `$FORGE_TOKEN` if not set. **Vault-only token guard (AD-006)**: `unset GITHUB_TOKEN CLAWHUB_TOKEN` so agents never hold external-action tokens — only the runner container receives them. **Container note**: when `DISINTO_CONTAINER=1`, `.env` is NOT re-sourced — compose already injects env vars (including `FORGE_URL=http://forgejo:3000`) and re-sourcing would clobber them. **Save/restore scope (#364)**: only `FORGE_URL` is preserved across `.env` re-sourcing (compose injects `http://forgejo:3000`, `.env` has `http://localhost:3000`). `FORGE_TOKEN` is NOT preserved so refreshed tokens in `.env` take effect immediately. **Per-agent token override (#762)**: agent run scripts export `FORGE_TOKEN_OVERRIDE=<agent-specific-token>` BEFORE sourcing `env.sh`; `env.sh` applies this override at lines 98-100, ensuring the correct identity survives any re-sourcing of `env.sh` by nested shells or `claude -p` invocations. **Required env var**: `FORGE_PASS` — bot password for git HTTP push (Forgejo 11.x rejects API tokens for `git push`, #361). **Hard preconditions (#674)**: `USER` and `HOME` must be exported by the entrypoint before sourcing. When `PROJECT_TOML` is set, `PROJECT_REPO_ROOT`, `PRIMARY_BRANCH`, and `OPS_REPO_ROOT` must also be set (by entrypoint or TOML). 
**`load_secret NAME [DEFAULT]` (#793)**: backend-agnostic secret resolution. Precedence: (1) `/secrets/<NAME>.env` — Nomad-rendered template, (2) current environment — already set by `.env.enc` / compose, (3) `secrets/<NAME>.enc` — age-encrypted per-key file (decrypted on demand, cached in process env), (4) DEFAULT or empty. Consumers call `$(load_secret GITHUB_TOKEN)` instead of `${GITHUB_TOKEN}` — identical behavior whether secrets come from Docker compose injection or Nomad Vault templates. | Every agent |
|
| `lib/env.sh` | Loads `.env`, sets `FACTORY_ROOT`, exports project config (`FORGE_REPO`, `PROJECT_NAME`, etc.), defines `log()`, `forge_api()`, `forge_api_all()` (paginates all pages; accepts optional second TOKEN parameter, defaults to `$FORGE_TOKEN`; handles invalid/empty JSON responses gracefully — returns empty on parse error instead of crashing), `woodpecker_api()`, `wpdb()`, `memory_guard()` (skips agent if RAM < threshold). Auto-loads project TOML if `PROJECT_TOML` is set. Exports per-agent tokens (`FORGE_PLANNER_TOKEN`, `FORGE_GARDENER_TOKEN`, `FORGE_VAULT_TOKEN`, `FORGE_SUPERVISOR_TOKEN`, `FORGE_PREDICTOR_TOKEN`) — each falls back to `$FORGE_TOKEN` if not set. **Vault-only token guard (AD-006)**: `unset GITHUB_TOKEN CLAWHUB_TOKEN` so agents never hold external-action tokens — only the runner container receives them. **Container note**: when `DISINTO_CONTAINER=1`, `.env` is NOT re-sourced — compose already injects env vars (including `FORGE_URL=http://forgejo:3000`) and re-sourcing would clobber them. **Save/restore scope (#364)**: only `FORGE_URL` is preserved across `.env` re-sourcing (compose injects `http://forgejo:3000`, `.env` has `http://localhost:3000`). `FORGE_TOKEN` is NOT preserved so refreshed tokens in `.env` take effect immediately. **Required env var**: `FORGE_PASS` — bot password for git HTTP push (Forgejo 11.x rejects API tokens for `git push`, #361). **Hard preconditions (#674)**: `USER` and `HOME` must be exported by the entrypoint before sourcing. When `PROJECT_TOML` is set, `PROJECT_REPO_ROOT`, `PRIMARY_BRANCH`, and `OPS_REPO_ROOT` must also be set (by entrypoint or TOML). | Every agent |
|
||||||
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs <pipeline_number> [--step <name>]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. | dev-poll, review-poll, review-pr |
|
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs <pipeline_number> [--step <name>]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. | dev-poll, review-poll, review-pr |
|
||||||
| `lib/ci-debug.sh` | CLI tool for Woodpecker CI: `list`, `status`, `logs`, `failures` subcommands. Not sourced — run directly. | Humans / dev-agent (tool access) |
|
| `lib/ci-debug.sh` | CLI tool for Woodpecker CI: `list`, `status`, `logs`, `failures` subcommands. Not sourced — run directly. | Humans / dev-agent (tool access) |
|
||||||
| `lib/ci-log-reader.py` | Python tool: reads CI logs from Woodpecker SQLite database. `<pipeline_number> [--step <name>]` — returns last 200 lines from failed steps (or specified step). Used by `ci_get_logs()` in ci-helpers.sh. Requires `WOODPECKER_DATA_DIR` (default: /woodpecker-data). | ci-helpers.sh |
|
| `lib/ci-log-reader.py` | Python tool: reads CI logs from Woodpecker SQLite database. `<pipeline_number> [--step <name>]` — returns last 200 lines from failed steps (or specified step). Used by `ci_get_logs()` in ci-helpers.sh. Requires `WOODPECKER_DATA_DIR` (default: /woodpecker-data). | ci-helpers.sh |
|
||||||
|
|
@ -14,7 +14,7 @@ sourced as needed.
|
||||||
| `lib/parse-deps.sh` | Extracts dependency issue numbers from an issue body (stdin → stdout, one number per line). Matches `## Dependencies` / `## Depends on` / `## Blocked by` sections and inline `depends on #N` / `blocked by #N` patterns. Inline scan skips fenced code blocks to prevent false positives from code examples in issue bodies. Not sourced — executed via `bash lib/parse-deps.sh`. | dev-poll |
|
| `lib/parse-deps.sh` | Extracts dependency issue numbers from an issue body (stdin → stdout, one number per line). Matches `## Dependencies` / `## Depends on` / `## Blocked by` sections and inline `depends on #N` / `blocked by #N` patterns. Inline scan skips fenced code blocks to prevent false positives from code examples in issue bodies. Not sourced — executed via `bash lib/parse-deps.sh`. | dev-poll |
|
||||||
| `lib/formula-session.sh` | `acquire_run_lock()`, `load_formula()`, `load_formula_or_profile()`, `build_context_block()`, `ensure_ops_repo()`, `ops_commit_and_push()`, `build_prompt_footer()`, `build_sdk_prompt_footer()`, `formula_worktree_setup()`, `formula_prepare_profile_context()`, `formula_lessons_block()`, `profile_write_journal()`, `profile_load_lessons()`, `ensure_profile_repo()`, `_profile_has_repo()`, `_count_undigested_journals()`, `_profile_digest_journals()`, `_profile_restore_lessons()`, `_profile_commit_and_push()`, `resolve_agent_identity()`, `build_graph_section()`, `build_scratch_instruction()`, `read_scratch_context()`, `cleanup_stale_crashed_worktrees()` — shared helpers for formula-driven polling-loop agents (lock, .profile repo management, prompt assembly, worktree setup). Memory guard is provided by `memory_guard()` in `lib/env.sh` (not duplicated here). `resolve_agent_identity()` — sets `FORGE_TOKEN`, `AGENT_IDENTITY`, `FORGE_REMOTE` from per-agent token env vars and FORGE_URL remote detection. `build_graph_section()` generates the structural-analysis section (runs `lib/build-graph.py`, formats JSON output) — previously duplicated in planner-run.sh and predictor-run.sh, now shared here. `cleanup_stale_crashed_worktrees()` — thin wrapper around `worktree_cleanup_stale()` from `lib/worktree.sh` (kept for backwards compatibility). **Journal digestion guards (#702)**: `_profile_digest_journals()` respects `PROFILE_DIGEST_TIMEOUT` (default 300s) and `PROFILE_DIGEST_MAX_BATCH` (default 5 journals per run); `_profile_restore_lessons()` restores the previous lessons-learned.md on digest failure. | planner-run.sh, predictor-run.sh, gardener-run.sh, supervisor-run.sh, dev-agent.sh |
|
| `lib/formula-session.sh` | `acquire_run_lock()`, `load_formula()`, `load_formula_or_profile()`, `build_context_block()`, `ensure_ops_repo()`, `ops_commit_and_push()`, `build_prompt_footer()`, `build_sdk_prompt_footer()`, `formula_worktree_setup()`, `formula_prepare_profile_context()`, `formula_lessons_block()`, `profile_write_journal()`, `profile_load_lessons()`, `ensure_profile_repo()`, `_profile_has_repo()`, `_count_undigested_journals()`, `_profile_digest_journals()`, `_profile_restore_lessons()`, `_profile_commit_and_push()`, `resolve_agent_identity()`, `build_graph_section()`, `build_scratch_instruction()`, `read_scratch_context()`, `cleanup_stale_crashed_worktrees()` — shared helpers for formula-driven polling-loop agents (lock, .profile repo management, prompt assembly, worktree setup). Memory guard is provided by `memory_guard()` in `lib/env.sh` (not duplicated here). `resolve_agent_identity()` — sets `FORGE_TOKEN`, `AGENT_IDENTITY`, `FORGE_REMOTE` from per-agent token env vars and FORGE_URL remote detection. `build_graph_section()` generates the structural-analysis section (runs `lib/build-graph.py`, formats JSON output) — previously duplicated in planner-run.sh and predictor-run.sh, now shared here. `cleanup_stale_crashed_worktrees()` — thin wrapper around `worktree_cleanup_stale()` from `lib/worktree.sh` (kept for backwards compatibility). **Journal digestion guards (#702)**: `_profile_digest_journals()` respects `PROFILE_DIGEST_TIMEOUT` (default 300s) and `PROFILE_DIGEST_MAX_BATCH` (default 5 journals per run); `_profile_restore_lessons()` restores the previous lessons-learned.md on digest failure. | planner-run.sh, predictor-run.sh, gardener-run.sh, supervisor-run.sh, dev-agent.sh |
|
||||||
| `lib/guard.sh` | `check_active(agent_name)` — reads `$FACTORY_ROOT/state/.{agent_name}-active`; exits 0 (skip) if the file is absent. Factory is off by default — state files must be created to enable each agent. **Logs a message to stderr** when skipping (`[check_active] SKIP: state file not found`), so agent dropout is visible in loop logs. Sourced by dev-poll.sh, review-poll.sh, predictor-run.sh, supervisor-run.sh. | polling-loop entry points |
|
| `lib/guard.sh` | `check_active(agent_name)` — reads `$FACTORY_ROOT/state/.{agent_name}-active`; exits 0 (skip) if the file is absent. Factory is off by default — state files must be created to enable each agent. **Logs a message to stderr** when skipping (`[check_active] SKIP: state file not found`), so agent dropout is visible in loop logs. Sourced by dev-poll.sh, review-poll.sh, predictor-run.sh, supervisor-run.sh. | polling-loop entry points |
|
||||||
| `lib/mirrors.sh` | `mirror_push()` — pushes `$PRIMARY_BRANCH` + tags to all configured mirror remotes (fire-and-forget background pushes). Reads `MIRROR_NAMES` and `MIRROR_*` vars exported by `load-project.sh` from the `[mirrors]` TOML section. Failures are logged but never block the pipeline. `mirror_pull_register(clone_url, owner, repo_name, [interval])` — registers a Forgejo pull mirror via `POST /repos/migrate` with `mirror: true`. Creates the target repo and queues the first sync automatically. Works against empty Forgejo instances — no pre-existing content required. Used for Nomad migration cutover: point at Codeberg source, wait for sync, then proceed with `disinto init`. See [docs/mirror-bootstrap.md](../docs/mirror-bootstrap.md) for the full cutover path. Sourced by dev-poll.sh — called after every successful merge. | dev-poll.sh |
|
| `lib/mirrors.sh` | `mirror_push()` — pushes `$PRIMARY_BRANCH` + tags to all configured mirror remotes (fire-and-forget background pushes). Reads `MIRROR_NAMES` and `MIRROR_*` vars exported by `load-project.sh` from the `[mirrors]` TOML section. Failures are logged but never block the pipeline. Sourced by dev-poll.sh — called after every successful merge. | dev-poll.sh |
|
||||||
| `lib/build-graph.py` | Python tool: parses VISION.md, prerequisites.md (from ops repo), AGENTS.md, formulas/*.toml, evidence/ (from ops repo), and forge issues/labels into a NetworkX DiGraph. Runs structural analyses (orphaned objectives, stale prerequisites, thin evidence, circular deps) and outputs a JSON report. Used by `review-pr.sh` (per-PR changed-file analysis) and `predictor-run.sh` (full-project analysis) to provide structural context to Claude. | review-pr.sh, predictor-run.sh |
|
| `lib/build-graph.py` | Python tool: parses VISION.md, prerequisites.md (from ops repo), AGENTS.md, formulas/*.toml, evidence/ (from ops repo), and forge issues/labels into a NetworkX DiGraph. Runs structural analyses (orphaned objectives, stale prerequisites, thin evidence, circular deps) and outputs a JSON report. Used by `review-pr.sh` (per-PR changed-file analysis) and `predictor-run.sh` (full-project analysis) to provide structural context to Claude. | review-pr.sh, predictor-run.sh |
|
||||||
| `lib/secret-scan.sh` | `scan_for_secrets()` — detects potential secrets (API keys, bearer tokens, private keys, URLs with embedded credentials) in text; returns 1 if secrets found. `redact_secrets()` — replaces detected secret patterns with `[REDACTED]`. | issue-lifecycle.sh |
|
| `lib/secret-scan.sh` | `scan_for_secrets()` — detects potential secrets (API keys, bearer tokens, private keys, URLs with embedded credentials) in text; returns 1 if secrets found. `redact_secrets()` — replaces detected secret patterns with `[REDACTED]`. | issue-lifecycle.sh |
|
||||||
| `lib/stack-lock.sh` | File-based lock protocol for singleton project stack access. `stack_lock_acquire(holder, project)` — polls until free, breaks stale heartbeats (>10 min old), claims lock. `stack_lock_release(project)` — deletes lock file. `stack_lock_check(project)` — inspect current lock state. `stack_lock_heartbeat(project)` — update heartbeat timestamp (callers must call every 2 min while holding). Lock files at `~/data/locks/<project>-stack.lock`. | docker/edge/dispatcher.sh, reproduce formula |
|
| `lib/stack-lock.sh` | File-based lock protocol for singleton project stack access. `stack_lock_acquire(holder, project)` — polls until free, breaks stale heartbeats (>10 min old), claims lock. `stack_lock_release(project)` — deletes lock file. `stack_lock_check(project)` — inspect current lock state. `stack_lock_heartbeat(project)` — update heartbeat timestamp (callers must call every 2 min while holding). Lock files at `~/data/locks/<project>-stack.lock`. | docker/edge/dispatcher.sh, reproduce formula |
|
||||||
|
|
@ -22,17 +22,14 @@ sourced as needed.
|
||||||
| `lib/worktree.sh` | Reusable git worktree management: `worktree_create(path, branch, [base_ref])` — create worktree, checkout base, fetch submodules. `worktree_recover(path, branch, [remote])` — detect existing worktree, reuse if on correct branch (sets `_WORKTREE_REUSED`), otherwise clean and recreate. `worktree_cleanup(path)` — `git worktree remove --force`, clear Claude Code project cache (`~/.claude/projects/` matching path). `worktree_cleanup_stale([max_age_hours])` — scan `/tmp` for orphaned worktrees older than threshold, skip preserved and active tmux worktrees, prune. `worktree_preserve(path, reason)` — mark worktree as preserved for debugging (writes `.worktree-preserved` marker, skipped by stale cleanup). | dev-agent.sh, supervisor-run.sh, planner-run.sh, predictor-run.sh, gardener-run.sh |
|
| `lib/worktree.sh` | Reusable git worktree management: `worktree_create(path, branch, [base_ref])` — create worktree, checkout base, fetch submodules. `worktree_recover(path, branch, [remote])` — detect existing worktree, reuse if on correct branch (sets `_WORKTREE_REUSED`), otherwise clean and recreate. `worktree_cleanup(path)` — `git worktree remove --force`, clear Claude Code project cache (`~/.claude/projects/` matching path). `worktree_cleanup_stale([max_age_hours])` — scan `/tmp` for orphaned worktrees older than threshold, skip preserved and active tmux worktrees, prune. `worktree_preserve(path, reason)` — mark worktree as preserved for debugging (writes `.worktree-preserved` marker, skipped by stale cleanup). | dev-agent.sh, supervisor-run.sh, planner-run.sh, predictor-run.sh, gardener-run.sh |
|
||||||
| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) |
|
| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) |
|
||||||
| `lib/issue-lifecycle.sh` | Reusable issue lifecycle library: `issue_claim()` (add in-progress, remove backlog), `issue_release()` (remove in-progress, add backlog), `issue_block()` (post diagnostic comment with secret redaction, add blocked label), `issue_close()`, `issue_check_deps()` (parse deps, check transitive closure; sets `_ISSUE_BLOCKED_BY`, `_ISSUE_SUGGESTION`), `issue_suggest_next()` (find next unblocked backlog issue; sets `_ISSUE_NEXT`), `issue_post_refusal()` (structured refusal comment with dedup). Label IDs cached in globals on first lookup. Sources `lib/secret-scan.sh`. | dev-agent.sh (future) |
|
| `lib/issue-lifecycle.sh` | Reusable issue lifecycle library: `issue_claim()` (add in-progress, remove backlog), `issue_release()` (remove in-progress, add backlog), `issue_block()` (post diagnostic comment with secret redaction, add blocked label), `issue_close()`, `issue_check_deps()` (parse deps, check transitive closure; sets `_ISSUE_BLOCKED_BY`, `_ISSUE_SUGGESTION`), `issue_suggest_next()` (find next unblocked backlog issue; sets `_ISSUE_NEXT`), `issue_post_refusal()` (structured refusal comment with dedup). Label IDs cached in globals on first lookup. Sources `lib/secret-scan.sh`. | dev-agent.sh (future) |
|
||||||
| `lib/action-vault.sh` | **Vault PR helper** — create vault action PRs on ops repo via Forgejo API (works from containers without SSH). `vault_request <action_id> <toml_content>` validates TOML (using `validate_vault_action` from `action-vault/vault-env.sh`), creates branch `vault/<action-id>`, writes `vault/actions/<action-id>.toml`, creates PR targeting `main` with title `vault: <action-id>` and body from context field, returns PR number. Idempotent: if PR exists, returns existing number. **Low-tier bypass**: if the action's `blast_radius` classifies as `low` (via `action-vault/classify.sh`), `vault_request` calls `_vault_commit_direct()` which commits directly to ops `main` using `FORGE_ADMIN_TOKEN` — no PR, no approval wait. Returns `0` (not a PR number) for direct commits. Requires `FORGE_TOKEN`, `FORGE_ADMIN_TOKEN` (low-tier only), `FORGE_URL`, `FORGE_REPO`, `FORGE_OPS_REPO`. Uses the calling agent's own token (saves/restores `FORGE_TOKEN` around sourcing `vault-env.sh`), so approval workflow respects individual agent identities. | dev-agent (vault actions), future vault dispatcher |
|
| `lib/vault.sh` | **Vault PR helper** — create vault action PRs on ops repo via Forgejo API (works from containers without SSH). `vault_request <action_id> <toml_content>` validates TOML (using `validate_vault_action` from `vault/vault-env.sh`), creates branch `vault/<action-id>`, writes `vault/actions/<action-id>.toml`, creates PR targeting `main` with title `vault: <action-id>` and body from context field, returns PR number. Idempotent: if PR exists, returns existing number. **Low-tier bypass**: if the action's `blast_radius` classifies as `low` (via `vault/classify.sh`), `vault_request` calls `_vault_commit_direct()` which commits directly to ops `main` using `FORGE_ADMIN_TOKEN` — no PR, no approval wait. Returns `0` (not a PR number) for direct commits. Requires `FORGE_TOKEN`, `FORGE_ADMIN_TOKEN` (low-tier only), `FORGE_URL`, `FORGE_REPO`, `FORGE_OPS_REPO`. Uses the calling agent's own token (saves/restores `FORGE_TOKEN` around sourcing `vault-env.sh`), so approval workflow respects individual agent identities. | dev-agent (vault actions), future vault dispatcher |
|
||||||
| `lib/branch-protection.sh` | Branch protection helpers for Forgejo repos. `setup_vault_branch_protection()` — configures admin-only merge protection on main (require 1 approval, restrict merge to admin role, block direct pushes). `setup_profile_branch_protection()` — same protection for `.profile` repos. `verify_branch_protection()` — checks protection is correctly configured. `remove_branch_protection()` — removes protection (cleanup/testing). Handles race condition after initial push: retries with backoff if Forgejo hasn't processed the branch yet. Requires `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OPS_REPO`. | bin/disinto (hire-an-agent) |
|
| `lib/branch-protection.sh` | Branch protection helpers for Forgejo repos. `setup_vault_branch_protection()` — configures admin-only merge protection on main (require 1 approval, restrict merge to admin role, block direct pushes). `setup_profile_branch_protection()` — same protection for `.profile` repos. `verify_branch_protection()` — checks protection is correctly configured. `remove_branch_protection()` — removes protection (cleanup/testing). Handles race condition after initial push: retries with backoff if Forgejo hasn't processed the branch yet. Requires `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OPS_REPO`. | bin/disinto (hire-an-agent) |
|
||||||
| `lib/agent-sdk.sh` | `agent_run([--resume SESSION_ID] [--worktree DIR] PROMPT)` — one-shot `claude -p` invocation with session persistence. Saves session ID to `SID_FILE`, reads it back on resume. `agent_recover_session()` — restore previous session ID from `SID_FILE` on startup. **Nudge guard**: skips nudge injection if the worktree is clean and no push is expected, preventing spurious re-invocations. Callers must define `SID_FILE`, `LOGFILE`, and `log()` before sourcing. **Concurrency**: external `flock` on `session.lock` is gated behind `CLAUDE_EXTERNAL_LOCK=1` (default off). When unset, each container's per-session `CLAUDE_CONFIG_DIR` isolation lets Claude Code's native lockfile handle OAuth refresh — no external serialization needed. Set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old flock wrapper as a rollback mechanism. See [`docs/CLAUDE-AUTH-CONCURRENCY.md`](../docs/CLAUDE-AUTH-CONCURRENCY.md) and AD-002 (#647). | formula-driven agents (dev-agent, planner-run, predictor-run, gardener-run) |
|
| `lib/agent-sdk.sh` | `agent_run([--resume SESSION_ID] [--worktree DIR] PROMPT)` — one-shot `claude -p` invocation with session persistence. Saves session ID to `SID_FILE`, reads it back on resume. `agent_recover_session()` — restore previous session ID from `SID_FILE` on startup. **Nudge guard**: skips nudge injection if the worktree is clean and no push is expected, preventing spurious re-invocations. Callers must define `SID_FILE`, `LOGFILE`, and `log()` before sourcing. **Concurrency**: external `flock` on `session.lock` is gated behind `CLAUDE_EXTERNAL_LOCK=1` (default off). When unset, each container's per-session `CLAUDE_CONFIG_DIR` isolation lets Claude Code's native lockfile handle OAuth refresh — no external serialization needed. Set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old flock wrapper as a rollback mechanism. See [`docs/CLAUDE-AUTH-CONCURRENCY.md`](../docs/CLAUDE-AUTH-CONCURRENCY.md) and AD-002 (#647). | formula-driven agents (dev-agent, planner-run, predictor-run, gardener-run) |
|
||||||
| `lib/forge-setup.sh` | `setup_forge()` — Forgejo instance provisioning: creates admin user, bot accounts, org, repos (code + ops), configures webhooks, sets repo topics. Extracted from `bin/disinto`. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`. **Password storage (#361)**: after creating each bot account, stores its password in `.env` as `FORGE_<BOT>_PASS` (e.g. `FORGE_PASS`, `FORGE_REVIEW_PASS`, etc.) for use by `forge-push.sh`. | bin/disinto (init) |
|
| `lib/forge-setup.sh` | `setup_forge()` — Forgejo instance provisioning: creates admin user, bot accounts, org, repos (code + ops), configures webhooks, sets repo topics. Extracted from `bin/disinto`. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`. **Password storage (#361)**: after creating each bot account, stores its password in `.env` as `FORGE_<BOT>_PASS` (e.g. `FORGE_PASS`, `FORGE_REVIEW_PASS`, etc.) for use by `forge-push.sh`. | bin/disinto (init) |
|
||||||
| `lib/forge-push.sh` | `push_to_forge()` — pushes a local clone to the Forgejo remote and verifies the push. `_assert_forge_push_globals()` validates required env vars before use. Requires `FORGE_URL`, `FORGE_PASS`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. **Auth**: uses `FORGE_PASS` (bot password) for git HTTP push — Forgejo 11.x rejects API tokens for `git push` (#361). | bin/disinto (init) |
|
| `lib/forge-push.sh` | `push_to_forge()` — pushes a local clone to the Forgejo remote and verifies the push. `_assert_forge_push_globals()` validates required env vars before use. Requires `FORGE_URL`, `FORGE_PASS`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. **Auth**: uses `FORGE_PASS` (bot password) for git HTTP push — Forgejo 11.x rejects API tokens for `git push` (#361). | bin/disinto (init) |
|
||||||
| `lib/git-creds.sh` | Shared git credential helper configuration. `configure_git_creds([HOME_DIR] [RUN_AS_CMD])` — writes a static credential helper script and configures git globally to use password-based HTTP auth (Forgejo 11.x rejects API tokens for `git push`, #361). **Retry on cold boot (#741)**: resolves bot username from `FORGE_TOKEN` with 5 retries (exponential backoff 1-5s); fails loudly and returns 1 if Forgejo is unreachable — never falls back to a wrong hardcoded default (exports `BOT_USER` on success). `repair_baked_cred_urls([--as RUN_AS_CMD] DIR ...)` — rewrites any git remote URLs that have credentials baked in to use clean URLs instead; uses `safe.directory` bypass for root-owned repos (#671). Requires `FORGE_PASS`, `FORGE_URL`, `FORGE_TOKEN`. | entrypoints (agents, edge) |
|
| `lib/git-creds.sh` | Shared git credential helper configuration. `configure_git_creds([HOME_DIR] [RUN_AS_CMD])` — writes a static credential helper script and configures git globally to use password-based HTTP auth (Forgejo 11.x rejects API tokens for `git push`, #361). `repair_baked_cred_urls([--as RUN_AS_CMD] DIR ...)` — rewrites any git remote URLs that have credentials baked in to use clean URLs instead; uses `safe.directory` bypass for root-owned repos (#671). Requires `FORGE_PASS`, `FORGE_URL`, `FORGE_TOKEN`. | entrypoints (agents, edge) |
|
||||||
| `lib/ops-setup.sh` | `setup_ops_repo()` — creates ops repo on Forgejo if it doesn't exist, configures bot collaborators, clones/initializes ops repo locally, seeds directory structure (vault, knowledge, evidence, sprints). Evidence subdirectories seeded: engagement/, red-team/, holdout/, evolution/, user-test/. Also seeds sprints/ for architect output. Exports `_ACTUAL_OPS_SLUG`. `migrate_ops_repo(ops_root, [primary_branch])` — idempotent migration helper that seeds missing directories and .gitkeep files on existing ops repos (pre-#407 deployments). | bin/disinto (init) |
|
| `lib/ops-setup.sh` | `setup_ops_repo()` — creates ops repo on Forgejo if it doesn't exist, configures bot collaborators, clones/initializes ops repo locally, seeds directory structure (vault, knowledge, evidence, sprints). Evidence subdirectories seeded: engagement/, red-team/, holdout/, evolution/, user-test/. Also seeds sprints/ for architect output. Exports `_ACTUAL_OPS_SLUG`. `migrate_ops_repo(ops_root, [primary_branch])` — idempotent migration helper that seeds missing directories and .gitkeep files on existing ops repos (pre-#407 deployments). | bin/disinto (init) |
|
||||||
| `lib/ci-setup.sh` | `_install_cron_impl()` — installs crontab entries for bare-metal deployments (compose mode uses polling loop instead). `_create_forgejo_oauth_app()` — generic helper to create an OAuth2 app on Forgejo (shared by Woodpecker and chat). `_create_woodpecker_oauth_impl()` — creates Woodpecker OAuth2 app (thin wrapper). `_create_chat_oauth_impl()` — creates disinto-chat OAuth2 app, writes `CHAT_OAUTH_CLIENT_ID`/`CHAT_OAUTH_CLIENT_SECRET` to `.env` (#708). `_generate_woodpecker_token_impl()` — auto-generates WOODPECKER_TOKEN via OAuth2 flow. `_activate_woodpecker_repo_impl()` — activates repo in Woodpecker. All gated by `_load_ci_context()` which validates required env vars. | bin/disinto (init) |
|
| `lib/ci-setup.sh` | `_install_cron_impl()` — installs crontab entries for bare-metal deployments (compose mode uses polling loop instead). `_create_forgejo_oauth_app()` — generic helper to create an OAuth2 app on Forgejo (shared by Woodpecker and chat). `_create_woodpecker_oauth_impl()` — creates Woodpecker OAuth2 app (thin wrapper). `_create_chat_oauth_impl()` — creates disinto-chat OAuth2 app, writes `CHAT_OAUTH_CLIENT_ID`/`CHAT_OAUTH_CLIENT_SECRET` to `.env` (#708). `_generate_woodpecker_token_impl()` — auto-generates WOODPECKER_TOKEN via OAuth2 flow. `_activate_woodpecker_repo_impl()` — activates repo in Woodpecker. All gated by `_load_ci_context()` which validates required env vars. | bin/disinto (init) |
|
||||||
| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); cost-cap env vars `CHAT_MAX_REQUESTS_PER_HOUR`, `CHAT_MAX_REQUESTS_PER_DAY`, `CHAT_MAX_TOKENS_PER_DAY` (#711); subdomain fallback comment for `EDGE_TUNNEL_FQDN_*` vars (#713); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. 
| bin/disinto (init) |
|
| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); cost-cap env vars `CHAT_MAX_REQUESTS_PER_HOUR`, `CHAT_MAX_REQUESTS_PER_DAY`, `CHAT_MAX_TOKENS_PER_DAY` (#711); subdomain fallback comment for `EDGE_TUNNEL_FQDN_*` vars (#713); all `depends_on` now use `condition: service_healthy/started` instead of bare service names), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) |
|
||||||
| `lib/sprint-filer.sh` | Post-merge sub-issue filer for sprint PRs. Invoked by the `.woodpecker/ops-filer.yml` pipeline after a sprint PR merges to ops repo `main`. Parses `<!-- filer:begin --> ... <!-- filer:end -->` blocks from sprint PR bodies to extract sub-issue definitions, creates them on the project repo using `FORGE_FILER_TOKEN` (narrow-scope `filer-bot` identity with `issues:write` only), adds `in-progress` label to the parent vision issue, and handles vision lifecycle closure when all sub-issues are closed. Uses `filer_api_all()` for paginated fetches. Idempotent: uses `<!-- decomposed-from: #<vision>, sprint: <slug>, id: <id> -->` markers to skip already-filed issues. Requires `FORGE_FILER_TOKEN`, `FORGE_API`, `FORGE_API_BASE`, `FORGE_OPS_REPO`. | `.woodpecker/ops-filer.yml` (CI pipeline on ops repo) |
|
|
||||||
| `lib/hire-agent.sh` | `disinto_hire_an_agent()` — user creation, `.profile` repo setup, formula copying, branch protection, and state marker creation for hiring a new agent. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`, `PROJECT_NAME`. Extracted from `bin/disinto`. | bin/disinto (hire) |
|
| `lib/hire-agent.sh` | `disinto_hire_an_agent()` — user creation, `.profile` repo setup, formula copying, branch protection, and state marker creation for hiring a new agent. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`, `PROJECT_NAME`. Extracted from `bin/disinto`. | bin/disinto (hire) |
|
||||||
| `lib/release.sh` | `disinto_release()` — vault TOML creation, branch setup on ops repo, PR creation, and auto-merge request for a versioned release. `_assert_release_globals()` validates required env vars. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_OPS_REPO`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. Extracted from `bin/disinto`. | bin/disinto (release) |
|
| `lib/release.sh` | `disinto_release()` — vault TOML creation, branch setup on ops repo, PR creation, and auto-merge request for a versioned release. `_assert_release_globals()` validates required env vars. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_OPS_REPO`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. Extracted from `bin/disinto`. | bin/disinto (release) |
|
||||||
| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_get_or_empty(PATH)` — GET /v1/PATH; 200→raw body, 404→empty, else structured error + return 1 (used by sync scripts to distinguish "absent, create" from hard failure without tripping errexit, #881). `hvault_ensure_kv_v2(MOUNT, [LOG_PREFIX])` — idempotent KV v2 mount assertion: enables mount if absent, fails loudly if present as wrong type/version. Extracted from all `vault-seed-*.sh` scripts to eliminate dup-detector violations. Respects `DRY_RUN=1`. `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | `tools/vault-apply-policies.sh`, `tools/vault-apply-roles.sh`, `lib/init/nomad/vault-nomad-auth.sh`, `tools/vault-seed-*.sh` |
|
|
||||||
| `lib/init/nomad/` | Nomad+Vault installer scripts. `cluster-up.sh` — idempotent Step-0 orchestrator that runs all steps in order (installs packages, writes HCL, enables systemd units, unseals Vault); uses `poll_until_healthy()` helper for deduped readiness polling. `install.sh` — installs pinned Nomad+Vault apt packages. `vault-init.sh` — initializes Vault (unseal keys → `/etc/vault.d/`), creates dev-persisted unseal unit. `lib-systemd.sh` — shared systemd unit helpers. `systemd-nomad.sh`, `systemd-vault.sh` — write and enable service units. `vault-nomad-auth.sh` — Step-2 script that enables Vault's JWT auth at path `jwt-nomad`, writes the JWKS/algs config pointing at Nomad's workload-identity signer, delegates role sync to `tools/vault-apply-roles.sh`, installs `/etc/nomad.d/server.hcl`, and SIGHUPs `nomad.service` if the file changed (#881). `wp-oauth-register.sh` — S3.3 script that creates the Woodpecker OAuth2 app in Forgejo and stores `forgejo_client`/`forgejo_secret` in Vault KV v2 at `kv/disinto/shared/woodpecker`; idempotent (skips if app or secrets already present); called by `bin/disinto --with woodpecker`. `deploy.sh` — S4 dependency-ordered Nomad job deploy + health-wait; takes a list of jobspec basenames, submits each to Nomad and polls until healthy before proceeding to the next; supports `--dry-run` and per-job timeout overrides via `JOB_READY_TIMEOUT_<JOBNAME>`; invoked by `bin/disinto --with <svc>` and `cluster-up.sh`. Idempotent: each step checks current state before acting. Sourced and called by `cluster-up.sh`; not sourced by agents. | `bin/disinto init --backend=nomad` |
|
|
||||||
|
|
|
||||||
69
lib/env.sh
69
lib/env.sh
|
|
@ -121,10 +121,9 @@ export FORGE_VAULT_TOKEN="${FORGE_VAULT_TOKEN:-${FORGE_TOKEN}}"
|
||||||
export FORGE_SUPERVISOR_TOKEN="${FORGE_SUPERVISOR_TOKEN:-${FORGE_TOKEN}}"
|
export FORGE_SUPERVISOR_TOKEN="${FORGE_SUPERVISOR_TOKEN:-${FORGE_TOKEN}}"
|
||||||
export FORGE_PREDICTOR_TOKEN="${FORGE_PREDICTOR_TOKEN:-${FORGE_TOKEN}}"
|
export FORGE_PREDICTOR_TOKEN="${FORGE_PREDICTOR_TOKEN:-${FORGE_TOKEN}}"
|
||||||
export FORGE_ARCHITECT_TOKEN="${FORGE_ARCHITECT_TOKEN:-${FORGE_TOKEN}}"
|
export FORGE_ARCHITECT_TOKEN="${FORGE_ARCHITECT_TOKEN:-${FORGE_TOKEN}}"
|
||||||
export FORGE_FILER_TOKEN="${FORGE_FILER_TOKEN:-${FORGE_TOKEN}}"
|
|
||||||
|
|
||||||
# Bot usernames filter
|
# Bot usernames filter
|
||||||
export FORGE_BOT_USERNAMES="${FORGE_BOT_USERNAMES:-dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot,filer-bot}"
|
export FORGE_BOT_USERNAMES="${FORGE_BOT_USERNAMES:-dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot}"
|
||||||
|
|
||||||
# Project config
|
# Project config
|
||||||
export FORGE_REPO="${FORGE_REPO:-}"
|
export FORGE_REPO="${FORGE_REPO:-}"
|
||||||
|
|
@ -158,8 +157,8 @@ export WOODPECKER_SERVER="${WOODPECKER_SERVER:-http://localhost:8000}"
|
||||||
export CLAUDE_TIMEOUT="${CLAUDE_TIMEOUT:-7200}"
|
export CLAUDE_TIMEOUT="${CLAUDE_TIMEOUT:-7200}"
|
||||||
|
|
||||||
# Vault-only token guard (#745): external-action tokens (GITHUB_TOKEN, CLAWHUB_TOKEN)
|
# Vault-only token guard (#745): external-action tokens (GITHUB_TOKEN, CLAWHUB_TOKEN)
|
||||||
# must NEVER be available to agents. They live in secrets/*.enc and are decrypted
|
# must NEVER be available to agents. They live in .env.vault.enc and are injected
|
||||||
# only into the ephemeral runner container at fire time (#777). Unset them here so
|
# only into the ephemeral runner container at fire time. Unset them here so
|
||||||
# even an accidental .env inclusion cannot leak them into agent sessions.
|
# even an accidental .env inclusion cannot leak them into agent sessions.
|
||||||
unset GITHUB_TOKEN 2>/dev/null || true
|
unset GITHUB_TOKEN 2>/dev/null || true
|
||||||
unset CLAWHUB_TOKEN 2>/dev/null || true
|
unset CLAWHUB_TOKEN 2>/dev/null || true
|
||||||
|
|
@ -313,68 +312,6 @@ memory_guard() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# =============================================================================
|
|
||||||
# SECRET LOADING ABSTRACTION
|
|
||||||
# =============================================================================
|
|
||||||
# load_secret NAME [DEFAULT]
|
|
||||||
#
|
|
||||||
# Resolves a secret value using the following precedence:
|
|
||||||
# 1. /secrets/<NAME>.env — Nomad-rendered template (future)
|
|
||||||
# 2. Current environment — already set by .env.enc, compose, etc.
|
|
||||||
# 3. secrets/<NAME>.enc — age-encrypted per-key file (decrypted on demand)
|
|
||||||
# 4. DEFAULT (or empty)
|
|
||||||
#
|
|
||||||
# Prints the resolved value to stdout. Caches age-decrypted values in the
|
|
||||||
# process environment so subsequent calls are free.
|
|
||||||
# =============================================================================
|
|
||||||
load_secret() {
|
|
||||||
local name="$1"
|
|
||||||
local default="${2:-}"
|
|
||||||
|
|
||||||
# 1. Nomad-rendered template (future: Nomad writes /secrets/<NAME>.env)
|
|
||||||
local nomad_path="/secrets/${name}.env"
|
|
||||||
if [ -f "$nomad_path" ]; then
|
|
||||||
# Source into a subshell to extract just the value
|
|
||||||
local _nomad_val
|
|
||||||
_nomad_val=$(
|
|
||||||
set -a
|
|
||||||
# shellcheck source=/dev/null
|
|
||||||
source "$nomad_path"
|
|
||||||
set +a
|
|
||||||
printf '%s' "${!name:-}"
|
|
||||||
)
|
|
||||||
if [ -n "$_nomad_val" ]; then
|
|
||||||
export "$name=$_nomad_val"
|
|
||||||
printf '%s' "$_nomad_val"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# 2. Already in environment (set by .env.enc, compose injection, etc.)
|
|
||||||
if [ -n "${!name:-}" ]; then
|
|
||||||
printf '%s' "${!name}"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# 3. Age-encrypted per-key file: secrets/<NAME>.enc (#777)
|
|
||||||
local _age_key="${HOME}/.config/sops/age/keys.txt"
|
|
||||||
local _enc_path="${FACTORY_ROOT}/secrets/${name}.enc"
|
|
||||||
if [ -f "$_enc_path" ] && [ -f "$_age_key" ] && command -v age &>/dev/null; then
|
|
||||||
local _dec_val
|
|
||||||
if _dec_val=$(age -d -i "$_age_key" "$_enc_path" 2>/dev/null) && [ -n "$_dec_val" ]; then
|
|
||||||
export "$name=$_dec_val"
|
|
||||||
printf '%s' "$_dec_val"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# 4. Default (or empty)
|
|
||||||
if [ -n "$default" ]; then
|
|
||||||
printf '%s' "$default"
|
|
||||||
fi
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Source tea helpers (available when tea binary is installed)
|
# Source tea helpers (available when tea binary is installed)
|
||||||
if command -v tea &>/dev/null; then
|
if command -v tea &>/dev/null; then
|
||||||
# shellcheck source=tea-helpers.sh
|
# shellcheck source=tea-helpers.sh
|
||||||
|
|
|
||||||
|
|
@ -31,9 +31,8 @@ _load_init_context() {
|
||||||
# Execute a command in the Forgejo container (for admin operations)
|
# Execute a command in the Forgejo container (for admin operations)
|
||||||
_forgejo_exec() {
|
_forgejo_exec() {
|
||||||
local use_bare="${DISINTO_BARE:-false}"
|
local use_bare="${DISINTO_BARE:-false}"
|
||||||
local cname="${FORGEJO_CONTAINER_NAME:-disinto-forgejo}"
|
|
||||||
if [ "$use_bare" = true ]; then
|
if [ "$use_bare" = true ]; then
|
||||||
docker exec -u git "$cname" "$@"
|
docker exec -u git disinto-forgejo "$@"
|
||||||
else
|
else
|
||||||
docker compose -f "${FACTORY_ROOT}/docker-compose.yml" exec -T -u git forgejo "$@"
|
docker compose -f "${FACTORY_ROOT}/docker-compose.yml" exec -T -u git forgejo "$@"
|
||||||
fi
|
fi
|
||||||
|
|
@ -95,12 +94,11 @@ setup_forge() {
|
||||||
# Bare-metal mode: standalone docker run
|
# Bare-metal mode: standalone docker run
|
||||||
mkdir -p "${FORGEJO_DATA_DIR}"
|
mkdir -p "${FORGEJO_DATA_DIR}"
|
||||||
|
|
||||||
local cname="${FORGEJO_CONTAINER_NAME:-disinto-forgejo}"
|
if docker ps -a --format '{{.Names}}' | grep -q '^disinto-forgejo$'; then
|
||||||
if docker ps -a --format '{{.Names}}' | grep -q "^${cname}$"; then
|
docker start disinto-forgejo >/dev/null 2>&1 || true
|
||||||
docker start "$cname" >/dev/null 2>&1 || true
|
|
||||||
else
|
else
|
||||||
docker run -d \
|
docker run -d \
|
||||||
--name "$cname" \
|
--name disinto-forgejo \
|
||||||
--restart unless-stopped \
|
--restart unless-stopped \
|
||||||
-p "${forge_port}:3000" \
|
-p "${forge_port}:3000" \
|
||||||
-p 2222:22 \
|
-p 2222:22 \
|
||||||
|
|
@ -212,8 +210,8 @@ setup_forge() {
|
||||||
|
|
||||||
# Create human user (disinto-admin) as site admin if it doesn't exist
|
# Create human user (disinto-admin) as site admin if it doesn't exist
|
||||||
local human_user="disinto-admin"
|
local human_user="disinto-admin"
|
||||||
# human_user == admin_user; reuse admin_pass for basic-auth operations
|
local human_pass
|
||||||
local human_pass="$admin_pass"
|
human_pass="admin-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
||||||
|
|
||||||
if ! curl -sf --max-time 5 -H "Authorization: token ${FORGE_TOKEN:-}" "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
|
if ! curl -sf --max-time 5 -H "Authorization: token ${FORGE_TOKEN:-}" "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
|
||||||
echo "Creating human user: ${human_user}"
|
echo "Creating human user: ${human_user}"
|
||||||
|
|
@ -245,14 +243,6 @@ setup_forge() {
|
||||||
echo "Human user: ${human_user} (already exists)"
|
echo "Human user: ${human_user} (already exists)"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Preserve admin token if already stored in .env (idempotent re-run)
|
|
||||||
local admin_token=""
|
|
||||||
if _token_exists_in_env "FORGE_ADMIN_TOKEN" "$env_file" && [ "$rotate_tokens" = false ]; then
|
|
||||||
admin_token=$(grep '^FORGE_ADMIN_TOKEN=' "$env_file" | head -1 | cut -d= -f2-)
|
|
||||||
[ -n "$admin_token" ] && echo "Admin token: preserved (use --rotate-tokens to force)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$admin_token" ]; then
|
|
||||||
# Delete existing admin token if present (token sha1 is only returned at creation time)
|
# Delete existing admin token if present (token sha1 is only returned at creation time)
|
||||||
local existing_token_id
|
local existing_token_id
|
||||||
existing_token_id=$(curl -sf \
|
existing_token_id=$(curl -sf \
|
||||||
|
|
@ -266,6 +256,7 @@ setup_forge() {
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create admin token (fresh, so sha1 is returned)
|
# Create admin token (fresh, so sha1 is returned)
|
||||||
|
local admin_token
|
||||||
admin_token=$(curl -sf -X POST \
|
admin_token=$(curl -sf -X POST \
|
||||||
-u "${admin_user}:${admin_pass}" \
|
-u "${admin_user}:${admin_pass}" \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
|
|
@ -278,41 +269,23 @@ setup_forge() {
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Store admin token for idempotent re-runs
|
# Get or create human user token
|
||||||
if grep -q '^FORGE_ADMIN_TOKEN=' "$env_file" 2>/dev/null; then
|
|
||||||
sed -i "s|^FORGE_ADMIN_TOKEN=.*|FORGE_ADMIN_TOKEN=${admin_token}|" "$env_file"
|
|
||||||
else
|
|
||||||
printf 'FORGE_ADMIN_TOKEN=%s\n' "$admin_token" >> "$env_file"
|
|
||||||
fi
|
|
||||||
echo "Admin token: generated and saved (FORGE_ADMIN_TOKEN)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Get or create human user token (human_user == admin_user; use admin_pass)
|
|
||||||
local human_token=""
|
local human_token=""
|
||||||
if _token_exists_in_env "HUMAN_TOKEN" "$env_file" && [ "$rotate_tokens" = false ]; then
|
|
||||||
human_token=$(grep '^HUMAN_TOKEN=' "$env_file" | head -1 | cut -d= -f2-)
|
|
||||||
if [ -n "$human_token" ]; then
|
|
||||||
export HUMAN_TOKEN="$human_token"
|
|
||||||
echo " Human token preserved (use --rotate-tokens to force)"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$human_token" ]; then
|
|
||||||
# Delete existing human token if present (token sha1 is only returned at creation time)
|
# Delete existing human token if present (token sha1 is only returned at creation time)
|
||||||
local existing_human_token_id
|
local existing_human_token_id
|
||||||
existing_human_token_id=$(curl -sf \
|
existing_human_token_id=$(curl -sf \
|
||||||
-u "${admin_user}:${admin_pass}" \
|
-u "${human_user}:${human_pass}" \
|
||||||
"${forge_url}/api/v1/users/${human_user}/tokens" 2>/dev/null \
|
"${forge_url}/api/v1/users/${human_user}/tokens" 2>/dev/null \
|
||||||
| jq -r '.[] | select(.name == "disinto-human-token") | .id') || existing_human_token_id=""
|
| jq -r '.[] | select(.name == "disinto-human-token") | .id') || existing_human_token_id=""
|
||||||
if [ -n "$existing_human_token_id" ]; then
|
if [ -n "$existing_human_token_id" ]; then
|
||||||
curl -sf -X DELETE \
|
curl -sf -X DELETE \
|
||||||
-u "${admin_user}:${admin_pass}" \
|
-u "${human_user}:${human_pass}" \
|
||||||
"${forge_url}/api/v1/users/${human_user}/tokens/${existing_human_token_id}" >/dev/null 2>&1 || true
|
"${forge_url}/api/v1/users/${human_user}/tokens/${existing_human_token_id}" >/dev/null 2>&1 || true
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create human token (use admin_pass since human_user == admin_user)
|
# Create human token (fresh, so sha1 is returned)
|
||||||
human_token=$(curl -sf -X POST \
|
human_token=$(curl -sf -X POST \
|
||||||
-u "${admin_user}:${admin_pass}" \
|
-u "${human_user}:${human_pass}" \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
"${forge_url}/api/v1/users/${human_user}/tokens" \
|
"${forge_url}/api/v1/users/${human_user}/tokens" \
|
||||||
-d '{"name":"disinto-human-token","scopes":["all"]}' 2>/dev/null \
|
-d '{"name":"disinto-human-token","scopes":["all"]}' 2>/dev/null \
|
||||||
|
|
@ -326,8 +299,7 @@ setup_forge() {
|
||||||
printf 'HUMAN_TOKEN=%s\n' "$human_token" >> "$env_file"
|
printf 'HUMAN_TOKEN=%s\n' "$human_token" >> "$env_file"
|
||||||
fi
|
fi
|
||||||
export HUMAN_TOKEN="$human_token"
|
export HUMAN_TOKEN="$human_token"
|
||||||
echo " Human token generated and saved (HUMAN_TOKEN)"
|
echo " Human token saved (HUMAN_TOKEN)"
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create bot users and tokens
|
# Create bot users and tokens
|
||||||
|
|
@ -356,6 +328,16 @@ setup_forge() {
|
||||||
[predictor-bot]="FORGE_PREDICTOR_PASS"
|
[predictor-bot]="FORGE_PREDICTOR_PASS"
|
||||||
[architect-bot]="FORGE_ARCHITECT_PASS"
|
[architect-bot]="FORGE_ARCHITECT_PASS"
|
||||||
)
|
)
|
||||||
|
# Llama bot users (local-model agents) — separate from main agents
|
||||||
|
# Each llama agent gets its own Forgejo user, token, and password
|
||||||
|
local -A llama_token_vars=(
|
||||||
|
[dev-qwen]="FORGE_TOKEN_LLAMA"
|
||||||
|
[dev-qwen-nightly]="FORGE_TOKEN_LLAMA_NIGHTLY"
|
||||||
|
)
|
||||||
|
local -A llama_pass_vars=(
|
||||||
|
[dev-qwen]="FORGE_PASS_LLAMA"
|
||||||
|
[dev-qwen-nightly]="FORGE_PASS_LLAMA_NIGHTLY"
|
||||||
|
)
|
||||||
|
|
||||||
local bot_user bot_pass token token_var pass_var
|
local bot_user bot_pass token token_var pass_var
|
||||||
|
|
||||||
|
|
@ -505,12 +487,159 @@ setup_forge() {
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
|
# Create llama bot users and tokens (local-model agents)
|
||||||
|
# These are separate from the main agents and get their own credentials
|
||||||
|
echo ""
|
||||||
|
echo "── Setting up llama bot users ────────────────────────────"
|
||||||
|
|
||||||
|
local llama_user llama_pass llama_token llama_token_var llama_pass_var
|
||||||
|
for llama_user in "${!llama_token_vars[@]}"; do
|
||||||
|
llama_token_var="${llama_token_vars[$llama_user]}"
|
||||||
|
llama_pass_var="${llama_pass_vars[$llama_user]}"
|
||||||
|
|
||||||
|
# Check if token already exists in .env
|
||||||
|
local token_exists=false
|
||||||
|
if _token_exists_in_env "$llama_token_var" "$env_file"; then
|
||||||
|
token_exists=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if password already exists in .env
|
||||||
|
local pass_exists=false
|
||||||
|
if _pass_exists_in_env "$llama_pass_var" "$env_file"; then
|
||||||
|
pass_exists=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if llama bot user exists on Forgejo
|
||||||
|
local llama_user_exists=false
|
||||||
|
if curl -sf --max-time 5 \
|
||||||
|
-H "Authorization: token ${admin_token}" \
|
||||||
|
"${forge_url}/api/v1/users/${llama_user}" >/dev/null 2>&1; then
|
||||||
|
llama_user_exists=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Skip token/password regeneration if both exist in .env and not forcing rotation
|
||||||
|
if [ "$token_exists" = true ] && [ "$pass_exists" = true ] && [ "$rotate_tokens" = false ]; then
|
||||||
|
echo " ${llama_user} token and password preserved (use --rotate-tokens to force)"
|
||||||
|
# Still export the existing token for use within this run
|
||||||
|
local existing_token existing_pass
|
||||||
|
existing_token=$(grep "^${llama_token_var}=" "$env_file" | head -1 | cut -d= -f2-)
|
||||||
|
existing_pass=$(grep "^${llama_pass_var}=" "$env_file" | head -1 | cut -d= -f2-)
|
||||||
|
export "${llama_token_var}=${existing_token}"
|
||||||
|
export "${llama_pass_var}=${existing_pass}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Generate new credentials if:
|
||||||
|
# - Token doesn't exist (first run)
|
||||||
|
# - Password doesn't exist (first run)
|
||||||
|
# - --rotate-tokens flag is set (explicit rotation)
|
||||||
|
if [ "$llama_user_exists" = false ]; then
|
||||||
|
# User doesn't exist - create it
|
||||||
|
llama_pass="llama-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
||||||
|
echo "Creating llama bot user: ${llama_user}"
|
||||||
|
local create_output
|
||||||
|
if ! create_output=$(_forgejo_exec forgejo admin user create \
|
||||||
|
--username "${llama_user}" \
|
||||||
|
--password "${llama_pass}" \
|
||||||
|
--email "${llama_user}@disinto.local" \
|
||||||
|
--must-change-password=false 2>&1); then
|
||||||
|
echo "Error: failed to create llama bot user '${llama_user}':" >&2
|
||||||
|
echo " ${create_output}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
# Forgejo 11.x ignores --must-change-password=false on create;
|
||||||
|
# explicitly clear the flag so basic-auth token creation works.
|
||||||
|
_forgejo_exec forgejo admin user change-password \
|
||||||
|
--username "${llama_user}" \
|
||||||
|
--password "${llama_pass}" \
|
||||||
|
--must-change-password=false
|
||||||
|
|
||||||
|
# Verify llama bot user was actually created
|
||||||
|
if ! curl -sf --max-time 5 \
|
||||||
|
-H "Authorization: token ${admin_token}" \
|
||||||
|
"${forge_url}/api/v1/users/${llama_user}" >/dev/null 2>&1; then
|
||||||
|
echo "Error: llama bot user '${llama_user}' not found after creation" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo " ${llama_user} user created"
|
||||||
|
else
|
||||||
|
# User exists - reset password if needed
|
||||||
|
echo " ${llama_user} user exists"
|
||||||
|
if [ "$rotate_tokens" = true ] || [ "$pass_exists" = false ]; then
|
||||||
|
llama_pass="llama-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
||||||
|
_forgejo_exec forgejo admin user change-password \
|
||||||
|
--username "${llama_user}" \
|
||||||
|
--password "${llama_pass}" \
|
||||||
|
--must-change-password=false || {
|
||||||
|
echo "Error: failed to reset password for existing llama bot user '${llama_user}'" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
echo " ${llama_user} password reset for token generation"
|
||||||
|
else
|
||||||
|
# Password exists, get it from .env
|
||||||
|
llama_pass=$(grep "^${llama_pass_var}=" "$env_file" | head -1 | cut -d= -f2-)
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Generate token via API (basic auth as the llama user)
|
||||||
|
# First, delete any existing tokens to avoid name collision
|
||||||
|
local existing_llama_token_ids
|
||||||
|
existing_llama_token_ids=$(curl -sf \
|
||||||
|
-u "${llama_user}:${llama_pass}" \
|
||||||
|
"${forge_url}/api/v1/users/${llama_user}/tokens" 2>/dev/null \
|
||||||
|
| jq -r '.[].id // empty' 2>/dev/null) || existing_llama_token_ids=""
|
||||||
|
|
||||||
|
# Delete any existing tokens for this user
|
||||||
|
if [ -n "$existing_llama_token_ids" ]; then
|
||||||
|
while IFS= read -r tid; do
|
||||||
|
[ -n "$tid" ] && curl -sf -X DELETE \
|
||||||
|
-u "${llama_user}:${llama_pass}" \
|
||||||
|
"${forge_url}/api/v1/users/${llama_user}/tokens/${tid}" >/dev/null 2>&1 || true
|
||||||
|
done <<< "$existing_llama_token_ids"
|
||||||
|
fi
|
||||||
|
|
||||||
|
llama_token=$(curl -sf -X POST \
|
||||||
|
-u "${llama_user}:${llama_pass}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${forge_url}/api/v1/users/${llama_user}/tokens" \
|
||||||
|
-d "{\"name\":\"disinto-${llama_user}-token\",\"scopes\":[\"all\"]}" 2>/dev/null \
|
||||||
|
| jq -r '.sha1 // empty') || llama_token=""
|
||||||
|
|
||||||
|
if [ -z "$llama_token" ]; then
|
||||||
|
echo "Error: failed to create API token for '${llama_user}'" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Store token in .env under the llama-specific variable name
|
||||||
|
if grep -q "^${llama_token_var}=" "$env_file" 2>/dev/null; then
|
||||||
|
sed -i "s|^${llama_token_var}=.*|${llama_token_var}=${llama_token}|" "$env_file"
|
||||||
|
else
|
||||||
|
printf '%s=%s\n' "$llama_token_var" "$llama_token" >> "$env_file"
|
||||||
|
fi
|
||||||
|
export "${llama_token_var}=${llama_token}"
|
||||||
|
echo " ${llama_user} token generated and saved (${llama_token_var})"
|
||||||
|
|
||||||
|
# Store password in .env for git HTTP push (#361)
|
||||||
|
# Forgejo 11.x API tokens don't work for git push; password auth does.
|
||||||
|
if grep -q "^${llama_pass_var}=" "$env_file" 2>/dev/null; then
|
||||||
|
sed -i "s|^${llama_pass_var}=.*|${llama_pass_var}=${llama_pass}|" "$env_file"
|
||||||
|
else
|
||||||
|
printf '%s=%s\n' "$llama_pass_var" "$llama_pass" >> "$env_file"
|
||||||
|
fi
|
||||||
|
export "${llama_pass_var}=${llama_pass}"
|
||||||
|
echo " ${llama_user} password saved (${llama_pass_var})"
|
||||||
|
done
|
||||||
|
|
||||||
# Create .profile repos for all bot users (if they don't already exist)
|
# Create .profile repos for all bot users (if they don't already exist)
|
||||||
# This runs the same logic as hire-an-agent Step 2-3 for idempotent setup
|
# This runs the same logic as hire-an-agent Step 2-3 for idempotent setup
|
||||||
echo ""
|
echo ""
|
||||||
echo "── Setting up .profile repos ────────────────────────────"
|
echo "── Setting up .profile repos ────────────────────────────"
|
||||||
|
|
||||||
local -a bot_users=(dev-bot review-bot planner-bot gardener-bot vault-bot supervisor-bot predictor-bot architect-bot)
|
local -a bot_users=(dev-bot review-bot planner-bot gardener-bot vault-bot supervisor-bot predictor-bot architect-bot)
|
||||||
|
# Add llama bot users to .profile repo creation
|
||||||
|
for llama_user in "${!llama_token_vars[@]}"; do
|
||||||
|
bot_users+=("$llama_user")
|
||||||
|
done
|
||||||
local bot_user
|
local bot_user
|
||||||
|
|
||||||
for bot_user in "${bot_users[@]}"; do
|
for bot_user in "${bot_users[@]}"; do
|
||||||
|
|
@ -590,7 +719,7 @@ setup_forge() {
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Add all bot users as collaborators with appropriate permissions
|
# Add all bot users as collaborators with appropriate permissions
|
||||||
# dev-bot: write (PR creation via lib/action-vault.sh)
|
# dev-bot: write (PR creation via lib/vault.sh)
|
||||||
# review-bot: read (PR review)
|
# review-bot: read (PR review)
|
||||||
# planner-bot: write (prerequisites.md, memory)
|
# planner-bot: write (prerequisites.md, memory)
|
||||||
# gardener-bot: write (backlog grooming)
|
# gardener-bot: write (backlog grooming)
|
||||||
|
|
@ -618,6 +747,15 @@ setup_forge() {
|
||||||
-d "{\"permission\":\"${bot_perm}\"}" >/dev/null 2>&1 || true
|
-d "{\"permission\":\"${bot_perm}\"}" >/dev/null 2>&1 || true
|
||||||
done
|
done
|
||||||
|
|
||||||
|
# Add llama bot users as write collaborators for local-model agents
|
||||||
|
for llama_user in "${!llama_token_vars[@]}"; do
|
||||||
|
curl -sf -X PUT \
|
||||||
|
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${forge_url}/api/v1/repos/${repo_slug}/collaborators/${llama_user}" \
|
||||||
|
-d '{"permission":"write"}' >/dev/null 2>&1 || true
|
||||||
|
done
|
||||||
|
|
||||||
# Add disinto-admin as admin collaborator
|
# Add disinto-admin as admin collaborator
|
||||||
curl -sf -X PUT \
|
curl -sf -X PUT \
|
||||||
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
||||||
|
|
|
||||||
|
|
@ -819,7 +819,8 @@ build_prompt_footer() {
|
||||||
Base URL: ${FORGE_API}
|
Base URL: ${FORGE_API}
|
||||||
Auth header: -H \"Authorization: token \${FORGE_TOKEN}\"
|
Auth header: -H \"Authorization: token \${FORGE_TOKEN}\"
|
||||||
Read issue: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/issues/{number}' | jq '.body'
|
Read issue: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/issues/{number}' | jq '.body'
|
||||||
List labels: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/labels'${extra_api}
|
Create issue: curl -sf -X POST -H \"Authorization: token \${FORGE_TOKEN}\" -H 'Content-Type: application/json' '${FORGE_API}/issues' -d '{\"title\":\"...\",\"body\":\"...\",\"labels\":[LABEL_ID]}'${extra_api}
|
||||||
|
List labels: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/labels'
|
||||||
NEVER echo or include the actual token value in output — always reference \${FORGE_TOKEN}.
|
NEVER echo or include the actual token value in output — always reference \${FORGE_TOKEN}.
|
||||||
|
|
||||||
## Environment
|
## Environment
|
||||||
|
|
|
||||||
|
|
@ -97,57 +97,29 @@ _generate_local_model_services() {
|
||||||
POLL_INTERVAL) poll_interval_val="$value" ;;
|
POLL_INTERVAL) poll_interval_val="$value" ;;
|
||||||
---)
|
---)
|
||||||
if [ -n "$service_name" ] && [ -n "$base_url" ]; then
|
if [ -n "$service_name" ] && [ -n "$base_url" ]; then
|
||||||
# Per-agent FORGE_TOKEN / FORGE_PASS lookup (#834 Gap 3).
|
|
||||||
# Two hired llama agents must not share the same Forgejo identity,
|
|
||||||
# so we key the env-var lookup by forge_user (which hire-agent.sh
|
|
||||||
# writes as the Forgejo username). Apply the same tr 'a-z-' 'A-Z_'
|
|
||||||
# convention as hire-agent.sh Gap 1 so the names match.
|
|
||||||
#
|
|
||||||
# NOTE (#845): the emitted block has NO `profiles:` key. The
|
|
||||||
# [agents.<name>] TOML entry is already the activation gate —
|
|
||||||
# its presence is what drives emission here. Profile-gating
|
|
||||||
# the service caused `disinto up` (without COMPOSE_PROFILES)
|
|
||||||
# to treat the hired container as an orphan and silently
|
|
||||||
# remove it via --remove-orphans.
|
|
||||||
local user_upper
|
|
||||||
user_upper=$(echo "$forge_user" | tr 'a-z-' 'A-Z_')
|
|
||||||
cat >> "$temp_file" <<EOF
|
cat >> "$temp_file" <<EOF
|
||||||
|
|
||||||
agents-${service_name}:
|
agents-${service_name}:
|
||||||
# Local image ref (#853): registry-less name matches what \`disinto init --build\`
|
|
||||||
# and the legacy agents-llama stanza produce. Paired with build: so hosts without
|
|
||||||
# a pre-built image can rebuild locally; ghcr.io/disinto/agents is not publicly
|
|
||||||
# pullable, and emitting that prefix caused \`docker compose up\` to fail with
|
|
||||||
# \`denied\` on every hired agent.
|
|
||||||
build:
|
build:
|
||||||
context: .
|
context: .
|
||||||
dockerfile: docker/agents/Dockerfile
|
dockerfile: docker/agents/Dockerfile
|
||||||
image: disinto/agents:\${DISINTO_IMAGE_TAG:-latest}
|
|
||||||
# Rebuild on every up (#887): without this, \`docker compose up -d --force-recreate\`
|
|
||||||
# reuses the cached image and silently keeps running stale docker/agents/ code
|
|
||||||
# even after the repo is updated. \`pull_policy: build\` makes Compose rebuild
|
|
||||||
# the image on every up; BuildKit layer cache makes unchanged rebuilds fast.
|
|
||||||
pull_policy: build
|
|
||||||
container_name: disinto-agents-${service_name}
|
container_name: disinto-agents-${service_name}
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
security_opt:
|
security_opt:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
volumes:
|
volumes:
|
||||||
- agents-${service_name}-data:/home/agent/data
|
- agents-${service_name}-data:/home/agent/data
|
||||||
- project-repos-${service_name}:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- \${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:\${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- \${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:\${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- \${CLAUDE_CONFIG_FILE:-\${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
- \${HOME}/.claude.json:/home/agent/.claude.json:ro
|
||||||
- \${AGENT_SSH_DIR:-\${HOME}/.ssh}:/home/agent/.ssh:ro
|
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
||||||
- ./projects:/home/agent/disinto/projects:ro
|
- \${HOME}/.ssh:/home/agent/.ssh:ro
|
||||||
- ./.env:/home/agent/disinto/.env:ro
|
|
||||||
- ./state:/home/agent/disinto/state
|
|
||||||
environment:
|
environment:
|
||||||
FORGE_URL: http://forgejo:3000
|
FORGE_URL: http://forgejo:3000
|
||||||
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
||||||
FACTORY_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
# Use llama-specific credentials if available, otherwise fall back to main FORGE_TOKEN
|
||||||
# Per-agent credentials keyed by forge_user (#834 Gap 3).
|
FORGE_TOKEN: \${FORGE_TOKEN_LLAMA:-\${FORGE_TOKEN:-}}
|
||||||
FORGE_TOKEN: \${FORGE_TOKEN_${user_upper}:-}
|
FORGE_PASS: \${FORGE_PASS_LLAMA:-\${FORGE_PASS:-}}
|
||||||
FORGE_PASS: \${FORGE_PASS_${user_upper}:-}
|
|
||||||
FORGE_REVIEW_TOKEN: \${FORGE_REVIEW_TOKEN:-}
|
FORGE_REVIEW_TOKEN: \${FORGE_REVIEW_TOKEN:-}
|
||||||
FORGE_BOT_USERNAMES: \${FORGE_BOT_USERNAMES:-}
|
FORGE_BOT_USERNAMES: \${FORGE_BOT_USERNAMES:-}
|
||||||
AGENT_ROLES: "${roles}"
|
AGENT_ROLES: "${roles}"
|
||||||
|
|
@ -165,12 +137,11 @@ _generate_local_model_services() {
|
||||||
PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
|
PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
|
||||||
WOODPECKER_DATA_DIR: /woodpecker-data
|
WOODPECKER_DATA_DIR: /woodpecker-data
|
||||||
WOODPECKER_REPO_ID: "${wp_repo_id}"
|
WOODPECKER_REPO_ID: "${wp_repo_id}"
|
||||||
FORGE_BOT_USER_${user_upper}: "${forge_user}"
|
FORGE_BOT_USER_${service_name^^}: "${forge_user}"
|
||||||
POLL_INTERVAL: "${poll_interval_val}"
|
POLL_INTERVAL: "${poll_interval_val}"
|
||||||
GARDENER_INTERVAL: "${GARDENER_INTERVAL:-21600}"
|
GARDENER_INTERVAL: "${GARDENER_INTERVAL:-21600}"
|
||||||
ARCHITECT_INTERVAL: "${ARCHITECT_INTERVAL:-21600}"
|
ARCHITECT_INTERVAL: "${ARCHITECT_INTERVAL:-21600}"
|
||||||
PLANNER_INTERVAL: "${PLANNER_INTERVAL:-43200}"
|
PLANNER_INTERVAL: "${PLANNER_INTERVAL:-43200}"
|
||||||
SUPERVISOR_INTERVAL: "${SUPERVISOR_INTERVAL:-1200}"
|
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
|
@ -178,22 +149,18 @@ _generate_local_model_services() {
|
||||||
condition: service_started
|
condition: service_started
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
profiles: ["agents-${service_name}"]
|
||||||
|
|
||||||
EOF
|
EOF
|
||||||
has_services=true
|
has_services=true
|
||||||
fi
|
fi
|
||||||
# Collect per-agent volume names for later (#834 Gap 4: project-repos
|
# Collect volume name for later
|
||||||
# must be per-agent so concurrent llama devs don't race on
|
local vol_name=" agents-${service_name}-data:"
|
||||||
# /home/agent/repos/_factory or state/.dev-active).
|
|
||||||
local vol_data=" agents-${service_name}-data:"
|
|
||||||
local vol_repos=" project-repos-${service_name}:"
|
|
||||||
if [ -n "$all_vols" ]; then
|
if [ -n "$all_vols" ]; then
|
||||||
all_vols="${all_vols}
|
all_vols="${all_vols}
|
||||||
${vol_data}
|
${vol_name}"
|
||||||
${vol_repos}"
|
|
||||||
else
|
else
|
||||||
all_vols="${vol_data}
|
all_vols="${vol_name}"
|
||||||
${vol_repos}"
|
|
||||||
fi
|
fi
|
||||||
service_name="" base_url="" model="" roles="" api_key="" forge_user="" compact_pct="" poll_interval_val=""
|
service_name="" base_url="" model="" roles="" api_key="" forge_user="" compact_pct="" poll_interval_val=""
|
||||||
;;
|
;;
|
||||||
|
|
@ -250,14 +217,8 @@ for name, config in agents.items():
|
||||||
|
|
||||||
# Add local-model volumes to the volumes section
|
# Add local-model volumes to the volumes section
|
||||||
if [ -n "$all_vols" ]; then
|
if [ -n "$all_vols" ]; then
|
||||||
# Escape embedded newlines as literal \n so sed's s/// replacement
|
|
||||||
# tolerates multi-line $all_vols (needed once >1 local-model agent is
|
|
||||||
# configured — without this, the second agent's volume entry would
|
|
||||||
# unterminate the sed expression).
|
|
||||||
local all_vols_escaped
|
|
||||||
all_vols_escaped=$(printf '%s' "$all_vols" | sed ':a;N;$!ba;s/\n/\\n/g')
|
|
||||||
# Find the volumes section and add the new volumes
|
# Find the volumes section and add the new volumes
|
||||||
sed -i "/^volumes:/{n;:a;n;/^[a-z]/!{s/$/\n$all_vols_escaped/;b};ba}" "$temp_compose"
|
sed -i "/^volumes:/{n;:a;n;/^[a-z]/!{s/$/\n$all_vols/;b};ba}" "$temp_compose"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
mv "$temp_compose" "$compose_file"
|
mv "$temp_compose" "$compose_file"
|
||||||
|
|
@ -272,7 +233,6 @@ for name, config in agents.items():
|
||||||
# to materialize a working stack on a fresh checkout.
|
# to materialize a working stack on a fresh checkout.
|
||||||
_generate_compose_impl() {
|
_generate_compose_impl() {
|
||||||
local forge_port="${1:-3000}"
|
local forge_port="${1:-3000}"
|
||||||
local use_build="${2:-false}"
|
|
||||||
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
||||||
|
|
||||||
# Check if compose file already exists
|
# Check if compose file already exists
|
||||||
|
|
@ -336,7 +296,6 @@ services:
|
||||||
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
||||||
WOODPECKER_DATABASE_DRIVER: sqlite3
|
WOODPECKER_DATABASE_DRIVER: sqlite3
|
||||||
WOODPECKER_DATABASE_DATASOURCE: /var/lib/woodpecker/woodpecker.sqlite
|
WOODPECKER_DATABASE_DATASOURCE: /var/lib/woodpecker/woodpecker.sqlite
|
||||||
WOODPECKER_PLUGINS_PRIVILEGED: ${WOODPECKER_PLUGINS_PRIVILEGED:-plugins/docker}
|
|
||||||
WOODPECKER_ENVIRONMENT: "FORGE_TOKEN:${FORGE_TOKEN}"
|
WOODPECKER_ENVIRONMENT: "FORGE_TOKEN:${FORGE_TOKEN}"
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
|
|
@ -359,19 +318,15 @@ services:
|
||||||
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
||||||
WOODPECKER_GRPC_SECURE: "false"
|
WOODPECKER_GRPC_SECURE: "false"
|
||||||
WOODPECKER_HEALTHCHECK_ADDR: ":3333"
|
WOODPECKER_HEALTHCHECK_ADDR: ":3333"
|
||||||
WOODPECKER_BACKEND_DOCKER_NETWORK: ${WOODPECKER_CI_NETWORK:-disinto_disinto-net}
|
WOODPECKER_BACKEND_DOCKER_NETWORK: disinto_disinto-net
|
||||||
WOODPECKER_MAX_WORKFLOWS: 1
|
WOODPECKER_MAX_WORKFLOWS: 1
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3333/healthz"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 15s
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- woodpecker
|
- woodpecker
|
||||||
|
|
||||||
agents:
|
agents:
|
||||||
image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: docker/agents/Dockerfile
|
||||||
container_name: disinto-agents
|
container_name: disinto-agents
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
security_opt:
|
security_opt:
|
||||||
|
|
@ -380,13 +335,11 @@ services:
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
||||||
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
||||||
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
||||||
|
- ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
|
||||||
- woodpecker-data:/woodpecker-data:ro
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
- ./projects:/home/agent/disinto/projects:ro
|
|
||||||
- ./.env:/home/agent/disinto/.env:ro
|
|
||||||
- ./state:/home/agent/disinto/state
|
|
||||||
environment:
|
environment:
|
||||||
FORGE_URL: http://forgejo:3000
|
FORGE_URL: http://forgejo:3000
|
||||||
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
||||||
|
|
@ -418,14 +371,8 @@ services:
|
||||||
PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200}
|
PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200}
|
||||||
# IMPORTANT: agents get explicit environment variables (forge tokens, CI tokens, config).
|
# IMPORTANT: agents get explicit environment variables (forge tokens, CI tokens, config).
|
||||||
# Vault-only secrets (GITHUB_TOKEN, CLAWHUB_TOKEN, deploy keys) live in
|
# Vault-only secrets (GITHUB_TOKEN, CLAWHUB_TOKEN, deploy keys) live in
|
||||||
# secrets/*.enc and are NEVER injected here — only the runner
|
# .env.vault.enc and are NEVER injected here — only the runner
|
||||||
# container receives them at fire time (AD-006, #745, #777).
|
# container receives them at fire time (AD-006, #745).
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
|
||||||
interval: 60s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 30s
|
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
|
@ -434,13 +381,10 @@ services:
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
COMPOSEEOF
|
|
||||||
|
|
||||||
# Resume the rest of the compose file (runner onward)
|
|
||||||
cat >> "$compose_file" <<'COMPOSEEOF'
|
|
||||||
|
|
||||||
runner:
|
runner:
|
||||||
image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: docker/agents/Dockerfile
|
||||||
profiles: ["vault"]
|
profiles: ["vault"]
|
||||||
security_opt:
|
security_opt:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
|
|
@ -461,9 +405,8 @@ COMPOSEEOF
|
||||||
# Edge proxy — reverse proxy to Forgejo, Woodpecker, and staging
|
# Edge proxy — reverse proxy to Forgejo, Woodpecker, and staging
|
||||||
# Serves on ports 80/443, routes based on path
|
# Serves on ports 80/443, routes based on path
|
||||||
edge:
|
edge:
|
||||||
image: ghcr.io/disinto/edge:${DISINTO_IMAGE_TAG:-latest}
|
build: ./docker/edge
|
||||||
container_name: disinto-edge
|
container_name: disinto-edge
|
||||||
restart: unless-stopped
|
|
||||||
security_opt:
|
security_opt:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
ports:
|
ports:
|
||||||
|
|
@ -498,13 +441,7 @@ COMPOSEEOF
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
- ./secrets/tunnel_key:/run/secrets/tunnel_key:ro
|
- ./secrets/tunnel_key:/run/secrets/tunnel_key:ro
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "curl", "-fsS", "http://localhost:2019/config/"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 15s
|
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
|
@ -522,12 +459,6 @@ COMPOSEEOF
|
||||||
command: ["caddy", "file-server", "--root", "/srv/site"]
|
command: ["caddy", "file-server", "--root", "/srv/site"]
|
||||||
security_opt:
|
security_opt:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "wget", "-q", "--spider", "http://localhost:2019/config/"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 3
|
|
||||||
start_period: 10s
|
|
||||||
volumes:
|
volumes:
|
||||||
- ./docker:/srv/site:ro
|
- ./docker:/srv/site:ro
|
||||||
networks:
|
networks:
|
||||||
|
|
@ -568,7 +499,7 @@ COMPOSEEOF
|
||||||
memswap_limit: 512m
|
memswap_limit: 512m
|
||||||
volumes:
|
volumes:
|
||||||
# Mount claude binary from host (same as agents)
|
# Mount claude binary from host (same as agents)
|
||||||
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
||||||
# Throwaway named volume for chat config (isolated from host ~/.claude)
|
# Throwaway named volume for chat config (isolated from host ~/.claude)
|
||||||
- chat-config:/var/chat/config
|
- chat-config:/var/chat/config
|
||||||
# Chat history persistence: per-user NDJSON files on bind-mounted host volume
|
# Chat history persistence: per-user NDJSON files on bind-mounted host volume
|
||||||
|
|
@ -587,12 +518,12 @@ COMPOSEEOF
|
||||||
CHAT_MAX_REQUESTS_PER_HOUR: ${CHAT_MAX_REQUESTS_PER_HOUR:-60}
|
CHAT_MAX_REQUESTS_PER_HOUR: ${CHAT_MAX_REQUESTS_PER_HOUR:-60}
|
||||||
CHAT_MAX_REQUESTS_PER_DAY: ${CHAT_MAX_REQUESTS_PER_DAY:-500}
|
CHAT_MAX_REQUESTS_PER_DAY: ${CHAT_MAX_REQUESTS_PER_DAY:-500}
|
||||||
CHAT_MAX_TOKENS_PER_DAY: ${CHAT_MAX_TOKENS_PER_DAY:-1000000}
|
CHAT_MAX_TOKENS_PER_DAY: ${CHAT_MAX_TOKENS_PER_DAY:-1000000}
|
||||||
healthcheck:
|
# Action endpoints (#712)
|
||||||
test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"]
|
WOODPECKER_TOKEN: ${WOODPECKER_TOKEN:-}
|
||||||
interval: 30s
|
WOODPECKER_URL: http://woodpecker:8000
|
||||||
timeout: 5s
|
FORGE_TOKEN: ${FORGE_TOKEN:-}
|
||||||
retries: 3
|
FORGE_OWNER: ${FORGE_OWNER:-}
|
||||||
start_period: 10s
|
FORGE_REPO: ${FORGE_REPO:-}
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
|
|
@ -631,35 +562,20 @@ COMPOSEEOF
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Append local-model agent services if any are configured
|
# Append local-model agent services if any are configured
|
||||||
|
# (must run before CLAUDE_BIN_PLACEHOLDER substitution so the placeholder
|
||||||
|
# in local-model services is also resolved)
|
||||||
_generate_local_model_services "$compose_file"
|
_generate_local_model_services "$compose_file"
|
||||||
|
|
||||||
# Resolve the Claude CLI binary path and persist as CLAUDE_BIN_DIR in .env.
|
# Patch the Claude CLI binary path — resolve from host PATH at init time.
|
||||||
# Only used by reproduce and edge services which still use host-mounted CLI.
|
|
||||||
local claude_bin
|
local claude_bin
|
||||||
claude_bin="$(command -v claude 2>/dev/null || true)"
|
claude_bin="$(command -v claude 2>/dev/null || true)"
|
||||||
if [ -n "$claude_bin" ]; then
|
if [ -n "$claude_bin" ]; then
|
||||||
|
# Resolve symlinks to get the real binary path
|
||||||
claude_bin="$(readlink -f "$claude_bin")"
|
claude_bin="$(readlink -f "$claude_bin")"
|
||||||
|
sed -i "s|CLAUDE_BIN_PLACEHOLDER|${claude_bin}|g" "$compose_file"
|
||||||
else
|
else
|
||||||
echo "Warning: claude CLI not found in PATH — reproduce/edge services will fail to start" >&2
|
echo "Warning: claude CLI not found in PATH — update docker-compose.yml volumes manually" >&2
|
||||||
claude_bin="/usr/local/bin/claude"
|
sed -i "s|CLAUDE_BIN_PLACEHOLDER|/usr/local/bin/claude|g" "$compose_file"
|
||||||
fi
|
|
||||||
# Persist CLAUDE_BIN_DIR into .env so docker-compose can resolve it.
|
|
||||||
local env_file="${FACTORY_ROOT}/.env"
|
|
||||||
if [ -f "$env_file" ]; then
|
|
||||||
if grep -q "^CLAUDE_BIN_DIR=" "$env_file" 2>/dev/null; then
|
|
||||||
sed -i "s|^CLAUDE_BIN_DIR=.*|CLAUDE_BIN_DIR=${claude_bin}|" "$env_file"
|
|
||||||
else
|
|
||||||
printf 'CLAUDE_BIN_DIR=%s\n' "$claude_bin" >> "$env_file"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
printf 'CLAUDE_BIN_DIR=%s\n' "$claude_bin" > "$env_file"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# In build mode, replace image: with build: for locally-built images
|
|
||||||
if [ "$use_build" = true ]; then
|
|
||||||
sed -i 's|^\( agents:\)|\1|' "$compose_file"
|
|
||||||
sed -i '/^ image: ghcr\.io\/disinto\/agents:/{s|image: ghcr\.io/disinto/agents:.*|build:\n context: .\n dockerfile: docker/agents/Dockerfile\n pull_policy: build|}' "$compose_file"
|
|
||||||
sed -i '/^ image: ghcr\.io\/disinto\/edge:/{s|image: ghcr\.io/disinto/edge:.*|build: ./docker/edge\n pull_policy: build|}' "$compose_file"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Created: ${compose_file}"
|
echo "Created: ${compose_file}"
|
||||||
|
|
@ -678,11 +594,7 @@ _generate_agent_docker_impl() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# Generate docker/Caddyfile for the edge proxy.
|
# Generate docker/Caddyfile template for edge proxy.
|
||||||
# **CANONICAL SOURCE**: This generator is the single source of truth for the Caddyfile.
|
|
||||||
# Output path: ${FACTORY_ROOT}/docker/Caddyfile (gitignored — generated artifact).
|
|
||||||
# The edge compose service mounts this path as /etc/caddy/Caddyfile.
|
|
||||||
# On a fresh clone, `disinto init` calls generate_caddyfile before first `disinto up`.
|
|
||||||
_generate_caddyfile_impl() {
|
_generate_caddyfile_impl() {
|
||||||
local docker_dir="${FACTORY_ROOT}/docker"
|
local docker_dir="${FACTORY_ROOT}/docker"
|
||||||
local caddyfile="${docker_dir}/Caddyfile"
|
local caddyfile="${docker_dir}/Caddyfile"
|
||||||
|
|
|
||||||
|
|
@ -35,35 +35,13 @@ configure_git_creds() {
|
||||||
forge_host=$(printf '%s' "$FORGE_URL" | sed 's|https\?://||; s|/.*||')
|
forge_host=$(printf '%s' "$FORGE_URL" | sed 's|https\?://||; s|/.*||')
|
||||||
forge_proto=$(printf '%s' "$FORGE_URL" | sed 's|://.*||')
|
forge_proto=$(printf '%s' "$FORGE_URL" | sed 's|://.*||')
|
||||||
|
|
||||||
local log_fn="${_GIT_CREDS_LOG_FN:-echo}"
|
# Determine the bot username from FORGE_TOKEN identity (or default to dev-bot)
|
||||||
|
|
||||||
# Determine the bot username from FORGE_TOKEN identity with retry/backoff.
|
|
||||||
# Never fall back to a hardcoded default — a wrong username paired with the
|
|
||||||
# real password produces a cryptic 401 that's much harder to diagnose than
|
|
||||||
# a missing credential helper (#741).
|
|
||||||
local bot_user=""
|
local bot_user=""
|
||||||
if [ -n "${FORGE_TOKEN:-}" ]; then
|
if [ -n "${FORGE_TOKEN:-}" ]; then
|
||||||
local attempt
|
bot_user=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
for attempt in 1 2 3 4 5; do
|
|
||||||
bot_user=$(curl -sf --max-time 5 -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_URL}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || bot_user=""
|
"${FORGE_URL}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || bot_user=""
|
||||||
if [ -n "$bot_user" ]; then
|
|
||||||
break
|
|
||||||
fi
|
fi
|
||||||
$log_fn "WARNING: Forgejo not reachable (attempt ${attempt}/5) — retrying in ${attempt}s"
|
bot_user="${bot_user:-dev-bot}"
|
||||||
sleep "$attempt"
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$bot_user" ]; then
|
|
||||||
$log_fn "ERROR: Could not determine bot username from FORGE_TOKEN after 5 attempts — credential helper NOT configured"
|
|
||||||
$log_fn "ERROR: git push will fail until this is resolved. Restart the container after Forgejo is healthy."
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Export BOT_USER so downstream functions (e.g. configure_git_identity) can
|
|
||||||
# reuse the resolved value without a redundant API call.
|
|
||||||
export BOT_USER="$bot_user"
|
|
||||||
|
|
||||||
local helper_path="${home_dir}/.git-credentials-helper"
|
local helper_path="${home_dir}/.git-credentials-helper"
|
||||||
|
|
||||||
|
|
@ -99,17 +77,6 @@ CREDEOF
|
||||||
else
|
else
|
||||||
git config --global --add safe.directory '*'
|
git config --global --add safe.directory '*'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Verify the credential helper actually authenticates (#741).
|
|
||||||
# A helper that was written with a valid username but a mismatched password
|
|
||||||
# would silently 401 on every push — catch it now.
|
|
||||||
if ! curl -sf --max-time 5 -u "${bot_user}:${FORGE_PASS}" \
|
|
||||||
"${FORGE_URL}/api/v1/user" >/dev/null 2>&1; then
|
|
||||||
$log_fn "ERROR: credential helper verification failed — ${bot_user}:FORGE_PASS rejected by Forgejo"
|
|
||||||
rm -f "$helper_path"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
$log_fn "Git credential helper verified: ${bot_user}@${forge_host}"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# repair_baked_cred_urls [--as RUN_AS_CMD] DIR [DIR ...]
|
# repair_baked_cred_urls [--as RUN_AS_CMD] DIR [DIR ...]
|
||||||
|
|
|
||||||
|
|
@ -30,29 +30,6 @@ disinto_hire_an_agent() {
|
||||||
echo "Usage: disinto hire-an-agent <agent-name> <role> [--formula <path>] [--local-model <url>] [--model <name>] [--poll-interval <seconds>]" >&2
|
echo "Usage: disinto hire-an-agent <agent-name> <role> [--formula <path>] [--local-model <url>] [--model <name>] [--poll-interval <seconds>]" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Validate agent name before any side effects (Forgejo user creation, TOML
|
|
||||||
# write, token issuance). The name flows through several systems that have
|
|
||||||
# stricter rules than the raw TOML spec:
|
|
||||||
# - load-project.sh emits shell vars keyed by the name (dashes are mapped
|
|
||||||
# to underscores via tr 'a-z-' 'A-Z_')
|
|
||||||
# - generators.sh emits a docker-compose service name `agents-<name>` and
|
|
||||||
# uppercases it for env var keys (#852 tracks the `^^` bug; we keep the
|
|
||||||
# grammar tight here so that fix can happen without re-validation)
|
|
||||||
# - Forgejo usernames are lowercase alnum + dash
|
|
||||||
# Constraint: start with a lowercase letter, contain only [a-z0-9-], end
|
|
||||||
# with a lowercase letter or digit (no trailing dash), no consecutive
|
|
||||||
# dashes. Rejecting at hire-time prevents unparseable TOML sections like
|
|
||||||
# [agents.dev-qwen2] from landing on disk and crashing load-project.sh on
|
|
||||||
# the next `disinto up` (#862).
|
|
||||||
if ! [[ "$agent_name" =~ ^[a-z]([a-z0-9]|-[a-z0-9])*$ ]]; then
|
|
||||||
echo "Error: invalid agent name '${agent_name}'" >&2
|
|
||||||
echo " Agent names must match: ^[a-z]([a-z0-9]|-[a-z0-9])*$" >&2
|
|
||||||
echo " (lowercase letters/digits/single dashes, starts with letter, ends with alphanumeric)" >&2
|
|
||||||
echo " Examples: dev, dev-qwen2, review-qwen, planner" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
shift 2
|
shift 2
|
||||||
|
|
||||||
# Parse flags
|
# Parse flags
|
||||||
|
|
@ -190,14 +167,10 @@ disinto_hire_an_agent() {
|
||||||
echo ""
|
echo ""
|
||||||
echo "Step 1.5: Generating Forge token for '${agent_name}'..."
|
echo "Step 1.5: Generating Forge token for '${agent_name}'..."
|
||||||
|
|
||||||
# Key per-agent credentials by *agent name*, not role (#834 Gap 1).
|
# Convert role to uppercase token variable name (e.g., architect -> FORGE_ARCHITECT_TOKEN)
|
||||||
# Two agents with the same role (e.g. two `dev` agents) must not collide on
|
local role_upper
|
||||||
# FORGE_<ROLE>_TOKEN — the compose generator looks up FORGE_TOKEN_<USER_UPPER>
|
role_upper=$(echo "$role" | tr '[:lower:]' '[:upper:]')
|
||||||
# where USER_UPPER = tr 'a-z-' 'A-Z_' of the agent's forge_user.
|
local token_var="FORGE_${role_upper}_TOKEN"
|
||||||
local agent_upper
|
|
||||||
agent_upper=$(echo "$agent_name" | tr 'a-z-' 'A-Z_')
|
|
||||||
local token_var="FORGE_TOKEN_${agent_upper}"
|
|
||||||
local pass_var="FORGE_PASS_${agent_upper}"
|
|
||||||
|
|
||||||
# Generate token using the user's password (basic auth)
|
# Generate token using the user's password (basic auth)
|
||||||
local agent_token=""
|
local agent_token=""
|
||||||
|
|
@ -221,7 +194,7 @@ disinto_hire_an_agent() {
|
||||||
if [ -z "$agent_token" ]; then
|
if [ -z "$agent_token" ]; then
|
||||||
echo " Warning: failed to create API token for '${agent_name}'" >&2
|
echo " Warning: failed to create API token for '${agent_name}'" >&2
|
||||||
else
|
else
|
||||||
# Store token in .env under the per-agent variable name
|
# Store token in .env under the role-specific variable name
|
||||||
if grep -q "^${token_var}=" "$env_file" 2>/dev/null; then
|
if grep -q "^${token_var}=" "$env_file" 2>/dev/null; then
|
||||||
# Use sed with alternative delimiter and proper escaping for special chars in token
|
# Use sed with alternative delimiter and proper escaping for special chars in token
|
||||||
local escaped_token
|
local escaped_token
|
||||||
|
|
@ -235,94 +208,6 @@ disinto_hire_an_agent() {
|
||||||
export "${token_var}=${agent_token}"
|
export "${token_var}=${agent_token}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Persist FORGE_PASS_<AGENT_UPPER> to .env (#834 Gap 2).
|
|
||||||
# The container's git credential helper (docker/agents/entrypoint.sh) needs
|
|
||||||
# both FORGE_TOKEN_* and FORGE_PASS_* to pass HTTPS auth for git push
|
|
||||||
# (Forgejo 11.x rejects API tokens for git push, #361).
|
|
||||||
if [ -n "${user_pass:-}" ]; then
|
|
||||||
local escaped_pass
|
|
||||||
escaped_pass=$(printf '%s\n' "$user_pass" | sed 's/[&/\]/\\&/g')
|
|
||||||
if grep -q "^${pass_var}=" "$env_file" 2>/dev/null; then
|
|
||||||
sed -i "s|^${pass_var}=.*|${pass_var}=${escaped_pass}|" "$env_file"
|
|
||||||
echo " ${agent_name} password updated (${pass_var})"
|
|
||||||
else
|
|
||||||
printf '%s=%s\n' "$pass_var" "$user_pass" >> "$env_file"
|
|
||||||
echo " ${agent_name} password saved (${pass_var})"
|
|
||||||
fi
|
|
||||||
export "${pass_var}=${user_pass}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Step 1.7: Write backend credentials to .env (#847).
|
|
||||||
# Local-model agents need ANTHROPIC_BASE_URL; Anthropic-backend agents need ANTHROPIC_API_KEY.
|
|
||||||
# These must be persisted so the container can start with valid credentials.
|
|
||||||
echo ""
|
|
||||||
echo "Step 1.7: Writing backend credentials to .env..."
|
|
||||||
|
|
||||||
if [ -n "$local_model" ]; then
|
|
||||||
# Local model agent: write ANTHROPIC_BASE_URL
|
|
||||||
local backend_var="ANTHROPIC_BASE_URL"
|
|
||||||
local backend_val="$local_model"
|
|
||||||
local escaped_val
|
|
||||||
escaped_val=$(printf '%s\n' "$backend_val" | sed 's/[&/\]/\\&/g')
|
|
||||||
if grep -q "^${backend_var}=" "$env_file" 2>/dev/null; then
|
|
||||||
sed -i "s|^${backend_var}=.*|${backend_var}=${escaped_val}|" "$env_file"
|
|
||||||
echo " ${backend_var} updated"
|
|
||||||
else
|
|
||||||
printf '%s=%s\n' "$backend_var" "$backend_val" >> "$env_file"
|
|
||||||
echo " ${backend_var} saved"
|
|
||||||
fi
|
|
||||||
export "${backend_var}=${backend_val}"
|
|
||||||
else
|
|
||||||
# Anthropic backend: check if ANTHROPIC_API_KEY is set, write it if present
|
|
||||||
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
|
||||||
local backend_var="ANTHROPIC_API_KEY"
|
|
||||||
local backend_val="$ANTHROPIC_API_KEY"
|
|
||||||
local escaped_key
|
|
||||||
escaped_key=$(printf '%s\n' "$backend_val" | sed 's/[&/\]/\\&/g')
|
|
||||||
if grep -q "^${backend_var}=" "$env_file" 2>/dev/null; then
|
|
||||||
sed -i "s|^${backend_var}=.*|${backend_var}=${escaped_key}|" "$env_file"
|
|
||||||
echo " ${backend_var} updated"
|
|
||||||
else
|
|
||||||
printf '%s=%s\n' "$backend_var" "$backend_val" >> "$env_file"
|
|
||||||
echo " ${backend_var} saved"
|
|
||||||
fi
|
|
||||||
export "${backend_var}=${backend_val}"
|
|
||||||
else
|
|
||||||
echo " Note: ANTHROPIC_API_KEY not set — required for Anthropic backend agents"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Step 1.6: Add the new agent as a write collaborator on the project repo (#856).
|
|
||||||
# Without this, PATCH /issues/{n} {assignees:[agent]} returns 403 Forbidden and
|
|
||||||
# the dev-agent polls forever logging "claim lost to <none> — skipping" (see
|
|
||||||
# issue_claim()'s post-PATCH verify). Mirrors the collaborator setup applied
|
|
||||||
# to the canonical bot users in lib/forge-setup.sh. Idempotent: Forgejo's PUT
|
|
||||||
# returns 204 whether the user is being added for the first time or already a
|
|
||||||
# collaborator at the same permission.
|
|
||||||
if [ -n "${FORGE_REPO:-}" ]; then
|
|
||||||
echo ""
|
|
||||||
echo "Step 1.6: Adding '${agent_name}' as write collaborator on '${FORGE_REPO}'..."
|
|
||||||
local collab_code
|
|
||||||
collab_code=$(curl -s -o /dev/null -w '%{http_code}' -X PUT \
|
|
||||||
-H "Authorization: token ${admin_token}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${forge_url}/api/v1/repos/${FORGE_REPO}/collaborators/${agent_name}" \
|
|
||||||
-d '{"permission":"write"}')
|
|
||||||
case "$collab_code" in
|
|
||||||
204|201|200)
|
|
||||||
echo " ${agent_name} is a write collaborator on ${FORGE_REPO} (HTTP ${collab_code})"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo " Warning: failed to add '${agent_name}' as collaborator on '${FORGE_REPO}' (HTTP ${collab_code})" >&2
|
|
||||||
echo " The agent will not be able to claim issues until this is fixed." >&2
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
else
|
|
||||||
echo ""
|
|
||||||
echo "Step 1.6: FORGE_REPO not set — skipping collaborator step" >&2
|
|
||||||
echo " Warning: the agent will not be able to claim issues on the project repo" >&2
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Step 2: Create .profile repo on Forgejo
|
# Step 2: Create .profile repo on Forgejo
|
||||||
echo ""
|
echo ""
|
||||||
echo "Step 2: Creating '${agent_name}/.profile' repo (if not exists)..."
|
echo "Step 2: Creating '${agent_name}/.profile' repo (if not exists)..."
|
||||||
|
|
@ -535,10 +420,7 @@ EOF
|
||||||
local interval="${poll_interval:-60}"
|
local interval="${poll_interval:-60}"
|
||||||
echo " Writing [agents.${section_name}] to ${toml_file}..."
|
echo " Writing [agents.${section_name}] to ${toml_file}..."
|
||||||
python3 -c '
|
python3 -c '
|
||||||
import sys
|
import sys, re, pathlib
|
||||||
import tomlkit
|
|
||||||
import re
|
|
||||||
import pathlib
|
|
||||||
|
|
||||||
toml_path = sys.argv[1]
|
toml_path = sys.argv[1]
|
||||||
section_name = sys.argv[2]
|
section_name = sys.argv[2]
|
||||||
|
|
@ -551,39 +433,38 @@ poll_interval = sys.argv[7]
|
||||||
p = pathlib.Path(toml_path)
|
p = pathlib.Path(toml_path)
|
||||||
text = p.read_text()
|
text = p.read_text()
|
||||||
|
|
||||||
# Step 1: Remove any commented-out [agents.X] blocks (they cause parse issues)
|
# Build the new section
|
||||||
# Match # [agents.section_name] followed by lines that are not section headers
|
new_section = f"""
|
||||||
# Use negative lookahead to stop before a real section header (# [ or [)
|
[agents.{section_name}]
|
||||||
commented_pattern = rf"(?:^|\n)# \[agents\.{re.escape(section_name)}\](?:\n(?!# \[|\[)[^\n]*)*"
|
base_url = "{base_url}"
|
||||||
text = re.sub(commented_pattern, "", text, flags=re.DOTALL)
|
model = "{model}"
|
||||||
|
api_key = "sk-no-key-required"
|
||||||
|
roles = ["{role}"]
|
||||||
|
forge_user = "{agent_name}"
|
||||||
|
compact_pct = 60
|
||||||
|
poll_interval = {poll_interval}
|
||||||
|
"""
|
||||||
|
|
||||||
# Step 2: Parse TOML with tomlkit (preserves comments and formatting)
|
# Check if section already exists and replace it
|
||||||
try:
|
pattern = rf"\[agents\.{re.escape(section_name)}\][^\[]*"
|
||||||
doc = tomlkit.parse(text)
|
if re.search(pattern, text):
|
||||||
except Exception as e:
|
text = re.sub(pattern, new_section.strip() + "\n", text)
|
||||||
print(f"Error: Invalid TOML in {toml_path}: {e}", file=sys.stderr)
|
else:
|
||||||
sys.exit(1)
|
# Remove commented-out example [agents.llama] block if present
|
||||||
|
text = re.sub(
|
||||||
|
r"\n# Local-model agents \(optional\).*?(?=\n# \[mirrors\]|\n\[mirrors\]|\Z)",
|
||||||
|
"",
|
||||||
|
text,
|
||||||
|
flags=re.DOTALL,
|
||||||
|
)
|
||||||
|
# Append before [mirrors] if it exists, otherwise at end
|
||||||
|
mirrors_match = re.search(r"\n(# )?\[mirrors\]", text)
|
||||||
|
if mirrors_match:
|
||||||
|
text = text[:mirrors_match.start()] + "\n" + new_section + text[mirrors_match.start():]
|
||||||
|
else:
|
||||||
|
text = text.rstrip() + "\n" + new_section
|
||||||
|
|
||||||
# Step 3: Ensure agents table exists
|
p.write_text(text)
|
||||||
if "agents" not in doc:
|
|
||||||
doc.add("agents", tomlkit.table())
|
|
||||||
|
|
||||||
# Step 4: Update the specific agent section
|
|
||||||
doc["agents"][section_name] = {
|
|
||||||
"base_url": base_url,
|
|
||||||
"model": model,
|
|
||||||
"api_key": "sk-no-key-required",
|
|
||||||
"roles": [role],
|
|
||||||
"forge_user": agent_name,
|
|
||||||
"compact_pct": 60,
|
|
||||||
"poll_interval": int(poll_interval),
|
|
||||||
}
|
|
||||||
|
|
||||||
# Step 5: Serialize back to TOML (preserves comments)
|
|
||||||
output = tomlkit.dumps(doc)
|
|
||||||
|
|
||||||
# Step 6: Write back
|
|
||||||
p.write_text(output)
|
|
||||||
' "$toml_file" "$section_name" "$local_model" "$model" "$agent_name" "$role" "$interval"
|
' "$toml_file" "$section_name" "$local_model" "$model" "$agent_name" "$role" "$interval"
|
||||||
|
|
||||||
echo " Agent config written to TOML"
|
echo " Agent config written to TOML"
|
||||||
|
|
@ -611,7 +492,7 @@ p.write_text(output)
|
||||||
echo " Model: ${model}"
|
echo " Model: ${model}"
|
||||||
echo ""
|
echo ""
|
||||||
echo " To start the agent, run:"
|
echo " To start the agent, run:"
|
||||||
echo " disinto up"
|
echo " docker compose --profile ${service_name} up -d ${service_name}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
|
|
|
||||||
440
lib/hvault.sh
440
lib/hvault.sh
|
|
@ -1,440 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# hvault.sh — HashiCorp Vault helper module
|
|
||||||
#
|
|
||||||
# Typed, audited helpers for Vault KV v2 access so no script re-implements
|
|
||||||
# `curl -H "X-Vault-Token: ..."` ad-hoc.
|
|
||||||
#
|
|
||||||
# Usage: source this file, then call any hvault_* function.
|
|
||||||
#
|
|
||||||
# Environment:
|
|
||||||
# VAULT_ADDR — Vault server address (required, no default)
|
|
||||||
# VAULT_TOKEN — auth token (precedence: env > /etc/vault.d/root.token)
|
|
||||||
#
|
|
||||||
# All functions emit structured JSON errors to stderr on failure.
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
# ── Internal helpers ─────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
# _hvault_err — emit structured JSON error to stderr
|
|
||||||
# Args: func_name, message, [detail]
|
|
||||||
_hvault_err() {
|
|
||||||
local func="$1" msg="$2" detail="${3:-}"
|
|
||||||
jq -n --arg func "$func" --arg msg "$msg" --arg detail "$detail" \
|
|
||||||
'{error:true,function:$func,message:$msg,detail:$detail}' >&2
|
|
||||||
}
|
|
||||||
|
|
||||||
# _hvault_resolve_token — resolve VAULT_TOKEN from env or token file
|
|
||||||
_hvault_resolve_token() {
|
|
||||||
if [ -n "${VAULT_TOKEN:-}" ]; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
local token_file="/etc/vault.d/root.token"
|
|
||||||
if [ -f "$token_file" ]; then
|
|
||||||
VAULT_TOKEN="$(cat "$token_file")"
|
|
||||||
export VAULT_TOKEN
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# _hvault_default_env — set the local-cluster Vault env if unset
|
|
||||||
#
|
|
||||||
# Idempotent helper used by every Vault-touching script that runs during
|
|
||||||
# `disinto init` (S2). On the local-cluster common case, operators (and
|
|
||||||
# the init dispatcher in bin/disinto) have not exported VAULT_ADDR or
|
|
||||||
# VAULT_TOKEN — the server is reachable on localhost:8200 and the root
|
|
||||||
# token lives at /etc/vault.d/root.token. Scripts must Just Work in that
|
|
||||||
# shape.
|
|
||||||
#
|
|
||||||
# - If VAULT_ADDR is unset, defaults to http://127.0.0.1:8200.
|
|
||||||
# - If VAULT_TOKEN is unset, resolves from /etc/vault.d/root.token via
|
|
||||||
# _hvault_resolve_token. A missing token file is not an error here —
|
|
||||||
# downstream hvault_token_lookup() probes connectivity and emits the
|
|
||||||
# operator-facing "VAULT_ADDR + VAULT_TOKEN" diagnostic.
|
|
||||||
#
|
|
||||||
# Centralised to keep the defaulting stanza in one place — copy-pasting
|
|
||||||
# the 5-line block into each init script trips the repo-wide 5-line
|
|
||||||
# sliding-window duplicate detector (.woodpecker/detect-duplicates.py).
|
|
||||||
_hvault_default_env() {
|
|
||||||
VAULT_ADDR="${VAULT_ADDR:-http://127.0.0.1:8200}"
|
|
||||||
export VAULT_ADDR
|
|
||||||
_hvault_resolve_token || :
|
|
||||||
}
|
|
||||||
|
|
||||||
# _hvault_check_prereqs — validate VAULT_ADDR and VAULT_TOKEN are set
|
|
||||||
# Args: caller function name
|
|
||||||
_hvault_check_prereqs() {
|
|
||||||
local caller="$1"
|
|
||||||
if [ -z "${VAULT_ADDR:-}" ]; then
|
|
||||||
_hvault_err "$caller" "VAULT_ADDR is not set" "export VAULT_ADDR before calling $caller"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
if ! _hvault_resolve_token; then
|
|
||||||
_hvault_err "$caller" "VAULT_TOKEN is not set and /etc/vault.d/root.token not found" \
|
|
||||||
"export VAULT_TOKEN or write token to /etc/vault.d/root.token"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# _hvault_request — execute a Vault API request
|
|
||||||
# Args: method, path, [data]
|
|
||||||
# Outputs: response body to stdout
|
|
||||||
# Returns: 0 on 2xx, 1 otherwise (error JSON to stderr)
|
|
||||||
_hvault_request() {
|
|
||||||
local method="$1" path="$2" data="${3:-}"
|
|
||||||
local url="${VAULT_ADDR}/v1/${path}"
|
|
||||||
local http_code body
|
|
||||||
local tmpfile
|
|
||||||
tmpfile="$(mktemp)"
|
|
||||||
|
|
||||||
local curl_args=(
|
|
||||||
-s
|
|
||||||
-w '%{http_code}'
|
|
||||||
-H "X-Vault-Token: ${VAULT_TOKEN}"
|
|
||||||
-H "Content-Type: application/json"
|
|
||||||
-X "$method"
|
|
||||||
-o "$tmpfile"
|
|
||||||
)
|
|
||||||
if [ -n "$data" ]; then
|
|
||||||
curl_args+=(-d "$data")
|
|
||||||
fi
|
|
||||||
|
|
||||||
http_code="$(curl "${curl_args[@]}" "$url")" || {
|
|
||||||
_hvault_err "_hvault_request" "curl failed" "url=$url"
|
|
||||||
rm -f "$tmpfile"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
body="$(cat "$tmpfile")"
|
|
||||||
rm -f "$tmpfile"
|
|
||||||
|
|
||||||
# Check HTTP status — 2xx is success
|
|
||||||
case "$http_code" in
|
|
||||||
2[0-9][0-9])
|
|
||||||
printf '%s' "$body"
|
|
||||||
return 0
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
_hvault_err "_hvault_request" "HTTP $http_code" "$body"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Public API ───────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
# VAULT_KV_MOUNT — KV v2 mount point (default: "kv")
|
|
||||||
# Override with: export VAULT_KV_MOUNT=secret
|
|
||||||
# Used by: hvault_kv_get, hvault_kv_put, hvault_kv_list
|
|
||||||
: "${VAULT_KV_MOUNT:=kv}"
|
|
||||||
|
|
||||||
# hvault_ensure_kv_v2 MOUNT [LOG_PREFIX]
|
|
||||||
# Assert that the given KV mount is present and KV v2. If absent, enable
|
|
||||||
# it. If present as wrong type/version, exit 1. Callers must have already
|
|
||||||
# checked VAULT_ADDR / VAULT_TOKEN.
|
|
||||||
#
|
|
||||||
# DRY_RUN (env, default 0): when 1, log intent without writing.
|
|
||||||
# LOG_PREFIX (optional): label for log lines, e.g. "[vault-seed-forgejo]".
|
|
||||||
#
|
|
||||||
# Extracted here because every vault-seed-*.sh script needs this exact
|
|
||||||
# sequence, and the 5-line sliding-window dup detector flags the
|
|
||||||
# copy-paste. One place, one implementation.
|
|
||||||
hvault_ensure_kv_v2() {
|
|
||||||
local mount="${1:?hvault_ensure_kv_v2: MOUNT required}"
|
|
||||||
local prefix="${2:-[hvault]}"
|
|
||||||
local dry_run="${DRY_RUN:-0}"
|
|
||||||
local mounts_json mount_exists mount_type mount_version
|
|
||||||
|
|
||||||
mounts_json="$(hvault_get_or_empty "sys/mounts")" \
|
|
||||||
|| { printf '%s ERROR: failed to list Vault mounts\n' "$prefix" >&2; return 1; }
|
|
||||||
|
|
||||||
mount_exists=false
|
|
||||||
if printf '%s' "$mounts_json" | jq -e --arg m "${mount}/" '.[$m]' >/dev/null 2>&1; then
|
|
||||||
mount_exists=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$mount_exists" = true ]; then
|
|
||||||
mount_type="$(printf '%s' "$mounts_json" \
|
|
||||||
| jq -r --arg m "${mount}/" '.[$m].type // ""')"
|
|
||||||
mount_version="$(printf '%s' "$mounts_json" \
|
|
||||||
| jq -r --arg m "${mount}/" '.[$m].options.version // "1"')"
|
|
||||||
if [ "$mount_type" != "kv" ]; then
|
|
||||||
printf '%s ERROR: %s/ is mounted as type=%q, expected kv — refuse to re-mount\n' \
|
|
||||||
"$prefix" "$mount" "$mount_type" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
if [ "$mount_version" != "2" ]; then
|
|
||||||
printf '%s ERROR: %s/ is KV v%s, expected v2 — refuse to upgrade in place\n' \
|
|
||||||
"$prefix" "$mount" "$mount_version" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
printf '%s %s/ already mounted (kv v2) — skipping enable\n' "$prefix" "$mount"
|
|
||||||
else
|
|
||||||
if [ "$dry_run" -eq 1 ]; then
|
|
||||||
printf '%s [dry-run] would enable %s/ as kv v2\n' "$prefix" "$mount"
|
|
||||||
else
|
|
||||||
local payload
|
|
||||||
payload="$(jq -n '{type:"kv",options:{version:"2"},description:"disinto shared KV v2 (S2.4)"}')"
|
|
||||||
_hvault_request POST "sys/mounts/${mount}" "$payload" >/dev/null \
|
|
||||||
|| { printf '%s ERROR: failed to enable %s/ as kv v2\n' "$prefix" "$mount" >&2; return 1; }
|
|
||||||
printf '%s %s/ enabled as kv v2\n' "$prefix" "$mount"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# hvault_kv_get PATH [KEY]
|
|
||||||
# Read a KV v2 secret at PATH, optionally extract a single KEY.
|
|
||||||
# Outputs: JSON value (full data object, or single key value)
|
|
||||||
hvault_kv_get() {
|
|
||||||
local path="${1:-}"
|
|
||||||
local key="${2:-}"
|
|
||||||
|
|
||||||
if [ -z "$path" ]; then
|
|
||||||
_hvault_err "hvault_kv_get" "PATH is required" "usage: hvault_kv_get PATH [KEY]"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
_hvault_check_prereqs "hvault_kv_get" || return 1
|
|
||||||
|
|
||||||
local response
|
|
||||||
response="$(_hvault_request GET "${VAULT_KV_MOUNT}/data/${path}")" || return 1
|
|
||||||
|
|
||||||
if [ -n "$key" ]; then
|
|
||||||
printf '%s' "$response" | jq -e -r --arg key "$key" '.data.data[$key]' 2>/dev/null || {
|
|
||||||
_hvault_err "hvault_kv_get" "key not found" "key=$key path=$path"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
else
|
|
||||||
printf '%s' "$response" | jq -e '.data.data' 2>/dev/null || {
|
|
||||||
_hvault_err "hvault_kv_get" "failed to parse response" "path=$path"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# hvault_kv_put PATH KEY=VAL [KEY=VAL ...]
|
|
||||||
# Write a KV v2 secret at PATH. Accepts one or more KEY=VAL pairs.
|
|
||||||
hvault_kv_put() {
|
|
||||||
local path="${1:-}"
|
|
||||||
shift || true
|
|
||||||
|
|
||||||
if [ -z "$path" ] || [ $# -eq 0 ]; then
|
|
||||||
_hvault_err "hvault_kv_put" "PATH and at least one KEY=VAL required" \
|
|
||||||
"usage: hvault_kv_put PATH KEY=VAL [KEY=VAL ...]"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
_hvault_check_prereqs "hvault_kv_put" || return 1
|
|
||||||
|
|
||||||
# Build JSON payload from KEY=VAL pairs entirely via jq
|
|
||||||
local payload='{"data":{}}'
|
|
||||||
for kv in "$@"; do
|
|
||||||
local k="${kv%%=*}"
|
|
||||||
local v="${kv#*=}"
|
|
||||||
if [ "$k" = "$kv" ]; then
|
|
||||||
_hvault_err "hvault_kv_put" "invalid KEY=VAL pair" "got: $kv"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
payload="$(printf '%s' "$payload" | jq --arg k "$k" --arg v "$v" '.data[$k] = $v')"
|
|
||||||
done
|
|
||||||
|
|
||||||
_hvault_request POST "${VAULT_KV_MOUNT}/data/${path}" "$payload" >/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
# hvault_kv_list PATH
|
|
||||||
# List keys at a KV v2 path.
|
|
||||||
# Outputs: JSON array of key names
|
|
||||||
hvault_kv_list() {
|
|
||||||
local path="${1:-}"
|
|
||||||
|
|
||||||
if [ -z "$path" ]; then
|
|
||||||
_hvault_err "hvault_kv_list" "PATH is required" "usage: hvault_kv_list PATH"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
_hvault_check_prereqs "hvault_kv_list" || return 1
|
|
||||||
|
|
||||||
local response
|
|
||||||
response="$(_hvault_request LIST "${VAULT_KV_MOUNT}/metadata/${path}")" || return 1
|
|
||||||
|
|
||||||
printf '%s' "$response" | jq -e '.data.keys' 2>/dev/null || {
|
|
||||||
_hvault_err "hvault_kv_list" "failed to parse response" "path=$path"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# hvault_get_or_empty PATH
|
|
||||||
# GET /v1/PATH. On 200, prints the raw response body to stdout (caller
|
|
||||||
# parses with jq). On 404, prints nothing and returns 0 — caller treats
|
|
||||||
# the empty string as "resource absent, needs create". Any other HTTP
|
|
||||||
# status is a hard error: response body is logged to stderr as a
|
|
||||||
# structured JSON error and the function returns 1.
|
|
||||||
#
|
|
||||||
# Used by the sync scripts (tools/vault-apply-*.sh +
|
|
||||||
# lib/init/nomad/vault-nomad-auth.sh) to read existing policies, roles,
|
|
||||||
# auth-method listings, and per-role configs without triggering errexit
|
|
||||||
# on the expected absent-resource case. `_hvault_request` is not a
|
|
||||||
# substitute — it treats 404 as a hard error, which is correct for
|
|
||||||
# writes but wrong for "does this already exist?" checks.
|
|
||||||
#
|
|
||||||
# Subshell + EXIT trap: the RETURN trap does NOT fire on set-e abort,
|
|
||||||
# so tmpfile cleanup from a function-scoped RETURN trap would leak on
|
|
||||||
# jq/curl errors under `set -eo pipefail`. The subshell + EXIT trap
|
|
||||||
# is the reliable cleanup boundary.
|
|
||||||
hvault_get_or_empty() {
|
|
||||||
local path="${1:-}"
|
|
||||||
|
|
||||||
if [ -z "$path" ]; then
|
|
||||||
_hvault_err "hvault_get_or_empty" "PATH is required" \
|
|
||||||
"usage: hvault_get_or_empty PATH"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
_hvault_check_prereqs "hvault_get_or_empty" || return 1
|
|
||||||
|
|
||||||
(
|
|
||||||
local tmp http_code
|
|
||||||
tmp="$(mktemp)"
|
|
||||||
trap 'rm -f "$tmp"' EXIT
|
|
||||||
http_code="$(curl -sS -o "$tmp" -w '%{http_code}' \
|
|
||||||
-H "X-Vault-Token: ${VAULT_TOKEN}" \
|
|
||||||
"${VAULT_ADDR}/v1/${path}")" \
|
|
||||||
|| { _hvault_err "hvault_get_or_empty" "curl failed" "path=$path"; exit 1; }
|
|
||||||
case "$http_code" in
|
|
||||||
2[0-9][0-9]) cat "$tmp" ;;
|
|
||||||
404) printf '' ;;
|
|
||||||
*) _hvault_err "hvault_get_or_empty" "HTTP $http_code" "$(cat "$tmp")"
|
|
||||||
exit 1 ;;
|
|
||||||
esac
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
# hvault_policy_apply NAME FILE
|
|
||||||
# Idempotent policy upsert — create or update a Vault policy.
|
|
||||||
hvault_policy_apply() {
|
|
||||||
local name="${1:-}"
|
|
||||||
local file="${2:-}"
|
|
||||||
|
|
||||||
if [ -z "$name" ] || [ -z "$file" ]; then
|
|
||||||
_hvault_err "hvault_policy_apply" "NAME and FILE are required" \
|
|
||||||
"usage: hvault_policy_apply NAME FILE"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
if [ ! -f "$file" ]; then
|
|
||||||
_hvault_err "hvault_policy_apply" "policy file not found" "file=$file"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
_hvault_check_prereqs "hvault_policy_apply" || return 1
|
|
||||||
|
|
||||||
local policy_content
|
|
||||||
policy_content="$(cat "$file")"
|
|
||||||
local payload
|
|
||||||
payload="$(jq -n --arg policy "$policy_content" '{"policy": $policy}')"
|
|
||||||
|
|
||||||
_hvault_request PUT "sys/policies/acl/${name}" "$payload" >/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
# hvault_jwt_login ROLE JWT
|
|
||||||
# Exchange a JWT for a short-lived Vault token.
|
|
||||||
# Outputs: client token string
|
|
||||||
hvault_jwt_login() {
|
|
||||||
local role="${1:-}"
|
|
||||||
local jwt="${2:-}"
|
|
||||||
|
|
||||||
if [ -z "$role" ] || [ -z "$jwt" ]; then
|
|
||||||
_hvault_err "hvault_jwt_login" "ROLE and JWT are required" \
|
|
||||||
"usage: hvault_jwt_login ROLE JWT"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
# Only need VAULT_ADDR, not VAULT_TOKEN (we're obtaining a token)
|
|
||||||
if [ -z "${VAULT_ADDR:-}" ]; then
|
|
||||||
_hvault_err "hvault_jwt_login" "VAULT_ADDR is not set"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
local payload
|
|
||||||
payload="$(jq -n --arg role "$role" --arg jwt "$jwt" \
|
|
||||||
'{"role": $role, "jwt": $jwt}')"
|
|
||||||
|
|
||||||
local response
|
|
||||||
# JWT login does not require an existing token — use curl directly
|
|
||||||
local tmpfile http_code
|
|
||||||
tmpfile="$(mktemp)"
|
|
||||||
http_code="$(curl -s -w '%{http_code}' \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-X POST \
|
|
||||||
-d "$payload" \
|
|
||||||
-o "$tmpfile" \
|
|
||||||
"${VAULT_ADDR}/v1/auth/jwt/login")" || {
|
|
||||||
_hvault_err "hvault_jwt_login" "curl failed"
|
|
||||||
rm -f "$tmpfile"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
local body
|
|
||||||
body="$(cat "$tmpfile")"
|
|
||||||
rm -f "$tmpfile"
|
|
||||||
|
|
||||||
case "$http_code" in
|
|
||||||
2[0-9][0-9])
|
|
||||||
printf '%s' "$body" | jq -e -r '.auth.client_token' 2>/dev/null || {
|
|
||||||
_hvault_err "hvault_jwt_login" "failed to extract client_token" "$body"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
_hvault_err "hvault_jwt_login" "HTTP $http_code" "$body"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
# hvault_token_lookup
|
|
||||||
# Returns TTL, policies, and accessor for the current token.
|
|
||||||
# Outputs: JSON object with ttl, policies, accessor fields
|
|
||||||
hvault_token_lookup() {
|
|
||||||
_hvault_check_prereqs "hvault_token_lookup" || return 1
|
|
||||||
|
|
||||||
local response
|
|
||||||
response="$(_hvault_request GET "auth/token/lookup-self")" || return 1
|
|
||||||
|
|
||||||
printf '%s' "$response" | jq -e '{
|
|
||||||
ttl: .data.ttl,
|
|
||||||
policies: .data.policies,
|
|
||||||
accessor: .data.accessor,
|
|
||||||
display_name: .data.display_name
|
|
||||||
}' 2>/dev/null || {
|
|
||||||
_hvault_err "hvault_token_lookup" "failed to parse token info"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# _hvault_seed_key — Seed a single KV key if it doesn't exist.
|
|
||||||
# Reads existing data and merges to preserve sibling keys (KV v2 replaces
|
|
||||||
# .data atomically). Returns 0=created, 1=unchanged, 2=API error.
|
|
||||||
# Args:
|
|
||||||
# path: KV v2 logical path (e.g. "disinto/shared/chat")
|
|
||||||
# key: key name within the path (e.g. "chat_oauth_client_id")
|
|
||||||
# generator: shell command that outputs a random value (default: openssl rand -hex 32)
|
|
||||||
# Usage:
|
|
||||||
# _hvault_seed_key "disinto/shared/chat" "chat_oauth_client_id"
|
|
||||||
# rc=$? # 0=created, 1=unchanged
|
|
||||||
_hvault_seed_key() {
|
|
||||||
local path="$1" key="$2" generator="${3:-openssl rand -hex 32}"
|
|
||||||
local existing
|
|
||||||
existing=$(hvault_kv_get "$path" "$key" 2>/dev/null) || true
|
|
||||||
if [ -n "$existing" ]; then
|
|
||||||
return 1 # unchanged
|
|
||||||
fi
|
|
||||||
|
|
||||||
local value
|
|
||||||
value=$(eval "$generator")
|
|
||||||
|
|
||||||
# Read existing data to preserve sibling keys (KV v2 replaces atomically)
|
|
||||||
local kv_api="${VAULT_KV_MOUNT}/data/${path}"
|
|
||||||
local raw existing_data payload
|
|
||||||
raw="$(hvault_get_or_empty "$kv_api")" || return 2
|
|
||||||
existing_data="{}"
|
|
||||||
[ -n "$raw" ] && existing_data="$(printf '%s' "$raw" | jq '.data.data // {}')"
|
|
||||||
payload="$(printf '%s' "$existing_data" \
|
|
||||||
| jq --arg k "$key" --arg v "$value" '{data: (. + {($k): $v})}')"
|
|
||||||
_hvault_request POST "$kv_api" "$payload" >/dev/null
|
|
||||||
return 0 # created
|
|
||||||
}
|
|
||||||
|
|
@ -1,368 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# =============================================================================
|
|
||||||
# lib/init/nomad/cluster-up.sh — Empty Nomad+Vault cluster orchestrator (S0.4)
|
|
||||||
#
|
|
||||||
# Wires together the S0.1–S0.3 building blocks into one idempotent
|
|
||||||
# "bring up a single-node Nomad+Vault cluster" script:
|
|
||||||
#
|
|
||||||
# 1. install.sh (nomad + vault binaries + docker daemon)
|
|
||||||
# 2. systemd-nomad.sh (nomad.service — unit + enable, not started)
|
|
||||||
# 3. systemd-vault.sh (vault.service — unit + vault.hcl + enable)
|
|
||||||
# 4. Host-volume dirs (/srv/disinto/* matching nomad/client.hcl)
|
|
||||||
# 5. /etc/nomad.d/*.hcl (server.hcl + client.hcl from repo)
|
|
||||||
# 6. vault-init.sh (first-run init + unseal + persist keys)
|
|
||||||
# 7. systemctl start vault (auto-unseal via ExecStartPost; poll)
|
|
||||||
# 8. systemctl start nomad (poll until ≥1 ready node)
|
|
||||||
# 9. /etc/profile.d/disinto-nomad.sh (VAULT_ADDR + NOMAD_ADDR for shells)
|
|
||||||
#
|
|
||||||
# This is the "empty cluster" orchestrator — no jobs deployed. Subsequent
|
|
||||||
# Step-1 issues layer job deployment on top of this checkpoint.
|
|
||||||
#
|
|
||||||
# Idempotency contract:
|
|
||||||
# Running twice back-to-back on a healthy box is a no-op. Each sub-step
|
|
||||||
# is itself idempotent — see install.sh / systemd-*.sh / vault-init.sh
|
|
||||||
# headers for the per-step contract. Fast-paths in steps 7 and 8 skip
|
|
||||||
# the systemctl start when the service is already active + healthy.
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# sudo lib/init/nomad/cluster-up.sh # bring cluster up
|
|
||||||
# sudo lib/init/nomad/cluster-up.sh --dry-run # print step list, exit 0
|
|
||||||
#
|
|
||||||
# Environment (override polling for slow boxes):
|
|
||||||
# VAULT_POLL_SECS max seconds to wait for vault to unseal (default: 30)
|
|
||||||
# NOMAD_POLL_SECS max seconds to wait for nomad node=ready (default: 60)
|
|
||||||
#
|
|
||||||
# Exit codes:
|
|
||||||
# 0 success (cluster up, or already up)
|
|
||||||
# 1 precondition or step failure
|
|
||||||
# =============================================================================
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
||||||
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
|
||||||
|
|
||||||
# Sub-scripts (siblings in this directory).
|
|
||||||
INSTALL_SH="${SCRIPT_DIR}/install.sh"
|
|
||||||
SYSTEMD_NOMAD_SH="${SCRIPT_DIR}/systemd-nomad.sh"
|
|
||||||
SYSTEMD_VAULT_SH="${SCRIPT_DIR}/systemd-vault.sh"
|
|
||||||
VAULT_INIT_SH="${SCRIPT_DIR}/vault-init.sh"
|
|
||||||
|
|
||||||
# In-repo Nomad configs copied to /etc/nomad.d/.
|
|
||||||
NOMAD_CONFIG_DIR="/etc/nomad.d"
|
|
||||||
NOMAD_SERVER_HCL_SRC="${REPO_ROOT}/nomad/server.hcl"
|
|
||||||
NOMAD_CLIENT_HCL_SRC="${REPO_ROOT}/nomad/client.hcl"
|
|
||||||
|
|
||||||
# /etc/profile.d entry — makes VAULT_ADDR + NOMAD_ADDR available to
|
|
||||||
# interactive shells without requiring the operator to source anything.
|
|
||||||
PROFILE_D_FILE="/etc/profile.d/disinto-nomad.sh"
|
|
||||||
|
|
||||||
# Host-volume paths — MUST match the `host_volume "..."` declarations
|
|
||||||
# in nomad/client.hcl. Adding a host_volume block there requires adding
|
|
||||||
# its path here so the dir exists before nomad starts (otherwise client
|
|
||||||
# fingerprinting fails and the node stays in "initializing").
|
|
||||||
HOST_VOLUME_DIRS=(
|
|
||||||
"/srv/disinto/forgejo-data"
|
|
||||||
"/srv/disinto/woodpecker-data"
|
|
||||||
"/srv/disinto/agent-data"
|
|
||||||
"/srv/disinto/project-repos"
|
|
||||||
"/srv/disinto/caddy-data"
|
|
||||||
"/srv/disinto/docker"
|
|
||||||
"/srv/disinto/chat-history"
|
|
||||||
"/srv/disinto/ops-repo"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Default API addresses — matches the listener bindings in
|
|
||||||
# nomad/server.hcl and nomad/vault.hcl. If either file ever moves
|
|
||||||
# off 127.0.0.1 / default port, update both places together.
|
|
||||||
VAULT_ADDR_DEFAULT="http://127.0.0.1:8200"
|
|
||||||
NOMAD_ADDR_DEFAULT="http://127.0.0.1:4646"
|
|
||||||
|
|
||||||
VAULT_POLL_SECS="${VAULT_POLL_SECS:-30}"
|
|
||||||
NOMAD_POLL_SECS="${NOMAD_POLL_SECS:-60}"
|
|
||||||
|
|
||||||
log() { printf '[cluster-up] %s\n' "$*"; }
|
|
||||||
die() { printf '[cluster-up] ERROR: %s\n' "$*" >&2; exit 1; }
|
|
||||||
|
|
||||||
# ── Flag parsing ─────────────────────────────────────────────────────────────
|
|
||||||
dry_run=false
|
|
||||||
while [ $# -gt 0 ]; do
|
|
||||||
case "$1" in
|
|
||||||
--dry-run) dry_run=true; shift ;;
|
|
||||||
-h|--help)
|
|
||||||
cat <<EOF
|
|
||||||
Usage: sudo $(basename "$0") [--dry-run]
|
|
||||||
|
|
||||||
Brings up an empty single-node Nomad+Vault cluster (idempotent).
|
|
||||||
|
|
||||||
--dry-run Print the step list without performing any action.
|
|
||||||
EOF
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
*) die "unknown flag: $1" ;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
# ── Dry-run: print step list + exit ──────────────────────────────────────────
|
|
||||||
if [ "$dry_run" = true ]; then
|
|
||||||
cat <<EOF
|
|
||||||
[dry-run] Step 1/9: install nomad + vault binaries + docker daemon
|
|
||||||
→ sudo ${INSTALL_SH}
|
|
||||||
|
|
||||||
[dry-run] Step 2/9: write + enable nomad.service (NOT started)
|
|
||||||
→ sudo ${SYSTEMD_NOMAD_SH}
|
|
||||||
|
|
||||||
[dry-run] Step 3/9: write + enable vault.service + vault.hcl (NOT started)
|
|
||||||
→ sudo ${SYSTEMD_VAULT_SH}
|
|
||||||
|
|
||||||
[dry-run] Step 4/9: create host-volume dirs under /srv/disinto/
|
|
||||||
EOF
|
|
||||||
for d in "${HOST_VOLUME_DIRS[@]}"; do
|
|
||||||
printf ' → install -d -m 0777 %s\n' "$d"
|
|
||||||
done
|
|
||||||
cat <<EOF
|
|
||||||
|
|
||||||
[dry-run] Step 5/9: install /etc/nomad.d/server.hcl + client.hcl from repo
|
|
||||||
→ ${NOMAD_SERVER_HCL_SRC} → ${NOMAD_CONFIG_DIR}/server.hcl
|
|
||||||
→ ${NOMAD_CLIENT_HCL_SRC} → ${NOMAD_CONFIG_DIR}/client.hcl
|
|
||||||
|
|
||||||
[dry-run] Step 6/9: first-run vault init + persist unseal.key + root.token
|
|
||||||
→ sudo ${VAULT_INIT_SH}
|
|
||||||
|
|
||||||
[dry-run] Step 7/9: systemctl start vault + poll until unsealed (≤${VAULT_POLL_SECS}s)
|
|
||||||
|
|
||||||
[dry-run] Step 8/9: systemctl start nomad + poll until ≥1 node ready + docker driver healthy (≤${NOMAD_POLL_SECS}s each)
|
|
||||||
|
|
||||||
[dry-run] Step 9/9: write ${PROFILE_D_FILE}
|
|
||||||
→ export VAULT_ADDR=${VAULT_ADDR_DEFAULT}
|
|
||||||
→ export NOMAD_ADDR=${NOMAD_ADDR_DEFAULT}
|
|
||||||
|
|
||||||
Dry run complete — no changes made.
|
|
||||||
EOF
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ── Preconditions ────────────────────────────────────────────────────────────
# Each sub-script re-checks root itself, but failing here gives one clear
# error before any of them runs or makes partial changes.
if [ "$(id -u)" -ne 0 ]; then
  die "must run as root (spawns install/systemd/vault-init sub-scripts)"
fi

# systemd is a hard requirement: steps 2/3 install units, steps 7/8 start them.
command -v systemctl >/dev/null 2>&1 \
  || die "systemctl not found (systemd required)"

# Fail fast if any delegated sub-script is missing or has lost its +x bit.
for f in "$INSTALL_SH" "$SYSTEMD_NOMAD_SH" "$SYSTEMD_VAULT_SH" "$VAULT_INIT_SH"; do
  [ -x "$f" ] || die "sub-script missing or non-executable: ${f}"
done

# Step 5 copies these repo-tracked HCL configs into /etc/nomad.d — verify now.
[ -f "$NOMAD_SERVER_HCL_SRC" ] \
  || die "source config not found: ${NOMAD_SERVER_HCL_SRC}"
[ -f "$NOMAD_CLIENT_HCL_SRC" ] \
  || die "source config not found: ${NOMAD_CLIENT_HCL_SRC}"
|
||||||
# ── Helpers ──────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
# install_file_if_differs SRC DST MODE
# Idempotent file install: copy SRC over DST (root:root with MODE) only when
# the on-disk contents differ. When they already match, just log and return —
# this preserves DST's mtime and avoids triggering spurious service reloads.
install_file_if_differs() {
  local source_file="$1" dest_file="$2" dest_mode="$3"

  if [ -f "$dest_file" ] && cmp -s "$source_file" "$dest_file"; then
    log "unchanged: ${dest_file}"
    return 0
  fi

  log "writing: ${dest_file}"
  install -m "$dest_mode" -o root -g root "$source_file" "$dest_file"
}
|
|
||||||
|
|
||||||
# vault_status_json — echo `vault status -format=json`, or '' on unreachable.
# vault status exit codes: 0 = unsealed, 2 = sealed/uninit, 1 = unreachable.
# We treat all of 0/2 as "reachable with state"; 1 yields empty output.
# Wrapped in `|| true` so set -e doesn't abort on exit 2 (the expected
# sealed-state case during first-boot polling). Note: on exit 2 vault still
# prints its JSON to stdout, and `|| true` deliberately lets that JSON pass
# through to the caller — do not replace with a pattern that discards
# stdout on non-zero exit.
vault_status_json() {
  VAULT_ADDR="$VAULT_ADDR_DEFAULT" vault status -format=json 2>/dev/null || true
}
|
|
||||||
|
|
||||||
# vault_is_unsealed — succeed only when Vault is reachable AND reports
# initialized=true AND sealed=false. Any unreachable/unparsable state is
# treated as "not unsealed" (return 1), which the poll loop retries.
vault_is_unsealed() {
  local status_json is_init is_sealed

  status_json="$(vault_status_json)"
  if [ -z "$status_json" ]; then
    return 1
  fi

  is_init="$(printf '%s' "$status_json" | jq -r '.initialized' 2>/dev/null)" || is_init=""
  is_sealed="$(printf '%s' "$status_json" | jq -r '.sealed' 2>/dev/null)" || is_sealed=""

  if [ "$is_init" != "true" ] || [ "$is_sealed" != "false" ]; then
    return 1
  fi
  return 0
}
|
|
||||||
|
|
||||||
# nomad_ready_count — print the number of nomad nodes in "ready" state,
# or 0 when the API is unreachable / the JSON is unparsable.
# `nomad node status -json` returns a JSON array of nodes, each carrying a
# .Status field ("initializing" | "ready" | "down" | "disconnected").
nomad_ready_count() {
  local node_json
  node_json="$(NOMAD_ADDR="$NOMAD_ADDR_DEFAULT" nomad node status -json 2>/dev/null || true)"

  # Unreachable API → no output; report zero ready nodes.
  [ -n "$node_json" ] || { printf '0'; return 0; }

  printf '%s' "$node_json" \
    | jq '[.[] | select(.Status == "ready")] | length' 2>/dev/null \
    || printf '0'
}
|
|
||||||
|
|
||||||
# nomad_has_ready_node — succeed iff nomad_ready_count reports ≥ 1 node.
# Exists as a zero-argument wrapper so poll_until_healthy can invoke it by
# bare command name.
nomad_has_ready_node() {
  local ready_nodes
  ready_nodes="$(nomad_ready_count)"
  [ "$ready_nodes" -ge 1 ]
}
|
|
||||||
|
|
||||||
# nomad_docker_driver_healthy — succeed only when the nomad self-node reports
# the docker task driver as Detected=true AND Healthy=true. Required by
# Step-1's forgejo jobspec (the first docker-driver consumer) — without this
# gate the node reaches "ready" while docker fingerprinting is still in
# flight, and the first `nomad job run forgejo` times out with an opaque
# "missing drivers" placement failure (#871).
nomad_docker_driver_healthy() {
  local self_json flag_detected flag_healthy

  self_json="$(NOMAD_ADDR="$NOMAD_ADDR_DEFAULT" nomad node status -self -json 2>/dev/null || true)"
  if [ -z "$self_json" ]; then
    return 1
  fi

  flag_detected="$(printf '%s' "$self_json" | jq -r '.Drivers.docker.Detected // false' 2>/dev/null)" || flag_detected=""
  flag_healthy="$(printf '%s' "$self_json" | jq -r '.Drivers.docker.Healthy // false' 2>/dev/null)" || flag_healthy=""

  [ "$flag_detected" = "true" ] && [ "$flag_healthy" = "true" ]
}
|
|
||||||
|
|
||||||
# _die_with_service_status SVC REASON
# Log REASON, dump `systemctl status SVC` to stderr for diagnosis, then die.
# Factored out so the poll helper doesn't carry three copies of the dump.
_die_with_service_status() {
  local service="$1" why="$2"
  log "${service}.service ${why} — systemctl status follows:"
  # Best-effort dump: never let a failing systemctl mask the real error.
  systemctl --no-pager --full status "$service" >&2 || true
  die "${service}.service ${why}"
}
|
|
||||||
|
|
||||||
# poll_until_healthy SVC CHECK_CMD TIMEOUT
# Tick once per second for up to TIMEOUT seconds, invoking CHECK_CMD as a
# bare command name (no arguments). Returns 0 on the first successful check.
# Fails fast via _die_with_service_status if SVC enters systemd "failed"
# state, and dies with a status dump when TIMEOUT elapses before CHECK_CMD
# succeeds. Replaces the two in-line ready=1/break/sleep poll loops that
# would otherwise each duplicate the same pattern already in vault-init.sh.
poll_until_healthy() {
  local service="$1" probe="$2" limit="$3"
  local elapsed=0

  while [ "$elapsed" -lt "$limit" ]; do
    # A unit in "failed" state will never become healthy — bail immediately
    # with diagnostics instead of burning the whole timeout.
    if systemctl is-failed --quiet "$service"; then
      _die_with_service_status "$service" "entered failed state during startup"
    fi
    if "$probe"; then
      log "${service} healthy after ${elapsed}s"
      return 0
    fi
    elapsed=$((elapsed + 1))
    sleep 1
  done

  _die_with_service_status "$service" "not healthy within ${limit}s"
}
|
|
||||||
|
|
||||||
# ── Step 1/9: install.sh (nomad + vault binaries + docker daemon) ────────────
log "── Step 1/9: install nomad + vault binaries + docker daemon ──"
"$INSTALL_SH"

# ── Step 2/9: systemd-nomad.sh (unit + enable, not started) ──────────────────
log "── Step 2/9: install nomad.service (enable, not start) ──"
"$SYSTEMD_NOMAD_SH"

# ── Step 3/9: systemd-vault.sh (unit + vault.hcl + enable) ───────────────────
log "── Step 3/9: install vault.service + vault.hcl (enable, not start) ──"
"$SYSTEMD_VAULT_SH"

# ── Step 4/9: host-volume dirs matching nomad/client.hcl ─────────────────────
log "── Step 4/9: host-volume dirs under /srv/disinto/ ──"
# Parent /srv/disinto/ first (install -d handles missing parents, but being
# explicit makes the log output read naturally as a top-down creation).
install -d -m 0755 -o root -g root "/srv/disinto"
for d in "${HOST_VOLUME_DIRS[@]}"; do
  if [ -d "$d" ]; then
    log "unchanged: ${d}"
  else
    log "creating: ${d}"
    install -d -m 0777 -o root -g root "$d"
  fi
  # Ensure correct permissions (fixes pre-existing 0755 dirs on re-run)
  # NOTE(review): 0777 presumably lets containerized tasks of any uid write
  # into these host volumes — confirm world-writable is intended for all of
  # HOST_VOLUME_DIRS, not just the Forgejo data dir.
  chmod 0777 "$d"
done

# ── Step 5/9: /etc/nomad.d/server.hcl + client.hcl ───────────────────────────
log "── Step 5/9: install /etc/nomad.d/{server,client}.hcl ──"
# systemd-nomad.sh already created /etc/nomad.d/. Re-assert for clarity +
# in case someone runs cluster-up.sh with an exotic step ordering later.
install -d -m 0755 -o root -g root "$NOMAD_CONFIG_DIR"
install_file_if_differs "$NOMAD_SERVER_HCL_SRC" "${NOMAD_CONFIG_DIR}/server.hcl" 0644
install_file_if_differs "$NOMAD_CLIENT_HCL_SRC" "${NOMAD_CONFIG_DIR}/client.hcl" 0644

# ── Step 6/9: vault-init (first-run init + unseal + persist keys) ────────────
log "── Step 6/9: vault-init (no-op after first run) ──"
# vault-init.sh spawns a temporary vault server if systemd isn't managing
# one, runs `operator init`, writes unseal.key + root.token, unseals once,
# then stops the temp server (EXIT trap). After it returns, port 8200 is
# free for systemctl-managed vault to take in step 7.
"$VAULT_INIT_SH"
|
|
||||||
|
|
||||||
# ── Step 7/9: systemctl start vault + poll until unsealed ────────────────────
log "── Step 7/9: start vault + poll until unsealed ──"
# Fast-path when vault.service is already active and Vault reports
# initialized=true,sealed=false — re-runs are a no-op.
# NOTE(review): if vault.service is active but SEALED, `systemctl start` is
# a no-op on an active unit and this poll then depends on something else
# unsealing — confirm vault-init / the unit handles that path.
if systemctl is-active --quiet vault && vault_is_unsealed; then
  log "vault already active + unsealed — skip start"
else
  systemctl start vault
  poll_until_healthy vault vault_is_unsealed "$VAULT_POLL_SECS"
fi

# ── Step 8/9: systemctl start nomad + poll until ≥1 node ready + docker up ──
log "── Step 8/9: start nomad + poll until ≥1 node ready + docker driver healthy ──"
# Three conditions gate this step:
#   (a) nomad.service active
#   (b) ≥1 nomad node in "ready" state
#   (c) nomad's docker task driver fingerprinted as Detected+Healthy
# (c) can lag (a)+(b) briefly because driver fingerprinting races with
# dockerd startup — polling it explicitly prevents Step-1 deploys from
# hitting "missing drivers" placement failures on a cold-booted host (#871).
if systemctl is-active --quiet nomad \
  && nomad_has_ready_node \
  && nomad_docker_driver_healthy; then
  log "nomad already active + ≥1 node ready + docker driver healthy — skip start"
else
  # Only start the unit when it isn't already running; the polls below
  # cover the already-active-but-not-yet-ready case.
  if ! systemctl is-active --quiet nomad; then
    systemctl start nomad
  fi
  poll_until_healthy nomad nomad_has_ready_node "$NOMAD_POLL_SECS"
  poll_until_healthy nomad nomad_docker_driver_healthy "$NOMAD_POLL_SECS"
fi
|
|
||||||
|
|
||||||
# ── Step 9/9: /etc/profile.d/disinto-nomad.sh ────────────────────────────────
log "── Step 9/9: write ${PROFILE_D_FILE} ──"
# Shell rc fragments in /etc/profile.d/ are sourced by /etc/profile for
# every interactive login shell. Setting VAULT_ADDR + NOMAD_ADDR here means
# the operator can run `vault status` / `nomad node status` straight after
# `ssh factory-box` without fumbling env vars.
desired_profile="# /etc/profile.d/disinto-nomad.sh — written by lib/init/nomad/cluster-up.sh
# Interactive-shell defaults for Vault + Nomad clients on this box.
export VAULT_ADDR=${VAULT_ADDR_DEFAULT}
export NOMAD_ADDR=${NOMAD_ADDR_DEFAULT}
"
# Content-compare before writing, same rationale as install_file_if_differs:
# keep re-runs a no-op and preserve the file's mtime when nothing changed.
if [ -f "$PROFILE_D_FILE" ] \
  && printf '%s' "$desired_profile" | cmp -s - "$PROFILE_D_FILE"; then
  log "unchanged: ${PROFILE_D_FILE}"
else
  log "writing: ${PROFILE_D_FILE}"
  # Subshell + EXIT trap: guarantees the tempfile is cleaned up on both
  # success AND set-e-induced failure of `install`. A function-scoped
  # RETURN trap does NOT fire on errexit-abort in bash — the subshell is
  # the reliable cleanup boundary here.
  (
    tmp="$(mktemp)"
    trap 'rm -f "$tmp"' EXIT
    printf '%s' "$desired_profile" > "$tmp"
    install -m 0644 -o root -g root "$tmp" "$PROFILE_D_FILE"
  )
fi

log "── done: empty nomad+vault cluster is up ──"
log "   Vault: ${VAULT_ADDR_DEFAULT} (Sealed=false Initialized=true)"
log "   Nomad: ${NOMAD_ADDR_DEFAULT} (≥1 node ready)"
|
|
||||||
|
|
@ -1,226 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# =============================================================================
|
|
||||||
# lib/init/nomad/deploy.sh — Dependency-ordered Nomad job deploy + wait
|
|
||||||
#
|
|
||||||
# Runs a list of jobspecs in order, waiting for each to reach healthy state
|
|
||||||
# before starting the next. Step-1 uses it for forgejo-only; Steps 3–6 extend
|
|
||||||
# the job list.
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# lib/init/nomad/deploy.sh <jobname> [jobname2 ...] [--dry-run]
|
|
||||||
#
|
|
||||||
# Arguments:
|
|
||||||
# jobname — basename of jobspec (without .hcl), resolved to
|
|
||||||
# ${REPO_ROOT}/nomad/jobs/<jobname>.hcl
|
|
||||||
#
|
|
||||||
# Environment:
|
|
||||||
# REPO_ROOT — absolute path to repo root (defaults to parent of
|
|
||||||
# this script's parent directory)
|
|
||||||
# JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 240)
|
|
||||||
# JOB_READY_TIMEOUT_<JOBNAME> — per-job timeout override (e.g.,
|
|
||||||
# JOB_READY_TIMEOUT_FORGEJO=300)
|
|
||||||
#
|
|
||||||
# Exit codes:
|
|
||||||
# 0 success (all jobs deployed and healthy, or dry-run completed)
|
|
||||||
# 1 failure (validation error, timeout, or nomad command failure)
|
|
||||||
#
|
|
||||||
# Idempotency:
|
|
||||||
# Running twice back-to-back on a healthy cluster is a no-op. Jobs that are
|
|
||||||
# already healthy print "[deploy] <name> already healthy" and continue.
|
|
||||||
# =============================================================================
|
|
||||||
set -euo pipefail

# ── Configuration ────────────────────────────────────────────────────────────
# SCRIPT_ROOT = directory holding this script; REPO_ROOT defaults to three
# levels up (lib/init/nomad/ → repo root) but can be overridden via env.
SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="${REPO_ROOT:-$(cd "${SCRIPT_ROOT}/../../.." && pwd)}"
# Default per-job healthy-state poll timeout; overridable globally via env
# and per-job via JOB_READY_TIMEOUT_<JOBNAME> (resolved in the main loop).
JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-240}"

DRY_RUN=0

# All chatter goes to stderr so stdout stays clean for future composition.
log() { printf '[deploy] %s\n' "$*" >&2; }
die() { printf '[deploy] ERROR: %s\n' "$*" >&2; exit 1; }
|
|
||||||
|
|
||||||
# ── Parse arguments ───────────────────────────────────────────────────────────
# Positional args are job names (deployed in the given order); --dry-run may
# appear anywhere. Any other dash-option is rejected.
JOBS=()
while [ $# -gt 0 ]; do
  case "$1" in
    --dry-run)
      DRY_RUN=1
      shift
      ;;
    -*)
      die "Unknown option: $1"
      ;;
    *)
      JOBS+=("$1")
      shift
      ;;
  esac
done

# At least one job name is required — bail with usage otherwise.
if [ "${#JOBS[@]}" -eq 0 ]; then
  die "Usage: $0 <jobname> [jobname2 ...] [--dry-run]"
fi
|
|
||||||
|
|
||||||
# ── Helper: _dump_alloc_logs <name> ──────────────────────────────────────────
# Best-effort dump of the last 50 stderr lines of every allocation belonging
# to job <name>. Factored out of _wait_job_running, which previously carried
# two byte-identical copies of this dump (deployment-failed + timeout paths).
# Any nomad/jq failure degrades to "no logs shown", never to a script abort.
_dump_alloc_logs() {
  local job_name="$1"
  local alloc_ids alloc_id

  # Get allocation IDs from job status
  alloc_ids=$(nomad job status -json "$job_name" 2>/dev/null \
    | jq -r '.Allocations[]?.ID // empty' 2>/dev/null) || alloc_ids=""

  [ -n "$alloc_ids" ] || return 0
  for alloc_id in $alloc_ids; do
    log "--- Allocation ${alloc_id} logs (stderr) ---"
    nomad alloc logs -stderr -short "$alloc_id" 2>/dev/null | tail -50 || true
  done
}

# ── Helper: _wait_job_running <name> <timeout> ───────────────────────────────
# Polls `nomad deployment status -json <deployment-id>` until:
#   - Status == "successful" → return 0
#   - Status == "failed"     → dump alloc stderr logs, return 1
#   - <timeout> elapses      → dump alloc stderr logs, return 1
#
# This is a named, reusable helper for future init scripts.
_wait_job_running() {
  local job_name="$1"
  local timeout="$2"
  local elapsed=0

  log "waiting for job '${job_name}' to become healthy (timeout: ${timeout}s)..."

  # Get the latest deployment ID for this job (retry until available).
  # `// empty` is load-bearing: when the job has no deployments yet the JSON
  # array is empty, `.[0].ID` evaluates to null, and plain `jq -r` prints the
  # literal string "null" — which is non-empty, escaped this retry loop, and
  # made the status poll below spin on a nonexistent deployment ID for the
  # full timeout. `// empty` maps that null to a true empty string.
  local deployment_id=""
  local retry_count=0
  local max_retries=12

  while [ -z "$deployment_id" ] && [ "$retry_count" -lt "$max_retries" ]; do
    deployment_id=$(nomad job deployments -json "$job_name" 2>/dev/null \
      | jq -r '.[0].ID // empty' 2>/dev/null) || deployment_id=""
    if [ -z "$deployment_id" ]; then
      sleep 5
      retry_count=$((retry_count + 1))
    fi
  done

  if [ -z "$deployment_id" ]; then
    log "ERROR: no deployment found for job '${job_name}' after ${max_retries} attempts"
    return 1
  fi

  log "tracking deployment '${deployment_id}'..."

  while [ "$elapsed" -lt "$timeout" ]; do
    local deploy_status_json
    deploy_status_json=$(nomad deployment status -json "$deployment_id" 2>/dev/null) || {
      # Deployment may not exist yet — keep waiting
      sleep 5
      elapsed=$((elapsed + 5))
      continue
    }

    local status
    status=$(printf '%s' "$deploy_status_json" | jq -r '.Status' 2>/dev/null) || {
      sleep 5
      elapsed=$((elapsed + 5))
      continue
    }

    case "$status" in
      successful)
        log "${job_name} healthy after ${elapsed}s"
        return 0
        ;;
      failed)
        log "deployment '${deployment_id}' failed for job '${job_name}'"
        log "showing last 50 lines of allocation logs (stderr):"
        _dump_alloc_logs "$job_name"
        return 1
        ;;
      *)
        # running / progressing / any other transitional state: keep polling.
        # (The original had separate but identical arms for these — merged.)
        log "deployment '${deployment_id}' status: ${status} (waiting for ${job_name}...)"
        ;;
    esac

    sleep 5
    elapsed=$((elapsed + 5))
  done

  # Timeout — print last 50 lines of alloc logs
  log "TIMEOUT: deployment '${deployment_id}' did not reach successful state within ${timeout}s"
  log "showing last 50 lines of allocation logs (stderr):"
  _dump_alloc_logs "$job_name"
  return 1
}
|
|
||||||
|
|
||||||
# ── Main: deploy each job in order ───────────────────────────────────────────
# Jobs are processed strictly sequentially: each must validate, register, and
# reach a healthy deployment before the next starts (dependency ordering).
for job_name in "${JOBS[@]}"; do
  jobspec_path="${REPO_ROOT}/nomad/jobs/${job_name}.hcl"

  if [ ! -f "$jobspec_path" ]; then
    die "Jobspec not found: ${jobspec_path}"
  fi

  # Per-job timeout override: JOB_READY_TIMEOUT_<UPPERCASE_JOBNAME>
  # Sanitize job name: replace hyphens with underscores (bash vars can't have hyphens)
  # NOTE(review): mixing a character class with a literal in both tr sets
  # ('[:lower:]-' → '[:upper:]_') works with GNU tr — confirm behavior on
  # any non-GNU (BSD/busybox) tr this may run under.
  job_upper=$(printf '%s' "$job_name" | tr '[:lower:]-' '[:upper:]_' | tr ' ' '_')
  timeout_var="JOB_READY_TIMEOUT_${job_upper}"
  # Indirect expansion: read the override if set, else the global default.
  job_timeout="${!timeout_var:-$JOB_READY_TIMEOUT_SECS}"

  if [ "$DRY_RUN" -eq 1 ]; then
    log "[dry-run] nomad job validate ${jobspec_path}"
    log "[dry-run] nomad job run -detach ${jobspec_path}"
    log "[dry-run] (would wait for '${job_name}' to become healthy for ${job_timeout}s)"
    continue
  fi

  log "processing job: ${job_name}"

  # 1. Validate the jobspec
  log "validating: ${jobspec_path}"
  if ! nomad job validate "$jobspec_path"; then
    die "validation failed for: ${jobspec_path}"
  fi

  # 2. Check if already healthy (idempotency) — a job whose Status is
  #    "running" is not re-registered, making back-to-back runs a no-op.
  job_status_json=$(nomad job status -json "$job_name" 2>/dev/null || true)
  if [ -n "$job_status_json" ]; then
    current_status=$(printf '%s' "$job_status_json" | jq -r '.Status' 2>/dev/null || true)
    if [ "$current_status" = "running" ]; then
      log "${job_name} already healthy"
      continue
    fi
  fi

  # 3. Run the job (idempotent registration)
  log "running: ${jobspec_path}"
  if ! nomad job run -detach "$jobspec_path"; then
    die "failed to run job: ${job_name}"
  fi

  # 4. Wait for healthy state
  if ! _wait_job_running "$job_name" "$job_timeout"; then
    die "deployment for job '${job_name}' did not reach successful state"
  fi
done

if [ "$DRY_RUN" -eq 1 ]; then
  log "dry-run complete"
fi

exit 0
|
|
||||||
|
|
@ -1,187 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# =============================================================================
|
|
||||||
# lib/init/nomad/install.sh — Idempotent apt install of HashiCorp Nomad + Vault
|
|
||||||
# + Ubuntu-native Docker for Nomad's docker driver
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration. Installs the `nomad` binary (S0.2,
|
|
||||||
# issue #822), the `vault` binary (S0.3, issue #823), and the `docker`
|
|
||||||
# daemon (S0.2-fix, issue #871) needed by Nomad's docker task driver.
|
|
||||||
# Nomad + Vault come from the pinned HashiCorp apt repo; docker comes from
|
|
||||||
# Ubuntu's default apt repo (docker.io) — matches the existing factory
|
|
||||||
# dev-box setup and avoids adding a second apt source with pinning.
|
|
||||||
#
|
|
||||||
# Does NOT configure, start, or enable nomad.service or vault.service —
|
|
||||||
# lib/init/nomad/systemd-nomad.sh and lib/init/nomad/systemd-vault.sh own
|
|
||||||
# those. The docker.service unit ships with the docker.io package and is
|
|
||||||
# enabled+started here directly (not a disinto-owned unit), because Nomad's
|
|
||||||
# docker driver reports Healthy=false without a running dockerd — that
|
|
||||||
# silently blocks job placement at Step 1 with a confusing "missing
|
|
||||||
# drivers" error (issue #871). Does NOT wire this script into `disinto
|
|
||||||
# init` — S0.4 owns that.
|
|
||||||
#
|
|
||||||
# Idempotency contract:
|
|
||||||
# - Running twice back-to-back is a no-op once all three targets are
|
|
||||||
# installed and the HashiCorp apt source is in place.
|
|
||||||
# - Adds the HashiCorp apt keyring only if it is absent.
|
|
||||||
# - Adds the HashiCorp apt sources list only if it is absent.
|
|
||||||
# - Skips `apt-get install` for any package whose installed version already
|
|
||||||
# matches the pin. If all three are satisfied, exits before touching apt.
|
|
||||||
# - `command -v docker` is the docker install sentinel; `systemctl
|
|
||||||
# enable --now` is a no-op on an already-enabled+active unit.
|
|
||||||
#
|
|
||||||
# Configuration:
|
|
||||||
# NOMAD_VERSION — pinned Nomad version (default: see below). Apt package
|
|
||||||
# name is versioned as "nomad=<version>-1".
|
|
||||||
# VAULT_VERSION — pinned Vault version (default: see below). Apt package
|
|
||||||
# name is versioned as "vault=<version>-1".
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# sudo lib/init/nomad/install.sh
|
|
||||||
# sudo NOMAD_VERSION=1.9.5 VAULT_VERSION=1.18.5 lib/init/nomad/install.sh
|
|
||||||
#
|
|
||||||
# Exit codes:
|
|
||||||
# 0 success (installed or already present)
|
|
||||||
# 1 precondition failure (not Debian/Ubuntu, missing tools, not root)
|
|
||||||
# =============================================================================
|
|
||||||
set -euo pipefail

# Pin to specific 1.x releases. Bump here, not at call sites.
# Both are overridable via env for one-off testing of newer releases.
NOMAD_VERSION="${NOMAD_VERSION:-1.9.5}"
VAULT_VERSION="${VAULT_VERSION:-1.18.5}"

# HashiCorp apt repo wiring: keyring path, sources-list path, and the
# upstream URLs the keyring/list are built from.
HASHICORP_KEYRING="/usr/share/keyrings/hashicorp-archive-keyring.gpg"
HASHICORP_SOURCES="/etc/apt/sources.list.d/hashicorp.list"
HASHICORP_GPG_URL="https://apt.releases.hashicorp.com/gpg"
HASHICORP_REPO_URL="https://apt.releases.hashicorp.com"

log() { printf '[install] %s\n' "$*"; }
die() { printf '[install] ERROR: %s\n' "$*" >&2; exit 1; }
|
|
||||||
|
|
||||||
# _installed_version BINARY
# Echoes the installed semver for `nomad` or `vault` (e.g. "1.9.5").
# Both tools print their version on the first line of `<bin> version` as
# "<Name> v<semver>..." — the awk grabs field 2 with the leading "v"
# stripped. Prints nothing when the binary is absent or output is odd.
_installed_version() {
  local binary="$1"
  if ! command -v "$binary" >/dev/null 2>&1; then
    printf ''
    return 0
  fi
  "$binary" version 2>/dev/null \
    | awk 'NR==1 {sub(/^v/, "", $2); print $2; exit}'
}
|
|
||||||
|
|
||||||
# ── Preconditions ────────────────────────────────────────────────────────────
if [ "$(id -u)" -ne 0 ]; then
  die "must run as root (needs apt-get + /usr/share/keyrings write access)"
fi

# Everything below shells out to these four; fail before touching apt.
for bin in apt-get gpg curl lsb_release; do
  command -v "$bin" >/dev/null 2>&1 \
    || die "required binary not found: ${bin}"
done

# Distro codename (e.g. "jammy") goes into the HashiCorp sources line.
CODENAME="$(lsb_release -cs)"
[ -n "$CODENAME" ] || die "lsb_release returned empty codename"
|
|
||||||
|
|
||||||
# ── Fast-path: are both already at desired versions? ─────────────────────────
# _installed_version returns '' for an absent binary, so a fresh host simply
# falls into the "needs install" branches below.
nomad_installed="$(_installed_version nomad)"
vault_installed="$(_installed_version vault)"

# Collect the versioned apt package specs still needed (may be 0, 1, or 2).
need_pkgs=()
if [ "$nomad_installed" = "$NOMAD_VERSION" ]; then
  log "nomad ${NOMAD_VERSION} already installed"
else
  need_pkgs+=("nomad=${NOMAD_VERSION}-1")
fi
if [ "$vault_installed" = "$VAULT_VERSION" ]; then
  log "vault ${VAULT_VERSION} already installed"
else
  need_pkgs+=("vault=${VAULT_VERSION}-1")
fi

# Docker isn't version-pinned (Ubuntu's docker.io tracks the distro's
# ship-stable release — good enough for a dev box and avoids a second
# apt source). Sentinel is binary presence, not a semver match.
if command -v docker >/dev/null 2>&1; then
  log "docker already installed"
  docker_needs_install=0
else
  docker_needs_install=1
fi

# All three targets satisfied → exit before touching apt at all.
if [ "${#need_pkgs[@]}" -eq 0 ] && [ "$docker_needs_install" -eq 0 ]; then
  log "nothing to do"
  exit 0
fi
|
|
||||||
|
|
||||||
# ── HashiCorp apt setup + nomad/vault install (skipped if both at pin) ───────
if [ "${#need_pkgs[@]}" -gt 0 ]; then
  # Ensure HashiCorp apt keyring. Downloaded to a tempfile first so a failed
  # fetch never leaves a truncated keyring behind; the EXIT trap cleans up
  # on die(), and is cleared once the tempfile is removed on success.
  if [ ! -f "$HASHICORP_KEYRING" ]; then
    log "adding HashiCorp apt keyring → ${HASHICORP_KEYRING}"
    tmpkey="$(mktemp)"
    trap 'rm -f "$tmpkey"' EXIT
    curl -fsSL "$HASHICORP_GPG_URL" -o "$tmpkey" \
      || die "failed to fetch HashiCorp GPG key from ${HASHICORP_GPG_URL}"
    gpg --dearmor -o "$HASHICORP_KEYRING" < "$tmpkey" \
      || die "failed to dearmor HashiCorp GPG key"
    chmod 0644 "$HASHICORP_KEYRING"
    rm -f "$tmpkey"
    trap - EXIT
  else
    log "HashiCorp apt keyring already present"
  fi

  # Ensure HashiCorp apt sources list. grep -qxF = exact full-line literal
  # match, so a stale/edited line triggers a rewrite.
  desired_source="deb [signed-by=${HASHICORP_KEYRING}] ${HASHICORP_REPO_URL} ${CODENAME} main"
  if [ ! -f "$HASHICORP_SOURCES" ] \
    || ! grep -qxF "$desired_source" "$HASHICORP_SOURCES"; then
    log "writing HashiCorp apt sources list → ${HASHICORP_SOURCES}"
    printf '%s\n' "$desired_source" > "$HASHICORP_SOURCES"
    apt_update_needed=1
  else
    log "HashiCorp apt sources list already present"
    apt_update_needed=0
  fi

  # Install the pinned versions.
  # NOTE(review): apt-get update only runs when the sources list was just
  # (re)written — if the list already existed but the local apt cache is
  # stale/empty, the install below can fail. Confirm that's acceptable or
  # update unconditionally when need_pkgs is non-empty.
  if [ "$apt_update_needed" -eq 1 ]; then
    log "running apt-get update"
    DEBIAN_FRONTEND=noninteractive apt-get update -qq \
      || die "apt-get update failed"
  fi

  log "installing ${need_pkgs[*]}"
  DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    "${need_pkgs[@]}" \
    || die "apt-get install ${need_pkgs[*]} failed"

  # Verify pinned versions actually landed (catches apt pin/hold surprises).
  final_nomad="$(_installed_version nomad)"
  if [ "$final_nomad" != "$NOMAD_VERSION" ]; then
    die "post-install check: expected nomad ${NOMAD_VERSION}, got '${final_nomad}'"
  fi
  final_vault="$(_installed_version vault)"
  if [ "$final_vault" != "$VAULT_VERSION" ]; then
    die "post-install check: expected vault ${VAULT_VERSION}, got '${final_vault}'"
  fi
fi
|
|
||||||
|
|
||||||
# ── Install docker.io + enable+start docker.service (if missing) ─────────────
# Nomad's docker task driver reports Healthy=false without a running
# dockerd. On the factory dev box docker was pre-installed so Step 0's
# cluster-up passed silently; on a fresh LXC the first docker-driver
# jobspec (forgejo, Step 1) fails placement with "missing drivers".
# Install from Ubuntu's default apt repo — no second source, no pinning.
# `docker.service` ships with the package; `enable --now` is idempotent.
if [ "$docker_needs_install" -eq 1 ]; then
  log "installing docker.io"
  DEBIAN_FRONTEND=noninteractive apt-get install -y -q docker.io \
    || die "apt-get install docker.io failed"
  log "enabling + starting docker.service"
  systemctl enable --now docker \
    || die "failed to enable/start docker.service"
  command -v docker >/dev/null 2>&1 \
    || die "post-install check: docker binary still not found"
fi

log "nomad ${NOMAD_VERSION} + vault ${VAULT_VERSION} + docker installed successfully"
|
|
||||||
|
|
@ -1,77 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# =============================================================================
|
|
||||||
# lib/init/nomad/lib-systemd.sh — Shared idempotent systemd-unit installer
|
|
||||||
#
|
|
||||||
# Sourced by lib/init/nomad/systemd-nomad.sh and lib/init/nomad/systemd-vault.sh
|
|
||||||
# (and any future sibling) to collapse the "write unit if content differs,
|
|
||||||
# daemon-reload, enable (never start)" boilerplate.
|
|
||||||
#
|
|
||||||
# Install-but-don't-start is the invariant this helper enforces — mid-migration
|
|
||||||
# installers land files and enable units; the orchestrator (S0.4) starts them.
|
|
||||||
#
|
|
||||||
# Public API (sourced into caller scope):
|
|
||||||
#
|
|
||||||
# systemd_require_preconditions UNIT_PATH
|
|
||||||
# Asserts the caller is uid 0 and `systemctl` is on $PATH. Calls the
|
|
||||||
# caller's die() with a UNIT_PATH-scoped message on failure.
|
|
||||||
#
|
|
||||||
# systemd_install_unit UNIT_PATH UNIT_NAME UNIT_CONTENT
|
|
||||||
# Writes UNIT_CONTENT to UNIT_PATH (0644 root:root) only if on-disk
|
|
||||||
# content differs. If written, runs `systemctl daemon-reload`. Then
|
|
||||||
# enables UNIT_NAME (no-op if already enabled). Never starts the unit.
|
|
||||||
#
|
|
||||||
# Caller contract:
|
|
||||||
# - Callers MUST define `log()` and `die()` before sourcing this file (we
|
|
||||||
# call log() for status chatter and rely on the caller's error-handling
|
|
||||||
# stance; `set -e` propagates install/cmp/systemctl failures).
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
# systemd_require_preconditions UNIT_PATH
# Assert the caller is uid 0 and `systemctl` is on $PATH before any unit
# file under UNIT_PATH is touched. Failures are reported through the
# caller-supplied die() with a UNIT_PATH-scoped message.
systemd_require_preconditions() {
  local unit_path="$1"
  [ "$(id -u)" -eq 0 ] \
    || die "must run as root (needs write access to ${unit_path})"
  command -v systemctl >/dev/null 2>&1 \
    || die "systemctl not found (systemd is required)"
}
|
|
||||||
|
|
||||||
# systemd_install_unit UNIT_PATH UNIT_NAME UNIT_CONTENT
# Write UNIT_CONTENT to UNIT_PATH (0644 root:root) only if on-disk content
# differs, daemon-reload only when something was written, then enable
# UNIT_NAME. Never starts the unit — the orchestrator (S0.4) owns starting.
systemd_install_unit() {
  local unit_path="$1"
  local unit_name="$2"
  local unit_content="$3"

  # Content-compare (trailing newline added to match what we write) so an
  # up-to-date unit triggers neither a write nor a daemon-reload.
  local needs_reload=0
  if [ ! -f "$unit_path" ] \
    || ! printf '%s\n' "$unit_content" | cmp -s - "$unit_path"; then
    log "writing unit → ${unit_path}"
    # Subshell-scoped EXIT trap guarantees the temp file is removed on
    # both success AND set-e-induced failure of `install`. A function-
    # scoped RETURN trap does NOT fire on errexit-abort (bash only runs
    # RETURN on normal function exit), so the subshell is the reliable
    # cleanup boundary. It's also isolated from the caller's EXIT trap.
    (
      local tmp
      tmp="$(mktemp)"
      trap 'rm -f "$tmp"' EXIT
      printf '%s\n' "$unit_content" > "$tmp"
      install -m 0644 -o root -g root "$tmp" "$unit_path"
    )
    needs_reload=1
  else
    log "unit file already up to date"
  fi

  if [ "$needs_reload" -eq 1 ]; then
    log "systemctl daemon-reload"
    systemctl daemon-reload
  fi

  # `is-enabled` probe first purely for log clarity; `enable` itself would
  # also be a no-op on an already-enabled unit.
  if systemctl is-enabled --quiet "$unit_name" 2>/dev/null; then
    log "${unit_name} already enabled"
  else
    log "systemctl enable ${unit_name}"
    systemctl enable "$unit_name" >/dev/null
  fi
}
|
|
||||||
|
|
@ -1,102 +0,0 @@
|
||||||
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/systemd-nomad.sh — Idempotent systemd unit installer for Nomad
#
# Part of the Nomad+Vault migration (S0.2, issue #822). Writes
# /etc/systemd/system/nomad.service pointing at /etc/nomad.d/ and runs
# `systemctl enable nomad` WITHOUT starting the service — we don't launch
# the cluster until S0.4 wires everything together.
#
# Idempotency contract:
#   - Existing unit file is NOT rewritten when on-disk content already
#     matches the desired content (avoids spurious `daemon-reload`).
#   - `systemctl enable` on an already-enabled unit is a no-op.
#   - This script is safe to run unconditionally before every factory boot.
#
# Preconditions:
#   - nomad binary installed (see lib/init/nomad/install.sh)
#   - /etc/nomad.d/ will hold server.hcl / client.hcl (placed by S0.4)
#
# Usage:
#   sudo lib/init/nomad/systemd-nomad.sh
#
# Exit codes:
#   0  success (unit installed + enabled, or already so)
#   1  precondition failure (not root, no systemctl, no nomad binary)
# =============================================================================
set -euo pipefail

UNIT_PATH="/etc/systemd/system/nomad.service"
NOMAD_CONFIG_DIR="/etc/nomad.d"
NOMAD_DATA_DIR="/var/lib/nomad"

log() { printf '[systemd-nomad] %s\n' "$*"; }
die() { printf '[systemd-nomad] ERROR: %s\n' "$*" >&2; exit 1; }

# shellcheck source=lib-systemd.sh
. "$(dirname "${BASH_SOURCE[0]}")/lib-systemd.sh"

# ── Preconditions ────────────────────────────────────────────────────────────
systemd_require_preconditions "$UNIT_PATH"

NOMAD_BIN="$(command -v nomad 2>/dev/null || true)"
[ -n "$NOMAD_BIN" ] \
  || die "nomad binary not found — run lib/init/nomad/install.sh first"

# ── Desired unit content ─────────────────────────────────────────────────────
# Upstream-recommended baseline (https://developer.hashicorp.com/nomad/docs/install/production/deployment-guide)
# trimmed for a single-node combined server+client dev box.
# - Wants=/After= network-online: nomad must have networking up.
# - User/Group=root: the Docker driver needs root to talk to dockerd.
# - LimitNOFILE/LimitNPROC=infinity: avoid Nomad's startup warning.
# - KillSignal=SIGINT: triggers Nomad's graceful shutdown path.
# - Restart=on-failure with a bounded burst to avoid crash-loops eating the
#   journal when /etc/nomad.d/ is mis-configured.
# - StartLimitIntervalSec/StartLimitBurst live in [Unit]: since systemd v230
#   these are [Unit] directives (the [Service] spellings are deprecated
#   compat aliases) — this also matches the vault.service unit written by
#   systemd-vault.sh.
read -r -d '' DESIRED_UNIT <<EOF || true
[Unit]
Description=Nomad
Documentation=https://developer.hashicorp.com/nomad/docs
Wants=network-online.target
After=network-online.target
StartLimitIntervalSec=10
StartLimitBurst=3

# When Docker is present, ensure dockerd is up before nomad starts — the
# Docker task driver needs the daemon socket available at startup.
Wants=docker.service
After=docker.service

[Service]
Type=notify
User=root
Group=root
ExecReload=/bin/kill -HUP \$MAINPID
ExecStart=${NOMAD_BIN} agent -config=${NOMAD_CONFIG_DIR}
KillMode=process
KillSignal=SIGINT
LimitNOFILE=infinity
LimitNPROC=infinity
Restart=on-failure
RestartSec=2
TasksMax=infinity
OOMScoreAdjust=-1000

[Install]
WantedBy=multi-user.target
EOF

# ── Ensure config + data dirs exist ──────────────────────────────────────────
# We do not populate /etc/nomad.d/ here (that's S0.4). We do create the
# directory so `nomad agent -config=/etc/nomad.d` doesn't error if the unit
# is started before hcl files are dropped in.
for d in "$NOMAD_CONFIG_DIR" "$NOMAD_DATA_DIR"; do
  if [ ! -d "$d" ]; then
    log "creating ${d}"
    install -d -m 0755 "$d"
  fi
done

# ── Install + reload + enable (shared with systemd-vault.sh via lib-systemd) ─
systemd_install_unit "$UNIT_PATH" "nomad.service" "$DESIRED_UNIT"

log "done — unit installed and enabled (NOT started; S0.4 brings the cluster up)"
|
|
||||||
|
|
@ -1,151 +0,0 @@
|
||||||
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/systemd-vault.sh — Idempotent systemd unit installer for Vault
#
# Part of the Nomad+Vault migration (S0.3, issue #823). Lands four things:
#   1. /etc/vault.d/                     (0755 root:root)
#   2. /etc/vault.d/vault.hcl            (copy of nomad/vault.hcl, 0644 root:root)
#   3. /var/lib/vault/data/              (0700 root:root, Vault file-storage backend)
#   4. /etc/systemd/system/vault.service (0644 root:root)
#
# Then `systemctl enable vault` WITHOUT starting the service. Bootstrap
# order is:
#   lib/init/nomad/install.sh        (nomad + vault binaries)
#   lib/init/nomad/systemd-vault.sh  (this script — unit + config + dirs)
#   lib/init/nomad/vault-init.sh     (init + write unseal.key + unseal once)
#   systemctl start vault            (ExecStartPost auto-unseals from file)
#
# The systemd unit's ExecStartPost reads /etc/vault.d/unseal.key and calls
# `vault operator unseal`. That file is written by vault-init.sh on first
# run; until it exists, `systemctl start vault` will leave Vault sealed
# (ExecStartPost fails, unit goes into failed state — intentional, visible).
#
# Seal model:
#   The single unseal key lives at /etc/vault.d/unseal.key (0400 root).
#   Seal-key theft == vault theft. Factory-dev-box-acceptable tradeoff —
#   we avoid running a second Vault to auto-unseal the first.
#
# Idempotency contract:
#   - Unit file NOT rewritten when on-disk content already matches desired.
#   - vault.hcl NOT rewritten when on-disk content matches the repo copy.
#   - `systemctl enable` on an already-enabled unit is a no-op.
#   - Safe to run unconditionally before every factory boot.
#
# Preconditions:
#   - vault binary installed (lib/init/nomad/install.sh)
#   - nomad/vault.hcl present in the repo (relative to this script)
#
# Usage:
#   sudo lib/init/nomad/systemd-vault.sh
#
# Exit codes:
#   0  success (unit+config installed + enabled, or already so)
#   1  precondition failure (not root, no systemctl, no vault binary,
#      missing source config)
# =============================================================================
set -euo pipefail

UNIT_PATH="/etc/systemd/system/vault.service"
VAULT_CONFIG_DIR="/etc/vault.d"
VAULT_CONFIG_FILE="${VAULT_CONFIG_DIR}/vault.hcl"
VAULT_DATA_DIR="/var/lib/vault/data"

# Resolve the repo copy of vault.hcl relative to this script's location so
# the script works regardless of the caller's cwd.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
VAULT_HCL_SRC="${REPO_ROOT}/nomad/vault.hcl"

log() { printf '[systemd-vault] %s\n' "$*"; }
die() { printf '[systemd-vault] ERROR: %s\n' "$*" >&2; exit 1; }

# shellcheck source=lib-systemd.sh
. "${SCRIPT_DIR}/lib-systemd.sh"

# ── Preconditions ────────────────────────────────────────────────────────────
systemd_require_preconditions "$UNIT_PATH"

VAULT_BIN="$(command -v vault 2>/dev/null || true)"
[ -n "$VAULT_BIN" ] \
  || die "vault binary not found — run lib/init/nomad/install.sh first"

[ -f "$VAULT_HCL_SRC" ] \
  || die "source config not found: ${VAULT_HCL_SRC}"

# ── Desired unit content ─────────────────────────────────────────────────────
# Adapted from HashiCorp's recommended vault.service template
# (https://developer.hashicorp.com/vault/tutorials/getting-started-deploy/deploy)
# for a single-node factory dev box:
# - User=root keeps the seal-key read path simple (unseal.key is 0400 root).
# - CAP_IPC_LOCK lets mlock() succeed so disable_mlock=false is honoured.
#   Harmless when running as root; required if this is ever flipped to a
#   dedicated `vault` user.
# - ExecStartPost auto-unseals on every boot using the persisted key.
#   This is the dev-persisted-seal tradeoff — seal-key theft == vault
#   theft, but no second Vault to babysit.
# - ConditionFileNotEmpty guards against starting without config — makes
#   a missing vault.hcl visible in systemctl status, not a crash loop.
# - Type=notify so systemd waits for Vault's listener-ready notification
#   before running ExecStartPost (ExecStartPost also has `sleep 2` as a
#   belt-and-braces guard against Type=notify edge cases).
# - \$MAINPID is escaped so bash doesn't expand it inside this heredoc.
# - \$(cat ...) is escaped so the subshell runs at unit-execution time
#   (inside bash -c), not at heredoc-expansion time here.
read -r -d '' DESIRED_UNIT <<EOF || true
[Unit]
Description=HashiCorp Vault
Documentation=https://developer.hashicorp.com/vault/docs
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=${VAULT_CONFIG_FILE}
StartLimitIntervalSec=60
StartLimitBurst=3

[Service]
Type=notify
User=root
Group=root
Environment=VAULT_ADDR=http://127.0.0.1:8200
SecureBits=keep-caps
CapabilityBoundingSet=CAP_IPC_LOCK
AmbientCapabilities=CAP_IPC_LOCK
ExecStart=${VAULT_BIN} server -config=${VAULT_CONFIG_FILE}
ExecStartPost=/bin/bash -c 'sleep 2 && ${VAULT_BIN} operator unseal \$(cat ${VAULT_CONFIG_DIR}/unseal.key)'
ExecReload=/bin/kill --signal HUP \$MAINPID
KillMode=process
KillSignal=SIGINT
Restart=on-failure
RestartSec=5
TimeoutStopSec=30
LimitNOFILE=65536
LimitMEMLOCK=infinity

[Install]
WantedBy=multi-user.target
EOF

# ── Ensure config + data dirs exist ──────────────────────────────────────────
# /etc/vault.d is 0755 — vault.hcl is world-readable (no secrets in it);
# the real secrets (unseal.key, root.token) get their own 0400 mode.
# /var/lib/vault/data is 0700 — vault's on-disk state (encrypted-at-rest
# by Vault itself, but an extra layer of "don't rely on that").
if [ ! -d "$VAULT_CONFIG_DIR" ]; then
  log "creating ${VAULT_CONFIG_DIR}"
  install -d -m 0755 -o root -g root "$VAULT_CONFIG_DIR"
fi
if [ ! -d "$VAULT_DATA_DIR" ]; then
  log "creating ${VAULT_DATA_DIR}"
  install -d -m 0700 -o root -g root "$VAULT_DATA_DIR"
fi

# ── Install vault.hcl only if content differs ────────────────────────────────
if [ ! -f "$VAULT_CONFIG_FILE" ] \
  || ! cmp -s "$VAULT_HCL_SRC" "$VAULT_CONFIG_FILE"; then
  log "writing config → ${VAULT_CONFIG_FILE}"
  install -m 0644 -o root -g root "$VAULT_HCL_SRC" "$VAULT_CONFIG_FILE"
else
  log "config already up to date"
fi

# ── Install + reload + enable (shared with systemd-nomad.sh via lib-systemd) ─
systemd_install_unit "$UNIT_PATH" "vault.service" "$DESIRED_UNIT"

log "done — unit+config installed and enabled (NOT started; vault-init.sh next)"
|
|
||||||
|
|
@ -1,140 +0,0 @@
|
||||||
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/vault-engines.sh — Enable required Vault secret engines
#
# Part of the Nomad+Vault migration (S2.1, issue #912). Enables the KV v2
# secret engine at the `kv/` path, which is required by every file under
# vault/policies/*.hcl, every role in vault/roles.yaml, every write done
# by tools/vault-import.sh, and every template read done by
# nomad/jobs/forgejo.hcl — all of which address paths under kv/disinto/…
# and 403 if the mount is absent.
#
# Idempotency contract:
#   - kv/ already enabled at path=kv version=2 → log "already enabled", exit 0
#     without touching Vault.
#   - kv/ enabled at a different type/version → die (manual intervention).
#   - kv/ not enabled → POST sys/mounts/kv to enable kv-v2, log "enabled".
#   - Second run on a fully-configured box is a silent no-op.
#
# Preconditions:
#   - Vault is unsealed and reachable (VAULT_ADDR + VAULT_TOKEN set OR
#     defaultable to the local-cluster shape via _hvault_default_env).
#   - Must run AFTER cluster-up.sh (unseal complete) but BEFORE
#     vault-apply-policies.sh (policies reference kv/* paths).
#
# Environment:
#   VAULT_ADDR  — default http://127.0.0.1:8200 via _hvault_default_env.
#   VAULT_TOKEN — env OR /etc/vault.d/root.token (resolved by lib/hvault.sh).
#
# Usage:
#   sudo lib/init/nomad/vault-engines.sh
#   sudo lib/init/nomad/vault-engines.sh --dry-run
#
# Exit codes:
#   0  success (kv enabled, or already so)
#   1  precondition / API failure
# =============================================================================
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"

# shellcheck source=../../hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"

log() { printf '[vault-engines] %s\n' "$*"; }
die() { printf '[vault-engines] ERROR: %s\n' "$*" >&2; exit 1; }

# ── Flag parsing (single optional flag) ─────────────────────────────────────
# Shape: while/shift loop. Deliberately NOT a flat `case "${1:-}"` like
# tools/vault-apply-policies.sh nor an if/elif ladder like
# tools/vault-apply-roles.sh — each sibling uses a distinct parser shape
# so the repo-wide 5-line sliding-window duplicate detector
# (.woodpecker/detect-duplicates.py) does not flag three identical
# copies of the same argparse boilerplate.
print_help() {
  cat <<EOF
Usage: $(basename "$0") [--dry-run]

Enable the KV v2 secret engine at kv/. Required by all Vault policies,
roles, and Nomad job templates that reference kv/disinto/* paths.
Idempotent: an already-enabled kv/ is reported and left untouched.

  --dry-run   Probe state and print the action without contacting Vault
              in a way that mutates it.
EOF
}
dry_run=false
while [ "$#" -gt 0 ]; do
  case "$1" in
    --dry-run) dry_run=true; shift ;;
    -h|--help) print_help; exit 0 ;;
    *) die "unknown flag: $1" ;;
  esac
done

# ── Preconditions ────────────────────────────────────────────────────────────
for bin in curl jq; do
  command -v "$bin" >/dev/null 2>&1 \
    || die "required binary not found: ${bin}"
done

# Default the local-cluster Vault env (VAULT_ADDR + VAULT_TOKEN). Shared
# with the rest of the init-time Vault scripts — see lib/hvault.sh header.
_hvault_default_env

# ── Dry-run: probe existing state and print plan ─────────────────────────────
if [ "$dry_run" = true ]; then
  # Probe connectivity with the same helper the live path uses. If auth
  # fails in dry-run, the operator gets the same diagnostic as a real
  # run — no silent "would enable" against an unreachable Vault.
  hvault_token_lookup >/dev/null \
    || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
  mounts_raw="$(hvault_get_or_empty "sys/mounts")" \
    || die "failed to list secret engines"
  # NOTE: dry-run only checks mount *presence*, not type/version — the
  # stricter kv-v2 validation happens on the live path below.
  if [ -n "$mounts_raw" ] \
    && printf '%s' "$mounts_raw" | jq -e '."kv/"' >/dev/null 2>&1; then
    log "[dry-run] kv-v2 at kv/ already enabled"
  else
    log "[dry-run] would enable kv-v2 at kv/"
  fi
  exit 0
fi

# ── Live run: Vault connectivity check ───────────────────────────────────────
hvault_token_lookup >/dev/null \
  || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"

# ── Check if kv/ is already enabled ──────────────────────────────────────────
# sys/mounts returns an object keyed by "<path>/" for every enabled secret
# engine (trailing slash is Vault's on-disk form). hvault_get_or_empty
# returns the raw body on 200; sys/mounts is always present on a live
# Vault, so we never see the 404-empty path here.
log "checking existing secret engines"
mounts_raw="$(hvault_get_or_empty "sys/mounts")" \
  || die "failed to list secret engines"

if [ -n "$mounts_raw" ] \
  && printf '%s' "$mounts_raw" | jq -e '."kv/"' >/dev/null 2>&1; then
  # kv/ exists — verify it's kv-v2 on the right path shape. Vault returns
  # the option as a string ("2") on GET, never an integer.
  kv_type="$(printf '%s' "$mounts_raw" | jq -r '."kv/".type // ""')"
  kv_version="$(printf '%s' "$mounts_raw" | jq -r '."kv/".options.version // ""')"
  if [ "$kv_type" = "kv" ] && [ "$kv_version" = "2" ]; then
    log "kv-v2 at kv/ already enabled (type=${kv_type}, version=${kv_version})"
    exit 0
  fi
  die "kv/ exists but is not kv-v2 (type=${kv_type:-<unset>}, version=${kv_version:-<unset>}) — manual intervention required"
fi

# ── Enable kv-v2 at path=kv ──────────────────────────────────────────────────
# POST sys/mounts/<path> with type=kv + options.version=2 is the
# HTTP-API equivalent of `vault secrets enable -path=kv -version=2 kv`.
# Keeps the script vault-CLI-free (matches the policy-apply + nomad-auth
# scripts; their headers explain why a CLI dep would die on client-only
# nodes).
log "enabling kv-v2 at path=kv"
enable_payload="$(jq -n '{type:"kv",options:{version:"2"}}')"
_hvault_request POST "sys/mounts/kv" "$enable_payload" >/dev/null \
  || die "failed to enable kv-v2 secret engine"
log "kv-v2 enabled at kv/"
|
|
||||||
|
|
@ -1,206 +0,0 @@
|
||||||
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/vault-init.sh — Idempotent Vault first-run initializer
#
# Part of the Nomad+Vault migration (S0.3, issue #823). Initializes Vault
# in dev-persisted-seal mode (single unseal key on disk) and unseals once.
# On re-run, becomes a no-op — never re-initializes or rotates the key.
#
# What it does (first run):
#   1. Ensures Vault is reachable at ${VAULT_ADDR} — spawns a temporary
#      `vault server -config=/etc/vault.d/vault.hcl` if not already up.
#   2. Runs `vault operator init -key-shares=1 -key-threshold=1` and
#      captures the resulting unseal key + root token.
#   3. Writes /etc/vault.d/unseal.key (0400 root, no trailing newline).
#   4. Writes /etc/vault.d/root.token (0400 root, no trailing newline).
#   5. Unseals Vault once in the current process.
#   6. Shuts down the temporary server if we started one (so a subsequent
#      `systemctl start vault` doesn't conflict on port 8200).
#
# Idempotency contract:
#   - /etc/vault.d/unseal.key exists AND `vault status` reports
#     initialized=true → exit 0, no mutation, no re-init.
#   - Initialized-but-unseal.key-missing is a hard failure (can't recover
#     the key without the existing storage; user must restore from backup).
#
# Bootstrap order:
#   lib/init/nomad/install.sh        (installs vault binary)
#   lib/init/nomad/systemd-vault.sh  (lands unit + config + dirs; enables)
#   lib/init/nomad/vault-init.sh     (this script — init + unseal once)
#   systemctl start vault            (ExecStartPost auto-unseals henceforth)
#
# Seal model:
#   Single unseal key persisted on disk at /etc/vault.d/unseal.key. Seal-key
#   theft == vault theft. Factory-dev-box-acceptable tradeoff — we avoid
#   running a second Vault to auto-unseal the first.
#
# Environment:
#   VAULT_ADDR — Vault API address (default: http://127.0.0.1:8200).
#
# Usage:
#   sudo lib/init/nomad/vault-init.sh
#
# Exit codes:
#   0  success (initialized + unsealed + keys persisted; or already done)
#   1  precondition / operational failure
# =============================================================================
set -euo pipefail

VAULT_CONFIG_FILE="/etc/vault.d/vault.hcl"
UNSEAL_KEY_FILE="/etc/vault.d/unseal.key"
ROOT_TOKEN_FILE="/etc/vault.d/root.token"
VAULT_ADDR="${VAULT_ADDR:-http://127.0.0.1:8200}"
# Exported so every `vault` CLI invocation below targets the same server.
export VAULT_ADDR

# Track whether we spawned a temporary vault (for cleanup).
spawned_pid=""
spawned_log=""

log() { printf '[vault-init] %s\n' "$*"; }
die() { printf '[vault-init] ERROR: %s\n' "$*" >&2; exit 1; }

# ── Cleanup: stop the temporary server (if we started one) on any exit ───────
# EXIT trap fires on success AND failure AND signals — so we never leak a
# background vault process holding port 8200 after this script returns.
cleanup() {
  if [ -n "$spawned_pid" ] && kill -0 "$spawned_pid" 2>/dev/null; then
    log "stopping temporary vault (pid=${spawned_pid})"
    kill "$spawned_pid" 2>/dev/null || true
    wait "$spawned_pid" 2>/dev/null || true
  fi
  if [ -n "$spawned_log" ] && [ -f "$spawned_log" ]; then
    rm -f "$spawned_log"
  fi
}
trap cleanup EXIT

# ── Preconditions ────────────────────────────────────────────────────────────
if [ "$(id -u)" -ne 0 ]; then
  die "must run as root (needs to write 0400 files under /etc/vault.d)"
fi

for bin in vault jq; do
  command -v "$bin" >/dev/null 2>&1 \
    || die "required binary not found: ${bin}"
done

[ -f "$VAULT_CONFIG_FILE" ] \
  || die "config not found: ${VAULT_CONFIG_FILE} — run systemd-vault.sh first"

# ── Helpers ──────────────────────────────────────────────────────────────────

# vault_reachable — true iff `vault status` can reach the server.
# Exit codes from `vault status`:
#   0 = reachable, initialized, unsealed
#   2 = reachable, sealed (or uninitialized)
#   1 = unreachable / other error
# We treat 0 and 2 as "reachable". `|| status=$?` avoids set -e tripping
# on the expected sealed-is-also-fine case.
vault_reachable() {
  local status=0
  vault status -format=json >/dev/null 2>&1 || status=$?
  [ "$status" -eq 0 ] || [ "$status" -eq 2 ]
}

# vault_initialized — echoes "true" / "false" / "" (empty on parse failure
# or unreachable vault). Always returns 0 so that `x="$(vault_initialized)"`
# is safe under `set -euo pipefail`.
#
# Key subtlety: `vault status` exits 2 when Vault is sealed OR uninitialized
# — the exact state we need to *observe* on first run. Without the
# `|| true` guard, pipefail + set -e inside a standalone assignment would
# propagate that exit 2 to the outer script and abort before we ever call
# `vault operator init`. We capture `vault status`'s output to a variable
# first (pipefail-safe), then feed it to jq separately.
vault_initialized() {
  local out=""
  out="$(vault status -format=json 2>/dev/null || true)"
  [ -n "$out" ] || { printf ''; return 0; }
  printf '%s' "$out" | jq -r '.initialized' 2>/dev/null || printf ''
}

# write_secret_file PATH CONTENT
# Write CONTENT to PATH atomically with 0400 root:root and no trailing
# newline. mktemp+install keeps perms tight for the whole lifetime of
# the file on disk — no 0644-then-chmod window.
write_secret_file() {
  local path="$1" content="$2"
  local tmp
  tmp="$(mktemp)"
  printf '%s' "$content" > "$tmp"
  install -m 0400 -o root -g root "$tmp" "$path"
  rm -f "$tmp"
}

# ── Ensure vault is reachable ────────────────────────────────────────────────
if ! vault_reachable; then
  log "vault not reachable at ${VAULT_ADDR} — starting temporary server"
  spawned_log="$(mktemp)"
  vault server -config="$VAULT_CONFIG_FILE" >"$spawned_log" 2>&1 &
  spawned_pid=$!

  # Poll for readiness (15 × 1s). Vault's API listener comes up before
  # notify-ready in Type=notify mode, but well inside a few seconds even
  # on cold boots.
  ready=0
  for _ in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15; do
    if vault_reachable; then
      ready=1
      break
    fi
    sleep 1
  done
  if [ "$ready" -ne 1 ]; then
    log "vault did not become reachable within 15s — server log follows:"
    if [ -f "$spawned_log" ]; then
      sed 's/^/[vault-server] /' "$spawned_log" >&2 || true
    fi
    die "failed to start temporary vault server"
  fi
  log "temporary vault ready (pid=${spawned_pid})"
fi

# ── Idempotency gate ─────────────────────────────────────────────────────────
initialized="$(vault_initialized)"

if [ "$initialized" = "true" ] && [ -f "$UNSEAL_KEY_FILE" ]; then
  log "vault already initialized and unseal.key present — no-op"
  exit 0
fi

if [ "$initialized" = "true" ] && [ ! -f "$UNSEAL_KEY_FILE" ]; then
  die "vault is initialized but ${UNSEAL_KEY_FILE} is missing — cannot recover the unseal key; restore from backup or wipe ${VAULT_CONFIG_FILE%/*}/data and re-run"
fi

# Anything other than a clean "false" here means the status JSON was
# unreadable (unreachable vault, jq parse failure) — bail loudly rather
# than risk calling `operator init` against an unknown state.
if [ "$initialized" != "false" ]; then
  die "unexpected initialized state: '${initialized}' (expected 'true' or 'false')"
fi

# ── Initialize ───────────────────────────────────────────────────────────────
log "initializing vault (key-shares=1, key-threshold=1)"
init_json="$(vault operator init \
  -key-shares=1 \
  -key-threshold=1 \
  -format=json)" \
  || die "vault operator init failed"

unseal_key="$(printf '%s' "$init_json" | jq -er '.unseal_keys_b64[0]')" \
  || die "failed to extract unseal key from init response"
root_token="$(printf '%s' "$init_json" | jq -er '.root_token')" \
  || die "failed to extract root token from init response"

# Best-effort scrub of init_json from the env (the captured key+token still
# sit in the local vars above — there's no clean way to wipe bash memory).
unset init_json

# ── Persist keys ─────────────────────────────────────────────────────────────
log "writing ${UNSEAL_KEY_FILE} (0400 root)"
write_secret_file "$UNSEAL_KEY_FILE" "$unseal_key"
log "writing ${ROOT_TOKEN_FILE} (0400 root)"
write_secret_file "$ROOT_TOKEN_FILE" "$root_token"

# ── Unseal in the current process ────────────────────────────────────────────
log "unsealing vault"
vault operator unseal "$unseal_key" >/dev/null \
  || die "vault operator unseal failed"

log "done — vault initialized + unsealed + keys persisted"
|
|
||||||
|
|
@ -1,183 +0,0 @@
|
||||||
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/vault-nomad-auth.sh — Idempotent Vault JWT auth + Nomad wiring
#
# Nomad+Vault migration, S2.3 (issue #881). Enables Vault's JWT auth method at
# path `jwt-nomad`, points it at Nomad's workload-identity JWKS endpoint,
# writes one role per policy (delegated to tools/vault-apply-roles.sh),
# installs /etc/nomad.d/server.hcl from the repo copy, and SIGHUPs nomad so
# jobs can exchange short-lived workload-identity tokens for Vault tokens —
# no shared VAULT_TOKEN in job environments.
#
# Steps:
#   1. Enable auth method (sys/auth/jwt-nomad, type=jwt)
#   2. Configure JWKS + algorithms (auth/jwt-nomad/config)
#   3. Upsert roles from vault/roles.yaml (tools/vault-apply-roles.sh)
#   4. Install /etc/nomad.d/server.hcl + SIGHUP nomad if it changed
#
# Idempotency contract:
#   - auth path already enabled       → skip create, log "already enabled"
#   - config identical to desired     → skip write, log "config unchanged"
#   - roles                           → see tools/vault-apply-roles.sh header
#   - server.hcl byte-identical       → skip write, skip SIGHUP
#   - a second run on a configured box is a silent no-op end-to-end
#
# Preconditions:
#   - S0 complete (nomad + vault reachable, vault unsealed)
#   - S2.1 complete: vault/policies/*.hcl applied — otherwise the roles we
#     write reference policies Vault does not know (write succeeds, but
#     token minting fails later)
#   - running as root (writes /etc/nomad.d/server.hcl + signals nomad)
#
# Environment:
#   VAULT_ADDR  — default http://127.0.0.1:8200 (matches nomad/vault.hcl)
#   VAULT_TOKEN — env OR /etc/vault.d/root.token (resolved by lib/hvault.sh)
#
# Usage:
#   sudo lib/init/nomad/vault-nomad-auth.sh
#
# Exit codes:
#   0  success (configured, or already so)
#   1  precondition / API / nomad-reload failure
# =============================================================================
set -euo pipefail

# Resolve repo-relative paths from this file's own location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"

APPLY_ROLES_SH="${REPO_ROOT}/tools/vault-apply-roles.sh"
SERVER_HCL_SRC="${REPO_ROOT}/nomad/server.hcl"
SERVER_HCL_DST="/etc/nomad.d/server.hcl"

# shellcheck source=../../hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"

# Fill in the local-cluster VAULT_ADDR/VAULT_TOKEN defaults when the caller
# (`disinto init` on a fresh LXC, issue #912) did not export them. Must run
# after hvault.sh is sourced.
_hvault_default_env

log() {
  printf '[vault-auth] %s\n' "$*"
}

die() {
  printf '[vault-auth] ERROR: %s\n' "$*" >&2
  exit 1
}
|
|
||||||
|
|
||||||
# ── Preconditions ────────────────────────────────────────────────────────────
[ "$(id -u)" -eq 0 ] \
  || die "must run as root (writes ${SERVER_HCL_DST} + signals nomad)"

# Only curl + jq + systemctl are hard requirements. The `vault` CLI is
# deliberately NOT on this list: hvault.sh's helpers speak plain HTTP, and
# requiring the CLI would spuriously kill a Nomad-client-only node that
# never installs it. systemctl is needed for SIGHUPing nomad.
for dep in curl jq systemctl; do
  command -v "$dep" >/dev/null 2>&1 \
    || die "required binary not found: ${dep}"
done

[ -f "$SERVER_HCL_SRC" ] \
  || die "source config not found: ${SERVER_HCL_SRC}"
[ -x "$APPLY_ROLES_SH" ] \
  || die "companion script missing or not executable: ${APPLY_ROLES_SH}"

# Cheap auth probe before attempting any writes.
hvault_token_lookup >/dev/null \
  || die "Vault auth probe failed — check VAULT_ADDR + VAULT_TOKEN"
|
|
||||||
|
|
||||||
# ── Desired config (Nomad workload-identity JWKS on localhost:4646) ──────────
# Nomad's default workload-identity signer publishes its public JWKS at
# /.well-known/jwks.json on the nomad HTTP API port (4646); Vault validates
# workload JWTs against it. RS256 is the signer's default algorithm.
# `default_role` names a role we intentionally never create, so a login
# without an explicit role fails — every jobspec must pick a concrete role
# via `vault { role = "..." }`.
JWKS_URL="http://127.0.0.1:4646/.well-known/jwks.json"

# ── Step 1/4: enable auth method jwt-nomad ───────────────────────────────────
log "── Step 1/4: enable auth method path=jwt-nomad type=jwt ──"
# sys/auth keys every enabled method as "<path>/" — trailing slash included,
# matching Vault's on-disk representation. An absent key means "not enabled",
# not a lookup error. hvault_get_or_empty would return empty on 404, but the
# listing always exists (token auth is built in), so in practice this is 200.
auth_list="$(hvault_get_or_empty "sys/auth")" \
  || die "failed to list auth methods"
if ! printf '%s' "$auth_list" | jq -e '.["jwt-nomad/"]' >/dev/null 2>&1; then
  mount_payload="$(jq -n '{type:"jwt",description:"Nomad workload identity (S2.3)"}')"
  _hvault_request POST "sys/auth/jwt-nomad" "$mount_payload" >/dev/null \
    || die "failed to enable auth method jwt-nomad"
  log "auth path jwt-nomad enabled"
else
  log "auth path jwt-nomad already enabled"
fi
|
|
||||||
|
|
||||||
# ── Step 2/4: configure auth/jwt-nomad/config ────────────────────────────────
log "── Step 2/4: configure auth/jwt-nomad/config ──"
desired_cfg="$(jq -n --arg jwks "$JWKS_URL" '{
  jwks_url: $jwks,
  jwt_supported_algs: ["RS256"],
  default_role: "default"
}')"

# Read the live config back and compare field by field; writing only on a
# diff keeps re-runs a genuine no-op.
current_cfg_raw="$(hvault_get_or_empty "auth/jwt-nomad/config")" \
  || die "failed to read current jwt-nomad config"
cur_jwks="" cur_algs="[]" cur_default=""
if [ -n "$current_cfg_raw" ]; then
  cur_jwks="$(jq -r '.data.jwks_url // ""' <<<"$current_cfg_raw")"
  # -cS canonicalizes the JSON so the string comparison below is stable.
  cur_algs="$(jq -cS '.data.jwt_supported_algs // []' <<<"$current_cfg_raw")"
  cur_default="$(jq -r '.data.default_role // ""' <<<"$current_cfg_raw")"
fi

if [ "$cur_jwks" = "$JWKS_URL" ] \
  && [ "$cur_algs" = '["RS256"]' ] \
  && [ "$cur_default" = "default" ]; then
  log "jwt-nomad config unchanged"
else
  _hvault_request POST "auth/jwt-nomad/config" "$desired_cfg" >/dev/null \
    || die "failed to write jwt-nomad config"
  log "jwt-nomad config written"
fi
|
|
||||||
|
|
||||||
# ── Step 3/4: apply roles from vault/roles.yaml ──────────────────────────────
log "── Step 3/4: apply roles from vault/roles.yaml ──"
# Single source of truth: tools/vault-apply-roles.sh owns the roles.yaml
# parser and the per-role created/updated/unchanged diffing (see its header).
"$APPLY_ROLES_SH"

# ── Step 4/4: install server.hcl + SIGHUP nomad if changed ───────────────────
log "── Step 4/4: install ${SERVER_HCL_DST} + reload nomad if changed ──"
# cluster-up.sh (S0.4) is the normal install path for server.hcl, but this
# script runs after S0.4, so install here too. Copying only on a content
# diff keeps re-runs free of spurious SIGHUPs; `install` pins 0644 root:root
# on every write.
needs_reload=0
if [ -f "$SERVER_HCL_DST" ] && cmp -s "$SERVER_HCL_SRC" "$SERVER_HCL_DST"; then
  log "unchanged: ${SERVER_HCL_DST}"
else
  log "writing: ${SERVER_HCL_DST}"
  install -m 0644 -o root -g root "$SERVER_HCL_SRC" "$SERVER_HCL_DST"
  needs_reload=1
fi

if [ "$needs_reload" -ne 1 ]; then
  log "server.hcl unchanged — nomad SIGHUP not needed"
elif systemctl is-active --quiet nomad; then
  # `systemctl kill -s SIGHUP` delivers the signal even if the unit ever
  # loses its ExecReload declaration, unlike `systemctl reload` — a future
  # unit edit cannot silently break this script.
  log "SIGHUP nomad to pick up vault stanza"
  systemctl kill -s SIGHUP nomad \
    || die "failed to SIGHUP nomad.service"
else
  # Fresh box: nomad has not started yet, so the new server.hcl is picked up
  # at first start. Starting it is the cluster-up orchestrator's job (S0.4),
  # not ours.
  log "nomad.service not active — skipping SIGHUP (next start loads vault stanza)"
fi

log "── done — jwt-nomad auth + config + roles + nomad vault stanza in place ──"
|
|
||||||
|
|
@ -1,221 +0,0 @@
|
||||||
#!/usr/bin/env bash
# =============================================================================
# lib/init/nomad/wp-oauth-register.sh — Forgejo OAuth2 app registration for Woodpecker
#
# Nomad+Vault migration, S3.3 (issue #936). Creates the Woodpecker OAuth2
# application in Forgejo and stores the client ID + secret in Vault at
# kv/disinto/shared/woodpecker (keys: forgejo_client, forgejo_secret).
#
# Idempotent — re-running after success is a no-op:
#   - OAuth2 app 'woodpecker' already exists →
#       "[wp-oauth] woodpecker OAuth app already registered"
#   - forgejo_client + forgejo_secret already in Vault →
#       "[wp-oauth] credentials already in Vault"
#
# Scope:
#   - GET  /api/v1/user/applications/oauth2 to detect an existing app
#   - POST /api/v1/user/applications/oauth2 to create it when missing
#     (name=woodpecker, redirect_uris=["http://localhost:8000/authorize"])
#   - write forgejo_client + forgejo_secret to Vault KV
#
# Preconditions:
#   - Forgejo reachable at $FORGE_URL (default http://127.0.0.1:3000)
#   - Forgejo admin token in $FORGE_TOKEN, or seeded in Vault at
#     kv/disinto/shared/forge/token
#   - Vault reachable + unsealed at $VAULT_ADDR; VAULT_TOKEN set (env) or
#     /etc/vault.d/root.token readable
#
# Requires: curl, jq
#
# Usage:
#   lib/init/nomad/wp-oauth-register.sh [--dry-run]
#
# Exit codes:
#   0  success (app registered + credentials seeded, or already done)
#   1  precondition / API / Vault failure
# =============================================================================
set -euo pipefail

# Locate the repo root relative to this file and pull in the Vault helpers.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
# shellcheck source=../../../lib/hvault.sh
source "${REPO_ROOT}/lib/hvault.sh"
|
|
||||||
|
|
||||||
# Configuration — env-overridable where that makes sense.
LOG_TAG="[wp-oauth]"
FORGE_URL="${FORGE_URL:-http://127.0.0.1:3000}"
FORGE_OAUTH_APP_NAME="woodpecker"
# Woodpecker's OAuth callback; must match the deployed server's public URL.
FORGE_REDIRECT_URIS='["http://localhost:8000/authorize"]'
KV_MOUNT="${VAULT_KV_MOUNT:-kv}"
KV_PATH="disinto/shared/woodpecker"
# KV v2 data endpoint for the path above.
KV_API_PATH="${KV_MOUNT}/data/${KV_PATH}"

# Tagged logging helpers: log → stdout, die → stderr + exit 1.
log() {
  printf '%s %s\n' "$LOG_TAG" "$*"
}

die() {
  printf '%s ERROR: %s\n' "$LOG_TAG" "$*" >&2
  exit 1
}
|
|
||||||
|
|
||||||
# ── Flag parsing ─────────────────────────────────────────────────────────────
# DRY_RUN may also arrive via the environment; --dry-run forces it on.
DRY_RUN="${DRY_RUN:-0}"
for arg in "$@"; do
  case "$arg" in
    --dry-run)
      DRY_RUN=1
      ;;
    -h|--help)
      cat <<EOF
Usage: $(basename "$0") [--dry-run]

Register Woodpecker OAuth2 app in Forgejo and store credentials
in Vault. Idempotent: re-running is a no-op.

 --dry-run Print planned actions without writing to Vault.
EOF
      exit 0
      ;;
    *)
      die "invalid argument: ${arg} (try --help)"
      ;;
  esac
done
|
|
||||||
|
|
||||||
# ── Step 1/3: Resolve Forgejo token ─────────────────────────────────────────
# Precedence: FORGE_TOKEN from the environment wins; otherwise read the admin
# token the bootstrap seeded at kv/disinto/shared/forge/token.
# (FORGE_URL is already defaulted in the configuration section above and can
# never be empty here, so the previous re-defaulting block was dead code and
# has been removed.)
log "── Step 1/3: resolve Forgejo token ──"

FORGE_TOKEN="${FORGE_TOKEN:-}"
if [ -z "$FORGE_TOKEN" ]; then
  # Bug fix: the token lives at .../forge/token — the old log line claimed
  # kv/${KV_PATH}/token (the woodpecker secret path), which is not what the
  # read below actually targets.
  log "reading FORGE_TOKEN from Vault at kv/disinto/shared/forge/token"
  token_raw="$(hvault_get_or_empty "${KV_MOUNT}/data/disinto/shared/forge/token")" \
    || die "failed to read forge token from Vault"
  if [ -n "$token_raw" ]; then
    # KV v2 wraps the payload twice: .data.data.<key>.
    FORGE_TOKEN="$(printf '%s' "$token_raw" | jq -r '.data.data.token // empty')"
    if [ -z "$FORGE_TOKEN" ]; then
      die "forge token not found at kv/disinto/shared/forge/token"
    fi
    log "forge token loaded from Vault"
  fi
fi

# Empty token_raw (path absent, 404) falls through to this final check.
if [ -z "$FORGE_TOKEN" ]; then
  die "FORGE_TOKEN not set and not found in Vault"
fi
|
|
||||||
|
|
||||||
# ── Step 2/3: Check/create OAuth2 app in Forgejo ────────────────────────────
log "── Step 2/3: ensure OAuth2 app '${FORGE_OAUTH_APP_NAME}' in Forgejo ──"

# List the token user's OAuth2 apps and look for ours by name.
log "checking for existing OAuth2 app '${FORGE_OAUTH_APP_NAME}'"
oauth_apps_raw=$(curl -sf --max-time 10 \
  -H "Authorization: token ${FORGE_TOKEN}" \
  "${FORGE_URL}/api/v1/user/applications/oauth2" 2>/dev/null) || {
  die "failed to list Forgejo OAuth2 apps"
}

oauth_app_exists=false
existing_client_id=""
existing_app_id=""
forgejo_secret=""

if [ -n "$oauth_apps_raw" ]; then
  # Keep both client_id and the numeric app id — the id is needed for the
  # secret-rotation PATCH below. `first` guards against duplicate names.
  app_row=$(printf '%s' "$oauth_apps_raw" \
    | jq -c --arg name "$FORGE_OAUTH_APP_NAME" \
        'map(select(.name == $name)) | first // empty' 2>/dev/null) || true
  if [ -n "$app_row" ]; then
    existing_client_id=$(printf '%s' "$app_row" | jq -r '.client_id // empty')
    existing_app_id=$(printf '%s' "$app_row" | jq -r '.id // empty')
  fi
  if [ -n "$existing_client_id" ]; then
    oauth_app_exists=true
    log "OAuth2 app '${FORGE_OAUTH_APP_NAME}' already exists (client_id: ${existing_client_id:0:8}...)"
  fi
fi

if [ "$oauth_app_exists" = false ]; then
  log "creating OAuth2 app '${FORGE_OAUTH_APP_NAME}'"

  if [ "$DRY_RUN" -eq 1 ]; then
    log "[dry-run] would create OAuth2 app with redirect_uris: ${FORGE_REDIRECT_URIS}"
  else
    # Creation is the only time Forgejo volunteers the client_secret.
    oauth_response=$(curl -sf --max-time 10 -X POST \
      -H "Authorization: token ${FORGE_TOKEN}" \
      -H "Content-Type: application/json" \
      "${FORGE_URL}/api/v1/user/applications/oauth2" \
      -d "{\"name\":\"${FORGE_OAUTH_APP_NAME}\",\"redirect_uris\":${FORGE_REDIRECT_URIS}}" 2>/dev/null) || {
      die "failed to create OAuth2 app in Forgejo"
    }

    existing_client_id=$(printf '%s' "$oauth_response" | jq -r '.client_id // empty')
    forgejo_secret=$(printf '%s' "$oauth_response" | jq -r '.client_secret // empty')

    if [ -z "$existing_client_id" ] || [ -z "$forgejo_secret" ]; then
      die "failed to extract OAuth2 credentials from Forgejo response"
    fi

    log "OAuth2 app '${FORGE_OAUTH_APP_NAME}' created"
    log "OAuth2 app '${FORGE_OAUTH_APP_NAME}' registered (client_id: ${existing_client_id:0:8}...)"
  fi
else
  # App exists but Forgejo never re-discloses a client_secret. A previous
  # revision fabricated one with `openssl rand -hex 32` — a secret Forgejo
  # had never seen, so any OAuth login using it was doomed to fail.
  # Correct handling: if Vault already holds a secret, leave $forgejo_secret
  # empty and let Step 3 reuse it; otherwise ask Forgejo to update (rotate)
  # the app, which returns a fresh, *valid* secret.
  vault_probe="$(hvault_get_or_empty "${KV_API_PATH}")" \
    || die "failed to read ${KV_API_PATH}"
  vault_has_secret="$(printf '%s' "$vault_probe" | jq -r '.data.data.forgejo_secret // ""' 2>/dev/null)" || vault_has_secret=""

  if [ -n "$vault_has_secret" ]; then
    log "OAuth2 app exists — secret will be taken from Vault in Step 3"
  elif [ "$DRY_RUN" -eq 1 ]; then
    log "[dry-run] would rotate OAuth2 app '${FORGE_OAUTH_APP_NAME}' to obtain a new secret"
  else
    if [ -z "$existing_app_id" ]; then
      die "cannot rotate OAuth2 app '${FORGE_OAUTH_APP_NAME}': app id not found in listing"
    fi
    log "OAuth2 app exists but secret not available — rotating via Forgejo API"
    # NOTE(review): Forgejo/Gitea regenerate and return client_secret on the
    # update-application PATCH — confirm against the deployed API version.
    patch_response=$(curl -sf --max-time 10 -X PATCH \
      -H "Authorization: token ${FORGE_TOKEN}" \
      -H "Content-Type: application/json" \
      "${FORGE_URL}/api/v1/user/applications/oauth2/${existing_app_id}" \
      -d "{\"name\":\"${FORGE_OAUTH_APP_NAME}\",\"redirect_uris\":${FORGE_REDIRECT_URIS}}" 2>/dev/null) || {
      die "failed to rotate OAuth2 app secret in Forgejo"
    }
    forgejo_secret=$(printf '%s' "$patch_response" | jq -r '.client_secret // empty')
    if [ -z "$forgejo_secret" ]; then
      die "Forgejo did not return a client_secret when rotating '${FORGE_OAUTH_APP_NAME}'"
    fi
  fi
fi
|
|
||||||
|
|
||||||
# ── Step 3/3: Write credentials to Vault ────────────────────────────────────
log "── Step 3/3: write credentials to Vault ──"

# Read the current KV entry first: the KV v2 write below replaces the whole
# data object, so existing unrelated keys must be merged back in.
existing_raw="$(hvault_get_or_empty "${KV_API_PATH}")" \
  || die "failed to read ${KV_API_PATH}"

vault_data="{}"
vault_client=""
vault_secret=""
if [ -n "$existing_raw" ]; then
  vault_data="$(jq '.data.data // {}' <<<"$existing_raw")"
  vault_client="$(jq -r '.data.data.forgejo_client // ""' <<<"$existing_raw")"
  vault_secret="$(jq -r '.data.data.forgejo_secret // ""' <<<"$existing_raw")"
fi

# No-op path: Vault already carries a secret for exactly this client id.
# This also covers the existing-app case where Step 2 could not recover the
# original secret.
if [ "$vault_client" = "$existing_client_id" ] && [ -n "$vault_secret" ]; then
  log "credentials already in Vault for '${FORGE_OAUTH_APP_NAME}'"
  log "done — OAuth2 app registered + credentials in Vault"
  exit 0
fi

# The client_secret is only disclosed at creation time, so when Vault already
# holds one, prefer it over whatever Step 2 left behind.
if [ -n "$vault_secret" ]; then
  log "using existing secret from Vault for '${FORGE_OAUTH_APP_NAME}'"
  forgejo_secret="$vault_secret"
fi

# Merge our two keys into the existing data object.
payload="$(jq --arg cid "$existing_client_id" --arg sec "$forgejo_secret" \
  '{data: (. + {forgejo_client: $cid, forgejo_secret: $sec})}' <<<"$vault_data")"

if [ "$DRY_RUN" -eq 1 ]; then
  log "[dry-run] would write forgejo_client + forgejo_secret to ${KV_API_PATH}"
  log "done — [dry-run] complete"
else
  _hvault_request POST "${KV_API_PATH}" "$payload" >/dev/null \
    || die "failed to write ${KV_API_PATH}"

  log "forgejo_client + forgejo_secret written to Vault"
  log "done — OAuth2 app registered + credentials in Vault"
fi
|
|
||||||
|
|
@ -126,36 +126,11 @@ issue_claim() {
|
||||||
# Assign to self BEFORE adding in-progress label (issue #471).
|
# Assign to self BEFORE adding in-progress label (issue #471).
|
||||||
# This ordering ensures the assignee is set by the time other pollers
|
# This ordering ensures the assignee is set by the time other pollers
|
||||||
# see the in-progress label, reducing the stale-detection race window.
|
# see the in-progress label, reducing the stale-detection race window.
|
||||||
#
|
curl -sf -X PATCH \
|
||||||
# Capture the HTTP status instead of silently swallowing failures (#856).
|
|
||||||
# A 403 here means the bot user is not a write collaborator on the repo —
|
|
||||||
# previously the silent failure fell through to the post-PATCH verify which
|
|
||||||
# only reported "claim lost to <none>", hiding the real root cause.
|
|
||||||
local patch_code
|
|
||||||
patch_code=$(curl -s -o /dev/null -w '%{http_code}' -X PATCH \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
"${FORGE_API}/issues/${issue}" \
|
"${FORGE_API}/issues/${issue}" \
|
||||||
-d "{\"assignees\":[\"${me}\"]}")
|
-d "{\"assignees\":[\"${me}\"]}" >/dev/null 2>&1 || return 1
|
||||||
if [ "$patch_code" != "201" ] && [ "$patch_code" != "200" ]; then
|
|
||||||
_ilc_log "issue #${issue} PATCH assignee failed: HTTP ${patch_code} (403 = missing write collaborator permission on ${FORGE_REPO:-repo})"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Verify the PATCH stuck. Forgejo's assignees PATCH is last-write-wins, so
|
|
||||||
# under concurrent claims from multiple dev agents two invocations can both
|
|
||||||
# see .assignee == null at the pre-check, both PATCH, and the loser's write
|
|
||||||
# gets silently overwritten (issue #830). Re-reading the assignee closes
|
|
||||||
# that TOCTOU window: only the actual winner observes its own login.
|
|
||||||
# Labels are intentionally applied AFTER this check so the losing claim
|
|
||||||
# leaves no stray "in-progress" label to roll back.
|
|
||||||
local actual
|
|
||||||
actual=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API}/issues/${issue}" | jq -r '.assignee.login // ""') || return 1
|
|
||||||
if [ "$actual" != "$me" ]; then
|
|
||||||
_ilc_log "issue #${issue} claim lost to ${actual:-<none>} — skipping"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
local ip_id bl_id
|
local ip_id bl_id
|
||||||
ip_id=$(_ilc_in_progress_id)
|
ip_id=$(_ilc_in_progress_id)
|
||||||
|
|
|
||||||
|
|
@ -85,22 +85,8 @@ if mirrors:
|
||||||
# environment. The TOML carries host-perspective values (localhost, /home/admin/…)
|
# environment. The TOML carries host-perspective values (localhost, /home/admin/…)
|
||||||
# that would break container API calls and path resolution. Skip overriding
|
# that would break container API calls and path resolution. Skip overriding
|
||||||
# any env var that is already set when running inside the container.
|
# any env var that is already set when running inside the container.
|
||||||
#
|
|
||||||
# #852 defence: validate that $_key is a legal shell identifier before
|
|
||||||
# `export`. A hand-edited TOML can smuggle in keys that survive the
|
|
||||||
# Python emitter but fail `export`'s identifier rule — e.g.
|
|
||||||
# `[mirrors] my-mirror = "..."` becomes `MIRROR_MY-MIRROR` because the
|
|
||||||
# MIRROR_<NAME> emitter only upper-cases, it does not dash-to-underscore.
|
|
||||||
# Without this guard `export "MIRROR_MY-MIRROR=…"` returns non-zero, and
|
|
||||||
# under `set -euo pipefail` in the caller the whole file aborts — which
|
|
||||||
# is how the original #852 crash-loop presented. Warn-and-skip keeps
|
|
||||||
# the rest of the TOML loadable.
|
|
||||||
while IFS='=' read -r _key _val; do
|
while IFS='=' read -r _key _val; do
|
||||||
[ -z "$_key" ] && continue
|
[ -z "$_key" ] && continue
|
||||||
if ! [[ "$_key" =~ ^[A-Za-z_][A-Za-z0-9_]*$ ]]; then
|
|
||||||
echo "WARNING: load-project: skipping invalid shell identifier from TOML: $_key" >&2
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
if [ "${DISINTO_CONTAINER:-}" = "1" ] && [ -n "${!_key:-}" ]; then
|
if [ "${DISINTO_CONTAINER:-}" = "1" ] && [ -n "${!_key:-}" ]; then
|
||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
|
|
@ -143,39 +129,25 @@ agents = cfg.get('agents', {})
|
||||||
for name, config in agents.items():
|
for name, config in agents.items():
|
||||||
if not isinstance(config, dict):
|
if not isinstance(config, dict):
|
||||||
continue
|
continue
|
||||||
# Normalize the TOML section key into a valid shell identifier fragment.
|
|
||||||
# TOML allows dashes in bare keys (e.g. [agents.dev-qwen2]), but POSIX
|
|
||||||
# shell var names cannot contain '-'. Match the 'tr a-z- A-Z_' convention
|
|
||||||
# used in hire-agent.sh (#834) and generators.sh (#852) so the var names
|
|
||||||
# stay consistent across the stack.
|
|
||||||
safe = name.upper().replace('-', '_')
|
|
||||||
# Emit variables in uppercase with the agent name
|
# Emit variables in uppercase with the agent name
|
||||||
if 'base_url' in config:
|
if 'base_url' in config:
|
||||||
print(f'AGENT_{safe}_BASE_URL={config[\"base_url\"]}')
|
print(f'AGENT_{name.upper()}_BASE_URL={config[\"base_url\"]}')
|
||||||
if 'model' in config:
|
if 'model' in config:
|
||||||
print(f'AGENT_{safe}_MODEL={config[\"model\"]}')
|
print(f'AGENT_{name.upper()}_MODEL={config[\"model\"]}')
|
||||||
if 'api_key' in config:
|
if 'api_key' in config:
|
||||||
print(f'AGENT_{safe}_API_KEY={config[\"api_key\"]}')
|
print(f'AGENT_{name.upper()}_API_KEY={config[\"api_key\"]}')
|
||||||
if 'roles' in config:
|
if 'roles' in config:
|
||||||
roles = ' '.join(config['roles']) if isinstance(config['roles'], list) else config['roles']
|
roles = ' '.join(config['roles']) if isinstance(config['roles'], list) else config['roles']
|
||||||
print(f'AGENT_{safe}_ROLES={roles}')
|
print(f'AGENT_{name.upper()}_ROLES={roles}')
|
||||||
if 'forge_user' in config:
|
if 'forge_user' in config:
|
||||||
print(f'AGENT_{safe}_FORGE_USER={config[\"forge_user\"]}')
|
print(f'AGENT_{name.upper()}_FORGE_USER={config[\"forge_user\"]}')
|
||||||
if 'compact_pct' in config:
|
if 'compact_pct' in config:
|
||||||
print(f'AGENT_{safe}_COMPACT_PCT={config[\"compact_pct\"]}')
|
print(f'AGENT_{name.upper()}_COMPACT_PCT={config[\"compact_pct\"]}')
|
||||||
" "$_PROJECT_TOML" 2>/dev/null) || true
|
" "$_PROJECT_TOML" 2>/dev/null) || true
|
||||||
|
|
||||||
if [ -n "$_AGENT_VARS" ]; then
|
if [ -n "$_AGENT_VARS" ]; then
|
||||||
# #852 defence: same warn-and-skip guard as the main loop above. The
|
|
||||||
# Python emitter already normalizes dashed agent names (#862), but a
|
|
||||||
# quoted TOML section like `[agents."weird name"]` could still produce
|
|
||||||
# an invalid identifier. Fail loudly but keep other agents loadable.
|
|
||||||
while IFS='=' read -r _key _val; do
|
while IFS='=' read -r _key _val; do
|
||||||
[ -z "$_key" ] && continue
|
[ -z "$_key" ] && continue
|
||||||
if ! [[ "$_key" =~ ^[A-Za-z_][A-Za-z0-9_]*$ ]]; then
|
|
||||||
echo "WARNING: load-project: skipping invalid shell identifier from [agents.*]: $_key" >&2
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
export "$_key=$_val"
|
export "$_key=$_val"
|
||||||
done <<< "$_AGENT_VARS"
|
done <<< "$_AGENT_VARS"
|
||||||
fi
|
fi
|
||||||
|
|
|
||||||
|
|
@ -1,10 +1,8 @@
|
||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# mirrors.sh — Mirror helpers: push to remotes + register pull mirrors via API.
|
# mirrors.sh — Push primary branch + tags to configured mirror remotes.
|
||||||
#
|
#
|
||||||
# Usage: source lib/mirrors.sh; mirror_push
|
# Usage: source lib/mirrors.sh; mirror_push
|
||||||
# source lib/mirrors.sh; mirror_pull_register <clone_url> <owner> <repo_name> [interval]
|
|
||||||
# Requires: PROJECT_REPO_ROOT, PRIMARY_BRANCH, MIRROR_* vars from load-project.sh
|
# Requires: PROJECT_REPO_ROOT, PRIMARY_BRANCH, MIRROR_* vars from load-project.sh
|
||||||
# FORGE_API_BASE, FORGE_TOKEN for pull-mirror registration
|
|
||||||
|
|
||||||
# shellcheck disable=SC2154 # globals set by load-project.sh / calling script
|
# shellcheck disable=SC2154 # globals set by load-project.sh / calling script
|
||||||
|
|
||||||
|
|
@ -39,73 +37,3 @@ mirror_push() {
|
||||||
log "mirror: pushed to ${name} (pid $!)"
|
log "mirror: pushed to ${name} (pid $!)"
|
||||||
done
|
done
|
||||||
}
|
}
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
# mirror_pull_register — register a Forgejo pull mirror via /repos/migrate.
#
# The migrate API call both creates the target repo and marks it as a pull
# mirror of the external source, so it works when the target does not yet
# exist.
#
# Usage:
#   mirror_pull_register <clone_url> <owner> <repo_name> [interval]
#
# Args:
#   clone_url — HTTPS URL of the source repo (e.g. https://codeberg.org/johba/disinto.git)
#   owner     — Forgejo org or user that will own the mirror repo
#   repo_name — name of the new mirror repo on Forgejo
#   interval  — sync interval (default "8h0m0s"; Forgejo duration format)
#
# Requires: FORGE_API_BASE, FORGE_TOKEN (from env.sh)
#
# Returns 0 and prints the new repo JSON to stdout on success; 1 on failure.
# ---------------------------------------------------------------------------
mirror_pull_register() {
  local src_url="$1"
  local mirror_owner="$2"
  local mirror_name="$3"
  local sync_interval="${4:-8h0m0s}"

  if [ -z "${FORGE_API_BASE:-}" ] || [ -z "${FORGE_TOKEN:-}" ]; then
    printf '%s\n' "ERROR: FORGE_API_BASE and FORGE_TOKEN must be set" >&2
    return 1
  fi
  if [ -z "$src_url" ] || [ -z "$mirror_owner" ] || [ -z "$mirror_name" ]; then
    printf '%s\n' "Usage: mirror_pull_register <clone_url> <owner> <repo_name> [interval]" >&2
    return 1
  fi

  # Build the migrate request with jq so quoting/escaping is never an issue.
  local req
  req=$(jq -n \
    --arg clone_addr "$src_url" \
    --arg repo_name "$mirror_name" \
    --arg repo_owner "$mirror_owner" \
    --arg interval "$sync_interval" \
    '{clone_addr: $clone_addr,
      repo_name: $repo_name,
      repo_owner: $repo_owner,
      mirror: true,
      mirror_interval: $interval,
      service: "git"}')

  # Response body and HTTP status travel on one stream: -w appends the
  # status code as a final line, split back apart below.
  local raw status
  raw=$(curl -s -w "\n%{http_code}" -X POST \
    -H "Authorization: token ${FORGE_TOKEN}" \
    -H "Content-Type: application/json" \
    "${FORGE_API_BASE}/repos/migrate" \
    -d "$req")
  status=$(printf '%s' "$raw" | tail -n1)
  raw=$(printf '%s' "$raw" | sed '$d')

  if [ "$status" -lt 200 ] || [ "$status" -ge 300 ]; then
    printf '%s\n' "ERROR: mirror_pull_register failed (HTTP ${status}): ${raw}" >&2
    return 1
  fi
  printf '%s\n' "$raw"
}
|
|
||||||
|
|
|
||||||
|
|
@ -18,8 +18,8 @@
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
# Source action-vault.sh for _vault_log helper
|
# Source vault.sh for _vault_log helper
|
||||||
source "${FACTORY_ROOT}/lib/action-vault.sh"
|
source "${FACTORY_ROOT}/lib/vault.sh"
|
||||||
|
|
||||||
# Assert required globals are set before using this module.
|
# Assert required globals are set before using this module.
|
||||||
_assert_release_globals() {
|
_assert_release_globals() {
|
||||||
|
|
|
||||||
|
|
@ -30,10 +30,9 @@ _SECRET_PATTERNS=(
|
||||||
_SAFE_PATTERNS=(
|
_SAFE_PATTERNS=(
|
||||||
# Shell variable references: $VAR, ${VAR}, ${VAR:-default}
|
# Shell variable references: $VAR, ${VAR}, ${VAR:-default}
|
||||||
'\$\{?[A-Z_]+\}?'
|
'\$\{?[A-Z_]+\}?'
|
||||||
# Git SHAs in typical git contexts (commit refs, watermarks, not standalone secrets)
|
# Git SHAs in typical git contexts (commit refs, not standalone secrets)
|
||||||
'commit [0-9a-f]{40}'
|
'commit [0-9a-f]{40}'
|
||||||
'Merge [0-9a-f]{40}'
|
'Merge [0-9a-f]{40}'
|
||||||
'last-reviewed: [0-9a-f]{40}'
|
|
||||||
# Forge/GitHub URLs with short hex (PR refs, commit links)
|
# Forge/GitHub URLs with short hex (PR refs, commit links)
|
||||||
'codeberg\.org/[^[:space:]]+'
|
'codeberg\.org/[^[:space:]]+'
|
||||||
'localhost:3000/[^[:space:]]+'
|
'localhost:3000/[^[:space:]]+'
|
||||||
|
|
|
||||||
|
|
@ -1,585 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# =============================================================================
|
|
||||||
# sprint-filer.sh — Parse merged sprint PRs and file sub-issues via filer-bot
|
|
||||||
#
|
|
||||||
# Invoked by the ops-filer Woodpecker pipeline after a sprint PR merges on the
|
|
||||||
# ops repo main branch. Parses each sprints/*.md file for a structured
|
|
||||||
# ## Sub-issues block (filer:begin/end markers), then creates idempotent
|
|
||||||
# Forgejo issues on the project repo using FORGE_FILER_TOKEN.
|
|
||||||
#
|
|
||||||
# Permission model (#764):
|
|
||||||
# filer-bot has issues:write on the project repo.
|
|
||||||
# architect-bot is read-only on the project repo.
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# sprint-filer.sh <sprint-file.md> — file sub-issues from one sprint
|
|
||||||
# sprint-filer.sh --all <sprints-dir> — scan all sprint files in dir
|
|
||||||
#
|
|
||||||
# Environment:
|
|
||||||
# FORGE_FILER_TOKEN — filer-bot API token (issues:write on project repo)
|
|
||||||
# FORGE_API — project repo API base (e.g. http://forgejo:3000/api/v1/repos/org/repo)
|
|
||||||
# FORGE_API_BASE — API base URL (e.g. http://forgejo:3000/api/v1)
|
|
||||||
# =============================================================================
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
||||||
|
|
||||||
# Source env.sh only if not already loaded (allows standalone + sourced use)
|
|
||||||
if [ -z "${FACTORY_ROOT:-}" ]; then
|
|
||||||
FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
|
||||||
# shellcheck source=env.sh
|
|
||||||
source "$SCRIPT_DIR/env.sh"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ── Logging ──────────────────────────────────────────────────────────────
|
|
||||||
LOG_AGENT="${LOG_AGENT:-filer}"
|
|
||||||
|
|
||||||
# Emit one timestamped log line for this agent on stderr.
filer_log() {
    local ts
    ts=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
    printf '[%s] %s: %s\n' "$ts" "$LOG_AGENT" "$*" >&2
}
|
|
||||||
|
|
||||||
# ── Validate required environment ────────────────────────────────────────
# Fail fast at load time (via ${var:?message}) so no function can run with
# missing credentials or a missing API base URL.
: "${FORGE_FILER_TOKEN:?sprint-filer.sh requires FORGE_FILER_TOKEN}"
: "${FORGE_API:?sprint-filer.sh requires FORGE_API}"
|
||||||
# ── Paginated Forgejo API fetch ──────────────────────────────────────────
# Thin wrapper around forge_api_all (defined in lib/env.sh) that pins the
# auth token to FORGE_FILER_TOKEN so call sites only pass the API path.
# Args: api_path (e.g. /issues?state=all&type=issues)
# Output: merged JSON array to stdout
filer_api_all() { forge_api_all "$1" "$FORGE_FILER_TOKEN"; }
|
|
||||||
# ── Parse sub-issues block from a sprint markdown file ───────────────────
# Extract the raw YAML-ish content delimited by the <!-- filer:begin --> and
# <!-- filer:end --> markers (marker lines themselves are excluded).
# Args:    $1 - path to the sprint markdown file
# Output:  the block's lines on stdout
# Returns: 0 when a well-formed, non-empty block was found; 1 otherwise
parse_subissues_block() {
    local src="$1"

    if [ ! -f "$src" ]; then
        filer_log "ERROR: sprint file not found: ${src}"
        return 1
    fi

    local collecting=false
    local seen_begin=false
    local content=""
    local row

    while IFS= read -r row; do
        case "$row" in
            *"<!-- filer:begin -->"*)
                collecting=true
                seen_begin=true
                ;;
            *"<!-- filer:end -->"*)
                collecting=false
                ;;
            *)
                if [ "$collecting" = true ]; then
                    content+="${row}"$'\n'
                fi
                ;;
        esac
    done < "$src"

    if [ "$seen_begin" = false ]; then
        filer_log "No filer:begin/end block found in ${src}"
        return 1
    fi

    # Still collecting at EOF means the closing marker never appeared.
    if [ "$collecting" = true ]; then
        filer_log "ERROR: malformed sub-issues block in ${src} — filer:begin without filer:end"
        return 1
    fi

    if [ -z "$content" ]; then
        filer_log "WARNING: empty sub-issues block in ${src}"
        return 1
    fi

    printf '%s' "$content"
}
|
|
||||||
|
|
||||||
# ── Extract vision issue number from sprint file ─────────────────────────
# Looks for "#N" references specifically in the "## Vision issues" section
# to avoid picking up cross-links or related-issue mentions earlier in the
# file. Falls back to the first #N anywhere in the file if no such section
# exists. May print nothing when the file has no #N at all — callers check
# for empty output.
# Args:   $1 - sprint file path
# Output: first vision issue number found (digits only, no '#')
extract_vision_issue() {
    local sprint_file="$1"

    # Try to extract from the "## Vision issues" section first.
    local in_section=false
    local result=""
    local line
    while IFS= read -r line; do
        if [[ "$line" =~ ^##[[:space:]]+Vision[[:space:]]+issues ]]; then
            in_section=true
            continue
        fi
        # Stop at the next heading after the section.
        if [ "$in_section" = true ] && [[ "$line" =~ ^## ]]; then
            break
        fi
        if [ "$in_section" = true ]; then
            # grep exits non-zero on lines with no #N reference; the guard
            # keeps `set -euo pipefail` in the sourcing script from aborting
            # the whole run on a perfectly normal blank/prose line.
            result=$(printf '%s' "$line" | grep -oE '#[0-9]+' | head -1 | tr -d '#') || true
            if [ -n "$result" ]; then
                printf '%s' "$result"
                return 0
            fi
        fi
    done < "$sprint_file"

    # Fallback: first #N in the entire file. A file with no #N at all is
    # legitimate here — return success with empty output, not a pipefail.
    grep -oE '#[0-9]+' "$sprint_file" | head -1 | tr -d '#' || true
}
|
|
||||||
|
|
||||||
# ── Extract sprint slug from file path ───────────────────────────────────
# The slug is simply the file's base name with the .md suffix removed.
# Args:   $1 - sprint file path
# Output: slug on stdout
extract_sprint_slug() {
    local path="$1"
    basename "$path" .md
}
|
|
||||||
|
|
||||||
# ── Parse individual sub-issue entries from the block ────────────────────
# The block is a simple YAML-like format:
#   - id: foo
#     title: "..."
#     labels: [backlog, priority]
#     depends_on: [bar]
#     body: |
#       multi-line body
#
# Args: raw_block (via stdin)
# Output: JSON array of sub-issue objects
#
# NOTE(review): this is a hand-rolled line-oriented parser, not real YAML —
# it only understands the exact field set and indentation shown above.
# Titles are not JSON-escaped (only bodies are), so a title containing an
# embedded double quote would presumably produce invalid JSON — the caller's
# `jq empty` validation catches that case. TODO confirm whether titles are
# constrained upstream.
parse_subissue_entries() {
    local block
    block=$(cat)

    # Use awk to parse the YAML-like structure into JSON
    printf '%s' "$block" | awk '
    # Accumulator state: one entry (id/title/labels/depends/body) at a time;
    # flush_entry() emits the previous entry when a new "- id:" starts.
    BEGIN {
        printf "["
        first = 1
        inbody = 0
        id = ""; title = ""; labels = ""; depends = ""; body = ""
    }

    # Emit the accumulated entry as one JSON object, then reset state.
    function flush_entry() {
        if (id == "") return
        if (!first) printf ","
        first = 0

        # Escape JSON special characters in body
        gsub(/\\/, "\\\\", body)
        gsub(/"/, "\\\"", body)
        gsub(/\t/, "\\t", body)
        # Replace newlines with \n for JSON
        gsub(/\n/, "\\n", body)
        # Remove trailing \n
        sub(/\\n$/, "", body)

        # Clean up title (remove surrounding quotes)
        gsub(/^"/, "", title)
        gsub(/"$/, "", title)

        printf "{\"id\":\"%s\",\"title\":\"%s\",\"labels\":%s,\"depends_on\":%s,\"body\":\"%s\"}", id, title, labels, depends, body

        id = ""; title = ""; labels = "[]"; depends = "[]"; body = ""
        inbody = 0
    }

    # New entry: flush the previous one and start accumulating.
    /^- id:/ {
        flush_entry()
        sub(/^- id: */, "")
        id = $0
        labels = "[]"
        depends = "[]"
        next
    }

    /^  title:/ {
        sub(/^  title: */, "")
        title = $0
        # Remove surrounding quotes
        gsub(/^"/, "", title)
        gsub(/"$/, "", title)
        next
    }

    /^  labels:/ {
        sub(/^  labels: */, "")
        # Convert [a, b] to JSON array ["a","b"]
        gsub(/\[/, "", $0)
        gsub(/\]/, "", $0)
        n = split($0, arr, /, */)
        labels = "["
        for (i = 1; i <= n; i++) {
            gsub(/^ */, "", arr[i])
            gsub(/ *$/, "", arr[i])
            if (arr[i] != "") {
                if (i > 1) labels = labels ","
                labels = labels "\"" arr[i] "\""
            }
        }
        labels = labels "]"
        next
    }

    /^  depends_on:/ {
        sub(/^  depends_on: */, "")
        gsub(/\[/, "", $0)
        gsub(/\]/, "", $0)
        n = split($0, arr, /, */)
        depends = "["
        for (i = 1; i <= n; i++) {
            gsub(/^ */, "", arr[i])
            gsub(/ *$/, "", arr[i])
            if (arr[i] != "") {
                if (i > 1) depends = depends ","
                depends = depends "\"" arr[i] "\""
            }
        }
        depends = depends "]"
        next
    }

    # Literal block scalar: subsequent deeper-indented lines belong to body.
    /^  body: *\|/ {
        inbody = 1
        body = ""
        next
    }

    inbody && /^    / {
        sub(/^    /, "")
        body = body $0 "\n"
        next
    }

    # A non-indented, non-blank line ends the body.
    inbody && !/^    / && !/^$/ {
        inbody = 0
        # This line starts a new field or entry — re-process it
        # (awk does not support re-scanning, so handle common cases)
        # NOTE(review): only the "- id:" case is handled here; a non-id
        # field immediately after a body appears to be silently dropped —
        # verify against real sprint files.
        if ($0 ~ /^- id:/) {
            flush_entry()
            sub(/^- id: */, "")
            id = $0
            labels = "[]"
            depends = "[]"
        }
    }

    END {
        flush_entry()
        printf "]"
    }
    '
}
|
|
||||||
|
|
||||||
# ── Check if sub-issue already exists (idempotency) ─────────────────────
# Scans every issue body for the exact decomposed-from marker that
# file_subissues() embeds when it creates an issue.
# Args:    $1 vision issue number, $2 sprint slug, $3 sub-issue id
# Returns: 0 if already exists, 1 if not
subissue_exists() {
    local vision="$1"
    local slug="$2"
    local sid="$3"

    local marker="<!-- decomposed-from: #${vision}, sprint: ${slug}, id: ${sid} -->"

    # Fetch all issues (paginated) and look for the exact marker string.
    local all_json
    all_json=$(filer_api_all "/issues?state=all&type=issues")

    # jq -e exits 0 only when the boolean expression is true.
    if printf '%s' "$all_json" \
        | jq -e --arg marker "$marker" \
            '[.[] | select(.body // "" | contains($marker))] | length > 0' \
            >/dev/null 2>&1; then
        return 0
    fi

    return 1
}
|
|
||||||
|
|
||||||
# ── Resolve label names to IDs ───────────────────────────────────────────
# Args: label_names_json (JSON array of strings)
# Output: JSON array of label IDs
# Unknown label names are silently dropped; on a curl failure the label
# list falls back to [] and the output is an empty array.
resolve_label_ids() {
    local label_names_json="$1"

    # Fetch all labels from project repo
    local all_labels
    all_labels=$(curl -sf -H "Authorization: token ${FORGE_FILER_TOKEN}" \
        "${FORGE_API}/labels" 2>/dev/null) || all_labels="[]"

    # Map names to IDs
    # The while loop runs in a pipeline subshell, which is fine here because
    # its only job is to stream matched IDs into the final jq, which folds
    # the newline-separated IDs back into a JSON number array.
    # NOTE(review): one jq invocation per label name — fine for the handful
    # of labels a sprint uses, but could be a single jq pass if it grows.
    printf '%s' "$label_names_json" | jq -r '.[]' | while IFS= read -r label_name; do
        [ -z "$label_name" ] && continue
        printf '%s' "$all_labels" | jq -r --arg name "$label_name" \
            '.[] | select(.name == $name) | .id' 2>/dev/null
    done | jq -Rs 'split("\n") | map(select(. != "") | tonumber)'
}
|
|
||||||
|
|
||||||
# ── Add in-progress label to vision issue ────────────────────────────────
# Best-effort: looks up the repo's "in-progress" label ID and attaches it.
# Args:    $1 - vision issue number
# Returns: 0 on success; 1 if the label is missing or an API call fails
add_inprogress_label() {
    local issue_num="$1"

    local repo_labels
    repo_labels=$(curl -sf -H "Authorization: token ${FORGE_FILER_TOKEN}" \
        "${FORGE_API}/labels" 2>/dev/null) || return 1

    local label_id
    label_id=$(printf '%s' "$repo_labels" | jq -r '.[] | select(.name == "in-progress") | .id' 2>/dev/null) || true

    if [ -z "$label_id" ]; then
        filer_log "WARNING: in-progress label not found"
        return 1
    fi

    # Guard-style: bail with a warning on API failure, log success otherwise.
    if ! curl -sf -X POST \
        -H "Authorization: token ${FORGE_FILER_TOKEN}" \
        -H "Content-Type: application/json" \
        "${FORGE_API}/issues/${issue_num}/labels" \
        -d "{\"labels\": [${label_id}]}" >/dev/null 2>&1; then
        filer_log "WARNING: failed to add in-progress label to vision issue #${issue_num}"
        return 1
    fi

    filer_log "Added in-progress label to vision issue #${issue_num}"
    return 0
}
|
|
||||||
|
|
||||||
# ── File sub-issues from a sprint file ───────────────────────────────────
# This is the main entry point. Parses the sprint file, extracts sub-issues,
# and creates them idempotently via the Forgejo API.
# Pipeline: extract metadata → parse marker block → parse entries to JSON →
# for each entry: skip if already filed, else create the issue with the
# decomposed-from marker embedded in its body.
# Args: sprint_file_path
# Returns: 0 on success, 1 on any error (fail-fast)
file_subissues() {
    local sprint_file="$1"

    filer_log "Processing sprint file: ${sprint_file}"

    # Extract metadata
    local vision_issue sprint_slug
    vision_issue=$(extract_vision_issue "$sprint_file")
    sprint_slug=$(extract_sprint_slug "$sprint_file")

    if [ -z "$vision_issue" ]; then
        filer_log "ERROR: could not extract vision issue number from ${sprint_file}"
        return 1
    fi

    filer_log "Vision issue: #${vision_issue}, sprint slug: ${sprint_slug}"

    # Parse the sub-issues block
    local raw_block
    raw_block=$(parse_subissues_block "$sprint_file") || return 1

    # Parse individual entries
    local entries_json
    entries_json=$(printf '%s' "$raw_block" | parse_subissue_entries)

    # Validate parsing produced valid JSON
    # (the awk parser does not escape titles, so this is the safety net)
    if ! printf '%s' "$entries_json" | jq empty 2>/dev/null; then
        filer_log "ERROR: failed to parse sub-issues block as valid JSON in ${sprint_file}"
        return 1
    fi

    local entry_count
    entry_count=$(printf '%s' "$entries_json" | jq 'length')

    if [ "$entry_count" -eq 0 ]; then
        filer_log "WARNING: no sub-issue entries found in ${sprint_file}"
        return 1
    fi

    filer_log "Found ${entry_count} sub-issue(s) to file"

    # File each sub-issue (fail-fast on first error)
    local filed_count=0
    local i=0
    while [ "$i" -lt "$entry_count" ]; do
        local entry
        entry=$(printf '%s' "$entries_json" | jq ".[$i]")

        local subissue_id subissue_title subissue_body labels_json
        subissue_id=$(printf '%s' "$entry" | jq -r '.id')
        subissue_title=$(printf '%s' "$entry" | jq -r '.title')
        subissue_body=$(printf '%s' "$entry" | jq -r '.body')
        labels_json=$(printf '%s' "$entry" | jq -c '.labels')

        # Entries without id/title are structural errors — abort, don't skip.
        if [ -z "$subissue_id" ] || [ "$subissue_id" = "null" ]; then
            filer_log "ERROR: sub-issue entry at index ${i} has no id — aborting"
            return 1
        fi

        if [ -z "$subissue_title" ] || [ "$subissue_title" = "null" ]; then
            filer_log "ERROR: sub-issue '${subissue_id}' has no title — aborting"
            return 1
        fi

        # Idempotency check
        if subissue_exists "$vision_issue" "$sprint_slug" "$subissue_id"; then
            filer_log "Sub-issue '${subissue_id}' already exists — skipping"
            i=$((i + 1))
            continue
        fi

        # Append decomposed-from marker to body; subissue_exists() searches
        # for this exact string on later runs.
        local marker="<!-- decomposed-from: #${vision_issue}, sprint: ${sprint_slug}, id: ${subissue_id} -->"
        local full_body="${subissue_body}

${marker}"

        # Resolve label names to IDs
        local label_ids
        label_ids=$(resolve_label_ids "$labels_json")

        # Build issue payload using jq for safe JSON construction
        local payload
        payload=$(jq -n \
            --arg title "$subissue_title" \
            --arg body "$full_body" \
            --argjson labels "$label_ids" \
            '{title: $title, body: $body, labels: $labels}')

        # Create the issue
        local response
        response=$(curl -sf -X POST \
            -H "Authorization: token ${FORGE_FILER_TOKEN}" \
            -H "Content-Type: application/json" \
            "${FORGE_API}/issues" \
            -d "$payload" 2>/dev/null) || {
            filer_log "ERROR: failed to create sub-issue '${subissue_id}' — aborting (${filed_count}/${entry_count} filed so far)"
            return 1
        }

        local new_issue_num
        new_issue_num=$(printf '%s' "$response" | jq -r '.number // empty')
        filer_log "Filed sub-issue '${subissue_id}' as #${new_issue_num}: ${subissue_title}"

        filed_count=$((filed_count + 1))
        i=$((i + 1))
    done

    # Add in-progress label to the vision issue (best-effort — a missing
    # label must not fail the whole filing run)
    add_inprogress_label "$vision_issue" || true

    filer_log "Successfully filed ${filed_count}/${entry_count} sub-issue(s) for sprint ${sprint_slug}"
    return 0
}
|
|
||||||
|
|
||||||
# ── Vision lifecycle: close completed vision issues ──────────────────────
# Checks open vision issues and closes any whose sub-issues are all closed.
# Sub-issues are identified by the decomposed-from marker embedded in their
# bodies by file_subissues(). All API calls here are best-effort; failures
# never abort the run.
check_and_close_completed_visions() {
    filer_log "Checking for vision issues with all sub-issues complete..."

    local vision_issues_json
    vision_issues_json=$(filer_api_all "/issues?labels=vision&state=open")

    if [ "$vision_issues_json" = "[]" ] || [ "$vision_issues_json" = "null" ]; then
        filer_log "No open vision issues found"
        return 0
    fi

    local all_issues
    all_issues=$(filer_api_all "/issues?state=all&type=issues")

    local vision_nums
    vision_nums=$(printf '%s' "$vision_issues_json" | jq -r '.[].number' 2>/dev/null) || return 0

    local closed_count=0
    while IFS= read -r vid; do
        [ -z "$vid" ] && continue

        # Find sub-issues with decomposed-from marker for this vision.
        # The marker format is "<!-- decomposed-from: #N, sprint: ..." — the
        # trailing comma is included in the match so vision #1 does not
        # prefix-match the sub-issues of #12, #13, etc.
        local sub_issues
        sub_issues=$(printf '%s' "$all_issues" | jq --arg vid "$vid" \
            '[.[] | select(.body // "" | contains("<!-- decomposed-from: #" + $vid + ","))]')

        local sub_count
        sub_count=$(printf '%s' "$sub_issues" | jq 'length')

        # No sub-issues means not ready to close
        [ "$sub_count" -eq 0 ] && continue

        # Check if all are closed
        local open_count
        open_count=$(printf '%s' "$sub_issues" | jq '[.[] | select(.state != "closed")] | length')

        if [ "$open_count" -gt 0 ]; then
            continue
        fi

        # All sub-issues closed — close the vision issue
        filer_log "All ${sub_count} sub-issues for vision #${vid} are closed — closing vision"

        local comment_body
        comment_body="## Vision Issue Completed

All sub-issues have been implemented and merged. This vision issue is now closed.

---
*Automated closure by filer-bot · $(date -u '+%Y-%m-%d %H:%M UTC')*"

        local comment_payload
        comment_payload=$(jq -n --arg body "$comment_body" '{body: $body}')

        # Post the closure comment, then close the issue (both best-effort).
        curl -sf -X POST \
            -H "Authorization: token ${FORGE_FILER_TOKEN}" \
            -H "Content-Type: application/json" \
            "${FORGE_API}/issues/${vid}/comments" \
            -d "$comment_payload" >/dev/null 2>&1 || true

        curl -sf -X PATCH \
            -H "Authorization: token ${FORGE_FILER_TOKEN}" \
            -H "Content-Type: application/json" \
            "${FORGE_API}/issues/${vid}" \
            -d '{"state":"closed"}' >/dev/null 2>&1 || true

        closed_count=$((closed_count + 1))
    done <<< "$vision_nums"

    if [ "$closed_count" -gt 0 ]; then
        filer_log "Closed ${closed_count} vision issue(s)"
    fi
}
|
|
||||||
|
|
||||||
# ── Main ─────────────────────────────────────────────────────────────────
# Dispatch on the first argument: --all scans a directory of sprint files,
# a single path files one sprint, no args prints usage.
main() {
    case "${1:-}" in
        --all)
            local dir="${2:?Usage: sprint-filer.sh --all <sprints-dir>}"
            local status=0
            local f
            for f in "${dir}"/*.md; do
                [ -f "$f" ] || continue
                # Only process files that opted in with a filer:begin marker.
                grep -q '<!-- filer:begin -->' "$f" || continue
                if ! file_subissues "$f"; then
                    filer_log "ERROR: failed to process ${f}"
                    status=1
                fi
            done
            # Run vision lifecycle check after filing
            check_and_close_completed_visions || true
            return "$status"
            ;;
        '')
            echo "Usage: sprint-filer.sh <sprint-file.md>" >&2
            echo "       sprint-filer.sh --all <sprints-dir>" >&2
            return 1
            ;;
        *)
            file_subissues "$1"
            # Run vision lifecycle check after filing
            check_and_close_completed_visions || true
            ;;
    esac
}

# Run main only when executed directly (not when sourced for testing)
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi
|
|
||||||
|
|
@ -1,9 +1,9 @@
|
||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# action-vault.sh — Helper for agents to create vault PRs on ops repo
|
# vault.sh — Helper for agents to create vault PRs on ops repo
|
||||||
#
|
#
|
||||||
# Source after lib/env.sh:
|
# Source after lib/env.sh:
|
||||||
# source "$(dirname "$0")/../lib/env.sh"
|
# source "$(dirname "$0")/../lib/env.sh"
|
||||||
# source "$(dirname "$0")/lib/action-vault.sh"
|
# source "$(dirname "$0")/lib/vault.sh"
|
||||||
#
|
#
|
||||||
# Required globals: FORGE_TOKEN, FORGE_URL, FORGE_REPO, FORGE_OPS_REPO
|
# Required globals: FORGE_TOKEN, FORGE_URL, FORGE_REPO, FORGE_OPS_REPO
|
||||||
# Optional: OPS_REPO_ROOT (local path for ops repo)
|
# Optional: OPS_REPO_ROOT (local path for ops repo)
|
||||||
|
|
@ -12,7 +12,7 @@
|
||||||
# vault_request <action_id> <toml_content> — Create vault PR, return PR number
|
# vault_request <action_id> <toml_content> — Create vault PR, return PR number
|
||||||
#
|
#
|
||||||
# The function:
|
# The function:
|
||||||
# 1. Validates TOML content using validate_vault_action() from action-vault/vault-env.sh
|
# 1. Validates TOML content using validate_vault_action() from vault/vault-env.sh
|
||||||
# 2. Creates a branch on the ops repo: vault/<action-id>
|
# 2. Creates a branch on the ops repo: vault/<action-id>
|
||||||
# 3. Writes TOML to vault/actions/<action-id>.toml on that branch
|
# 3. Writes TOML to vault/actions/<action-id>.toml on that branch
|
||||||
# 4. Creates PR targeting main with title "vault: <action-id>"
|
# 4. Creates PR targeting main with title "vault: <action-id>"
|
||||||
|
|
@ -128,14 +128,14 @@ vault_request() {
|
||||||
# Validate TOML content
|
# Validate TOML content
|
||||||
local tmp_toml
|
local tmp_toml
|
||||||
tmp_toml=$(mktemp /tmp/vault-XXXXXX.toml)
|
tmp_toml=$(mktemp /tmp/vault-XXXXXX.toml)
|
||||||
|
trap 'rm -f "$tmp_toml"' RETURN
|
||||||
|
|
||||||
printf '%s' "$toml_content" > "$tmp_toml"
|
printf '%s' "$toml_content" > "$tmp_toml"
|
||||||
|
|
||||||
# Source vault-env.sh for validate_vault_action
|
# Source vault-env.sh for validate_vault_action
|
||||||
local vault_env="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/action-vault/vault-env.sh"
|
local vault_env="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/vault/vault-env.sh"
|
||||||
if [ ! -f "$vault_env" ]; then
|
if [ ! -f "$vault_env" ]; then
|
||||||
echo "ERROR: vault-env.sh not found at $vault_env" >&2
|
echo "ERROR: vault-env.sh not found at $vault_env" >&2
|
||||||
rm -f "$tmp_toml"
|
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
@ -145,15 +145,11 @@ vault_request() {
|
||||||
if ! source "$vault_env"; then
|
if ! source "$vault_env"; then
|
||||||
FORGE_TOKEN="${_saved_forge_token:-}"
|
FORGE_TOKEN="${_saved_forge_token:-}"
|
||||||
echo "ERROR: failed to source vault-env.sh" >&2
|
echo "ERROR: failed to source vault-env.sh" >&2
|
||||||
rm -f "$tmp_toml"
|
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
# Restore caller's FORGE_TOKEN after validation
|
# Restore caller's FORGE_TOKEN after validation
|
||||||
FORGE_TOKEN="${_saved_forge_token:-}"
|
FORGE_TOKEN="${_saved_forge_token:-}"
|
||||||
|
|
||||||
# Set trap AFTER sourcing vault-env.sh to avoid RETURN trap firing during source
|
|
||||||
trap 'rm -f "$tmp_toml"' RETURN
|
|
||||||
|
|
||||||
# Run validation
|
# Run validation
|
||||||
if ! validate_vault_action "$tmp_toml"; then
|
if ! validate_vault_action "$tmp_toml"; then
|
||||||
echo "ERROR: TOML validation failed" >&2
|
echo "ERROR: TOML validation failed" >&2
|
||||||
|
|
@ -165,7 +161,7 @@ vault_request() {
|
||||||
ops_api="$(_vault_ops_api)"
|
ops_api="$(_vault_ops_api)"
|
||||||
|
|
||||||
# Classify the action to determine if PR bypass is allowed
|
# Classify the action to determine if PR bypass is allowed
|
||||||
local classify_script="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/action-vault/classify.sh"
|
local classify_script="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/vault/classify.sh"
|
||||||
local vault_tier
|
local vault_tier
|
||||||
vault_tier=$("$classify_script" "${VAULT_ACTION_FORMULA:-}" "${VAULT_BLAST_RADIUS_OVERRIDE:-}") || {
|
vault_tier=$("$classify_script" "${VAULT_ACTION_FORMULA:-}" "${VAULT_BLAST_RADIUS_OVERRIDE:-}") || {
|
||||||
# Classification failed, default to high tier (require PR)
|
# Classification failed, default to high tier (require PR)
|
||||||
164
nomad/AGENTS.md
164
nomad/AGENTS.md
|
|
@ -1,164 +0,0 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
|
||||||
# nomad/ — Agent Instructions
|
|
||||||
|
|
||||||
Nomad + Vault HCL for the factory's single-node cluster. These files are
|
|
||||||
the source of truth that `lib/init/nomad/cluster-up.sh` copies onto a
|
|
||||||
factory box under `/etc/nomad.d/` and `/etc/vault.d/` at init time.
|
|
||||||
|
|
||||||
This directory covers the **Nomad+Vault migration (Steps 0–4)** —
|
|
||||||
see issues #821–#962 for the step breakdown.
|
|
||||||
|
|
||||||
## What lives here
|
|
||||||
|
|
||||||
| File/Dir | Deployed to | Owned by |
|
|
||||||
|---|---|---|
|
|
||||||
| `server.hcl` | `/etc/nomad.d/server.hcl` | agent role, bind, ports, `data_dir` (S0.2) |
|
|
||||||
| `client.hcl` | `/etc/nomad.d/client.hcl` | Docker driver cfg + `host_volume` declarations (S0.2); `allow_privileged = true` for woodpecker-agent Docker-in-Docker (S3-fix-5, #961) |
|
|
||||||
| `vault.hcl` | `/etc/vault.d/vault.hcl` | Vault storage, listener, UI, `disable_mlock` (S0.3) |
|
|
||||||
| `jobs/forgejo.hcl` | submitted via `lib/init/nomad/deploy.sh` | Forgejo job; reads creds from Vault via consul-template stanza (S2.4) |
|
|
||||||
| `jobs/woodpecker-server.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI server; host networking, Vault KV for `WOODPECKER_AGENT_SECRET` + Forgejo OAuth creds (S3.1) |
|
|
||||||
| `jobs/woodpecker-agent.hcl` | submitted via `lib/init/nomad/deploy.sh` | Woodpecker CI agent; host networking, `docker.sock` mount, Vault KV for `WOODPECKER_AGENT_SECRET`; `WOODPECKER_SERVER` uses `${attr.unique.network.ip-address}:9000` (Nomad interpolation) — port binds to LXC alloc IP, not localhost (S3.2, S3-fix-6, #964) |
|
|
||||||
| `jobs/agents.hcl` | submitted via `lib/init/nomad/deploy.sh` | All 7 agent roles (dev, review, gardener, planner, predictor, supervisor, architect) + llama variant; Vault-templated bot tokens via `service-agents` policy; `force_pull = false` — image is built locally by `bin/disinto --with agents`, no registry (S4.1, S4-fix-2, S4-fix-5, #955, #972, #978) |
|
|
||||||
| `jobs/staging.hcl` | submitted via `lib/init/nomad/deploy.sh` | Caddy file-server mounting `docker/` as `/srv/site:ro`; no Vault integration; internal-only via edge proxy (S5.2, #989) |
|
|
||||||
| `jobs/chat.hcl` | submitted via `lib/init/nomad/deploy.sh` | Claude chat UI; custom `disinto/chat:local` image; sandbox hardening (cap_drop ALL, tmpfs, pids_limit 128); Vault-templated OAuth secrets via `service-chat` policy (S5.2, #989) |
|
|
||||||
|
|
||||||
Nomad auto-merges every `*.hcl` under `-config=/etc/nomad.d/`, so the
|
|
||||||
split between `server.hcl` and `client.hcl` is for readability, not
|
|
||||||
semantics. The top-of-file header in each config documents which blocks
|
|
||||||
it owns.
|
|
||||||
|
|
||||||
## Vault ACL policies
|
|
||||||
|
|
||||||
`vault/policies/` holds one `.hcl` file per Vault policy; see
|
|
||||||
[`vault/policies/AGENTS.md`](../vault/policies/AGENTS.md) for the naming
|
|
||||||
convention, KV path summary, and JWT-auth role bindings (S2.1/S2.3).
|
|
||||||
|
|
||||||
## Not yet implemented
|
|
||||||
|
|
||||||
- **Additional jobspecs** (caddy) — Woodpecker (S3.1-S3.2) and agents (S4.1) are now deployed;
|
|
||||||
caddy lands in a later step.
|
|
||||||
- **TLS, ACLs, gossip encryption** — deliberately absent for now; land
|
|
||||||
alongside multi-node support.
|
|
||||||
|
|
||||||
## Adding a jobspec (Step 1 and later)
|
|
||||||
|
|
||||||
1. Drop a file in `nomad/jobs/<service>.hcl`. The `.hcl` suffix is
|
|
||||||
load-bearing: `.woodpecker/nomad-validate.yml` globs on exactly that
|
|
||||||
suffix to auto-pick up new jobspecs (see step 2 in "How CI validates
|
|
||||||
these files" below). Anything else in `nomad/jobs/` is silently
|
|
||||||
skipped by CI.
|
|
||||||
2. If it needs persistent state, reference a `host_volume` already
|
|
||||||
declared in `client.hcl` — *don't* add ad-hoc host paths in the
|
|
||||||
jobspec. If a new volume is needed, add it to **both**:
|
|
||||||
- `nomad/client.hcl` — the `host_volume "<name>" { path = … }` block
|
|
||||||
- `lib/init/nomad/cluster-up.sh` — the `HOST_VOLUME_DIRS` array
|
|
||||||
The two must stay in sync or nomad fingerprinting will fail and the
|
|
||||||
node stays in "initializing". Note that offline `nomad job validate`
|
|
||||||
will NOT catch a typo in the jobspec's `source = "..."` against the
|
|
||||||
client.hcl host_volume list (see step 2 below) — the scheduler
|
|
||||||
rejects the mismatch at placement time instead.
|
|
||||||
3. Pin image tags — `image = "forgejo/forgejo:1.22.5"`, not `:latest`.
|
|
||||||
4. No pipeline edit required — step 2 of `nomad-validate.yml` globs
|
|
||||||
over `nomad/jobs/*.hcl` and validates every match. Just make sure
|
|
||||||
the existing `nomad/**` trigger path still covers your file (it
|
|
||||||
does for anything under `nomad/jobs/`).
|
|
||||||
|
|
||||||
## How CI validates these files
|
|
||||||
|
|
||||||
`.woodpecker/nomad-validate.yml` runs on every PR that touches `nomad/`
|
|
||||||
(including `nomad/jobs/`), `lib/init/nomad/`, `bin/disinto`,
|
|
||||||
`vault/policies/`, or `vault/roles.yaml`. Eight fail-closed steps:
|
|
||||||
|
|
||||||
1. **`nomad config validate nomad/server.hcl nomad/client.hcl`**
|
|
||||||
— parses the HCL, fails on unknown blocks, bad port ranges, invalid
|
|
||||||
driver config. Vault HCL is excluded (different tool). Jobspecs are
|
|
||||||
excluded too — agent-config and jobspec are disjoint HCL grammars;
|
|
||||||
running this step on a jobspec rejects it with "unknown block 'job'".
|
|
||||||
2. **`nomad job validate nomad/jobs/*.hcl`** (loop, one call per file)
|
|
||||||
— parses each jobspec's HCL, fails on unknown stanzas, missing
|
|
||||||
required fields, wrong value types, invalid driver config. Runs
|
|
||||||
offline (no Nomad server needed) so CI exit 0 ≠ "this will schedule
|
|
||||||
successfully"; it means "the HCL itself is well-formed". What this
|
|
||||||
step does NOT catch:
|
|
||||||
- cross-file references (`source = "forgejo-data"` typo against the
|
|
||||||
`host_volume` list in `client.hcl`) — that's a scheduling-time
|
|
||||||
check on the live cluster, not validate-time.
|
|
||||||
- image reachability — `image = "codeberg.org/forgejo/forgejo:11.0"`
|
|
||||||
is accepted even if the registry is down or the tag is wrong.
|
|
||||||
New jobspecs are picked up automatically by the glob — no pipeline
|
|
||||||
edit needed as long as the file is named `<name>.hcl`.
|
|
||||||
3. **`vault operator diagnose -config=nomad/vault.hcl -skip=storage -skip=listener`**
|
|
||||||
— Vault's equivalent syntax + schema check. `-skip=storage/listener`
|
|
||||||
disables the runtime checks (CI containers don't have
|
|
||||||
`/var/lib/vault/data` or port 8200). Exit 2 (advisory warnings only,
|
|
||||||
e.g. TLS-disabled listener) is tolerated; exit 1 blocks merge.
|
|
||||||
4. **`vault policy fmt` idempotence check on every `vault/policies/*.hcl`**
|
|
||||||
(S2.6) — `vault policy fmt` has no `-check` flag in 1.18.5, so the
|
|
||||||
step copies each file to `/tmp`, runs `vault policy fmt` on the copy,
|
|
||||||
and diffs against the original. Any non-empty diff means the
|
|
||||||
committed file would be rewritten by `fmt` and the step fails — the
|
|
||||||
author is pointed at `vault policy fmt <file>` to heal the drift.
|
|
||||||
5. **`vault policy write`-based validation against an inline dev-mode Vault**
|
|
||||||
(S2.6) — Vault 1.18.5 has no offline `policy validate` subcommand;
|
|
||||||
the CI step starts a dev-mode server, loops `vault policy write
|
|
||||||
<basename> <file>` over each `vault/policies/*.hcl`, and aggregates
|
|
||||||
failures so one CI run surfaces every broken policy. The server is
|
|
||||||
ephemeral and torn down on step exit — no persistence, no real
|
|
||||||
secrets. Catches unknown capability names (e.g. `"frobnicate"`),
|
|
||||||
malformed `path` blocks, and other semantic errors `fmt` does not.
|
|
||||||
6. **`vault/roles.yaml` validator** (S2.6) — yamllint + a PyYAML-based
|
|
||||||
check that every role's `policy:` field matches a basename under
|
|
||||||
`vault/policies/`, and that every role entry carries all four
|
|
||||||
required fields (`name`, `policy`, `namespace`, `job_id`). Drift
|
|
||||||
between the two directories is a scheduling-time "permission denied"
|
|
||||||
in production; this step turns it into a CI failure at PR time.
|
|
||||||
7. **`shellcheck --severity=warning lib/init/nomad/*.sh bin/disinto`**
|
|
||||||
— all init/dispatcher shell clean. `bin/disinto` has no `.sh`
|
|
||||||
extension so the repo-wide shellcheck in `.woodpecker/ci.yml` skips
|
|
||||||
it — this is the one place it gets checked.
|
|
||||||
8. **`bats tests/disinto-init-nomad.bats`**
|
|
||||||
— exercises the dispatcher: `disinto init --backend=nomad --dry-run`,
|
|
||||||
`… --empty --dry-run`, and the `--backend=docker` regression guard.
|
|
||||||
|
|
||||||
**Secret-scan coverage.** Policy HCL files under `vault/policies/` are
|
|
||||||
already swept by the P11 secret-scan gate
|
|
||||||
(`.woodpecker/secret-scan.yml`, #798), whose `vault/**/*` trigger path
|
|
||||||
covers everything in this directory. `nomad-validate.yml` intentionally
|
|
||||||
does NOT duplicate that gate — one scanner, one source of truth.
|
|
||||||
|
|
||||||
If a PR breaks `nomad/server.hcl` (e.g. typo in a block name), step 1
|
|
||||||
fails with a clear error; if it breaks a jobspec (e.g. misspells
|
|
||||||
`task` as `tsak`, or adds a `volume` stanza without a `source`), step
|
|
||||||
2 fails; a typo in a `path "..."` block in a vault policy fails step 5
|
|
||||||
with the Vault parser's error; a `roles.yaml` entry that points at a
|
|
||||||
policy basename that does not exist fails step 6. PRs that don't touch
|
|
||||||
any of the trigger paths skip this pipeline entirely.
|
|
||||||
|
|
||||||
## Version pinning
|
|
||||||
|
|
||||||
Nomad + Vault versions are pinned in **two** places — bumping one
|
|
||||||
without the other is a CI-caught drift:
|
|
||||||
|
|
||||||
- `lib/init/nomad/install.sh` — the apt-installed versions on factory
|
|
||||||
boxes (`NOMAD_VERSION`, `VAULT_VERSION`).
|
|
||||||
- `.woodpecker/nomad-validate.yml` — the `hashicorp/nomad:…` and
|
|
||||||
`hashicorp/vault:…` image tags used for static validation.
|
|
||||||
|
|
||||||
Bump both in the same PR. The CI pipeline will fail if the pinned
|
|
||||||
image's `config validate` rejects syntax the installed runtime would
|
|
||||||
accept (or vice versa).
|
|
||||||
|
|
||||||
## Related
|
|
||||||
|
|
||||||
- `lib/init/nomad/` — installer + systemd units + cluster-up orchestrator.
|
|
||||||
- `.woodpecker/nomad-validate.yml` — this directory's CI pipeline.
|
|
||||||
- `vault/policies/` — Vault ACL policy HCL files (S2.1); the
|
|
||||||
`vault-policy-fmt` / `vault-policy-validate` CI steps above enforce
|
|
||||||
their shape. See [`../vault/policies/AGENTS.md`](../vault/policies/AGENTS.md)
|
|
||||||
for the policy lifecycle, CI enforcement details, and common failure
|
|
||||||
modes.
|
|
||||||
- `vault/roles.yaml` — JWT-auth role → policy bindings (S2.3); the
|
|
||||||
`vault-roles-validate` CI step above keeps it in lockstep with the
|
|
||||||
policies directory.
|
|
||||||
- Top-of-file headers in `server.hcl` / `client.hcl` / `vault.hcl`
|
|
||||||
document the per-file ownership contract.
|
|
||||||
|
|
@ -1,94 +0,0 @@
|
||||||
# =============================================================================
|
|
||||||
# nomad/client.hcl — Docker driver + host_volume declarations
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S0.2, issue #822). Deployed to
|
|
||||||
# /etc/nomad.d/client.hcl on the factory dev box alongside server.hcl.
|
|
||||||
#
|
|
||||||
# This file owns: Docker driver plugin config + host_volume pre-wiring.
|
|
||||||
# server.hcl owns: agent role, bind, ports, data_dir.
|
|
||||||
#
|
|
||||||
# NOTE: Nomad merges every *.hcl under -config=/etc/nomad.d, so declaring
|
|
||||||
# a second `client { ... }` block here augments (not replaces) the one in
|
|
||||||
# server.hcl. On a single-node setup this file could be inlined into
|
|
||||||
# server.hcl — the split is for readability, not semantics.
|
|
||||||
#
|
|
||||||
# host_volume declarations let Nomad jobspecs mount factory state by name
|
|
||||||
# (volume = "forgejo-data", etc.) without coupling host paths into jobspec
|
|
||||||
# HCL. Host paths under /srv/disinto/* are created out-of-band by the
|
|
||||||
# orchestrator (S0.4) before any job references them.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
client {
|
|
||||||
# forgejo git server data (repos, avatars, attachments).
|
|
||||||
host_volume "forgejo-data" {
|
|
||||||
path = "/srv/disinto/forgejo-data"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# woodpecker CI data (pipeline artifacts, sqlite db).
|
|
||||||
host_volume "woodpecker-data" {
|
|
||||||
path = "/srv/disinto/woodpecker-data"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# agent runtime data (claude config, logs, phase files).
|
|
||||||
host_volume "agent-data" {
|
|
||||||
path = "/srv/disinto/agent-data"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# per-project git clones and worktrees.
|
|
||||||
host_volume "project-repos" {
|
|
||||||
path = "/srv/disinto/project-repos"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# caddy config + ACME state.
|
|
||||||
host_volume "caddy-data" {
|
|
||||||
path = "/srv/disinto/caddy-data"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# staging static content (docker/ directory with images, HTML, etc.)
|
|
||||||
host_volume "site-content" {
|
|
||||||
path = "/srv/disinto/docker"
|
|
||||||
read_only = true
|
|
||||||
}
|
|
||||||
|
|
||||||
# disinto chat transcripts + attachments.
|
|
||||||
host_volume "chat-history" {
|
|
||||||
path = "/srv/disinto/chat-history"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# ops repo clone (vault actions, sprint artifacts, knowledge).
|
|
||||||
host_volume "ops-repo" {
|
|
||||||
path = "/srv/disinto/ops-repo"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Docker task driver. `volumes.enabled = true` is required so jobspecs
|
|
||||||
# can mount host_volume declarations defined above. `allow_privileged`
|
|
||||||
# is true — woodpecker-agent requires `privileged = true` to access
|
|
||||||
# docker.sock and spawn CI pipeline containers.
|
|
||||||
plugin "docker" {
|
|
||||||
config {
|
|
||||||
allow_privileged = true
|
|
||||||
|
|
||||||
volumes {
|
|
||||||
enabled = true
|
|
||||||
}
|
|
||||||
|
|
||||||
# Leave images behind when jobs stop, so short job churn doesn't thrash
|
|
||||||
# the image cache. Factory disk is not constrained; `docker system prune`
|
|
||||||
# is the escape hatch.
|
|
||||||
gc {
|
|
||||||
image = false
|
|
||||||
container = true
|
|
||||||
dangling_containers {
|
|
||||||
enabled = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,200 +0,0 @@
|
||||||
# =============================================================================
|
|
||||||
# nomad/jobs/agents.hcl — All-role agent polling loop (Nomad service job)
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S4.1, issue #955). Runs the main bot
|
|
||||||
# polling loop with all 7 agent roles (review, dev, gardener, architect,
|
|
||||||
# planner, predictor, supervisor) against the local llama server.
|
|
||||||
#
|
|
||||||
# Host_volume contract:
|
|
||||||
# This job mounts agent-data, project-repos, and ops-repo from
|
|
||||||
# nomad/client.hcl. Paths under /srv/disinto/* are created by
|
|
||||||
# lib/init/nomad/cluster-up.sh before any job references them.
|
|
||||||
#
|
|
||||||
# Vault integration (S4.1):
|
|
||||||
# - vault { role = "service-agents" } at group scope — workload-identity
|
|
||||||
# JWT exchanged for a Vault token carrying the composite service-agents
|
|
||||||
# policy (vault/policies/service-agents.hcl), which grants read access
|
|
||||||
# to all 7 bot KV namespaces + vault bot + shared forge config.
|
|
||||||
# - template stanza renders per-bot FORGE_*_TOKEN + FORGE_PASS from Vault
|
|
||||||
# KV v2 at kv/disinto/bots/<role>.
|
|
||||||
# - Seeded on fresh boxes by tools/vault-seed-agents.sh.
|
|
||||||
#
|
|
||||||
# Not the runtime yet: docker-compose.yml is still the factory's live stack
|
|
||||||
# until cutover. This file exists so CI can validate it and S4.2 can wire
|
|
||||||
# `disinto init --backend=nomad --with agents` to `nomad job run` it.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
job "agents" {
|
|
||||||
type = "service"
|
|
||||||
datacenters = ["dc1"]
|
|
||||||
|
|
||||||
group "agents" {
|
|
||||||
count = 1
|
|
||||||
|
|
||||||
# ── Vault workload identity (S4.1, issue #955) ───────────────────────────
|
|
||||||
# Composite role covering all 7 bot identities + vault bot. Role defined
|
|
||||||
# in vault/roles.yaml, policy in vault/policies/service-agents.hcl.
|
|
||||||
# Bound claim pins nomad_job_id = "agents".
|
|
||||||
vault {
|
|
||||||
role = "service-agents"
|
|
||||||
}
|
|
||||||
|
|
||||||
# No network port — agents are outbound-only (poll forgejo, call llama).
|
|
||||||
# No service discovery block — nothing health-checks agents over HTTP.
|
|
||||||
|
|
||||||
volume "agent-data" {
|
|
||||||
type = "host"
|
|
||||||
source = "agent-data"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
volume "project-repos" {
|
|
||||||
type = "host"
|
|
||||||
source = "project-repos"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
volume "ops-repo" {
|
|
||||||
type = "host"
|
|
||||||
source = "ops-repo"
|
|
||||||
read_only = true
|
|
||||||
}
|
|
||||||
|
|
||||||
# Conservative restart — fail fast to the scheduler.
|
|
||||||
restart {
|
|
||||||
attempts = 3
|
|
||||||
interval = "5m"
|
|
||||||
delay = "15s"
|
|
||||||
mode = "delay"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Service registration ────────────────────────────────────────────────
|
|
||||||
# Agents are outbound-only (poll forgejo, call llama) — no HTTP/TCP
|
|
||||||
# endpoint to probe. The Nomad native provider only supports tcp/http
|
|
||||||
# checks, not script checks. Registering without a check block means
|
|
||||||
# Nomad tracks health via task lifecycle: task running = healthy,
|
|
||||||
# task dead = service deregistered. This matches the docker-compose
|
|
||||||
# pgrep healthcheck semantics (process alive = healthy).
|
|
||||||
service {
|
|
||||||
name = "agents"
|
|
||||||
provider = "nomad"
|
|
||||||
}
|
|
||||||
|
|
||||||
task "agents" {
|
|
||||||
driver = "docker"
|
|
||||||
|
|
||||||
config {
|
|
||||||
image = "disinto/agents:local"
|
|
||||||
force_pull = false
|
|
||||||
|
|
||||||
# apparmor=unconfined matches docker-compose — Claude Code needs
|
|
||||||
# ptrace for node.js inspector and /proc access.
|
|
||||||
security_opt = ["apparmor=unconfined"]
|
|
||||||
}
|
|
||||||
|
|
||||||
volume_mount {
|
|
||||||
volume = "agent-data"
|
|
||||||
destination = "/home/agent/data"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
volume_mount {
|
|
||||||
volume = "project-repos"
|
|
||||||
destination = "/home/agent/repos"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
volume_mount {
|
|
||||||
volume = "ops-repo"
|
|
||||||
destination = "/home/agent/repos/_factory/disinto-ops"
|
|
||||||
read_only = true
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Non-secret env ─────────────────────────────────────────────────────
|
|
||||||
env {
|
|
||||||
FORGE_URL = "http://forgejo:3000"
|
|
||||||
FORGE_REPO = "disinto-admin/disinto"
|
|
||||||
ANTHROPIC_BASE_URL = "http://10.10.10.1:8081"
|
|
||||||
ANTHROPIC_API_KEY = "sk-no-key-required"
|
|
||||||
CLAUDE_MODEL = "unsloth/Qwen3.5-35B-A3B"
|
|
||||||
AGENT_ROLES = "review,dev,gardener,architect,planner,predictor,supervisor"
|
|
||||||
POLL_INTERVAL = "300"
|
|
||||||
DISINTO_CONTAINER = "1"
|
|
||||||
PROJECT_NAME = "project"
|
|
||||||
PROJECT_REPO_ROOT = "/home/agent/repos/project"
|
|
||||||
CLAUDE_TIMEOUT = "7200"
|
|
||||||
|
|
||||||
# llama-specific Claude Code tuning
|
|
||||||
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC = "1"
|
|
||||||
CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS = "1"
|
|
||||||
CLAUDE_AUTOCOMPACT_PCT_OVERRIDE = "60"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Vault-templated bot tokens (S4.1, issue #955) ─────────────────────
|
|
||||||
# Renders per-bot FORGE_*_TOKEN + FORGE_PASS from Vault KV v2.
|
|
||||||
# Each `with secret ...` block reads one bot's KV path; the `else`
|
|
||||||
# branch emits short placeholders on fresh installs where the path
|
|
||||||
# is absent. Seed with tools/vault-seed-agents.sh.
|
|
||||||
#
|
|
||||||
# Placeholder values kept < 16 chars to avoid secret-scan CI failures.
|
|
||||||
# error_on_missing_key = false prevents template-pending hangs.
|
|
||||||
template {
|
|
||||||
destination = "secrets/bots.env"
|
|
||||||
env = true
|
|
||||||
change_mode = "restart"
|
|
||||||
error_on_missing_key = false
|
|
||||||
data = <<EOT
|
|
||||||
{{- with secret "kv/data/disinto/bots/dev" -}}
|
|
||||||
FORGE_TOKEN={{ .Data.data.token }}
|
|
||||||
FORGE_PASS={{ .Data.data.pass }}
|
|
||||||
{{- else -}}
|
|
||||||
# WARNING: run tools/vault-seed-agents.sh
|
|
||||||
FORGE_TOKEN=seed-me
|
|
||||||
FORGE_PASS=seed-me
|
|
||||||
{{- end }}
|
|
||||||
{{- with secret "kv/data/disinto/bots/review" -}}
|
|
||||||
FORGE_REVIEW_TOKEN={{ .Data.data.token }}
|
|
||||||
{{- else -}}
|
|
||||||
FORGE_REVIEW_TOKEN=seed-me
|
|
||||||
{{- end }}
|
|
||||||
{{- with secret "kv/data/disinto/bots/gardener" -}}
|
|
||||||
FORGE_GARDENER_TOKEN={{ .Data.data.token }}
|
|
||||||
{{- else -}}
|
|
||||||
FORGE_GARDENER_TOKEN=seed-me
|
|
||||||
{{- end }}
|
|
||||||
{{- with secret "kv/data/disinto/bots/architect" -}}
|
|
||||||
FORGE_ARCHITECT_TOKEN={{ .Data.data.token }}
|
|
||||||
{{- else -}}
|
|
||||||
FORGE_ARCHITECT_TOKEN=seed-me
|
|
||||||
{{- end }}
|
|
||||||
{{- with secret "kv/data/disinto/bots/planner" -}}
|
|
||||||
FORGE_PLANNER_TOKEN={{ .Data.data.token }}
|
|
||||||
{{- else -}}
|
|
||||||
FORGE_PLANNER_TOKEN=seed-me
|
|
||||||
{{- end }}
|
|
||||||
{{- with secret "kv/data/disinto/bots/predictor" -}}
|
|
||||||
FORGE_PREDICTOR_TOKEN={{ .Data.data.token }}
|
|
||||||
{{- else -}}
|
|
||||||
FORGE_PREDICTOR_TOKEN=seed-me
|
|
||||||
{{- end }}
|
|
||||||
{{- with secret "kv/data/disinto/bots/supervisor" -}}
|
|
||||||
FORGE_SUPERVISOR_TOKEN={{ .Data.data.token }}
|
|
||||||
{{- else -}}
|
|
||||||
FORGE_SUPERVISOR_TOKEN=seed-me
|
|
||||||
{{- end }}
|
|
||||||
{{- with secret "kv/data/disinto/bots/vault" -}}
|
|
||||||
FORGE_VAULT_TOKEN={{ .Data.data.token }}
|
|
||||||
{{- else -}}
|
|
||||||
FORGE_VAULT_TOKEN=seed-me
|
|
||||||
{{- end }}
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
|
|
||||||
# Agents run Claude/llama sessions — need CPU + memory headroom.
|
|
||||||
resources {
|
|
||||||
cpu = 500
|
|
||||||
memory = 1024
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,152 +0,0 @@
|
||||||
# =============================================================================
|
|
||||||
# nomad/jobs/chat.hcl — Claude chat UI (Nomad service job)
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S5.2, issue #989). Lightweight service
|
|
||||||
# job for the Claude chat UI with sandbox hardening (#706).
|
|
||||||
#
|
|
||||||
# Build:
|
|
||||||
# Custom image built from docker/chat/Dockerfile as disinto/chat:local
|
|
||||||
# (same :local pattern as disinto/agents:local).
|
|
||||||
#
|
|
||||||
# Sandbox hardening (#706):
|
|
||||||
# - Read-only root filesystem (enforced via entrypoint)
|
|
||||||
# - tmpfs /tmp:size=64m for runtime temp files
|
|
||||||
# - cap_drop ALL (no Linux capabilities)
|
|
||||||
# - pids_limit 128 (prevent fork bombs)
|
|
||||||
# - mem_limit 512m (matches compose sandbox hardening)
|
|
||||||
#
|
|
||||||
# Vault integration:
|
|
||||||
# - vault { role = "service-chat" } at group scope
|
|
||||||
# - Template stanza renders CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET,
|
|
||||||
# FORWARD_AUTH_SECRET from kv/disinto/shared/chat
|
|
||||||
# - Seeded on fresh boxes by tools/vault-seed-chat.sh
|
|
||||||
#
|
|
||||||
# Host volume:
|
|
||||||
# - chat-history → /var/lib/chat/history (persists conversation history)
|
|
||||||
#
|
|
||||||
# Not the runtime yet: docker-compose.yml is still the factory's live stack
|
|
||||||
# until cutover. This file exists so CI can validate it and S5.2 can wire
|
|
||||||
# `disinto init --backend=nomad --with chat` to `nomad job run` it.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
job "chat" {
|
|
||||||
type = "service"
|
|
||||||
datacenters = ["dc1"]
|
|
||||||
|
|
||||||
group "chat" {
|
|
||||||
count = 1
|
|
||||||
|
|
||||||
# ── Vault workload identity (S5.2, issue #989) ───────────────────────────
|
|
||||||
# Role `service-chat` defined in vault/roles.yaml, policy in
|
|
||||||
# vault/policies/service-chat.hcl. Bound claim pins nomad_job_id = "chat".
|
|
||||||
vault {
|
|
||||||
role = "service-chat"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Network ──────────────────────────────────────────────────────────────
|
|
||||||
# External port 8080 for chat UI access (via edge proxy or direct).
|
|
||||||
network {
|
|
||||||
port "http" {
|
|
||||||
static = 8080
|
|
||||||
to = 8080
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Host volumes ─────────────────────────────────────────────────────────
|
|
||||||
# chat-history volume: declared in nomad/client.hcl, path
|
|
||||||
# /srv/disinto/chat-history on the factory box.
|
|
||||||
volume "chat-history" {
|
|
||||||
type = "host"
|
|
||||||
source = "chat-history"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Restart policy ───────────────────────────────────────────────────────
|
|
||||||
restart {
|
|
||||||
attempts = 3
|
|
||||||
interval = "5m"
|
|
||||||
delay = "15s"
|
|
||||||
mode = "delay"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Service registration ─────────────────────────────────────────────────
|
|
||||||
service {
|
|
||||||
name = "chat"
|
|
||||||
port = "http"
|
|
||||||
provider = "nomad"
|
|
||||||
|
|
||||||
check {
|
|
||||||
type = "http"
|
|
||||||
path = "/health"
|
|
||||||
interval = "10s"
|
|
||||||
timeout = "3s"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
task "chat" {
|
|
||||||
driver = "docker"
|
|
||||||
|
|
||||||
config {
|
|
||||||
image = "disinto/chat:local"
|
|
||||||
force_pull = false
|
|
||||||
# Sandbox hardening (#706): cap_drop ALL (no Linux capabilities)
|
|
||||||
# tmpfs /tmp for runtime files (64MB)
|
|
||||||
# pids_limit 128 (prevent fork bombs)
|
|
||||||
# ReadonlyRootfs enforced via entrypoint script (fails if running as root)
|
|
||||||
cap_drop = ["ALL"]
|
|
||||||
tmpfs = ["/tmp:size=64m"]
|
|
||||||
pids_limit = 128
|
|
||||||
# Security options for sandbox hardening
|
|
||||||
# apparmor=unconfined needed for Claude CLI ptrace access
|
|
||||||
# no-new-privileges prevents privilege escalation
|
|
||||||
security_opt = ["apparmor=unconfined", "no-new-privileges"]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Volume mounts ──────────────────────────────────────────────────────
|
|
||||||
# Mount chat-history for conversation persistence
|
|
||||||
volume_mount {
|
|
||||||
volume = "chat-history"
|
|
||||||
destination = "/var/lib/chat/history"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Environment: secrets from Vault (S5.2) ──────────────────────────────
|
|
||||||
# CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, FORWARD_AUTH_SECRET
|
|
||||||
# rendered from kv/disinto/shared/chat via template stanza.
|
|
||||||
env {
|
|
||||||
FORGE_URL = "http://forgejo:3000"
|
|
||||||
CHAT_MAX_REQUESTS_PER_HOUR = "60"
|
|
||||||
CHAT_MAX_REQUESTS_PER_DAY = "1000"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Vault-templated secrets (S5.2, issue #989) ─────────────────────────
|
|
||||||
# Renders chat-secrets.env from Vault KV v2 at kv/disinto/shared/chat.
|
|
||||||
# Placeholder values kept < 16 chars to avoid secret-scan CI failures.
|
|
||||||
template {
|
|
||||||
destination = "secrets/chat-secrets.env"
|
|
||||||
env = true
|
|
||||||
change_mode = "restart"
|
|
||||||
error_on_missing_key = false
|
|
||||||
data = <<EOT
|
|
||||||
{{- with secret "kv/data/disinto/shared/chat" -}}
|
|
||||||
CHAT_OAUTH_CLIENT_ID={{ .Data.data.chat_oauth_client_id }}
|
|
||||||
CHAT_OAUTH_CLIENT_SECRET={{ .Data.data.chat_oauth_client_secret }}
|
|
||||||
FORWARD_AUTH_SECRET={{ .Data.data.forward_auth_secret }}
|
|
||||||
{{- else -}}
|
|
||||||
# WARNING: run tools/vault-seed-chat.sh
|
|
||||||
CHAT_OAUTH_CLIENT_ID=seed-me
|
|
||||||
CHAT_OAUTH_CLIENT_SECRET=seed-me
|
|
||||||
FORWARD_AUTH_SECRET=seed-me
|
|
||||||
{{- end -}}
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Sandbox hardening (S5.2, #706) ────────────────────────────────────
|
|
||||||
# Memory = 512MB (matches docker-compose sandbox hardening)
|
|
||||||
resources {
|
|
||||||
cpu = 200
|
|
||||||
memory = 512
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,193 +0,0 @@
|
||||||
# =============================================================================
|
|
||||||
# nomad/jobs/edge.hcl — Edge proxy (Caddy + dispatcher sidecar) (Nomad service job)
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S5.1, issue #988). Caddy reverse proxy
|
|
||||||
# routes traffic to Forgejo, Woodpecker, staging, and chat services. The
|
|
||||||
# dispatcher sidecar polls disinto-ops for vault actions and dispatches them
|
|
||||||
# via Nomad batch jobs.
|
|
||||||
#
|
|
||||||
# Host_volume contract:
|
|
||||||
# This job mounts caddy-data from nomad/client.hcl. Path
|
|
||||||
# /srv/disinto/caddy-data is created by lib/init/nomad/cluster-up.sh before
|
|
||||||
# any job references it. Keep the `source = "caddy-data"` below in sync
|
|
||||||
# with the host_volume stanza in client.hcl.
|
|
||||||
#
|
|
||||||
# Build step (S5.1):
|
|
||||||
# docker/edge/Dockerfile is custom (adds bash, jq, curl, git, docker-cli,
|
|
||||||
# python3, openssh-client, autossh to caddy:latest). Build as
|
|
||||||
# disinto/edge:local using the same pattern as disinto/agents:local.
|
|
||||||
# Command: docker build -t disinto/edge:local -f docker/edge/Dockerfile docker/edge
|
|
||||||
#
|
|
||||||
# Not the runtime yet: docker-compose.yml is still the factory's live stack
|
|
||||||
# until cutover. This file exists so CI can validate it and S5.2 can wire
|
|
||||||
# `disinto init --backend=nomad --with edge` to `nomad job run` it.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
job "edge" {
  type        = "service"
  datacenters = ["dc1"]

  group "edge" {
    count = 1

    # Workload identity for the dispatcher task (S5.1, issue #988): the
    # "service-dispatcher" role (vault/roles.yaml) carries the policy in
    # vault/policies/dispatcher.hcl, letting the dispatcher fetch vault
    # actions from KV v2.
    vault {
      role = "service-dispatcher"
    }

    # Caddy terminates HTTP and HTTPS on the host — expose :80 and :443
    # statically (S5.1, issue #988).
    network {
      port "http" {
        static = 80
        to     = 80
      }

      port "https" {
        static = 443
        to     = 443
      }
    }

    # ACME certificates and Caddy config state. `source` must stay in sync
    # with the host_volume stanza in nomad/client.hcl
    # (/srv/disinto/caddy-data on the factory box).
    volume "caddy-data" {
      type      = "host"
      source    = "caddy-data"
      read_only = false
    }

    # disinto-ops clone, polled by the dispatcher for vault actions.
    volume "ops-repo" {
      type      = "host"
      source    = "ops-repo"
      read_only = false
    }

    # Fail fast to the scheduler rather than spin on a broken image/config:
    # 3 attempts over 5m, then back off. Caddy should be stable; the
    # dispatcher may restart on errors.
    restart {
      attempts = 3
      interval = "5m"
      delay    = "15s"
      mode     = "delay"
    }

    # Nomad-native service discovery; the alloc counts as healthy once
    # Caddy answers plain HTTP on :80.
    service {
      name     = "edge"
      port     = "http"
      provider = "nomad"

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "3s"
      }
    }

    # Reverse-proxy task (S5.1, issue #988).
    task "caddy" {
      driver = "docker"

      config {
        # Pre-built disinto/edge:local — caddy:latest plus bash, jq, curl,
        # git, docker-cli, python3, openssh-client, autossh.
        image      = "disinto/edge:local"
        force_pull = false
        ports      = ["http", "https"]

        # Matches docker-compose; autossh in the entrypoint script needs
        # apparmor disabled.
        security_opt = ["apparmor=unconfined"]
      }

      # ACME state + config directory. entrypoint-edge.sh mounts the
      # Caddyfile at /etc/caddy/Caddyfile.
      volume_mount {
        volume      = "caddy-data"
        destination = "/data"
        read_only   = false
      }

      # Non-secret configuration only.
      env {
        FORGE_URL         = "http://forgejo:3000"
        FORGE_REPO        = "disinto-admin/disinto"
        DISINTO_CONTAINER = "1"
        PROJECT_NAME      = "disinto"
      }

      # Headroom for reverse-proxy work.
      resources {
        cpu    = 200
        memory = 256
      }
    }

    # Vault-action dispatcher sidecar (S5.1, issue #988).
    task "dispatcher" {
      driver = "docker"

      config {
        # Same image the other agents run.
        image      = "disinto/agents:local"
        force_pull = false

        # Matches docker-compose.
        security_opt = ["apparmor=unconfined"]

        # docker.sock is bind-mounted (not a Nomad host volume) for legacy
        # docker-backend compat: Nomad host volumes must be named volumes
        # from client.hcl, and socket files cannot be host volumes.
        volumes = ["/var/run/docker.sock:/var/run/docker.sock:ro"]
      }

      # disinto-ops checkout for vault-actions polling.
      volume_mount {
        volume      = "ops-repo"
        destination = "/home/agent/repos/disinto-ops"
        read_only   = false
      }

      # FORGE_TOKEN rendered from Vault KV v2 for ops-repo access (S5.1,
      # issue #988); change_mode = "restart" rolls the task whenever the
      # secret changes, and error_on_missing_key = false keeps a fresh box
      # from hanging on template-pending.
      template {
        destination          = "secrets/dispatcher.env"
        env                  = true
        change_mode          = "restart"
        error_on_missing_key = false
        data                 = <<EOT
{{- with secret "kv/data/disinto/bots/vault" -}}
FORGE_TOKEN={{ .Data.data.token }}
{{- else -}}
# WARNING: kv/disinto/bots/vault is empty — run tools/vault-seed-agents.sh
FORGE_TOKEN=seed-me
{{- end }}
EOT
      }

      # Non-secret configuration only.
      env {
        DISPATCHER_BACKEND = "nomad"
        FORGE_URL          = "http://forgejo:3000"
        FORGE_REPO         = "disinto-admin/disinto"
        FORGE_OPS_REPO     = "disinto-admin/disinto-ops"
        PRIMARY_BRANCH     = "main"
        DISINTO_CONTAINER  = "1"
        OPS_REPO_ROOT      = "/home/agent/repos/disinto-ops"
        FORGE_ADMIN_USERS  = "vault-bot,admin"
      }

      # Lightweight poller — minimal CPU + memory.
      resources {
        cpu    = 100
        memory = 256
      }
    }
  }
}
|
|
||||||
|
|
|
||||||
# =============================================================================
# nomad/jobs/forgejo.hcl — Forgejo git server (Nomad service job)
#
# Part of the Nomad+Vault migration (S1.1, issue #840; S2.4, issue #882).
# First jobspec to land under nomad/jobs/ — exercises the docker driver +
# host_volume plumbing from Step 0 (client.hcl) on a real factory service.
# S2.4 layered Vault on top: admin/internal secrets now render via workload
# identity + template stanza instead of inline env.
#
# Host_volume contract:
# Mounts the `forgejo-data` host_volume declared in nomad/client.hcl,
# backed by /srv/disinto/forgejo-data on the factory box and created by
# lib/init/nomad/cluster-up.sh before any job references it. Keep
# `source = "forgejo-data"` below in sync with the host_volume stanza in
# client.hcl — drift = scheduling failures.
#
# Vault integration (S2.4):
# - vault { role = "service-forgejo" } at group scope: the task's
#   workload-identity JWT is exchanged for a Vault token carrying the
#   policy named on that role. Role + policy live in vault/roles.yaml +
#   vault/policies/service-forgejo.hcl.
# - template { destination = "secrets/forgejo.env" env = true } pulls
#   FORGEJO__security__{SECRET_KEY,INTERNAL_TOKEN} out of Vault KV v2 at
#   kv/disinto/shared/forgejo and merges them into the task env. Seeded
#   on fresh boxes by tools/vault-seed-forgejo.sh.
# - Non-secret env (DB type, ROOT_URL, ports, registration lockdown,
#   webhook allow-list) stays inline below — not sensitive, not worth
#   round-tripping through Vault.
#
# Not the runtime yet: docker-compose.yml is still the factory's live stack
# until cutover. This file exists so CI can validate it and S1.3 can wire
# `disinto init --backend=nomad --with forgejo` to `nomad job run` it.
# =============================================================================

job "forgejo" {
  type        = "service"
  datacenters = ["dc1"]

  group "forgejo" {
    count = 1

    # Vault workload identity (S2.4, issue #882). The role's bound claim
    # pins nomad_job_id = "forgejo" — renaming this jobspec's job without
    # updating vault/roles.yaml makes token exchange fail at placement
    # with a "claim mismatch" error. Role applied by
    # tools/vault-apply-roles.sh (S2.3).
    vault {
      role = "service-forgejo"
    }

    # Static :3000 matches docker-compose's published port so the rest of
    # the factory (agents, woodpecker, caddy) keeps reaching forgejo at
    # the same host:port during and after cutover; `to = 3000` maps the
    # host port onto the container's :3000 listener.
    network {
      port "http" {
        static = 3000
        to     = 3000
      }
    }

    # Declared in nomad/client.hcl → /srv/disinto/forgejo-data on the
    # factory box.
    volume "forgejo-data" {
      type      = "host"
      source    = "forgejo-data"
      read_only = false
    }

    # Fail fast to the scheduler instead of spinning on a broken
    # image/config: 3 attempts over 5m, then back off.
    restart {
      attempts = 3
      interval = "5m"
      delay    = "15s"
      mode     = "delay"
    }

    # Nomad-native service discovery (no Consul in this factory cluster).
    # initial_status is deliberately unset so Nomad waits for the first
    # probe to pass before marking the allocation healthy on boot.
    service {
      name     = "forgejo"
      port     = "http"
      provider = "nomad"

      check {
        type     = "http"
        path     = "/api/v1/version"
        interval = "10s"
        timeout  = "3s"
      }
    }

    task "forgejo" {
      driver = "docker"

      config {
        image = "codeberg.org/forgejo/forgejo:11.0"
        ports = ["http"]
      }

      volume_mount {
        volume      = "forgejo-data"
        destination = "/data"
        read_only   = false
      }

      # Non-secret env only — DB type, public URL, ports, install lock,
      # registration lockdown, webhook allow-list. Secret-bearing env
      # (SECRET_KEY, INTERNAL_TOKEN) lives in the template stanza below.
      env {
        FORGEJO__database__DB_TYPE             = "sqlite3"
        FORGEJO__server__ROOT_URL              = "http://forgejo:3000/"
        FORGEJO__server__HTTP_PORT             = "3000"
        FORGEJO__security__INSTALL_LOCK        = "true"
        FORGEJO__service__DISABLE_REGISTRATION = "true"
        FORGEJO__webhook__ALLOWED_HOST_LIST    = "private"
      }

      # Vault-templated secrets env (S2.4, issue #882).
      #
      # Renders <task-dir>/secrets/forgejo.env — the per-alloc secrets dir,
      # never on the host root filesystem and never in `nomad job inspect`
      # output. `env = true` merges every KEY=VAL line into the task env;
      # `change_mode = "restart"` re-runs the task whenever a watched
      # secret changes, so `vault kv put …` alone rolls new secrets.
      #
      # The literal `/data/` segment in the path is required by
      # consul-template for KV v2 mounts (the policy in
      # vault/policies/service-forgejo.hcl grants
      # `kv/data/disinto/shared/forgejo/*`, confirming v2).
      #
      # Empty-Vault fallback: on a fresh LXC the `with` short-circuits to
      # `else` and emits visible placeholders, so the container still
      # boots with obviously-bad secrets an operator will spot in
      # `env | grep FORGEJO` — better than forgejo silently regenerating
      # SECRET_KEY every restart and invalidating all sessions. Seed with
      # tools/vault-seed-forgejo.sh. "seed-me" is deliberately < 16 chars
      # so lib/secret-scan.sh (.woodpecker/secret-scan.yml) doesn't flag
      # it as a plaintext secret on every PR touching this file.
      #
      # error_on_missing_key = false stops consul-template from blocking
      # the alloc on template-pending when the KV path or a referenced key
      # is absent — without it, a fresh-LXC `disinto init --with forgejo`
      # against an empty Vault hangs until deploy.sh times out
      # (issue #912, bug #4).
      template {
        destination          = "secrets/forgejo.env"
        env                  = true
        change_mode          = "restart"
        error_on_missing_key = false
        data                 = <<EOT
{{- with secret "kv/data/disinto/shared/forgejo" -}}
FORGEJO__security__SECRET_KEY={{ .Data.data.secret_key }}
FORGEJO__security__INTERNAL_TOKEN={{ .Data.data.internal_token }}
{{- else -}}
# WARNING: kv/disinto/shared/forgejo is empty — run tools/vault-seed-forgejo.sh
FORGEJO__security__SECRET_KEY=seed-me
FORGEJO__security__INTERNAL_TOKEN=seed-me
{{- end -}}
EOT
      }

      # Baseline caps so an unhealthy forgejo can't starve the node
      # (compose runs it uncapped); tune once we have real usage numbers
      # under nomad.
      resources {
        cpu    = 300
        memory = 512
      }
    }
  }
}
|
|
||||||
|
|
|
||||||
# =============================================================================
# nomad/jobs/staging.hcl — Staging file server (Nomad service job)
#
# Part of the Nomad+Vault migration (S5.2, issue #989). Lightweight service
# job for the staging file server using Caddy as a static file server.
#
# Mount contract:
# This job mounts the `docker/` directory as `/srv/site` (read-only).
# The docker/ directory contains static content (images, HTML, etc.)
# served to staging environment users.
#
# Network:
# No external port exposed — the edge proxy routes to it internally via
# Nomad-native service discovery, so the host-side port is dynamic.
#
# Not the runtime yet: docker-compose.yml is still the factory's live stack
# until cutover. This file exists so CI can validate it and S5.2 can wire
# `disinto init --backend=nomad --with staging` to `nomad job run` it.
# =============================================================================

job "staging" {
  type        = "service"
  datacenters = ["dc1"]

  group "staging" {
    count = 1

    # No Vault integration needed — static file server, no secrets.

    # Internal-only service: edge reaches it through Nomad service
    # discovery, so the host port is dynamic. A static :80 here would
    # contradict the "no external port" contract above AND collide with
    # edge.hcl's static :80 — on the single-node dc1 cluster the two jobs
    # could never be placed together.
    network {
      port "http" {
        to = 80
      }
    }

    volume "site-content" {
      type      = "host"
      source    = "site-content"
      read_only = true
    }

    # Fail fast to the scheduler: 3 attempts over 5m, then back off.
    restart {
      attempts = 3
      interval = "5m"
      delay    = "15s"
      mode     = "delay"
    }

    # Registered for internal routing; healthy once the file server
    # answers on /.
    service {
      name     = "staging"
      port     = "http"
      provider = "nomad"

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "3s"
      }
    }

    task "staging" {
      driver = "docker"

      config {
        image = "caddy:alpine"
        ports = ["http"]

        # The official caddy image defines only CMD (no ENTRYPOINT), so a
        # bare `args` list would become the container command and fail
        # ("file-server" is not an executable) — name the binary
        # explicitly. --listen :80 pins the listener to the port that the
        # `to = 80` mapping and the health check expect.
        command = "caddy"
        args    = ["file-server", "--root", "/srv/site", "--listen", ":80"]
      }

      # Mount docker/ directory as /srv/site:ro (static content).
      volume_mount {
        volume      = "site-content"
        destination = "/srv/site"
        read_only   = true
      }

      resources {
        cpu    = 100
        memory = 256
      }
    }
  }
}
|
|
||||||
|
|
|
||||||
# =============================================================================
# nomad/jobs/vault-runner.hcl — Parameterized batch job for vault action dispatch
#
# Part of the Nomad+Vault migration (S5.3, issue #990). Replaces the
# `docker run --rm vault-runner-${action_id}` pattern in dispatcher.sh with
# a Nomad-native parameterized batch job. Dispatched by the edge dispatcher
# (S5.4) via `nomad job dispatch`.
#
# Parameterized meta:
# action_id — vault action identifier (used by entrypoint-runner.sh)
# secrets_csv — comma-separated secret names (e.g. "GITHUB_TOKEN,DEPLOY_KEY")
#
# Vault integration (approach A — pre-defined templates):
# All 6 known runner secrets are rendered via template stanzas with
# error_on_missing_key = false. Secrets not granted by the dispatch's
# Vault policies render as empty strings. The dispatcher (S5.4) sets
# vault { policies = [...] } per-dispatch based on the action TOML's
# secrets=[...] list, scoping access to only the declared secrets.
#
# Cleanup: Nomad garbage-collects completed batch dispatches automatically.
# =============================================================================

job "vault-runner" {
  type        = "batch"
  datacenters = ["dc1"]

  # Both meta keys must be supplied on every `nomad job dispatch`.
  parameterized {
    meta_required = ["action_id", "secrets_csv"]
  }

  group "runner" {
    count = 1

    # ── Vault workload identity ──────────────────────────────────────────────
    # Deliberately empty: per-dispatch policies are composed by the dispatcher
    # (S5.4) based on the action TOML's secrets=[...] list. Each policy grants
    # read access to exactly one kv/data/disinto/runner/<NAME> path. Roles
    # defined in vault/roles.yaml (runner-<NAME>), policies in vault/policies/.
    vault {}

    # disinto-ops checkout, mounted read-only for the runner.
    volume "ops-repo" {
      type      = "host"
      source    = "ops-repo"
      read_only = true
    }

    # No restart for batch — fail fast, let the dispatcher handle retries.
    restart {
      attempts = 0
      mode     = "fail"
    }

    task "runner" {
      driver = "docker"

      config {
        # Same image the other agents run; entrypoint overridden to run
        # the runner script with the dispatched action id.
        image      = "disinto/agents:local"
        force_pull = false
        entrypoint = ["bash"]
        args = [
          "/home/agent/disinto/docker/runner/entrypoint-runner.sh",
          "${NOMAD_META_action_id}",
        ]
      }

      volume_mount {
        volume      = "ops-repo"
        destination = "/home/agent/ops"
        read_only   = true
      }

      # ── Non-secret env ───────────────────────────────────────────────────────
      env {
        DISINTO_CONTAINER = "1"
        FACTORY_ROOT      = "/home/agent/disinto"
        OPS_REPO_ROOT     = "/home/agent/ops"
      }

      # ── Vault-templated runner secrets (approach A) ────────────────────────
      # Pre-defined templates for all 6 known runner secrets. Each renders
      # from kv/data/disinto/runner/<NAME> (the /data/ segment is the KV v2
      # read path). Secrets not granted by the dispatch's Vault policies
      # take the `else` branch and produce empty env vars (harmless).
      # error_on_missing_key = false prevents template-pending hangs when
      # a secret path is absent or the policy doesn't grant access.
      #
      # Placeholder values kept < 16 chars to avoid secret-scan CI failures.
      template {
        destination          = "secrets/runner.env"
        env                  = true
        error_on_missing_key = false
        data                 = <<EOT
{{- with secret "kv/data/disinto/runner/GITHUB_TOKEN" -}}
GITHUB_TOKEN={{ .Data.data.value }}
{{- else -}}
GITHUB_TOKEN=
{{- end }}
{{- with secret "kv/data/disinto/runner/CODEBERG_TOKEN" -}}
CODEBERG_TOKEN={{ .Data.data.value }}
{{- else -}}
CODEBERG_TOKEN=
{{- end }}
{{- with secret "kv/data/disinto/runner/CLAWHUB_TOKEN" -}}
CLAWHUB_TOKEN={{ .Data.data.value }}
{{- else -}}
CLAWHUB_TOKEN=
{{- end }}
{{- with secret "kv/data/disinto/runner/DEPLOY_KEY" -}}
DEPLOY_KEY={{ .Data.data.value }}
{{- else -}}
DEPLOY_KEY=
{{- end }}
{{- with secret "kv/data/disinto/runner/NPM_TOKEN" -}}
NPM_TOKEN={{ .Data.data.value }}
{{- else -}}
NPM_TOKEN=
{{- end }}
{{- with secret "kv/data/disinto/runner/DOCKER_HUB_TOKEN" -}}
DOCKER_HUB_TOKEN={{ .Data.data.value }}
{{- else -}}
DOCKER_HUB_TOKEN=
{{- end }}
EOT
      }

      # Formula execution headroom — matches agents.hcl baseline.
      resources {
        cpu    = 500
        memory = 1024
      }
    }
  }
}
|
|
||||||
|
|
|
||||||
# =============================================================================
# nomad/jobs/woodpecker-agent.hcl — Woodpecker CI agent (Nomad service job)
#
# Part of the Nomad+Vault migration (S3.2, issue #935). Drop-in replacement
# for the docker-compose setup: host networking + docker.sock mount so the
# agent can spawn pipeline containers via the mounted socket.
#
# Host networking:
# network_mode = "host" matches compose. The Woodpecker server gRPC
# endpoint is addressed via Nomad service discovery using the host's IP
# (10.10.10.x:9000) — the server's port binding in Nomad binds to the
# allocation's IP, not localhost.
#
# Vault integration:
# - vault { role = "service-woodpecker-agent" } at group scope: the task's
#   workload-identity JWT is exchanged for a Vault token carrying the
#   policy on that role (vault/roles.yaml +
#   vault/policies/service-woodpecker.hcl).
# - A template stanza pulls WOODPECKER_AGENT_SECRET from Vault KV v2 at
#   kv/disinto/shared/woodpecker into secrets/agent.env. Seeded on fresh
#   boxes by tools/vault-seed-woodpecker.sh.
# =============================================================================

job "woodpecker-agent" {
  type        = "service"
  datacenters = ["dc1"]

  group "woodpecker-agent" {
    count = 1

    # Workload identity. The role's bound claim pins
    # nomad_job_id = "woodpecker-agent" — renaming this job without
    # updating vault/roles.yaml makes token exchange fail at placement
    # with a "claim mismatch" error. Role applied by
    # tools/vault-apply-roles.sh.
    vault {
      role = "service-woodpecker-agent"
    }

    # The agent serves :3333/healthz; static port so Nomad can probe it.
    network {
      port "healthz" {
        static = 3333
      }
    }

    # Nomad-native discovery for the health-check endpoint.
    service {
      name     = "woodpecker-agent"
      port     = "healthz"
      provider = "nomad"

      check {
        type     = "http"
        path     = "/healthz"
        interval = "15s"
        timeout  = "3s"
      }
    }

    # Fail fast to the scheduler instead of spinning on a broken
    # image/config: 3 attempts over 5m, then back off.
    restart {
      attempts = 3
      interval = "5m"
      delay    = "15s"
      mode     = "delay"
    }

    task "woodpecker-agent" {
      driver = "docker"

      config {
        image        = "woodpeckerci/woodpecker-agent:v3"
        network_mode = "host"
        privileged   = true
        volumes      = ["/var/run/docker.sock:/var/run/docker.sock"]
      }

      # Non-secret env: server address, gRPC security, concurrency limit,
      # health-check endpoint. WOODPECKER_SERVER uses Nomad's attribute
      # interpolation to get the host's IP (10.10.10.x) — the server's
      # gRPC :9000 is bound to the allocation's IP, not localhost, so the
      # agent must use the LXC's eth0 IP rather than 127.0.0.1.
      env {
        WOODPECKER_SERVER           = "${attr.unique.network.ip-address}:9000"
        WOODPECKER_GRPC_SECURE      = "false"
        WOODPECKER_MAX_WORKFLOWS    = "1"
        WOODPECKER_HEALTHCHECK_ADDR = ":3333"
      }

      # Vault-templated agent secret.
      #
      # Renders <task-dir>/secrets/agent.env (per-alloc secrets dir — never
      # on the host root filesystem, never in `nomad job inspect`);
      # env = true merges WOODPECKER_AGENT_SECRET into the task env. The
      # literal /data/ segment is the consul-template KV v2 read path.
      #
      # Empty-Vault fallback: on a fresh LXC the `with` falls through to
      # `else` and the container still boots with an obviously-bad secret
      # that an operator will spot — better than the agent failing
      # silently with auth errors. Seed the path with
      # tools/vault-seed-woodpecker.sh. "seed-me" is deliberately
      # < 16 chars so lib/secret-scan.sh (.woodpecker/secret-scan.yml)
      # doesn't flag it, yet still distinctive in a `grep WOODPECKER`
      # audit.
      template {
        destination          = "secrets/agent.env"
        env                  = true
        change_mode          = "restart"
        error_on_missing_key = false
        data                 = <<EOT
{{- with secret "kv/data/disinto/shared/woodpecker" -}}
WOODPECKER_AGENT_SECRET={{ .Data.data.agent_secret }}
{{- else -}}
# WARNING: kv/disinto/shared/woodpecker is empty — run tools/vault-seed-woodpecker.sh
WOODPECKER_AGENT_SECRET=seed-me
{{- end -}}
EOT
      }

      # Baseline caps so an unhealthy agent can't starve the node; tune
      # once we have real usage numbers under nomad.
      resources {
        cpu    = 200
        memory = 256
      }
    }
  }
}
|
|
||||||
|
|
|
||||||
# =============================================================================
|
|
||||||
# nomad/jobs/woodpecker-server.hcl — Woodpecker CI server (Nomad service job)
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S3.1, issue #934).
|
|
||||||
# Runs the Woodpecker CI web UI + gRPC endpoint as a Nomad service job,
|
|
||||||
# reading its Forgejo OAuth + agent secret from Vault via workload identity.
|
|
||||||
#
|
|
||||||
# Host_volume contract:
|
|
||||||
# This job mounts the `woodpecker-data` host_volume declared in
|
|
||||||
# nomad/client.hcl. That volume is backed by /srv/disinto/woodpecker-data
|
|
||||||
# on the factory box, created by lib/init/nomad/cluster-up.sh before any
|
|
||||||
# job references it. Keep the `source = "woodpecker-data"` below in sync
|
|
||||||
# with the host_volume stanza in client.hcl — drift = scheduling failures.
|
|
||||||
#
|
|
||||||
# Vault integration (S2.4 pattern):
|
|
||||||
# - vault { role = "service-woodpecker" } at the group scope — the task's
|
|
||||||
# workload-identity JWT is exchanged for a Vault token carrying the
|
|
||||||
# policy named on that role. Role + policy are defined in
|
|
||||||
# vault/roles.yaml + vault/policies/service-woodpecker.hcl.
|
|
||||||
# - template { destination = "secrets/wp.env" env = true } pulls
|
|
||||||
# WOODPECKER_AGENT_SECRET, WOODPECKER_FORGEJO_CLIENT, and
|
|
||||||
# WOODPECKER_FORGEJO_SECRET out of Vault KV v2 at
|
|
||||||
# kv/disinto/shared/woodpecker and merges them into the task env.
|
|
||||||
# Agent secret seeded by tools/vault-seed-woodpecker.sh; OAuth
|
|
||||||
# client/secret seeded by S3.3 (wp-oauth-register.sh).
|
|
||||||
# - Non-secret env (DB driver, Forgejo URL, host URL, open registration)
|
|
||||||
# stays inline below — not sensitive, not worth round-tripping through
|
|
||||||
# Vault.
|
|
||||||
#
|
|
||||||
# Not the runtime yet: docker-compose.yml is still the factory's live stack
|
|
||||||
# until cutover. This file exists so CI can validate it and S3.4 can wire
|
|
||||||
# `disinto init --backend=nomad --with woodpecker` to `nomad job run` it.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
job "woodpecker-server" {
|
|
||||||
type = "service"
|
|
||||||
datacenters = ["dc1"]
|
|
||||||
|
|
||||||
group "woodpecker-server" {
|
|
||||||
count = 1
|
|
||||||
|
|
||||||
# ── Vault workload identity (S2.4 pattern) ──────────────────────────────
|
|
||||||
# `role = "service-woodpecker"` is defined in vault/roles.yaml and
|
|
||||||
# applied by tools/vault-apply-roles.sh (S2.3). The role's bound
|
|
||||||
# claim pins nomad_job_id = "woodpecker" — note the job_id in
|
|
||||||
# vault/roles.yaml is "woodpecker" (matching the roles.yaml entry),
|
|
||||||
# but the actual Nomad job name here is "woodpecker-server". Update
|
|
||||||
# vault/roles.yaml job_id to "woodpecker-server" if the bound claim
|
|
||||||
# enforces an exact match at placement.
|
|
||||||
vault {
|
|
||||||
role = "service-woodpecker"
|
|
||||||
}
|
|
||||||
|
|
||||||
# HTTP UI (:8000) + gRPC agent endpoint (:9000). Static ports match
|
|
||||||
# docker-compose's published ports so the rest of the factory keeps
|
|
||||||
# reaching woodpecker at the same host:port during and after cutover.
|
|
||||||
network {
|
|
||||||
port "http" {
|
|
||||||
static = 8000
|
|
||||||
to = 8000
|
|
||||||
}
|
|
||||||
port "grpc" {
|
|
||||||
static = 9000
|
|
||||||
to = 9000
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Host-volume mount: declared in nomad/client.hcl, path
|
|
||||||
# /srv/disinto/woodpecker-data on the factory box.
|
|
||||||
volume "woodpecker-data" {
|
|
||||||
type = "host"
|
|
||||||
source = "woodpecker-data"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# Conservative restart policy — fail fast to the scheduler instead of
|
|
||||||
# spinning on a broken image/config. 3 attempts over 5m, then back off.
|
|
||||||
restart {
|
|
||||||
attempts = 3
|
|
||||||
interval = "5m"
|
|
||||||
delay = "15s"
|
|
||||||
mode = "delay"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Native Nomad service discovery (no Consul in this factory cluster).
|
|
||||||
# Health check gates the service as healthy only after the HTTP API is
|
|
||||||
# up; initial_status is deliberately unset so Nomad waits for the first
|
|
||||||
# probe to pass before marking the allocation healthy on boot.
|
|
||||||
service {
|
|
||||||
name = "woodpecker"
|
|
||||||
port = "http"
|
|
||||||
provider = "nomad"
|
|
||||||
|
|
||||||
check {
|
|
||||||
type = "http"
|
|
||||||
path = "/healthz"
|
|
||||||
interval = "10s"
|
|
||||||
timeout = "3s"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
task "woodpecker-server" {
|
|
||||||
driver = "docker"
|
|
||||||
|
|
||||||
config {
|
|
||||||
image = "woodpeckerci/woodpecker-server:v3"
|
|
||||||
ports = ["http", "grpc"]
|
|
||||||
}
|
|
||||||
|
|
||||||
volume_mount {
|
|
||||||
volume = "woodpecker-data"
|
|
||||||
destination = "/var/lib/woodpecker"
|
|
||||||
read_only = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# Non-secret env — Forgejo integration flags, public URL, DB driver.
|
|
||||||
# Nothing sensitive here, so this stays inline. Secret-bearing env
|
|
||||||
# (agent secret, OAuth client/secret) lives in the template stanza
|
|
||||||
# below and is merged into task env.
|
|
||||||
env {
|
|
||||||
WOODPECKER_FORGEJO = "true"
|
|
||||||
WOODPECKER_FORGEJO_URL = "http://forgejo:3000"
|
|
||||||
WOODPECKER_HOST = "http://woodpecker:8000"
|
|
||||||
WOODPECKER_OPEN = "true"
|
|
||||||
WOODPECKER_DATABASE_DRIVER = "sqlite3"
|
|
||||||
WOODPECKER_DATABASE_DATASOURCE = "/var/lib/woodpecker/woodpecker.sqlite"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Vault-templated secrets env (S2.4 pattern) ─────────────────────────
|
|
||||||
# Renders `<task-dir>/secrets/wp.env` (per-alloc secrets dir, never on
|
|
||||||
# disk on the host root filesystem). `env = true` merges every KEY=VAL
|
|
||||||
# line into the task environment. `change_mode = "restart"` re-runs the
|
|
||||||
# task whenever a watched secret's value in Vault changes.
|
|
||||||
#
|
|
||||||
# Vault path: `kv/data/disinto/shared/woodpecker`. The literal `/data/`
|
|
||||||
# segment is required by consul-template for KV v2 mounts.
|
|
||||||
#
|
|
||||||
# Empty-Vault fallback (`with ... else ...`): on a fresh LXC where
|
|
||||||
# the KV path is absent, consul-template's `with` short-circuits to
|
|
||||||
# the `else` branch. Emitting visible placeholders means the container
|
|
||||||
# still boots, but with obviously-bad secrets. Seed the path with
|
|
||||||
# tools/vault-seed-woodpecker.sh (agent_secret) and S3.3's
|
|
||||||
# wp-oauth-register.sh (forgejo_client, forgejo_secret).
|
|
||||||
#
|
|
||||||
# Placeholder values are kept short on purpose: the repo-wide
|
|
||||||
# secret-scan flags `TOKEN=<16+ non-space chars>` as a plaintext
|
|
||||||
# secret; "seed-me" is < 16 chars and still distinctive.
|
|
||||||
template {
|
|
||||||
destination = "secrets/wp.env"
|
|
||||||
env = true
|
|
||||||
change_mode = "restart"
|
|
||||||
error_on_missing_key = false
|
|
||||||
data = <<EOT
|
|
||||||
{{- with secret "kv/data/disinto/shared/woodpecker" -}}
|
|
||||||
WOODPECKER_AGENT_SECRET={{ .Data.data.agent_secret }}
|
|
||||||
WOODPECKER_FORGEJO_CLIENT={{ .Data.data.forgejo_client }}
|
|
||||||
WOODPECKER_FORGEJO_SECRET={{ .Data.data.forgejo_secret }}
|
|
||||||
{{- else -}}
|
|
||||||
# WARNING: kv/disinto/shared/woodpecker is empty — run tools/vault-seed-woodpecker.sh + S3.3
|
|
||||||
WOODPECKER_AGENT_SECRET=seed-me
|
|
||||||
WOODPECKER_FORGEJO_CLIENT=seed-me
|
|
||||||
WOODPECKER_FORGEJO_SECRET=seed-me
|
|
||||||
{{- end -}}
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
|
|
||||||
resources {
|
|
||||||
cpu = 300
|
|
||||||
memory = 512
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,76 +0,0 @@
|
||||||
# =============================================================================
|
|
||||||
# nomad/server.hcl — Single-node combined server+client configuration
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S0.2, issue #822). Deployed to
|
|
||||||
# /etc/nomad.d/server.hcl on the factory dev box alongside client.hcl.
|
|
||||||
#
|
|
||||||
# This file owns: agent role, ports, bind, data directory.
|
|
||||||
# client.hcl owns: Docker driver plugin config + host_volume declarations.
|
|
||||||
#
|
|
||||||
# NOTE: On single-node setups these two files could be merged into one
|
|
||||||
# (Nomad auto-merges every *.hcl under -config=/etc/nomad.d). The split is
|
|
||||||
# purely for readability — role/bind/port vs. plugin/volume wiring.
|
|
||||||
#
|
|
||||||
# This is a factory dev-box baseline — TLS, ACLs, gossip encryption, and
|
|
||||||
# consul/vault integration are deliberately absent and land in later steps.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
data_dir = "/var/lib/nomad"
|
|
||||||
bind_addr = "127.0.0.1"
|
|
||||||
log_level = "INFO"
|
|
||||||
|
|
||||||
# All Nomad agent traffic stays on localhost — the factory box does not
|
|
||||||
# federate with peers. Ports are the Nomad defaults, pinned here so that
|
|
||||||
# future changes to these numbers are a visible diff.
|
|
||||||
ports {
|
|
||||||
http = 4646
|
|
||||||
rpc = 4647
|
|
||||||
serf = 4648
|
|
||||||
}
|
|
||||||
|
|
||||||
# Single-node combined mode: this agent is both the only server and the
|
|
||||||
# only client. bootstrap_expect=1 makes the server quorum-of-one.
|
|
||||||
server {
|
|
||||||
enabled = true
|
|
||||||
bootstrap_expect = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
client {
|
|
||||||
enabled = true
|
|
||||||
}
|
|
||||||
|
|
||||||
# Advertise localhost to self to avoid surprises if the default IP
|
|
||||||
# autodetection picks a transient interface (e.g. docker0, wg0).
|
|
||||||
advertise {
|
|
||||||
http = "127.0.0.1"
|
|
||||||
rpc = "127.0.0.1"
|
|
||||||
serf = "127.0.0.1"
|
|
||||||
}
|
|
||||||
|
|
||||||
# UI on by default — same bind as http, no TLS (localhost only).
|
|
||||||
ui {
|
|
||||||
enabled = true
|
|
||||||
}
|
|
||||||
|
|
||||||
# ─── Vault integration (S2.3, issue #881) ───────────────────────────────────
|
|
||||||
# Nomad jobs exchange their short-lived workload-identity JWT (signed by
|
|
||||||
# nomad's built-in signer at /.well-known/jwks.json on :4646) for a Vault
|
|
||||||
# token carrying the policies named by the role in `vault { role = "..." }`
|
|
||||||
# of each jobspec — no shared VAULT_TOKEN in job env.
|
|
||||||
#
|
|
||||||
# The JWT auth path (jwt-nomad) + per-role bindings live on the Vault
|
|
||||||
# side, written by lib/init/nomad/vault-nomad-auth.sh + tools/vault-apply-roles.sh.
|
|
||||||
# Roles are defined in vault/roles.yaml.
|
|
||||||
#
|
|
||||||
# `default_identity.aud = ["vault.io"]` matches bound_audiences on every
|
|
||||||
# role in vault/roles.yaml — a drift here would silently break every job's
|
|
||||||
# Vault token exchange at placement time.
|
|
||||||
vault {
|
|
||||||
enabled = true
|
|
||||||
address = "http://127.0.0.1:8200"
|
|
||||||
|
|
||||||
default_identity {
|
|
||||||
aud = ["vault.io"]
|
|
||||||
ttl = "1h"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,41 +0,0 @@
|
||||||
# =============================================================================
|
|
||||||
# nomad/vault.hcl — Single-node Vault configuration (dev-persisted seal)
|
|
||||||
#
|
|
||||||
# Part of the Nomad+Vault migration (S0.3, issue #823). Deployed to
|
|
||||||
# /etc/vault.d/vault.hcl on the factory dev box.
|
|
||||||
#
|
|
||||||
# Seal model: the single unseal key lives on disk at /etc/vault.d/unseal.key
|
|
||||||
# (0400 root) and is read by systemd ExecStartPost on every boot. This is
|
|
||||||
# the factory-dev-box-acceptable tradeoff — seal-key theft equals vault
|
|
||||||
# theft, but we avoid running a second Vault to auto-unseal the first.
|
|
||||||
#
|
|
||||||
# This is a factory dev-box baseline — TLS, HA, Raft storage, and audit
|
|
||||||
# devices are deliberately absent. Storage is the `file` backend (single
|
|
||||||
# node only). Listener is localhost-only, so no external TLS is needed.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
# File storage backend — single-node only, no HA, no raft. State lives in
|
|
||||||
# /var/lib/vault/data which is created (root:root 0700) by
|
|
||||||
# lib/init/nomad/systemd-vault.sh before the unit starts.
|
|
||||||
storage "file" {
|
|
||||||
path = "/var/lib/vault/data"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Localhost-only listener. TLS is disabled because all callers are on the
|
|
||||||
# same box — flipping this to tls_disable=false is an audit-worthy change
|
|
||||||
# paired with cert provisioning.
|
|
||||||
listener "tcp" {
|
|
||||||
address = "127.0.0.1:8200"
|
|
||||||
tls_disable = true
|
|
||||||
}
|
|
||||||
|
|
||||||
# mlock prevents Vault's in-memory secrets from being swapped to disk. We
|
|
||||||
# keep it enabled; the systemd unit grants CAP_IPC_LOCK so mlock() succeeds.
|
|
||||||
disable_mlock = false
|
|
||||||
|
|
||||||
# Advertised API address — used by Vault clients on this host. Matches
|
|
||||||
# the listener above.
|
|
||||||
api_addr = "http://127.0.0.1:8200"
|
|
||||||
|
|
||||||
# UI on by default — same bind as listener, no TLS (localhost only).
|
|
||||||
ui = true
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Planner Agent
|
# Planner Agent
|
||||||
|
|
||||||
**Role**: Strategic planning using a Prerequisite Tree (Theory of Constraints),
|
**Role**: Strategic planning using a Prerequisite Tree (Theory of Constraints),
|
||||||
|
|
@ -34,9 +34,7 @@ will then sections) and marks the prerequisite as blocked-on-vault in the tree.
|
||||||
Deduplication: checks pending/ + approved/ + fired/ before creating.
|
Deduplication: checks pending/ + approved/ + fired/ before creating.
|
||||||
Phase 4 (journal-and-memory): write updated prerequisite tree + daily journal
|
Phase 4 (journal-and-memory): write updated prerequisite tree + daily journal
|
||||||
entry (committed to ops repo) and update `$OPS_REPO_ROOT/knowledge/planner-memory.md`.
|
entry (committed to ops repo) and update `$OPS_REPO_ROOT/knowledge/planner-memory.md`.
|
||||||
Phase 5 (commit-ops): commit all ops repo changes to a `planner/run-YYYY-MM-DD`
|
Phase 5 (commit-ops): commit all ops repo changes, push directly.
|
||||||
branch, then create a PR and walk it to merge via review-bot (`pr_create` →
|
|
||||||
`pr_walk_to_merge`), mirroring the architect's ops flow. No direct push to main.
|
|
||||||
AGENTS.md maintenance is handled by the Gardener.
|
AGENTS.md maintenance is handled by the Gardener.
|
||||||
|
|
||||||
**Artifacts use `$OPS_REPO_ROOT`**: All planner artifacts (journal,
|
**Artifacts use `$OPS_REPO_ROOT`**: All planner artifacts (journal,
|
||||||
|
|
@ -57,7 +55,7 @@ nervous system component, not work.
|
||||||
creates tmux session, injects formula prompt, monitors phase file, handles crash recovery, cleans up
|
creates tmux session, injects formula prompt, monitors phase file, handles crash recovery, cleans up
|
||||||
- `formulas/run-planner.toml` — Execution spec: six steps (preflight,
|
- `formulas/run-planner.toml` — Execution spec: six steps (preflight,
|
||||||
prediction-triage, update-prerequisite-tree, file-at-constraints,
|
prediction-triage, update-prerequisite-tree, file-at-constraints,
|
||||||
journal-and-memory, commit-ops-changes) with `needs` dependencies. Claude
|
journal-and-memory, commit-and-pr) with `needs` dependencies. Claude
|
||||||
executes all steps in a single interactive session with tool access
|
executes all steps in a single interactive session with tool access
|
||||||
- `formulas/groom-backlog.toml` — Grooming formula for backlog triage and
|
- `formulas/groom-backlog.toml` — Grooming formula for backlog triage and
|
||||||
grooming. (Note: the planner no longer dispatches breakdown mode — complex
|
grooming. (Note: the planner no longer dispatches breakdown mode — complex
|
||||||
|
|
|
||||||
|
|
@ -10,9 +10,7 @@
|
||||||
# 2. Load formula (formulas/run-planner.toml)
|
# 2. Load formula (formulas/run-planner.toml)
|
||||||
# 3. Context: VISION.md, AGENTS.md, ops:RESOURCES.md, structural graph,
|
# 3. Context: VISION.md, AGENTS.md, ops:RESOURCES.md, structural graph,
|
||||||
# planner memory, journal entries
|
# planner memory, journal entries
|
||||||
# 4. Create ops branch planner/run-YYYY-MM-DD for changes
|
# 4. agent_run(worktree, prompt) → Claude plans, may push knowledge updates
|
||||||
# 5. agent_run(worktree, prompt) → Claude plans, commits to ops branch
|
|
||||||
# 6. If ops branch has commits: pr_create → pr_walk_to_merge (review-bot)
|
|
||||||
#
|
#
|
||||||
# Usage:
|
# Usage:
|
||||||
# planner-run.sh [projects/disinto.toml] # project config (default: disinto)
|
# planner-run.sh [projects/disinto.toml] # project config (default: disinto)
|
||||||
|
|
@ -24,11 +22,10 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto (planner is disinto infrastructure)
|
# Accept project config from argument; default to disinto (planner is disinto infrastructure)
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
|
||||||
# env.sh from nested shells / claude -p tools (#762, #747)
|
|
||||||
export FORGE_TOKEN_OVERRIDE="${FORGE_PLANNER_TOKEN:-}"
|
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
|
# Use planner-bot's own Forgejo identity (#747)
|
||||||
|
FORGE_TOKEN="${FORGE_PLANNER_TOKEN:-${FORGE_TOKEN}}"
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
@ -37,10 +34,6 @@ source "$FACTORY_ROOT/lib/worktree.sh"
|
||||||
source "$FACTORY_ROOT/lib/guard.sh"
|
source "$FACTORY_ROOT/lib/guard.sh"
|
||||||
# shellcheck source=../lib/agent-sdk.sh
|
# shellcheck source=../lib/agent-sdk.sh
|
||||||
source "$FACTORY_ROOT/lib/agent-sdk.sh"
|
source "$FACTORY_ROOT/lib/agent-sdk.sh"
|
||||||
# shellcheck source=../lib/ci-helpers.sh
|
|
||||||
source "$FACTORY_ROOT/lib/ci-helpers.sh"
|
|
||||||
# shellcheck source=../lib/pr-lifecycle.sh
|
|
||||||
source "$FACTORY_ROOT/lib/pr-lifecycle.sh"
|
|
||||||
|
|
||||||
LOG_FILE="${DISINTO_LOG_DIR}/planner/planner.log"
|
LOG_FILE="${DISINTO_LOG_DIR}/planner/planner.log"
|
||||||
# shellcheck disable=SC2034 # consumed by agent-sdk.sh
|
# shellcheck disable=SC2034 # consumed by agent-sdk.sh
|
||||||
|
|
@ -152,69 +145,12 @@ ${PROMPT_FOOTER}"
|
||||||
# ── Create worktree ──────────────────────────────────────────────────────
|
# ── Create worktree ──────────────────────────────────────────────────────
|
||||||
formula_worktree_setup "$WORKTREE"
|
formula_worktree_setup "$WORKTREE"
|
||||||
|
|
||||||
# ── Prepare ops branch for PR-based merge (#765) ────────────────────────
|
|
||||||
PLANNER_OPS_BRANCH="planner/run-$(date -u +%Y-%m-%d)"
|
|
||||||
(
|
|
||||||
cd "$OPS_REPO_ROOT"
|
|
||||||
git fetch origin "${PRIMARY_BRANCH}" --quiet 2>/dev/null || true
|
|
||||||
git checkout "${PRIMARY_BRANCH}" --quiet 2>/dev/null || true
|
|
||||||
git pull --ff-only origin "${PRIMARY_BRANCH}" --quiet 2>/dev/null || true
|
|
||||||
# Create (or reset to) a fresh branch from PRIMARY_BRANCH
|
|
||||||
git checkout -B "$PLANNER_OPS_BRANCH" "origin/${PRIMARY_BRANCH}" --quiet 2>/dev/null || \
|
|
||||||
git checkout -b "$PLANNER_OPS_BRANCH" --quiet 2>/dev/null || true
|
|
||||||
)
|
|
||||||
log "ops branch: ${PLANNER_OPS_BRANCH}"
|
|
||||||
|
|
||||||
# ── Run agent ─────────────────────────────────────────────────────────────
|
# ── Run agent ─────────────────────────────────────────────────────────────
|
||||||
export CLAUDE_MODEL="opus"
|
export CLAUDE_MODEL="opus"
|
||||||
|
|
||||||
agent_run --worktree "$WORKTREE" "$PROMPT"
|
agent_run --worktree "$WORKTREE" "$PROMPT"
|
||||||
log "agent_run complete"
|
log "agent_run complete"
|
||||||
|
|
||||||
# ── PR lifecycle: create PR on ops repo and walk to merge (#765) ─────────
|
|
||||||
OPS_FORGE_API="${FORGE_API_BASE}/repos/${FORGE_OPS_REPO}"
|
|
||||||
ops_has_commits=false
|
|
||||||
if ! git -C "$OPS_REPO_ROOT" diff --quiet "origin/${PRIMARY_BRANCH}..${PLANNER_OPS_BRANCH}" 2>/dev/null; then
|
|
||||||
ops_has_commits=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$ops_has_commits" = "true" ]; then
|
|
||||||
log "ops branch has commits — creating PR"
|
|
||||||
# Push the branch to the ops remote
|
|
||||||
git -C "$OPS_REPO_ROOT" push origin "$PLANNER_OPS_BRANCH" --quiet 2>/dev/null || \
|
|
||||||
git -C "$OPS_REPO_ROOT" push --force-with-lease origin "$PLANNER_OPS_BRANCH" 2>/dev/null
|
|
||||||
|
|
||||||
# Temporarily point FORGE_API at the ops repo for pr-lifecycle functions
|
|
||||||
ORIG_FORGE_API="$FORGE_API"
|
|
||||||
export FORGE_API="$OPS_FORGE_API"
|
|
||||||
# Ops repo typically has no Woodpecker CI — skip CI polling
|
|
||||||
ORIG_WOODPECKER_REPO_ID="${WOODPECKER_REPO_ID:-2}"
|
|
||||||
export WOODPECKER_REPO_ID="0"
|
|
||||||
|
|
||||||
PR_NUM=$(pr_create "$PLANNER_OPS_BRANCH" \
|
|
||||||
"chore: planner run $(date -u +%Y-%m-%d)" \
|
|
||||||
"Automated planner run — updates prerequisite tree, memory, and vault items." \
|
|
||||||
"${PRIMARY_BRANCH}" \
|
|
||||||
"$OPS_FORGE_API") || true
|
|
||||||
|
|
||||||
if [ -n "$PR_NUM" ]; then
|
|
||||||
log "ops PR #${PR_NUM} created — walking to merge"
|
|
||||||
SESSION_ID=$(cat "$SID_FILE" 2>/dev/null || echo "planner-$$")
|
|
||||||
pr_walk_to_merge "$PR_NUM" "$SESSION_ID" "$OPS_REPO_ROOT" 1 2 || {
|
|
||||||
log "ops PR #${PR_NUM} walk finished: ${_PR_WALK_EXIT_REASON:-unknown}"
|
|
||||||
}
|
|
||||||
log "ops PR #${PR_NUM} result: ${_PR_WALK_EXIT_REASON:-unknown}"
|
|
||||||
else
|
|
||||||
log "WARNING: failed to create ops PR for branch ${PLANNER_OPS_BRANCH}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Restore original FORGE_API
|
|
||||||
export FORGE_API="$ORIG_FORGE_API"
|
|
||||||
export WOODPECKER_REPO_ID="$ORIG_WOODPECKER_REPO_ID"
|
|
||||||
else
|
|
||||||
log "no ops changes — skipping PR creation"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Persist watermarks so next run can skip if nothing changed
|
# Persist watermarks so next run can skip if nothing changed
|
||||||
mkdir -p "$FACTORY_ROOT/state"
|
mkdir -p "$FACTORY_ROOT/state"
|
||||||
echo "$CURRENT_SHA" > "$LAST_SHA_FILE"
|
echo "$CURRENT_SHA" > "$LAST_SHA_FILE"
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Predictor Agent
|
# Predictor Agent
|
||||||
|
|
||||||
**Role**: Abstract adversary (the "goblin"). Runs a 2-step formula
|
**Role**: Abstract adversary (the "goblin"). Runs a 2-step formula
|
||||||
|
|
|
||||||
|
|
@ -23,11 +23,10 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
|
||||||
# env.sh from nested shells / claude -p tools (#762, #747)
|
|
||||||
export FORGE_TOKEN_OVERRIDE="${FORGE_PREDICTOR_TOKEN:-}"
|
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
|
# Use predictor-bot's own Forgejo identity (#747)
|
||||||
|
FORGE_TOKEN="${FORGE_PREDICTOR_TOKEN:-${FORGE_TOKEN}}"
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Review Agent
|
# Review Agent
|
||||||
|
|
||||||
**Role**: AI-powered PR review — post structured findings and formal
|
**Role**: AI-powered PR review — post structured findings and formal
|
||||||
|
|
|
||||||
|
|
@ -59,21 +59,6 @@ fi
|
||||||
|
|
||||||
mkdir -p "$EVIDENCE_DIR"
|
mkdir -p "$EVIDENCE_DIR"
|
||||||
|
|
||||||
# Verify input is Caddy JSON format (not Combined Log Format or other)
|
|
||||||
first_line=$(grep -m1 '.' "$CADDY_LOG" || true)
|
|
||||||
if [ -z "$first_line" ]; then
|
|
||||||
log "WARN: Caddy access log is empty at ${CADDY_LOG}"
|
|
||||||
echo "WARN: Caddy access log is empty — nothing to parse." >&2
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
if ! printf '%s\n' "$first_line" | jq empty 2>/dev/null; then
|
|
||||||
preview="${first_line:0:200}"
|
|
||||||
log "ERROR: Input file is not Caddy JSON format (expected structured JSON access log). Got: ${preview}"
|
|
||||||
echo "ERROR: Input file is not Caddy JSON format (expected structured JSON access log)." >&2
|
|
||||||
echo "Got: ${preview}" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ── Parse access log ────────────────────────────────────────────────────────
|
# ── Parse access log ────────────────────────────────────────────────────────
|
||||||
|
|
||||||
log "Parsing ${CADDY_LOG} for entries since $(date -u -d "@${CUTOFF_TS}" +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || echo "${CUTOFF_TS}")"
|
log "Parsing ${CADDY_LOG} for entries since $(date -u -d "@${CUTOFF_TS}" +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || echo "${CUTOFF_TS}")"
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c872f282428861a735fbbb00609f77d063ad92b3 -->
|
<!-- last-reviewed: ec7dff854a71d64f427a4e6d6874de8e6dee822c -->
|
||||||
# Supervisor Agent
|
# Supervisor Agent
|
||||||
|
|
||||||
**Role**: Health monitoring and auto-remediation, executed as a formula-driven
|
**Role**: Health monitoring and auto-remediation, executed as a formula-driven
|
||||||
|
|
@ -7,11 +7,13 @@ then runs an interactive Claude session (sonnet) that assesses health, auto-fixe
|
||||||
issues, and writes a daily journal. When blocked on external
|
issues, and writes a daily journal. When blocked on external
|
||||||
resources or human decisions, files vault items instead of escalating directly.
|
resources or human decisions, files vault items instead of escalating directly.
|
||||||
|
|
||||||
**Trigger**: `supervisor-run.sh` is invoked by two polling loops:
|
**Trigger**: `supervisor-run.sh` is invoked by the polling loop in `docker/edge/entrypoint-edge.sh`
|
||||||
- **Agents container** (`docker/agents/entrypoint.sh`): every `SUPERVISOR_INTERVAL` seconds (default 1200 = 20 min). Controlled by the `supervisor` role in `AGENT_ROLES` (included in the default seven-role set since P1/#801). Logs to `supervisor.log` in the agents container.
|
every 20 minutes (line 50-53). Sources `lib/guard.sh` and calls `check_active supervisor` first
|
||||||
- **Edge container** (`docker/edge/entrypoint-edge.sh`): separate loop in the edge container (line 169-172). Runs independently of the agents container's polling schedule.
|
— skips if `$FACTORY_ROOT/state/.supervisor-active` is absent. Then runs `claude -p` via
|
||||||
|
`agent-sdk.sh`, injects `formulas/run-supervisor.toml` with pre-collected metrics as context,
|
||||||
Both invoke the same `supervisor-run.sh`. Sources `lib/guard.sh` and calls `check_active supervisor` first — skips if `$FACTORY_ROOT/state/.supervisor-active` is absent. Then runs `claude -p` via `agent-sdk.sh`, injects `formulas/run-supervisor.toml` with pre-collected metrics as context, and cleans up on completion or timeout.
|
and cleans up on completion or timeout (20 min max session). Note: the supervisor runs in the
|
||||||
|
**edge container** (`entrypoint-edge.sh`), not the agent container — this distinction matters
|
||||||
|
for operators debugging the factory.
|
||||||
|
|
||||||
**Key files**:
|
**Key files**:
|
||||||
- `supervisor/supervisor-run.sh` — Polling loop participant + orchestrator: lock, memory guard,
|
- `supervisor/supervisor-run.sh` — Polling loop participant + orchestrator: lock, memory guard,
|
||||||
|
|
@ -24,18 +26,10 @@ Both invoke the same `supervisor-run.sh`. Sources `lib/guard.sh` and calls `chec
|
||||||
files for `PHASE:escalate` entries and auto-removes any whose linked issue
|
files for `PHASE:escalate` entries and auto-removes any whose linked issue
|
||||||
is confirmed closed (24h grace period after closure to avoid races). Reports
|
is confirmed closed (24h grace period after closure to avoid races). Reports
|
||||||
**stale crashed worktrees** (worktrees preserved after crash) — supervisor
|
**stale crashed worktrees** (worktrees preserved after crash) — supervisor
|
||||||
housekeeping removes them after 24h. Collects **Woodpecker agent health**
|
housekeeping removes them after 24h
|
||||||
(added #933): container `disinto-woodpecker-agent` health/running status,
|
|
||||||
gRPC error count in last 20 min, fast-failure pipeline count (<60s, last 15 min),
|
|
||||||
and overall health verdict (healthy/unhealthy). Unhealthy verdict triggers
|
|
||||||
automatic container restart + `blocked:ci_exhausted` issue recovery in
|
|
||||||
`supervisor-run.sh` before the Claude session starts.
|
|
||||||
- `formulas/run-supervisor.toml` — Execution spec: five steps (preflight review,
|
- `formulas/run-supervisor.toml` — Execution spec: five steps (preflight review,
|
||||||
health-assessment, decide-actions, report, journal) with `needs` dependencies.
|
health-assessment, decide-actions, report, journal) with `needs` dependencies.
|
||||||
Claude evaluates all metrics and takes actions in a single interactive session.
|
Claude evaluates all metrics and takes actions in a single interactive session
|
||||||
Health-assessment now includes P2 **Woodpecker agent unhealthy** classification
|
|
||||||
(container not running, ≥3 gRPC errors/20m, or ≥3 fast-failure pipelines/15m);
|
|
||||||
decide-actions documents the pre-session auto-recovery path
|
|
||||||
- `$OPS_REPO_ROOT/knowledge/*.md` — Domain-specific remediation guides (memory,
|
- `$OPS_REPO_ROOT/knowledge/*.md` — Domain-specific remediation guides (memory,
|
||||||
disk, CI, git, dev-agent, review-agent, forge)
|
disk, CI, git, dev-agent, review-agent, forge)
|
||||||
|
|
||||||
|
|
@ -45,7 +39,6 @@ P3 (degraded PRs, circular deps, stale deps), P4 (housekeeping).
|
||||||
**Environment variables consumed**:
|
**Environment variables consumed**:
|
||||||
- `FORGE_TOKEN`, `FORGE_SUPERVISOR_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`, `OPS_REPO_ROOT`
|
- `FORGE_TOKEN`, `FORGE_SUPERVISOR_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`, `OPS_REPO_ROOT`
|
||||||
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by supervisor-run.sh)
|
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by supervisor-run.sh)
|
||||||
- `SUPERVISOR_INTERVAL` — polling interval in seconds for agents container (default 1200 = 20 min)
|
|
||||||
- `WOODPECKER_TOKEN`, `WOODPECKER_SERVER`, `WOODPECKER_DB_PASSWORD`, `WOODPECKER_DB_USER`, `WOODPECKER_DB_HOST`, `WOODPECKER_DB_NAME` — CI database queries
|
- `WOODPECKER_TOKEN`, `WOODPECKER_SERVER`, `WOODPECKER_DB_PASSWORD`, `WOODPECKER_DB_USER`, `WOODPECKER_DB_HOST`, `WOODPECKER_DB_NAME` — CI database queries
|
||||||
|
|
||||||
**Degraded mode (Issue #544)**: When `OPS_REPO_ROOT` is not set or the directory doesn't exist, the supervisor runs in degraded mode:
|
**Degraded mode (Issue #544)**: When `OPS_REPO_ROOT` is not set or the directory doesn't exist, the supervisor runs in degraded mode:
|
||||||
|
|
@ -55,6 +48,5 @@ P3 (degraded PRs, circular deps, stale deps), P4 (housekeeping).
|
||||||
- Logs a WARNING message at startup indicating degraded mode
|
- Logs a WARNING message at startup indicating degraded mode
|
||||||
|
|
||||||
**Lifecycle**: supervisor-run.sh (invoked by polling loop every 20min, `check_active supervisor`)
|
**Lifecycle**: supervisor-run.sh (invoked by polling loop every 20min, `check_active supervisor`)
|
||||||
→ lock + memory guard → run preflight.sh (collect metrics) → **WP agent health recovery**
|
→ lock + memory guard → run preflight.sh (collect metrics) → load formula + context → run
|
||||||
(if unhealthy: restart container + recover ci_exhausted issues) → load formula + context → run
|
|
||||||
claude -p via agent-sdk.sh → Claude assesses health, auto-fixes, writes journal → `PHASE:done`.
|
claude -p via agent-sdk.sh → Claude assesses health, auto-fixes, writes journal → `PHASE:done`.
|
||||||
|
|
|
||||||
|
|
@ -224,108 +224,3 @@ for _vf in "${_va_root}"/*.md; do
|
||||||
done
|
done
|
||||||
[ "$_found_vault" = false ] && echo " None"
|
[ "$_found_vault" = false ] && echo " None"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
# ── Woodpecker Agent Health ────────────────────────────────────────────────
|
|
||||||
|
|
||||||
echo "## Woodpecker Agent Health"
|
|
||||||
|
|
||||||
# Check WP agent container health status
|
|
||||||
_wp_container="disinto-woodpecker-agent"
|
|
||||||
_wp_health_status="unknown"
|
|
||||||
_wp_health_start=""
|
|
||||||
|
|
||||||
if command -v docker &>/dev/null; then
|
|
||||||
# Get health status via docker inspect
|
|
||||||
_wp_health_status=$(docker inspect "$_wp_container" --format '{{.State.Health.Status}}' 2>/dev/null || echo "not_found")
|
|
||||||
if [ "$_wp_health_status" = "not_found" ] || [ -z "$_wp_health_status" ]; then
|
|
||||||
# Container may not exist or not have health check configured
|
|
||||||
_wp_health_status=$(docker inspect "$_wp_container" --format '{{.State.Status}}' 2>/dev/null || echo "not_found")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Get container start time for age calculation
|
|
||||||
_wp_start_time=$(docker inspect "$_wp_container" --format '{{.State.StartedAt}}' 2>/dev/null || echo "")
|
|
||||||
if [ -n "$_wp_start_time" ] && [ "$_wp_start_time" != "0001-01-01T00:00:00Z" ]; then
|
|
||||||
_wp_health_start=$(date -d "$_wp_start_time" '+%Y-%m-%d %H:%M UTC' 2>/dev/null || echo "$_wp_start_time")
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Container: $_wp_container"
|
|
||||||
echo "Status: $_wp_health_status"
|
|
||||||
[ -n "$_wp_health_start" ] && echo "Started: $_wp_health_start"
|
|
||||||
|
|
||||||
# Check for gRPC errors in agent logs (last 20 minutes)
|
|
||||||
_wp_grpc_errors=0
|
|
||||||
if [ "$_wp_health_status" != "not_found" ] && [ -n "$_wp_health_status" ]; then
|
|
||||||
_wp_grpc_errors=$(docker logs --since 20m "$_wp_container" 2>&1 | grep -c 'grpc error' || echo "0")
|
|
||||||
echo "gRPC errors (last 20m): $_wp_grpc_errors"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Fast-failure heuristic: check for pipelines completing in <60s
|
|
||||||
_wp_fast_failures=0
|
|
||||||
_wp_recent_failures=""
|
|
||||||
if [ -n "${WOODPECKER_REPO_ID:-}" ] && [ "${WOODPECKER_REPO_ID}" != "0" ]; then
|
|
||||||
_now=$(date +%s)
|
|
||||||
_pipelines=$(woodpecker_api "/repos/${WOODPECKER_REPO_ID}/pipelines?perPage=100" 2>/dev/null || echo '[]')
|
|
||||||
|
|
||||||
# Count failures with duration < 60s in last 15 minutes
|
|
||||||
_wp_fast_failures=$(echo "$_pipelines" | jq --argjson now "$_now" '
|
|
||||||
[.[] | select(.status == "failure") | select((.finished - .started) < 60) | select(($now - .finished) < 900)]
|
|
||||||
| length' 2>/dev/null || echo "0")
|
|
||||||
|
|
||||||
if [ "$_wp_fast_failures" -gt 0 ]; then
|
|
||||||
_wp_recent_failures=$(echo "$_pipelines" | jq -r --argjson now "$_now" '
|
|
||||||
[.[] | select(.status == "failure") | select((.finished - .started) < 60) | select(($now - .finished) < 900)]
|
|
||||||
| .[] | "\(.number)\t\((.finished - .started))s"' 2>/dev/null || echo "")
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Fast-fail pipelines (<60s, last 15m): $_wp_fast_failures"
|
|
||||||
if [ -n "$_wp_recent_failures" ] && [ "$_wp_fast_failures" -gt 0 ]; then
|
|
||||||
echo "Recent failures:"
|
|
||||||
echo "$_wp_recent_failures" | while IFS=$'\t' read -r _num _dur; do
|
|
||||||
echo " #$_num: ${_dur}"
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Determine overall WP agent health
|
|
||||||
_wp_agent_healthy=true
|
|
||||||
_wp_health_reason=""
|
|
||||||
|
|
||||||
if [ "$_wp_health_status" = "not_found" ]; then
|
|
||||||
_wp_agent_healthy=false
|
|
||||||
_wp_health_reason="Container not running"
|
|
||||||
elif [ "$_wp_health_status" = "unhealthy" ]; then
|
|
||||||
_wp_agent_healthy=false
|
|
||||||
_wp_health_reason="Container health check failed"
|
|
||||||
elif [ "$_wp_health_status" != "running" ]; then
|
|
||||||
_wp_agent_healthy=false
|
|
||||||
_wp_health_reason="Container not in running state: $_wp_health_status"
|
|
||||||
elif [ "$_wp_grpc_errors" -ge 3 ]; then
|
|
||||||
_wp_agent_healthy=false
|
|
||||||
_wp_health_reason="High gRPC error count (>=3 in 20m)"
|
|
||||||
elif [ "$_wp_fast_failures" -ge 3 ]; then
|
|
||||||
_wp_agent_healthy=false
|
|
||||||
_wp_health_reason="High fast-failure count (>=3 in 15m)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "WP Agent Health: $([ "$_wp_agent_healthy" = true ] && echo "healthy" || echo "UNHEALTHY")"
|
|
||||||
[ -n "$_wp_health_reason" ] && echo "Reason: $_wp_health_reason"
|
|
||||||
echo ""
|
|
||||||
|
|
||||||
# ── WP Agent Health History (for idempotency) ──────────────────────────────
|
|
||||||
|
|
||||||
echo "## WP Agent Health History"
|
|
||||||
# Track last restart timestamp to avoid duplicate restarts in same run
|
|
||||||
_WP_HEALTH_HISTORY_FILE="${DISINTO_LOG_DIR}/supervisor/wp-agent-health.history"
|
|
||||||
_wp_last_restart="never"
|
|
||||||
_wp_last_restart_ts=0
|
|
||||||
|
|
||||||
if [ -f "$_WP_HEALTH_HISTORY_FILE" ]; then
|
|
||||||
_wp_last_restart_ts=$(grep -m1 '^LAST_RESTART_TS=' "$_WP_HEALTH_HISTORY_FILE" 2>/dev/null | cut -d= -f2 || echo "0")
|
|
||||||
if [ -n "$_wp_last_restart_ts" ] && [ "$_wp_last_restart_ts" -gt 0 ] 2>/dev/null; then
|
|
||||||
_wp_last_restart=$(date -d "@$_wp_last_restart_ts" '+%Y-%m-%d %H:%M UTC' 2>/dev/null || echo "$_wp_last_restart_ts")
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
echo "Last restart: $_wp_last_restart"
|
|
||||||
echo ""
|
|
||||||
|
|
|
||||||
|
|
@ -25,11 +25,10 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
|
||||||
# env.sh from nested shells / claude -p tools (#762, #747)
|
|
||||||
export FORGE_TOKEN_OVERRIDE="${FORGE_SUPERVISOR_TOKEN:-}"
|
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
|
# Use supervisor-bot's own Forgejo identity (#747)
|
||||||
|
FORGE_TOKEN="${FORGE_SUPERVISOR_TOKEN:-${FORGE_TOKEN}}"
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
@ -47,9 +46,6 @@ SID_FILE="/tmp/supervisor-session-${PROJECT_NAME}.sid"
|
||||||
SCRATCH_FILE="/tmp/supervisor-${PROJECT_NAME}-scratch.md"
|
SCRATCH_FILE="/tmp/supervisor-${PROJECT_NAME}-scratch.md"
|
||||||
WORKTREE="/tmp/${PROJECT_NAME}-supervisor-run"
|
WORKTREE="/tmp/${PROJECT_NAME}-supervisor-run"
|
||||||
|
|
||||||
# WP agent container name (configurable via env var)
|
|
||||||
export WP_AGENT_CONTAINER_NAME="${WP_AGENT_CONTAINER_NAME:-disinto-woodpecker-agent}"
|
|
||||||
|
|
||||||
# Override LOG_AGENT for consistent agent identification
|
# Override LOG_AGENT for consistent agent identification
|
||||||
# shellcheck disable=SC2034 # consumed by agent-sdk.sh and env.sh log()
|
# shellcheck disable=SC2034 # consumed by agent-sdk.sh and env.sh log()
|
||||||
LOG_AGENT="supervisor"
|
LOG_AGENT="supervisor"
|
||||||
|
|
@ -169,160 +165,6 @@ ${FORMULA_CONTENT}
|
||||||
${SCRATCH_INSTRUCTION}
|
${SCRATCH_INSTRUCTION}
|
||||||
${PROMPT_FOOTER}"
|
${PROMPT_FOOTER}"
|
||||||
|
|
||||||
# ── WP Agent Health Recovery ──────────────────────────────────────────────
|
|
||||||
# Check preflight output for WP agent health issues and trigger recovery if needed
|
|
||||||
_WP_HEALTH_CHECK_FILE="${DISINTO_LOG_DIR}/supervisor/wp-agent-health-check.md"
|
|
||||||
echo "$PREFLIGHT_OUTPUT" > "$_WP_HEALTH_CHECK_FILE"
|
|
||||||
|
|
||||||
# Extract WP agent health status from preflight output
|
|
||||||
# Note: match exact "healthy" not "UNHEALTHY" (substring issue)
|
|
||||||
_wp_agent_healthy=$(grep "^WP Agent Health: healthy$" "$_WP_HEALTH_CHECK_FILE" 2>/dev/null && echo "true" || echo "false")
|
|
||||||
_wp_health_reason=$(grep "^Reason:" "$_WP_HEALTH_CHECK_FILE" 2>/dev/null | sed 's/^Reason: //' || echo "")
|
|
||||||
|
|
||||||
if [ "$_wp_agent_healthy" = "false" ] && [ -n "$_wp_health_reason" ]; then
|
|
||||||
log "WP agent detected as UNHEALTHY: $_wp_health_reason"
|
|
||||||
|
|
||||||
# Check for idempotency guard - have we already restarted in this run?
|
|
||||||
_WP_HEALTH_HISTORY_FILE="${DISINTO_LOG_DIR}/supervisor/wp-agent-health.history"
|
|
||||||
_wp_last_restart_ts=0
|
|
||||||
_wp_last_restart="never"
|
|
||||||
if [ -f "$_WP_HEALTH_HISTORY_FILE" ]; then
|
|
||||||
_wp_last_restart_ts=$(grep -m1 '^LAST_RESTART_TS=' "$_WP_HEALTH_HISTORY_FILE" 2>/dev/null | cut -d= -f2 || echo "0")
|
|
||||||
if [ -n "$_wp_last_restart_ts" ] && [ "$_wp_last_restart_ts" != "0" ] 2>/dev/null; then
|
|
||||||
_wp_last_restart=$(date -d "@$_wp_last_restart_ts" '+%Y-%m-%d %H:%M UTC' 2>/dev/null || echo "$_wp_last_restart_ts")
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
_current_ts=$(date +%s)
|
|
||||||
_restart_threshold=300 # 5 minutes between restarts
|
|
||||||
|
|
||||||
if [ -z "$_wp_last_restart_ts" ] || [ "$_wp_last_restart_ts" = "0" ] || [ $((_current_ts - _wp_last_restart_ts)) -gt $_restart_threshold ]; then
|
|
||||||
log "Triggering WP agent restart..."
|
|
||||||
|
|
||||||
# Restart the WP agent container
|
|
||||||
if docker restart "$WP_AGENT_CONTAINER_NAME" >/dev/null 2>&1; then
|
|
||||||
_restart_time=$(date -u '+%Y-%m-%d %H:%M UTC')
|
|
||||||
log "Successfully restarted WP agent container: $WP_AGENT_CONTAINER_NAME"
|
|
||||||
|
|
||||||
# Update history file
|
|
||||||
echo "LAST_RESTART_TS=$_current_ts" > "$_WP_HEALTH_HISTORY_FILE"
|
|
||||||
echo "LAST_RESTART_TIME=$_restart_time" >> "$_WP_HEALTH_HISTORY_FILE"
|
|
||||||
|
|
||||||
# Post recovery notice to journal
|
|
||||||
_journal_file="${OPS_JOURNAL_ROOT}/$(date -u +%Y-%m-%d).md"
|
|
||||||
if [ -f "$_journal_file" ]; then
|
|
||||||
{
|
|
||||||
echo ""
|
|
||||||
echo "### WP Agent Recovery - $_restart_time"
|
|
||||||
echo ""
|
|
||||||
echo "WP agent was unhealthy: $_wp_health_reason"
|
|
||||||
echo "Container restarted automatically."
|
|
||||||
} >> "$_journal_file"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Scan for issues updated in the last 30 minutes with blocked: ci_exhausted label
|
|
||||||
log "Scanning for ci_exhausted issues updated in last 30 minutes..."
|
|
||||||
_now_epoch=$(date +%s)
|
|
||||||
_thirty_min_ago=$(( _now_epoch - 1800 ))
|
|
||||||
|
|
||||||
# Fetch open issues with blocked label
|
|
||||||
_blocked_issues=$(forge_api GET "/issues?state=open&labels=blocked&type=issues&limit=100" 2>/dev/null || echo "[]")
|
|
||||||
_blocked_count=$(echo "$_blocked_issues" | jq 'length' 2>/dev/null || echo "0")
|
|
||||||
|
|
||||||
_issues_processed=0
|
|
||||||
_issues_recovered=0
|
|
||||||
|
|
||||||
if [ "$_blocked_count" -gt 0 ]; then
|
|
||||||
# Process each blocked issue
|
|
||||||
echo "$_blocked_issues" | jq -c '.[]' 2>/dev/null | while IFS= read -r issue_json; do
|
|
||||||
[ -z "$issue_json" ] && continue
|
|
||||||
|
|
||||||
_issue_num=$(echo "$issue_json" | jq -r '.number // empty')
|
|
||||||
_issue_updated=$(echo "$issue_json" | jq -r '.updated_at // empty')
|
|
||||||
_issue_labels=$(echo "$issue_json" | jq -r '.labels | map(.name) | join(",")' 2>/dev/null || echo "")
|
|
||||||
|
|
||||||
# Check if issue has ci_exhausted label
|
|
||||||
if ! echo "$_issue_labels" | grep -q "ci_exhausted"; then
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Parse updated_at timestamp
|
|
||||||
_issue_updated_epoch=$(date -d "$_issue_updated" +%s 2>/dev/null || echo "0")
|
|
||||||
_time_since_update=$(( _now_epoch - _issue_updated_epoch ))
|
|
||||||
|
|
||||||
# Check if updated in last 30 minutes
|
|
||||||
if [ "$_time_since_update" -lt 1800 ] && [ "$_time_since_update" -ge 0 ]; then
|
|
||||||
_issues_processed=$(( _issues_processed + 1 ))
|
|
||||||
|
|
||||||
# Check for idempotency guard - already swept by supervisor?
|
|
||||||
_issue_body=$(echo "$issue_json" | jq -r '.body // ""' 2>/dev/null || echo "")
|
|
||||||
if echo "$_issue_body" | grep -q "<!-- supervisor-swept -->"; then
|
|
||||||
log "Issue #$_issue_num already swept by supervisor, skipping"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
log "Processing ci_exhausted issue #$_issue_num (updated $_time_since_update seconds ago)"
|
|
||||||
|
|
||||||
# Get issue assignee
|
|
||||||
_issue_assignee=$(echo "$issue_json" | jq -r '.assignee.login // empty' 2>/dev/null || echo "")
|
|
||||||
|
|
||||||
# Unassign the issue
|
|
||||||
if [ -n "$_issue_assignee" ]; then
|
|
||||||
log "Unassigning issue #$_issue_num from $_issue_assignee"
|
|
||||||
curl -sf -X PATCH \
|
|
||||||
-H "Authorization: token ${FORGE_SUPERVISOR_TOKEN:-$FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_API}/issues/$_issue_num" \
|
|
||||||
-d '{"assignees":[]}' >/dev/null 2>&1 || true
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Remove blocked label
|
|
||||||
_blocked_label_id=$(forge_api GET "/labels" 2>/dev/null | jq -r '.[] | select(.name == "blocked") | .id' 2>/dev/null || echo "")
|
|
||||||
if [ -n "$_blocked_label_id" ]; then
|
|
||||||
log "Removing blocked label from issue #$_issue_num"
|
|
||||||
curl -sf -X DELETE \
|
|
||||||
-H "Authorization: token ${FORGE_SUPERVISOR_TOKEN:-$FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API}/issues/$_issue_num/labels/$_blocked_label_id" >/dev/null 2>&1 || true
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Add comment about infra-flake recovery
|
|
||||||
_recovery_comment=$(cat <<EOF
|
|
||||||
<!-- supervisor-swept -->
|
|
||||||
|
|
||||||
**Automated Recovery — $(date -u '+%Y-%m-%d %H:%M UTC')**
|
|
||||||
|
|
||||||
CI agent was unhealthy between $_restart_time and now. The prior retry budget may have been spent on infra flake, not real failures.
|
|
||||||
|
|
||||||
**Recovery Actions:**
|
|
||||||
- Unassigned from pool and returned for fresh attempt
|
|
||||||
- CI agent container restarted
|
|
||||||
- Related pipelines will be retriggered automatically
|
|
||||||
|
|
||||||
**Next Steps:**
|
|
||||||
Please re-attempt this issue. The CI environment has been refreshed.
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
curl -sf -X POST \
|
|
||||||
-H "Authorization: token ${FORGE_SUPERVISOR_TOKEN:-$FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_API}/issues/$_issue_num/comments" \
|
|
||||||
-d "$(jq -n --arg body "$_recovery_comment" '{body: $body}')" >/dev/null 2>&1 || true
|
|
||||||
|
|
||||||
log "Recovered issue #$_issue_num - returned to pool"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
log "WP agent restart and issue recovery complete"
|
|
||||||
else
|
|
||||||
log "ERROR: Failed to restart WP agent container"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
log "WP agent restart already performed in this run (since $_wp_last_restart), skipping"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ── Run agent ─────────────────────────────────────────────────────────────
|
# ── Run agent ─────────────────────────────────────────────────────────────
|
||||||
agent_run --worktree "$WORKTREE" "$PROMPT"
|
agent_run --worktree "$WORKTREE" "$PROMPT"
|
||||||
log "agent_run complete"
|
log "agent_run complete"
|
||||||
|
|
|
||||||
|
|
@ -1,428 +0,0 @@
|
||||||
#!/usr/bin/env bats
|
|
||||||
# =============================================================================
|
|
||||||
# tests/disinto-init-nomad.bats — Regression guard for `disinto init`
|
|
||||||
# backend dispatch (S0.5, issue #825).
|
|
||||||
#
|
|
||||||
# Exercises the three CLI paths the Nomad+Vault migration cares about:
|
|
||||||
# 1. --backend=nomad --dry-run → cluster-up step list
|
|
||||||
# 2. --backend=nomad --empty --dry-run → same, with "--empty" banner
|
|
||||||
# 3. --backend=docker --dry-run → docker path unaffected
|
|
||||||
#
|
|
||||||
# A throw-away `placeholder/repo` slug satisfies the CLI's positional-arg
|
|
||||||
# requirement (the nomad dispatcher never touches it). --dry-run on both
|
|
||||||
# backends short-circuits before any network/filesystem mutation, so the
|
|
||||||
# suite is hermetic — no Forgejo, no sudo, no real cluster.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
setup_file() {
|
|
||||||
export DISINTO_ROOT
|
|
||||||
DISINTO_ROOT="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." && pwd)"
|
|
||||||
export DISINTO_BIN="${DISINTO_ROOT}/bin/disinto"
|
|
||||||
[ -x "$DISINTO_BIN" ] || {
|
|
||||||
echo "disinto binary not executable: $DISINTO_BIN" >&2
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── --backend=nomad --dry-run ────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --dry-run exits 0 and prints the step list" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# Dispatcher banner (cluster-up mode, no --empty).
|
|
||||||
[[ "$output" == *"nomad backend: default (cluster-up; jobs deferred to Step 1)"* ]]
|
|
||||||
|
|
||||||
# All nine cluster-up dry-run steps, in order.
|
|
||||||
[[ "$output" == *"[dry-run] Step 1/9: install nomad + vault binaries + docker daemon"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 2/9: write + enable nomad.service (NOT started)"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 3/9: write + enable vault.service + vault.hcl (NOT started)"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 4/9: create host-volume dirs under /srv/disinto/"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 5/9: install /etc/nomad.d/server.hcl + client.hcl from repo"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 6/9: first-run vault init + persist unseal.key + root.token"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 7/9: systemctl start vault + poll until unsealed"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 8/9: systemctl start nomad + poll until ≥1 node ready"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 9/9: write /etc/profile.d/disinto-nomad.sh"* ]]
|
|
||||||
|
|
||||||
[[ "$output" == *"Dry run complete — no changes made."* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── --backend=nomad --empty --dry-run ────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --empty --dry-run prints the --empty banner + step list" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# --empty changes the dispatcher banner but not the step list — Step 1
|
|
||||||
# of the migration will branch on $empty to gate job deployment; today
|
|
||||||
# both modes invoke the same cluster-up dry-run.
|
|
||||||
[[ "$output" == *"nomad backend: --empty (cluster-up only, no jobs)"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 1/9: install nomad + vault binaries + docker daemon"* ]]
|
|
||||||
[[ "$output" == *"Dry run complete — no changes made."* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── --backend=docker (regression guard) ──────────────────────────────────────
|
|
||||||
|
|
||||||
@test "disinto init --backend=docker does NOT dispatch to the nomad path" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=docker --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# Negative assertion: the nomad dispatcher banners must be absent.
|
|
||||||
[[ "$output" != *"nomad backend:"* ]]
|
|
||||||
[[ "$output" != *"[dry-run] Step 1/9: install nomad + vault binaries + docker daemon"* ]]
|
|
||||||
|
|
||||||
# Positive assertion: docker-path output still appears — the existing
|
|
||||||
# docker dry-run printed "=== disinto init ===" before listing the
|
|
||||||
# intended forge/compose actions.
|
|
||||||
[[ "$output" == *"=== disinto init ==="* ]]
|
|
||||||
[[ "$output" == *"── Dry-run: intended actions ────"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Flag syntax: --flag=value vs --flag value ────────────────────────────────
|
|
||||||
|
|
||||||
# Both forms must work. The bin/disinto flag loop has separate cases for
|
|
||||||
# `--backend value` and `--backend=value`; a regression in either would
|
|
||||||
# silently route to the docker default, which is the worst failure mode
|
|
||||||
# for a mid-migration dispatcher ("loud-failing stub" lesson from S0.4).
|
|
||||||
@test "disinto init --backend nomad (space-separated) dispatches to nomad" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend nomad --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"nomad backend: default"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 1/9: install nomad + vault binaries + docker daemon"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Flag validation ──────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "--backend=bogus is rejected with a clear error" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=bogus --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"invalid --backend value"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "--empty without --backend=nomad is rejected" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=docker --empty --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--empty is only valid with --backend=nomad"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Positional vs flag-first invocation (#835) ───────────────────────────────
|
|
||||||
#
|
|
||||||
# Before the #835 fix, disinto_init eagerly consumed $1 as repo_url *before*
|
|
||||||
# argparse ran. That swallowed `--backend=nomad` as a repo_url and then
|
|
||||||
# complained that `--empty` required a nomad backend — the nonsense error
|
|
||||||
# flagged during S0.1 end-to-end verification. The cases below pin the CLI
|
|
||||||
# to the post-fix contract: the nomad path accepts flag-first invocation,
|
|
||||||
# the docker path still errors helpfully on a missing repo_url.
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --empty --dry-run (no positional) dispatches to nomad" {
|
|
||||||
run "$DISINTO_BIN" init --backend=nomad --empty --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"nomad backend: --empty (cluster-up only, no jobs)"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 1/9: install nomad + vault binaries + docker daemon"* ]]
|
|
||||||
# The bug symptom must be absent — backend was misdetected as docker
|
|
||||||
# when --backend=nomad got swallowed as repo_url.
|
|
||||||
[[ "$output" != *"--empty is only valid with --backend=nomad"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend nomad --dry-run (space-separated, no positional) dispatches to nomad" {
|
|
||||||
run "$DISINTO_BIN" init --backend nomad --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"nomad backend: default"* ]]
|
|
||||||
[[ "$output" == *"[dry-run] Step 1/9: install nomad + vault binaries + docker daemon"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init (no args) still errors with 'repo URL required'" {
|
|
||||||
run "$DISINTO_BIN" init
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"repo URL required"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=docker (no positional) errors with 'repo URL required', not 'Unknown option'" {
|
|
||||||
run "$DISINTO_BIN" init --backend=docker
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"repo URL required"* ]]
|
|
||||||
[[ "$output" != *"Unknown option"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── --with flag tests ─────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with forgejo --dry-run prints deploy plan" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"services to deploy: forgejo"* ]]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"* ]]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job run -detach"* ]]
|
|
||||||
[[ "$output" == *"[deploy] dry-run complete"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# S2.6 / #928 — every --with <svc> that ships tools/vault-seed-<svc>.sh
|
|
||||||
# must auto-invoke the seeder before deploy.sh runs. forgejo is the
|
|
||||||
# only service with a seeder today, so the dry-run plan must include
|
|
||||||
# its seed line when --with forgejo is set. The seed block must also
|
|
||||||
# appear BEFORE the deploy block (seeded secrets must exist before
|
|
||||||
# nomad reads the template stanza) — pinned here by scanning output
|
|
||||||
# order. Services without a seeder (e.g. unknown hypothetical future
|
|
||||||
# ones) are silently skipped by the loop convention.
|
|
||||||
@test "disinto init --backend=nomad --with forgejo --dry-run prints seed plan before deploy plan" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"Vault seed dry-run"* ]]
|
|
||||||
[[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]]
|
|
||||||
# Order: seed header must appear before deploy header.
|
|
||||||
local seed_line deploy_line
|
|
||||||
seed_line=$(echo "$output" | grep -n "Vault seed dry-run" | head -1 | cut -d: -f1)
|
|
||||||
deploy_line=$(echo "$output" | grep -n "Deploy services dry-run" | head -1 | cut -d: -f1)
|
|
||||||
[ -n "$seed_line" ]
|
|
||||||
[ -n "$deploy_line" ]
|
|
||||||
[ "$seed_line" -lt "$deploy_line" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# Regression guard (PR #929 review): `sudo -n VAR=val -- cmd` is subject
|
|
||||||
# to sudoers env_reset policy and silently drops VAULT_ADDR unless it's
|
|
||||||
# in env_keep (it isn't in default configs). vault-seed-forgejo.sh
|
|
||||||
# requires VAULT_ADDR and dies at its own precondition check if unset,
|
|
||||||
# so the non-root branch MUST invoke `sudo -n -- env VAR=val cmd` so
|
|
||||||
# that `env` sets the variable in the child process regardless of
|
|
||||||
# sudoers policy. This grep-level guard catches a revert to the unsafe
|
|
||||||
# form that silently broke non-root seed runs on a fresh LXC.
|
|
||||||
@test "seed loop invokes sudo via 'env VAR=val' (bypasses sudoers env_reset)" {
|
|
||||||
run grep -F 'sudo -n -- env "VAULT_ADDR=' "$DISINTO_BIN"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# Negative: no bare `sudo -n "VAR=val" --` form anywhere in the file.
|
|
||||||
run grep -F 'sudo -n "VAULT_ADDR=' "$DISINTO_BIN"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with forgejo,forgejo --dry-run handles comma-separated services" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,forgejo --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"services to deploy: forgejo,forgejo"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=docker --with forgejo errors with '--with requires --backend=nomad'" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=docker --with forgejo
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--with requires --backend=nomad"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --empty --with forgejo errors with mutually exclusive" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --with forgejo
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--empty and --with are mutually exclusive"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with unknown-service errors with unknown service" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with unknown-service --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"unknown service"* ]]
|
|
||||||
[[ "$output" == *"known: forgejo, woodpecker-server, woodpecker-agent, agents, staging, chat, edge"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# S3.4: woodpecker auto-expansion and forgejo auto-inclusion
|
|
||||||
@test "disinto init --backend=nomad --with woodpecker auto-expands to server+agent" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent"* ]]
|
|
||||||
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with woodpecker auto-includes forgejo with note" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"Note: --with woodpecker implies --with forgejo"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with forgejo,woodpecker expands woodpecker" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,woodpecker --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# Order follows input: forgejo first, then woodpecker expanded
|
|
||||||
[[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent"* ]]
|
|
||||||
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with woodpecker seeds both forgejo and woodpecker" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]]
|
|
||||||
[[ "$output" == *"tools/vault-seed-woodpecker.sh --dry-run"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with forgejo,woodpecker deploys all three services" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with forgejo,woodpecker --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"forgejo.hcl"* ]]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-server.hcl"* ]]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-agent.hcl"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with forgejo (flag=value syntax) works" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with=forgejo --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"services to deploy: forgejo"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with forgejo --empty --dry-run rejects in any order" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --with forgejo --backend=nomad --empty --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--empty and --with are mutually exclusive"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── --import-env / --import-sops / --age-key (S2.5, #883) ────────────────────
|
|
||||||
#
|
|
||||||
# Step 2.5 wires Vault policies + JWT auth + optional KV import into
|
|
||||||
# `disinto init --backend=nomad`. The tests below exercise the flag
|
|
||||||
# grammar (who-requires-whom + who-requires-backend=nomad) and the
|
|
||||||
# dry-run plan shape (each --import-* flag prints its own path line,
|
|
||||||
# independently). A prior attempt at this issue regressed the "print
|
|
||||||
# every set flag" invariant by using if/elif — covered by the
|
|
||||||
# "--import-env --import-sops --age-key" case.
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --import-env only is accepted" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --import-env /tmp/.env --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"--import-env"* ]]
|
|
||||||
[[ "$output" == *"env file: /tmp/.env"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --import-sops without --age-key errors" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --import-sops /tmp/.env.vault.enc --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--import-sops requires --age-key"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --age-key without --import-sops errors" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --age-key /tmp/keys.txt --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--age-key requires --import-sops"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=docker --import-env errors with backend requirement" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=docker --import-env /tmp/.env
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--import-env, --import-sops, and --age-key require --backend=nomad"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --import-sops --age-key --dry-run shows import plan" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --import-sops /tmp/.env.vault.enc --age-key /tmp/keys.txt --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"Vault import dry-run"* ]]
|
|
||||||
[[ "$output" == *"--import-sops"* ]]
|
|
||||||
[[ "$output" == *"--age-key"* ]]
|
|
||||||
[[ "$output" == *"sops file: /tmp/.env.vault.enc"* ]]
|
|
||||||
[[ "$output" == *"age key: /tmp/keys.txt"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# When all three flags are set, each one must print its own path line —
|
|
||||||
# if/elif regressed this to "only one printed" in a prior attempt (#883).
|
|
||||||
@test "disinto init --backend=nomad --import-env --import-sops --age-key --dry-run shows full import plan" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --import-env /tmp/.env --import-sops /tmp/.env.vault.enc --age-key /tmp/keys.txt --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"Vault import dry-run"* ]]
|
|
||||||
[[ "$output" == *"env file: /tmp/.env"* ]]
|
|
||||||
[[ "$output" == *"sops file: /tmp/.env.vault.enc"* ]]
|
|
||||||
[[ "$output" == *"age key: /tmp/keys.txt"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad without import flags shows skip message" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"no --import-env/--import-sops"* ]]
|
|
||||||
[[ "$output" == *"skipping"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --import-env --import-sops --age-key --with forgejo --dry-run shows all plans" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --import-env /tmp/.env --import-sops /tmp/.env.vault.enc --age-key /tmp/keys.txt --with forgejo --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"Vault import dry-run"* ]]
|
|
||||||
[[ "$output" == *"Vault policies dry-run"* ]]
|
|
||||||
[[ "$output" == *"Vault auth dry-run"* ]]
|
|
||||||
[[ "$output" == *"Deploy services dry-run"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --dry-run prints policies + auth plan even without --import-*" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# Policies + auth run on every nomad path (idempotent), so the dry-run
|
|
||||||
# plan always lists them — regardless of whether --import-* is set.
|
|
||||||
[[ "$output" == *"Vault policies dry-run"* ]]
|
|
||||||
[[ "$output" == *"Vault auth dry-run"* ]]
|
|
||||||
[[ "$output" != *"Vault import dry-run"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# --import-env=PATH (=-form) must work alongside --import-env PATH.
|
|
||||||
@test "disinto init --backend=nomad --import-env=PATH (equals form) works" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --import-env=/tmp/.env --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"env file: /tmp/.env"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# --empty short-circuits after cluster-up: no policies, no auth, no
|
|
||||||
# import, no deploy. The dry-run plan must match that — cluster-up plan
|
|
||||||
# appears, but none of the S2.x section banners do.
|
|
||||||
@test "disinto init --backend=nomad --empty --dry-run skips policies/auth/import sections" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# Cluster-up still runs (it's what --empty brings up).
|
|
||||||
[[ "$output" == *"Cluster-up dry-run"* ]]
|
|
||||||
# Policies + auth + import must NOT appear under --empty.
|
|
||||||
[[ "$output" != *"Vault policies dry-run"* ]]
|
|
||||||
[[ "$output" != *"Vault auth dry-run"* ]]
|
|
||||||
[[ "$output" != *"Vault import dry-run"* ]]
|
|
||||||
[[ "$output" != *"no --import-env/--import-sops"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# --empty + any --import-* flag silently does nothing (import is skipped),
|
|
||||||
# so the CLI rejects the combination up front rather than letting it
|
|
||||||
# look like the import "succeeded".
|
|
||||||
@test "disinto init --backend=nomad --empty --import-env errors" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --import-env /tmp/.env --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--empty and --import-env/--import-sops/--age-key are mutually exclusive"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --empty --import-sops --age-key errors" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --import-sops /tmp/.env.vault.enc --age-key /tmp/keys.txt --dry-run
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"--empty and --import-env/--import-sops/--age-key are mutually exclusive"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# S4.2: agents service auto-expansion and dependencies
|
|
||||||
@test "disinto init --backend=nomad --with agents auto-includes forgejo and woodpecker" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"services to deploy: forgejo,agents,woodpecker-server,woodpecker-agent"* ]]
|
|
||||||
[[ "$output" == *"Note: --with agents implies --with forgejo"* ]]
|
|
||||||
[[ "$output" == *"Note: --with agents implies --with woodpecker"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with agents deploys in correct order" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with agents seeds agents service" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"tools/vault-seed-forgejo.sh --dry-run"* ]]
|
|
||||||
[[ "$output" == *"tools/vault-seed-woodpecker.sh --dry-run"* ]]
|
|
||||||
[[ "$output" == *"tools/vault-seed-agents.sh --dry-run"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with agents deploys all four services" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with agents --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"forgejo.hcl"* ]]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-server.hcl"* ]]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"woodpecker-agent.hcl"* ]]
|
|
||||||
[[ "$output" == *"[deploy] [dry-run] nomad job validate"*"agents.hcl"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "disinto init --backend=nomad --with woodpecker,agents expands correctly" {
|
|
||||||
run "$DISINTO_BIN" init placeholder/repo --backend=nomad --with woodpecker,agents --dry-run
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# woodpecker expands to server+agent, agents is already explicit
|
|
||||||
# forgejo is auto-included by agents
|
|
||||||
[[ "$output" == *"services to deploy: forgejo,woodpecker-server,woodpecker-agent,agents"* ]]
|
|
||||||
[[ "$output" == *"deployment order: forgejo woodpecker-server woodpecker-agent agents"* ]]
|
|
||||||
}
|
|
||||||
20
tests/fixtures/.env.vault.enc
vendored
20
tests/fixtures/.env.vault.enc
vendored
|
|
@ -1,20 +0,0 @@
|
||||||
{
|
|
||||||
"data": "ENC[AES256_GCM,data:SsLdIiZDVkkV1bbKeHQ8A1K/4vgXQFJF8y4J87GGwsGa13lNnPoqRaCmPAtuQr3hR5JNqARUhFp8aEusyzwi/lZLU2Reo32YjE26ObVOHf47EGmmHM/tEgh6u0fa1AmFtuqJVQzhG2eZhJmZJFgdRH36+bhdBwI1mkORmsRNtBPHHjtQJDbsgN47maDhuP4B7WvB4/TdnJ++GNMlMbyrbr0pEf2uqqOVO55cJ3I4v/Jcg8tq0clPuW1k5dNFsmFSMbbjE5N25EGrc7oEH5GVZ6I6L6p0Fzyj/MV4hKacboFHiZmBZgRQ,iv:UnXTa800G3PW4IaErkPBIZKjPHAU3LmiCvAqDdhFE/Q=,tag:kdWpHQ8fEPGFlmfVoTMskA==,type:str]",
|
|
||||||
"sops": {
|
|
||||||
"kms": null,
|
|
||||||
"gcp_kms": null,
|
|
||||||
"azure_kv": null,
|
|
||||||
"hc_vault": null,
|
|
||||||
"age": [
|
|
||||||
{
|
|
||||||
"recipient": "age1ztkm8yvdk42m2cn4dj2v9ptfknq8wpgr3ry9dpmtmlaeas6p7yyqft0ldg",
|
|
||||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBrVUlmaEdTNU1iMGg4dFA4\nNFNOSzlBc1NER1U3SHlwVFU1dm5tR1kyeldzCjZ2NXI3MjR4Zkd1RVBKNzJoQ1Jm\nQWpEZU5VMkNuYnhTTVJNc0RpTXlIZE0KLS0tIDFpQ2tlN0MzL1NuS2hKZU5JTG9B\nNWxXMzE0bGZpQkVBTnhWRXZBQlhrc1EKG76DM98cCuqIwUkbfJWHhJdYV77O9r8Q\nRJrq6jH59Gcp9W8iHg/aeShPHZFEOLg1q9azV9Wt9FjJn3SxyTmgvA==\n-----END AGE ENCRYPTED FILE-----\n"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"lastmodified": "2026-04-16T15:43:34Z",
|
|
||||||
"mac": "ENC[AES256_GCM,data:jVRr2TxSZH2paD2doIX4JwCqo5wiPYfTowpj189w1IVlS0EY/XQoqxiWbunX/LmIDdQlTPCSe/vTp1EJA0cx6vzN2xENrwsfzCP6dwDGaRlZhH3V0CVhtfHIkMTEKWrAUx5hFtiwJPkLYUUYi5aRWRxhZQM1eBeRvuGKdlwvmHA=,iv:H57a61AfVNLrlg+4aMl9mwXI5O38O5ZoRhpxe2PTTkY=,tag:2jwH1855VNYlKseTE/XtTg==,type:str]",
|
|
||||||
"pgp": null,
|
|
||||||
"unencrypted_suffix": "_unencrypted",
|
|
||||||
"version": "3.9.4"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
5
tests/fixtures/age-keys.txt
vendored
5
tests/fixtures/age-keys.txt
vendored
|
|
@ -1,5 +0,0 @@
|
||||||
# Test age key for sops
|
|
||||||
# Generated: 2026-04-16
|
|
||||||
# Public key: age1ztkm8yvdk42m2cn4dj2v9ptfknq8wpgr3ry9dpmtmlaeas6p7yyqft0ldg
|
|
||||||
|
|
||||||
AGE-SECRET-KEY-1PCQQX37MTZDGES76H9TGQN5XTG2ZZX2UUR87KR784NZ4MQ3NJ56S0Z23SF
|
|
||||||
40
tests/fixtures/dot-env-complete
vendored
40
tests/fixtures/dot-env-complete
vendored
|
|
@ -1,40 +0,0 @@
|
||||||
# Test fixture .env file for vault-import.sh
|
|
||||||
# This file contains all expected keys for the import test
|
|
||||||
|
|
||||||
# Generic forge creds
|
|
||||||
FORGE_TOKEN=generic-forge-token
|
|
||||||
FORGE_PASS=generic-forge-pass
|
|
||||||
FORGE_ADMIN_TOKEN=generic-admin-token
|
|
||||||
|
|
||||||
# Bot tokens (review, dev, gardener, architect, planner, predictor, supervisor, vault)
|
|
||||||
FORGE_REVIEW_TOKEN=review-token
|
|
||||||
FORGE_REVIEW_PASS=review-pass
|
|
||||||
FORGE_DEV_TOKEN=dev-token
|
|
||||||
FORGE_DEV_PASS=dev-pass
|
|
||||||
FORGE_GARDENER_TOKEN=gardener-token
|
|
||||||
FORGE_GARDENER_PASS=gardener-pass
|
|
||||||
FORGE_ARCHITECT_TOKEN=architect-token
|
|
||||||
FORGE_ARCHITECT_PASS=architect-pass
|
|
||||||
FORGE_PLANNER_TOKEN=planner-token
|
|
||||||
FORGE_PLANNER_PASS=planner-pass
|
|
||||||
FORGE_PREDICTOR_TOKEN=predictor-token
|
|
||||||
FORGE_PREDICTOR_PASS=predictor-pass
|
|
||||||
FORGE_SUPERVISOR_TOKEN=supervisor-token
|
|
||||||
FORGE_SUPERVISOR_PASS=supervisor-pass
|
|
||||||
FORGE_VAULT_TOKEN=vault-token
|
|
||||||
FORGE_VAULT_PASS=vault-pass
|
|
||||||
|
|
||||||
# Llama bot
|
|
||||||
FORGE_TOKEN_LLAMA=llama-token
|
|
||||||
FORGE_PASS_LLAMA=llama-pass
|
|
||||||
|
|
||||||
# Woodpecker secrets
|
|
||||||
WOODPECKER_AGENT_SECRET=wp-agent-secret
|
|
||||||
WP_FORGEJO_CLIENT=wp-forgejo-client
|
|
||||||
WP_FORGEJO_SECRET=wp-forgejo-secret
|
|
||||||
WOODPECKER_TOKEN=wp-token
|
|
||||||
|
|
||||||
# Chat secrets
|
|
||||||
FORWARD_AUTH_SECRET=forward-auth-secret
|
|
||||||
CHAT_OAUTH_CLIENT_ID=chat-client-id
|
|
||||||
CHAT_OAUTH_CLIENT_SECRET=chat-client-secret
|
|
||||||
27
tests/fixtures/dot-env-incomplete
vendored
27
tests/fixtures/dot-env-incomplete
vendored
|
|
@ -1,27 +0,0 @@
|
||||||
# Test fixture .env file with missing required keys
|
|
||||||
# This file is intentionally missing some keys to test error handling
|
|
||||||
|
|
||||||
# Generic forge creds - missing FORGE_ADMIN_TOKEN
|
|
||||||
FORGE_TOKEN=generic-forge-token
|
|
||||||
FORGE_PASS=generic-forge-pass
|
|
||||||
|
|
||||||
# Bot tokens - missing several roles
|
|
||||||
FORGE_REVIEW_TOKEN=review-token
|
|
||||||
FORGE_REVIEW_PASS=review-pass
|
|
||||||
FORGE_DEV_TOKEN=dev-token
|
|
||||||
FORGE_DEV_PASS=dev-pass
|
|
||||||
|
|
||||||
# Llama bot - missing (only token, no pass)
|
|
||||||
FORGE_TOKEN_LLAMA=llama-token
|
|
||||||
# FORGE_PASS_LLAMA=llama-pass
|
|
||||||
|
|
||||||
# Woodpecker secrets - missing some
|
|
||||||
WOODPECKER_AGENT_SECRET=wp-agent-secret
|
|
||||||
# WP_FORGEJO_CLIENT=wp-forgejo-client
|
|
||||||
# WP_FORGEJO_SECRET=wp-forgejo-secret
|
|
||||||
# WOODPECKER_TOKEN=wp-token
|
|
||||||
|
|
||||||
# Chat secrets - missing some
|
|
||||||
FORWARD_AUTH_SECRET=forward-auth-secret
|
|
||||||
# CHAT_OAUTH_CLIENT_ID=chat-client-id
|
|
||||||
# CHAT_OAUTH_CLIENT_SECRET=chat-client-secret
|
|
||||||
6
tests/fixtures/dot-env.vault.plain
vendored
6
tests/fixtures/dot-env.vault.plain
vendored
|
|
@ -1,6 +0,0 @@
|
||||||
GITHUB_TOKEN=github-test-token-abc123
|
|
||||||
CODEBERG_TOKEN=codeberg-test-token-def456
|
|
||||||
CLAWHUB_TOKEN=clawhub-test-token-ghi789
|
|
||||||
DEPLOY_KEY=deploy-key-test-jkl012
|
|
||||||
NPM_TOKEN=npm-test-token-mno345
|
|
||||||
DOCKER_HUB_TOKEN=dockerhub-test-token-pqr678
|
|
||||||
|
|
@ -1,161 +0,0 @@
|
||||||
#!/usr/bin/env bats
|
|
||||||
# =============================================================================
|
|
||||||
# tests/lib-generators.bats — Regression guard for the #849 fix.
|
|
||||||
#
|
|
||||||
# Before #849, `_generate_local_model_services` emitted the forge-user env
|
|
||||||
# variable keyed by service name (`FORGE_BOT_USER_${service_name^^}`), so for
|
|
||||||
# an `[agents.llama]` block with `forge_user = "dev-qwen"` the compose file
|
|
||||||
# contained `FORGE_BOT_USER_LLAMA: "dev-qwen"`. That suffix diverges from the
|
|
||||||
# `FORGE_TOKEN_<FORGE_USER>` / `FORGE_PASS_<FORGE_USER>` convention that the
|
|
||||||
# same block uses two lines above, and it doesn't even round-trip through a
|
|
||||||
# dash-containing service name (`dev-qwen` → `DEV-QWEN`, which is not a valid
|
|
||||||
# shell identifier — see #852).
|
|
||||||
#
|
|
||||||
# The fix keys on `$user_upper` (already computed from `forge_user` via
|
|
||||||
# `tr 'a-z-' 'A-Z_'`), yielding `FORGE_BOT_USER_DEV_QWEN: "dev-qwen"`.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
setup() {
|
|
||||||
ROOT="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." && pwd)"
|
|
||||||
export FACTORY_ROOT="${BATS_TEST_TMPDIR}/factory"
|
|
||||||
mkdir -p "${FACTORY_ROOT}/projects"
|
|
||||||
|
|
||||||
# Minimal compose skeleton that `_generate_local_model_services` can splice into.
|
|
||||||
# It only needs a `volumes:` marker line and nothing below it that would be
|
|
||||||
# re-read after the splice.
|
|
||||||
cat > "${FACTORY_ROOT}/docker-compose.yml" <<'EOF'
|
|
||||||
services:
|
|
||||||
agents:
|
|
||||||
image: placeholder
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
agent-data:
|
|
||||||
EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "local-model agent service emits FORGE_BOT_USER keyed by forge_user (#849)" {
|
|
||||||
cat > "${FACTORY_ROOT}/projects/test.toml" <<'EOF'
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.llama]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "qwen"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "dev-qwen"
|
|
||||||
compact_pct = 60
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/generators.sh'
|
|
||||||
_generate_local_model_services '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
cat '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# New, forge_user-keyed suffix is present with the right value.
|
|
||||||
[[ "$output" == *'FORGE_BOT_USER_DEV_QWEN: "dev-qwen"'* ]]
|
|
||||||
# Legacy service-name-keyed suffix must not be emitted.
|
|
||||||
[[ "$output" != *'FORGE_BOT_USER_LLAMA'* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "local-model agent service emits local image ref + build: fallback (#853)" {
|
|
||||||
# Before #853 the generator emitted `image: ghcr.io/disinto/agents:<tag>` for
|
|
||||||
# every hired agent. The ghcr image isn't publicly pullable and the running
|
|
||||||
# deployment has no credentials, so `docker compose up` failed with `denied`.
|
|
||||||
# The fix: emit the registry-less local name (matches `disinto init --build`
|
|
||||||
# and the legacy agents-llama stanza) plus a build: directive so hosts
|
|
||||||
# without a pre-built image can rebuild locally.
|
|
||||||
cat > "${FACTORY_ROOT}/projects/test.toml" <<'EOF'
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.dev-qwen2]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "qwen"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "dev-qwen2"
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/generators.sh'
|
|
||||||
_generate_local_model_services '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
cat '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# Local image ref — no ghcr prefix.
|
|
||||||
[[ "$output" == *'image: disinto/agents:${DISINTO_IMAGE_TAG:-latest}'* ]]
|
|
||||||
[[ "$output" != *'image: ghcr.io/disinto/agents'* ]]
|
|
||||||
# build: fallback so hosts without a pre-built image can rebuild.
|
|
||||||
[[ "$output" == *'dockerfile: docker/agents/Dockerfile'* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "local-model agent service emits pull_policy: build so docker compose up rebuilds on source change (#887)" {
|
|
||||||
# Without pull_policy: build, `docker compose up -d --force-recreate` reuses
|
|
||||||
# the cached `disinto/agents:latest` image and silently runs stale
|
|
||||||
# docker/agents/entrypoint.sh even after the repo is updated. `pull_policy:
|
|
||||||
# build` forces a rebuild on every up; BuildKit layer cache makes unchanged
|
|
||||||
# rebuilds near-instant. The alternative was requiring every operator to
|
|
||||||
# remember `--build` on every invocation, which was the bug that prompted
|
|
||||||
# #887 (2h of debugging a fix that was merged but never reached the container).
|
|
||||||
cat > "${FACTORY_ROOT}/projects/test.toml" <<'EOF'
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.dev-qwen2]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "qwen"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "dev-qwen2"
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/generators.sh'
|
|
||||||
_generate_local_model_services '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
cat '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *'pull_policy: build'* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "local-model agent service keys FORGE_BOT_USER to forge_user even when it differs from service name (#849)" {
|
|
||||||
# Exercise the case the issue calls out: two agents in the same factory
|
|
||||||
# whose service names are identical (`[agents.llama]`) but whose
|
|
||||||
# forge_users diverge would previously both have emitted
|
|
||||||
# `FORGE_BOT_USER_LLAMA`. With the fix each emission carries its own
|
|
||||||
# forge_user-derived suffix.
|
|
||||||
cat > "${FACTORY_ROOT}/projects/a.toml" <<'EOF'
|
|
||||||
name = "a"
|
|
||||||
repo = "a/a"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.dev]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "qwen"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "review-qwen"
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/generators.sh'
|
|
||||||
_generate_local_model_services '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
cat '${FACTORY_ROOT}/docker-compose.yml'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *'FORGE_BOT_USER_REVIEW_QWEN: "review-qwen"'* ]]
|
|
||||||
[[ "$output" != *'FORGE_BOT_USER_DEV:'* ]]
|
|
||||||
}
|
|
||||||
|
|
@ -1,215 +0,0 @@
|
||||||
#!/usr/bin/env bats
|
|
||||||
# tests/lib-hvault.bats — Unit tests for lib/hvault.sh
|
|
||||||
#
|
|
||||||
# Runs against a dev-mode Vault server (single binary, no LXC needed).
|
|
||||||
# CI launches vault server -dev inline before running these tests.
|
|
||||||
|
|
||||||
VAULT_BIN="${VAULT_BIN:-vault}"
|
|
||||||
|
|
||||||
setup_file() {
|
|
||||||
export TEST_DIR
|
|
||||||
TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." && pwd)"
|
|
||||||
|
|
||||||
# Start dev-mode vault on a random port
|
|
||||||
export VAULT_DEV_PORT
|
|
||||||
VAULT_DEV_PORT="$(shuf -i 18200-18299 -n 1)"
|
|
||||||
export VAULT_ADDR="http://127.0.0.1:${VAULT_DEV_PORT}"
|
|
||||||
|
|
||||||
"$VAULT_BIN" server -dev \
|
|
||||||
-dev-listen-address="127.0.0.1:${VAULT_DEV_PORT}" \
|
|
||||||
-dev-root-token-id="test-root-token" \
|
|
||||||
-dev-no-store-token \
|
|
||||||
&>"${BATS_FILE_TMPDIR}/vault.log" &
|
|
||||||
export VAULT_PID=$!
|
|
||||||
|
|
||||||
export VAULT_TOKEN="test-root-token"
|
|
||||||
|
|
||||||
# Wait for vault to be ready (up to 10s)
|
|
||||||
local i=0
|
|
||||||
while ! curl -sf "${VAULT_ADDR}/v1/sys/health" >/dev/null 2>&1; do
|
|
||||||
sleep 0.5
|
|
||||||
i=$((i + 1))
|
|
||||||
if [ "$i" -ge 20 ]; then
|
|
||||||
echo "Vault failed to start. Log:" >&2
|
|
||||||
cat "${BATS_FILE_TMPDIR}/vault.log" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
teardown_file() {
|
|
||||||
if [ -n "${VAULT_PID:-}" ]; then
|
|
||||||
kill "$VAULT_PID" 2>/dev/null || true
|
|
||||||
wait "$VAULT_PID" 2>/dev/null || true
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
setup() {
|
|
||||||
# Source the module under test
|
|
||||||
source "${TEST_DIR}/lib/hvault.sh"
|
|
||||||
export VAULT_ADDR VAULT_TOKEN
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── hvault_kv_put + hvault_kv_get ────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "hvault_kv_put writes and hvault_kv_get reads a secret" {
|
|
||||||
run hvault_kv_put "test/myapp" "username=admin" "password=s3cret"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
run hvault_kv_get "test/myapp"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
echo "$output" | jq -e '.username == "admin"'
|
|
||||||
echo "$output" | jq -e '.password == "s3cret"'
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_get extracts a single key" {
|
|
||||||
hvault_kv_put "test/single" "foo=bar" "baz=qux"
|
|
||||||
|
|
||||||
run hvault_kv_get "test/single" "foo"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[ "$output" = "bar" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_get fails for missing key" {
|
|
||||||
hvault_kv_put "test/keymiss" "exists=yes"
|
|
||||||
|
|
||||||
run hvault_kv_get "test/keymiss" "nope"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_get fails for missing path" {
|
|
||||||
run hvault_kv_get "test/does-not-exist-$(date +%s)"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_put fails without KEY=VAL" {
|
|
||||||
run hvault_kv_put "test/bad"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
echo "$output" | grep -q '"error":true' || echo "$stderr" | grep -q '"error":true'
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_put rejects malformed pair (no =)" {
|
|
||||||
run hvault_kv_put "test/bad2" "noequals"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_get fails without PATH" {
|
|
||||||
run hvault_kv_get
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── hvault_kv_list ───────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "hvault_kv_list lists keys at a path" {
|
|
||||||
hvault_kv_put "test/listdir/a" "k=1"
|
|
||||||
hvault_kv_put "test/listdir/b" "k=2"
|
|
||||||
|
|
||||||
run hvault_kv_list "test/listdir"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
echo "$output" | jq -e '. | length >= 2'
|
|
||||||
echo "$output" | jq -e 'index("a")'
|
|
||||||
echo "$output" | jq -e 'index("b")'
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_list fails on nonexistent path" {
|
|
||||||
run hvault_kv_list "test/no-such-path-$(date +%s)"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_kv_list fails without PATH" {
|
|
||||||
run hvault_kv_list
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── hvault_policy_apply ──────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "hvault_policy_apply creates a policy" {
|
|
||||||
local pfile="${BATS_TEST_TMPDIR}/test-policy.hcl"
|
|
||||||
cat > "$pfile" <<'HCL'
|
|
||||||
path "kv/data/test/*" {
|
|
||||||
capabilities = ["read"]
|
|
||||||
}
|
|
||||||
HCL
|
|
||||||
|
|
||||||
run hvault_policy_apply "test-reader" "$pfile"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# Verify the policy exists via Vault API
|
|
||||||
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
|
|
||||||
"${VAULT_ADDR}/v1/sys/policies/acl/test-reader"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
echo "$output" | jq -e '.data.policy' | grep -q "kv/data/test"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_policy_apply is idempotent" {
|
|
||||||
local pfile="${BATS_TEST_TMPDIR}/idem-policy.hcl"
|
|
||||||
printf 'path "kv/*" { capabilities = ["list"] }\n' > "$pfile"
|
|
||||||
|
|
||||||
run hvault_policy_apply "idem-policy" "$pfile"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# Apply again — should succeed
|
|
||||||
run hvault_policy_apply "idem-policy" "$pfile"
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_policy_apply fails with missing file" {
|
|
||||||
run hvault_policy_apply "bad-policy" "/nonexistent/policy.hcl"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_policy_apply fails without args" {
|
|
||||||
run hvault_policy_apply
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── hvault_token_lookup ──────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "hvault_token_lookup returns token info" {
|
|
||||||
run hvault_token_lookup
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
echo "$output" | jq -e '.policies'
|
|
||||||
echo "$output" | jq -e '.accessor'
|
|
||||||
echo "$output" | jq -e 'has("ttl")'
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_token_lookup fails without VAULT_TOKEN" {
|
|
||||||
unset VAULT_TOKEN
|
|
||||||
run hvault_token_lookup
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_token_lookup fails without VAULT_ADDR" {
|
|
||||||
unset VAULT_ADDR
|
|
||||||
run hvault_token_lookup
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── hvault_jwt_login ─────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "hvault_jwt_login fails without VAULT_ADDR" {
|
|
||||||
unset VAULT_ADDR
|
|
||||||
run hvault_jwt_login "myrole" "fakejwt"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_jwt_login fails without args" {
|
|
||||||
run hvault_jwt_login
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hvault_jwt_login returns error for unconfigured jwt auth" {
|
|
||||||
# JWT auth backend is not enabled in dev mode by default — expect failure
|
|
||||||
run hvault_jwt_login "myrole" "eyJhbGciOiJSUzI1NiJ9.fake.sig"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Env / prereq errors ─────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "all functions fail with structured JSON error when VAULT_ADDR unset" {
|
|
||||||
unset VAULT_ADDR
|
|
||||||
for fn in hvault_kv_get hvault_kv_put hvault_kv_list hvault_policy_apply hvault_token_lookup; do
|
|
||||||
run $fn "dummy" "dummy"
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
@ -1,212 +0,0 @@
|
||||||
#!/usr/bin/env bats
|
|
||||||
# =============================================================================
|
|
||||||
# tests/lib-issue-claim.bats — Regression guard for the issue_claim TOCTOU
|
|
||||||
# fix landed in #830.
|
|
||||||
#
|
|
||||||
# Before the fix, two dev agents polling concurrently could both observe
|
|
||||||
# `.assignee == null`, both PATCH the assignee, and Forgejo's last-write-wins
|
|
||||||
# semantics would leave the loser believing it had claimed successfully.
|
|
||||||
# Two agents would then implement the same issue and collide at the PR/branch
|
|
||||||
# stage.
|
|
||||||
#
|
|
||||||
# The fix re-reads the assignee after the PATCH and aborts when it doesn't
|
|
||||||
# match self, with label writes moved AFTER the verification so a losing
|
|
||||||
# claim leaves no stray `in-progress` label.
|
|
||||||
#
|
|
||||||
# These tests stub `curl` with a bash function so each call tree can be
|
|
||||||
# driven through a specific response sequence (pre-check, PATCH, re-read)
|
|
||||||
# without a live Forgejo. The stub records every HTTP call to
|
|
||||||
# `$CALLS_LOG` for assertions.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
setup() {
|
|
||||||
ROOT="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." && pwd)"
|
|
||||||
export FACTORY_ROOT="$ROOT"
|
|
||||||
export FORGE_TOKEN="dummy-token"
|
|
||||||
export FORGE_URL="https://forge.example.test"
|
|
||||||
export FORGE_API="${FORGE_URL}/api/v1"
|
|
||||||
|
|
||||||
export CALLS_LOG="${BATS_TEST_TMPDIR}/curl-calls.log"
|
|
||||||
: > "$CALLS_LOG"
|
|
||||||
export ISSUE_GET_COUNT_FILE="${BATS_TEST_TMPDIR}/issue-get-count"
|
|
||||||
echo 0 > "$ISSUE_GET_COUNT_FILE"
|
|
||||||
|
|
||||||
# Scenario knobs — overridden per @test.
|
|
||||||
export MOCK_ME="bot"
|
|
||||||
export MOCK_INITIAL_ASSIGNEE=""
|
|
||||||
export MOCK_RECHECK_ASSIGNEE="bot"
|
|
||||||
|
|
||||||
# Stand-in for lib/env.sh's forge_api (we don't source env.sh — too
|
|
||||||
# much unrelated setup). Shape mirrors the real helper closely enough
|
|
||||||
# that _ilc_ensure_label_id() works.
|
|
||||||
forge_api() {
|
|
||||||
local method="$1" path="$2"
|
|
||||||
shift 2
|
|
||||||
curl -sf -X "$method" \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_API}${path}" "$@"
|
|
||||||
}
|
|
||||||
|
|
||||||
# curl shim — parses method + URL out of the argv and dispatches
|
|
||||||
# canned responses per endpoint. Every call gets logged as
|
|
||||||
# `METHOD URL` (one line) to $CALLS_LOG for later grep-based asserts.
|
|
||||||
curl() {
|
|
||||||
local method="GET" url="" arg want_code=""
|
|
||||||
while [ $# -gt 0 ]; do
|
|
||||||
arg="$1"
|
|
||||||
case "$arg" in
|
|
||||||
-X) method="$2"; shift 2 ;;
|
|
||||||
-H|-d|--data-binary|-o) shift 2 ;;
|
|
||||||
-w) want_code="$2"; shift 2 ;;
|
|
||||||
-sf|-s|-f|--silent|--fail) shift ;;
|
|
||||||
*) url="$arg"; shift ;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
printf '%s %s\n' "$method" "$url" >> "$CALLS_LOG"
|
|
||||||
|
|
||||||
case "$method $url" in
|
|
||||||
"GET ${FORGE_URL}/api/v1/user")
|
|
||||||
printf '{"login":"%s"}' "$MOCK_ME"
|
|
||||||
;;
|
|
||||||
"GET ${FORGE_API}/issues/"*)
|
|
||||||
# Distinguish pre-check (first GET) from re-read (subsequent GETs)
|
|
||||||
# via a counter file that persists across curl invocations in the
|
|
||||||
# same test.
|
|
||||||
local n
|
|
||||||
n=$(cat "$ISSUE_GET_COUNT_FILE")
|
|
||||||
n=$((n + 1))
|
|
||||||
echo "$n" > "$ISSUE_GET_COUNT_FILE"
|
|
||||||
local who
|
|
||||||
if [ "$n" -eq 1 ]; then
|
|
||||||
who="$MOCK_INITIAL_ASSIGNEE"
|
|
||||||
else
|
|
||||||
who="$MOCK_RECHECK_ASSIGNEE"
|
|
||||||
fi
|
|
||||||
if [ -z "$who" ]; then
|
|
||||||
printf '{"assignee":null}'
|
|
||||||
else
|
|
||||||
printf '{"assignee":{"login":"%s"}}' "$who"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
"PATCH ${FORGE_API}/issues/"*)
|
|
||||||
# Accept any PATCH; body ignored. When caller asked for the HTTP
|
|
||||||
# status via `-w '%{http_code}'` (issue_claim does this since #856
|
|
||||||
# to surface 403s from missing collaborator permission), emit the
|
|
||||||
# code configured by the scenario (default 200).
|
|
||||||
if [ "$want_code" = '%{http_code}' ]; then
|
|
||||||
printf '%s' "${MOCK_PATCH_CODE:-200}"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
"GET ${FORGE_API}/labels")
|
|
||||||
printf '[]'
|
|
||||||
;;
|
|
||||||
"POST ${FORGE_API}/labels")
|
|
||||||
printf '{"id":99}'
|
|
||||||
;;
|
|
||||||
"POST ${FORGE_API}/issues/"*"/labels")
|
|
||||||
:
|
|
||||||
;;
|
|
||||||
"DELETE ${FORGE_API}/issues/"*"/labels/"*)
|
|
||||||
:
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# shellcheck source=../lib/issue-lifecycle.sh
|
|
||||||
source "${ROOT}/lib/issue-lifecycle.sh"
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── helpers ──────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
# count_calls METHOD URL — count matching lines in $CALLS_LOG.
|
|
||||||
count_calls() {
|
|
||||||
local method="$1" url="$2"
|
|
||||||
grep -cF "${method} ${url}" "$CALLS_LOG" 2>/dev/null || echo 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── happy path ───────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "issue_claim returns 0 when re-read confirms self (no regression, single agent)" {
|
|
||||||
export MOCK_ME="bot"
|
|
||||||
export MOCK_INITIAL_ASSIGNEE=""
|
|
||||||
export MOCK_RECHECK_ASSIGNEE="bot"
|
|
||||||
|
|
||||||
run issue_claim 42
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
|
|
||||||
# Exactly two GETs to /issues/42 — pre-check and post-PATCH re-read.
|
|
||||||
[ "$(count_calls GET "${FORGE_API}/issues/42")" -eq 2 ]
|
|
||||||
|
|
||||||
# Assignee PATCH fired.
|
|
||||||
[ "$(count_calls PATCH "${FORGE_API}/issues/42")" -eq 1 ]
|
|
||||||
|
|
||||||
# in-progress label added (POST /issues/42/labels).
|
|
||||||
[ "$(count_calls POST "${FORGE_API}/issues/42/labels")" -eq 1 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── lost race ────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "issue_claim returns 1 and leaves no stray in-progress when re-read shows another agent" {
|
|
||||||
export MOCK_ME="bot"
|
|
||||||
export MOCK_INITIAL_ASSIGNEE=""
|
|
||||||
export MOCK_RECHECK_ASSIGNEE="rival"
|
|
||||||
|
|
||||||
run issue_claim 42
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
[[ "$output" == *"claim lost to rival"* ]]
|
|
||||||
|
|
||||||
# Re-read happened (two GETs) — this is the new verification step.
|
|
||||||
[ "$(count_calls GET "${FORGE_API}/issues/42")" -eq 2 ]
|
|
||||||
|
|
||||||
# PATCH happened (losers still PATCH before verifying).
|
|
||||||
[ "$(count_calls PATCH "${FORGE_API}/issues/42")" -eq 1 ]
|
|
||||||
|
|
||||||
# CRITICAL: no in-progress label operations on a lost claim.
|
|
||||||
# (No need to roll back what was never written.)
|
|
||||||
[ "$(count_calls POST "${FORGE_API}/issues/42/labels")" -eq 0 ]
|
|
||||||
[ "$(count_calls GET "${FORGE_API}/labels")" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── PATCH HTTP error surfacing (#856) ───────────────────────────────────────
|
|
||||||
|
|
||||||
@test "issue_claim logs specific HTTP code on PATCH failure (403 = missing collaborator)" {
|
|
||||||
export MOCK_ME="bot"
|
|
||||||
export MOCK_INITIAL_ASSIGNEE=""
|
|
||||||
export MOCK_RECHECK_ASSIGNEE=""
|
|
||||||
export MOCK_PATCH_CODE="403"
|
|
||||||
|
|
||||||
run issue_claim 42
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
|
|
||||||
# The new log message names the HTTP code explicitly — without this,
|
|
||||||
# a missing-collaborator setup (#856) falls through to the post-PATCH
|
|
||||||
# verify and masquerades as "claim lost to <none>".
|
|
||||||
[[ "$output" == *"PATCH assignee failed: HTTP 403"* ]]
|
|
||||||
|
|
||||||
# No re-read on PATCH failure (we bail before reaching the verify step).
|
|
||||||
[ "$(count_calls GET "${FORGE_API}/issues/42")" -eq 1 ]
|
|
||||||
[ "$(count_calls PATCH "${FORGE_API}/issues/42")" -eq 1 ]
|
|
||||||
[ "$(count_calls POST "${FORGE_API}/issues/42/labels")" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── pre-check skip ──────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
@test "issue_claim skips early (no PATCH) when pre-check shows another assignee" {
|
|
||||||
export MOCK_ME="bot"
|
|
||||||
export MOCK_INITIAL_ASSIGNEE="rival"
|
|
||||||
export MOCK_RECHECK_ASSIGNEE="rival"
|
|
||||||
|
|
||||||
run issue_claim 42
|
|
||||||
[ "$status" -eq 1 ]
|
|
||||||
[[ "$output" == *"already assigned to rival"* ]]
|
|
||||||
|
|
||||||
# Only the pre-check GET — no PATCH, no re-read, no labels.
|
|
||||||
[ "$(count_calls GET "${FORGE_API}/issues/42")" -eq 1 ]
|
|
||||||
[ "$(count_calls PATCH "${FORGE_API}/issues/42")" -eq 0 ]
|
|
||||||
[ "$(count_calls POST "${FORGE_API}/issues/42/labels")" -eq 0 ]
|
|
||||||
}
|
|
||||||
|
|
@ -1,253 +0,0 @@
|
||||||
#!/usr/bin/env bats
|
|
||||||
# =============================================================================
|
|
||||||
# tests/lib-load-project.bats — Regression guard for the #862 fix.
|
|
||||||
#
|
|
||||||
# TOML allows dashes in bare keys, so `[agents.dev-qwen2]` is a valid section
|
|
||||||
# header. Before #862, load-project.sh translated the section name into a
|
|
||||||
# shell variable name via Python's `.upper()` alone, which kept the dash and
|
|
||||||
# produced `AGENT_DEV-QWEN2_BASE_URL`. `export "AGENT_DEV-QWEN2_..."` is
|
|
||||||
# rejected by bash ("not a valid identifier"), and with `set -euo pipefail`
|
|
||||||
# anywhere up-stack that error aborts load-project.sh — effectively crashing
|
|
||||||
# the factory on the N+1 run after a dashed agent was hired.
|
|
||||||
#
|
|
||||||
# The fix normalizes via `.upper().replace('-', '_')`, matching the
|
|
||||||
# `tr 'a-z-' 'A-Z_'` convention already used in hire-agent.sh and
|
|
||||||
# generators.sh.
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
setup() {
|
|
||||||
ROOT="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." && pwd)"
|
|
||||||
TOML="${BATS_TEST_TMPDIR}/test.toml"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "dashed [agents.*] section name parses without error" {
|
|
||||||
cat > "$TOML" <<EOF
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.dev-qwen2]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "unsloth/Qwen3.5-35B-A3B"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "dev-qwen2"
|
|
||||||
compact_pct = 60
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/load-project.sh' '$TOML'
|
|
||||||
echo \"BASE=\${AGENT_DEV_QWEN2_BASE_URL:-MISSING}\"
|
|
||||||
echo \"MODEL=\${AGENT_DEV_QWEN2_MODEL:-MISSING}\"
|
|
||||||
echo \"ROLES=\${AGENT_DEV_QWEN2_ROLES:-MISSING}\"
|
|
||||||
echo \"FORGE_USER=\${AGENT_DEV_QWEN2_FORGE_USER:-MISSING}\"
|
|
||||||
echo \"COMPACT=\${AGENT_DEV_QWEN2_COMPACT_PCT:-MISSING}\"
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"BASE=http://10.10.10.1:8081"* ]]
|
|
||||||
[[ "$output" == *"MODEL=unsloth/Qwen3.5-35B-A3B"* ]]
|
|
||||||
[[ "$output" == *"ROLES=dev"* ]]
|
|
||||||
[[ "$output" == *"FORGE_USER=dev-qwen2"* ]]
|
|
||||||
[[ "$output" == *"COMPACT=60"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "dashless [agents.*] section name still works" {
|
|
||||||
cat > "$TOML" <<EOF
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.llama]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "qwen"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["dev"]
|
|
||||||
forge_user = "dev-llama"
|
|
||||||
compact_pct = 60
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/load-project.sh' '$TOML'
|
|
||||||
echo \"BASE=\${AGENT_LLAMA_BASE_URL:-MISSING}\"
|
|
||||||
echo \"MODEL=\${AGENT_LLAMA_MODEL:-MISSING}\"
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"BASE=http://10.10.10.1:8081"* ]]
|
|
||||||
[[ "$output" == *"MODEL=qwen"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "multiple dashes in [agents.*] name all normalized" {
|
|
||||||
cat > "$TOML" <<EOF
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.review-qwen-3b]
|
|
||||||
base_url = "http://10.10.10.1:8082"
|
|
||||||
model = "qwen-3b"
|
|
||||||
api_key = "sk-no-key-required"
|
|
||||||
roles = ["review"]
|
|
||||||
forge_user = "review-qwen-3b"
|
|
||||||
compact_pct = 60
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/load-project.sh' '$TOML'
|
|
||||||
echo \"BASE=\${AGENT_REVIEW_QWEN_3B_BASE_URL:-MISSING}\"
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"BASE=http://10.10.10.1:8082"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hire-agent rejects dash-starting agent name" {
|
|
||||||
run bash -c "
|
|
||||||
FACTORY_ROOT='${ROOT}' \
|
|
||||||
FORGE_URL='http://127.0.0.1:1' \
|
|
||||||
FORGE_TOKEN=x \
|
|
||||||
bash -c '
|
|
||||||
set -euo pipefail
|
|
||||||
source \"\${FACTORY_ROOT}/lib/hire-agent.sh\"
|
|
||||||
disinto_hire_an_agent -foo dev
|
|
||||||
'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"invalid agent name"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hire-agent rejects uppercase agent name" {
|
|
||||||
run bash -c "
|
|
||||||
FACTORY_ROOT='${ROOT}' \
|
|
||||||
FORGE_URL='http://127.0.0.1:1' \
|
|
||||||
FORGE_TOKEN=x \
|
|
||||||
bash -c '
|
|
||||||
set -euo pipefail
|
|
||||||
source \"\${FACTORY_ROOT}/lib/hire-agent.sh\"
|
|
||||||
disinto_hire_an_agent DevQwen dev
|
|
||||||
'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"invalid agent name"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hire-agent rejects underscore agent name" {
|
|
||||||
run bash -c "
|
|
||||||
FACTORY_ROOT='${ROOT}' \
|
|
||||||
FORGE_URL='http://127.0.0.1:1' \
|
|
||||||
FORGE_TOKEN=x \
|
|
||||||
bash -c '
|
|
||||||
set -euo pipefail
|
|
||||||
source \"\${FACTORY_ROOT}/lib/hire-agent.sh\"
|
|
||||||
disinto_hire_an_agent dev_qwen dev
|
|
||||||
'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"invalid agent name"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hire-agent rejects trailing dash agent name" {
|
|
||||||
run bash -c "
|
|
||||||
FACTORY_ROOT='${ROOT}' \
|
|
||||||
FORGE_URL='http://127.0.0.1:1' \
|
|
||||||
FORGE_TOKEN=x \
|
|
||||||
bash -c '
|
|
||||||
set -euo pipefail
|
|
||||||
source \"\${FACTORY_ROOT}/lib/hire-agent.sh\"
|
|
||||||
disinto_hire_an_agent dev- dev
|
|
||||||
'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"invalid agent name"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hire-agent rejects consecutive-dash agent name" {
|
|
||||||
run bash -c "
|
|
||||||
FACTORY_ROOT='${ROOT}' \
|
|
||||||
FORGE_URL='http://127.0.0.1:1' \
|
|
||||||
FORGE_TOKEN=x \
|
|
||||||
bash -c '
|
|
||||||
set -euo pipefail
|
|
||||||
source \"\${FACTORY_ROOT}/lib/hire-agent.sh\"
|
|
||||||
disinto_hire_an_agent dev--qwen dev
|
|
||||||
'
|
|
||||||
"
|
|
||||||
|
|
||||||
[ "$status" -ne 0 ]
|
|
||||||
[[ "$output" == *"invalid agent name"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
# -------------------------------------------------------------------------
|
|
||||||
# #852 defence: the export loops must warn-and-skip invalid identifiers
|
|
||||||
# rather than tank `set -euo pipefail`. Hire-agent's up-front reject
|
|
||||||
# (tests above) is the primary line of defence, but a hand-edited TOML —
|
|
||||||
# e.g. [mirrors] my-mirror = "…" or a quoted [agents."weird name"] — can
|
|
||||||
# still produce invalid shell identifiers downstream. The guard keeps
|
|
||||||
# the factory loading the rest of the file instead of crash-looping.
|
|
||||||
# -------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@test "[mirrors] dashed key: warn-and-skip, does not crash under set -e" {
|
|
||||||
cat > "$TOML" <<EOF
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[mirrors]
|
|
||||||
good = "https://example.com/good"
|
|
||||||
bad-name = "https://example.com/bad"
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/load-project.sh' '$TOML' 2>&1
|
|
||||||
echo \"GOOD=\${MIRROR_GOOD:-MISSING}\"
|
|
||||||
"
|
|
||||||
|
|
||||||
# Whole load did not abort under set -e.
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
# The valid mirror still loads.
|
|
||||||
[[ "$output" == *"GOOD=https://example.com/good"* ]]
|
|
||||||
# The invalid one triggers a warning; load continues instead of crashing.
|
|
||||||
[[ "$output" == *"skipping invalid shell identifier"* ]]
|
|
||||||
[[ "$output" == *"MIRROR_BAD-NAME"* ]]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "[agents.*] quoted section with space: warn-and-skip, does not crash" {
|
|
||||||
# TOML permits quoted keys with arbitrary characters. A hand-edited
|
|
||||||
# `[agents."weird name"]` would survive the Python .replace('-', '_')
|
|
||||||
# (because it has no dash) but still contains a space, which would
|
|
||||||
# yield AGENT_WEIRD NAME_BASE_URL — not a valid identifier.
|
|
||||||
cat > "$TOML" <<'EOF'
|
|
||||||
name = "test"
|
|
||||||
repo = "test-owner/test-repo"
|
|
||||||
forge_url = "http://localhost:3000"
|
|
||||||
|
|
||||||
[agents.llama]
|
|
||||||
base_url = "http://10.10.10.1:8081"
|
|
||||||
model = "qwen"
|
|
||||||
|
|
||||||
[agents."weird name"]
|
|
||||||
base_url = "http://10.10.10.1:8082"
|
|
||||||
model = "qwen-bad"
|
|
||||||
EOF
|
|
||||||
|
|
||||||
run bash -c "
|
|
||||||
set -euo pipefail
|
|
||||||
source '${ROOT}/lib/load-project.sh' '$TOML' 2>&1
|
|
||||||
echo \"LLAMA=\${AGENT_LLAMA_BASE_URL:-MISSING}\"
|
|
||||||
"
|
|
||||||
|
|
||||||
# The sane sibling must still be loaded despite the malformed neighbour.
|
|
||||||
[ "$status" -eq 0 ]
|
|
||||||
[[ "$output" == *"LLAMA=http://10.10.10.1:8081"* ]]
|
|
||||||
# The invalid agent's identifier triggers a warning and is skipped.
|
|
||||||
[[ "$output" == *"skipping invalid shell identifier"* ]]
|
|
||||||
}
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue