Compare commits
122 commits
fix/issue-
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 75bec43c4a | |||
|
|
06ead3a19d | ||
| 74f49e1c2f | |||
|
|
de00400bc4 | ||
| 32ab84a87c | |||
|
|
c236350e00 | ||
| c363ee0aea | |||
|
|
519742e5e7 | ||
| 131d0471f2 | |||
|
|
4487d1512c | ||
|
|
ef40433fff | ||
| 7513e93d6d | |||
|
|
0bfa31da49 | ||
|
|
8e885bed02 | ||
| 34447d31dc | |||
|
|
9d8f322005 | ||
| 55cce66468 | |||
|
|
14458f1f17 | ||
|
|
fbb246c626 | ||
| faf6490877 | |||
|
|
88b377ecfb | ||
|
|
d020847772 | ||
| 98ec610645 | |||
|
|
f8c3ada077 | ||
| 8315a4ecf5 | |||
|
|
b6f2d83a28 | ||
|
|
2465841b84 | ||
| 5c40b59359 | |||
|
|
19f10e33e6 | ||
| 6a4ca5c3a0 | |||
|
|
8799a8c676 | ||
| 3b366ad96e | |||
|
|
aa298eb2ad | ||
|
|
9dbc43ab23 | ||
| 1d4e28843e | |||
|
|
f90702f930 | ||
| defec3b255 | |||
|
|
88676e65ae | ||
| a87dcdf40b | |||
| b8cb8c5c32 | |||
|
|
0937707fe5 | ||
|
|
e9a018db5c | ||
| 18190874ca | |||
|
|
5a2a9e1c74 | ||
| 182c40b9fc | |||
|
|
241ce96046 | ||
|
|
987413ab3a | ||
| 02e86c3589 | |||
|
|
175716a847 | ||
| d6c8fd8127 | |||
|
|
5dda6dc8e9 | ||
| 49cc870f54 | |||
|
|
ec7bc8ff2c | ||
| f27c66a7e0 | |||
|
|
53ce7ad475 | ||
| c644660bda | |||
| 91f36b2692 | |||
|
|
a8d393f3bd | ||
| d0c0ef724a | |||
|
|
539862679d | ||
| 250788952f | |||
|
|
0104ac06a8 | ||
| c71b6d4f95 | |||
|
|
92f19cb2b3 | ||
| be463c5b43 | |||
|
|
0baac1a7d8 | ||
| 0db4c84818 | |||
| 378da77adf | |||
|
|
fd9ba028bc | ||
|
|
707aae287a | ||
|
|
0be36dd502 | ||
|
|
2c9b8e386f | ||
|
|
04ff8a6e85 | ||
| 10c7a88416 | |||
|
|
66ba93a840 | ||
|
|
aff9f0fcef | ||
| c7a1c444e9 | |||
|
|
8a5537fefc | ||
| 34fd7868e4 | |||
|
|
0b4905af3d | ||
| cdb0408466 | |||
|
|
32420c619d | ||
| 3757d9d919 | |||
| b95e2da645 | |||
|
|
5733a10858 | ||
|
|
9b0ecc40dc | ||
| ba3a11fa9d | |||
|
|
6af8f002f5 | ||
| c5b0b1dc23 | |||
|
|
a08d87d0f3 | ||
| 59717558d4 | |||
| 409a796556 | |||
|
|
7f2198cc76 | ||
|
|
de8243b93f | ||
| 38713ab030 | |||
|
|
2979580171 | ||
| 4e53f508d9 | |||
| 4200cb13c6 | |||
|
|
02915456ae | ||
|
|
05bc926906 | ||
| c4ca1e930d | |||
|
|
246ed9050d | ||
| 4fcbca1bef | |||
|
|
3f8c0321ed | ||
| 79346fd501 | |||
|
|
0c4f00a86c | ||
| ec7dff854a | |||
|
|
e275c35fa8 | ||
| 12d9f52903 | |||
|
|
aeda17a601 | ||
| 9d778f6fd6 | |||
|
|
6d148d669b | ||
|
|
dae15410ab | ||
| eaf0f724fa | |||
|
|
d367c9d258 | ||
| d5e823771b | |||
|
|
3b4238d17f | ||
| 1ea5346c91 | |||
| 99becf027e | |||
|
|
0bc027a25a | ||
|
|
ff79e64fc8 | ||
|
|
f8ac1d2ae2 |
77 changed files with 5192 additions and 981 deletions
|
|
@ -1,8 +1,7 @@
|
||||||
# Secrets — prevent .env files from being baked into the image
|
# Secrets — prevent .env files and encrypted secrets from being baked into the image
|
||||||
.env
|
.env
|
||||||
.env.enc
|
.env.enc
|
||||||
.env.vault
|
secrets/
|
||||||
.env.vault.enc
|
|
||||||
|
|
||||||
# Version control — .git is huge and not needed in image
|
# Version control — .git is huge and not needed in image
|
||||||
.git
|
.git
|
||||||
|
|
|
||||||
37
.env.example
37
.env.example
|
|
@ -45,7 +45,9 @@ FORGE_PREDICTOR_TOKEN= # [SECRET] predictor-bot API token
|
||||||
FORGE_PREDICTOR_PASS= # [SECRET] predictor-bot password for git HTTP push
|
FORGE_PREDICTOR_PASS= # [SECRET] predictor-bot password for git HTTP push
|
||||||
FORGE_ARCHITECT_TOKEN= # [SECRET] architect-bot API token
|
FORGE_ARCHITECT_TOKEN= # [SECRET] architect-bot API token
|
||||||
FORGE_ARCHITECT_PASS= # [SECRET] architect-bot password for git HTTP push
|
FORGE_ARCHITECT_PASS= # [SECRET] architect-bot password for git HTTP push
|
||||||
FORGE_BOT_USERNAMES=dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot
|
FORGE_FILER_TOKEN= # [SECRET] filer-bot API token (issues:write on project repo only)
|
||||||
|
FORGE_FILER_PASS= # [SECRET] filer-bot password for git HTTP push
|
||||||
|
FORGE_BOT_USERNAMES=dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot,filer-bot
|
||||||
|
|
||||||
# ── Backwards compatibility ───────────────────────────────────────────────
|
# ── Backwards compatibility ───────────────────────────────────────────────
|
||||||
# If CODEBERG_TOKEN is set but FORGE_TOKEN is not, env.sh falls back to
|
# If CODEBERG_TOKEN is set but FORGE_TOKEN is not, env.sh falls back to
|
||||||
|
|
@ -61,6 +63,10 @@ FORGE_BOT_USERNAMES=dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,superv
|
||||||
WOODPECKER_TOKEN= # [SECRET] Woodpecker API token
|
WOODPECKER_TOKEN= # [SECRET] Woodpecker API token
|
||||||
WOODPECKER_SERVER=http://localhost:8000 # [CONFIG] Woodpecker server URL
|
WOODPECKER_SERVER=http://localhost:8000 # [CONFIG] Woodpecker server URL
|
||||||
WOODPECKER_AGENT_SECRET= # [SECRET] shared secret for server↔agent auth (auto-generated)
|
WOODPECKER_AGENT_SECRET= # [SECRET] shared secret for server↔agent auth (auto-generated)
|
||||||
|
# Woodpecker privileged-plugin allowlist — comma-separated image names
|
||||||
|
# Add plugins/docker (and others) here to allow privileged execution
|
||||||
|
WOODPECKER_PLUGINS_PRIVILEGED=plugins/docker
|
||||||
|
|
||||||
# WOODPECKER_REPO_ID — now per-project, set in projects/*.toml [ci] section
|
# WOODPECKER_REPO_ID — now per-project, set in projects/*.toml [ci] section
|
||||||
|
|
||||||
# Woodpecker Postgres (for direct DB queries)
|
# Woodpecker Postgres (for direct DB queries)
|
||||||
|
|
@ -73,27 +79,46 @@ WOODPECKER_DB_NAME=woodpecker # [CONFIG] Postgres database name
|
||||||
CHAT_OAUTH_CLIENT_ID= # [SECRET] Chat OAuth2 client ID (auto-generated by init)
|
CHAT_OAUTH_CLIENT_ID= # [SECRET] Chat OAuth2 client ID (auto-generated by init)
|
||||||
CHAT_OAUTH_CLIENT_SECRET= # [SECRET] Chat OAuth2 client secret (auto-generated by init)
|
CHAT_OAUTH_CLIENT_SECRET= # [SECRET] Chat OAuth2 client secret (auto-generated by init)
|
||||||
DISINTO_CHAT_ALLOWED_USERS= # [CONFIG] CSV of allowed usernames (disinto-admin always allowed)
|
DISINTO_CHAT_ALLOWED_USERS= # [CONFIG] CSV of allowed usernames (disinto-admin always allowed)
|
||||||
|
FORWARD_AUTH_SECRET= # [SECRET] Shared secret for Caddy ↔ chat forward_auth (#709)
|
||||||
|
|
||||||
# ── Vault-only secrets (DO NOT put these in .env) ────────────────────────
|
# ── Vault-only secrets (DO NOT put these in .env) ────────────────────────
|
||||||
# These tokens grant access to external systems (GitHub, ClawHub, deploy targets).
|
# These tokens grant access to external systems (GitHub, ClawHub, deploy targets).
|
||||||
# They live ONLY in .env.vault.enc and are injected into the ephemeral runner
|
# They live ONLY in secrets/<NAME>.enc (age-encrypted, one file per key) and are
|
||||||
# container at fire time (#745). lib/env.sh explicitly unsets them so agents
|
# decrypted into the ephemeral runner container at fire time (#745, #777).
|
||||||
# can never hold them directly — all external actions go through vault dispatch.
|
# lib/env.sh explicitly unsets them so agents can never hold them directly —
|
||||||
|
# all external actions go through vault dispatch.
|
||||||
#
|
#
|
||||||
# GITHUB_TOKEN — GitHub API access (publish, deploy, post)
|
# GITHUB_TOKEN — GitHub API access (publish, deploy, post)
|
||||||
# CLAWHUB_TOKEN — ClawHub registry credentials (publish)
|
# CLAWHUB_TOKEN — ClawHub registry credentials (publish)
|
||||||
|
# CADDY_SSH_KEY — SSH key for Caddy log collection
|
||||||
# (deploy keys) — SSH keys for deployment targets
|
# (deploy keys) — SSH keys for deployment targets
|
||||||
#
|
#
|
||||||
# To manage vault secrets: disinto secrets edit-vault
|
# To manage secrets: disinto secrets add/show/remove/list
|
||||||
# (vault redesign in progress: PR-based approval, see #73-#77)
|
|
||||||
|
|
||||||
# ── Project-specific secrets ──────────────────────────────────────────────
|
# ── Project-specific secrets ──────────────────────────────────────────────
|
||||||
# Store all project secrets here so formulas reference env vars, never hardcode.
|
# Store all project secrets here so formulas reference env vars, never hardcode.
|
||||||
BASE_RPC_URL= # [SECRET] on-chain RPC endpoint
|
BASE_RPC_URL= # [SECRET] on-chain RPC endpoint
|
||||||
|
|
||||||
|
# ── Local Qwen dev agent (optional) ──────────────────────────────────────
|
||||||
|
# Set ENABLE_LLAMA_AGENT=1 to emit agents-llama in docker-compose.yml.
|
||||||
|
# Requires a running llama-server reachable at ANTHROPIC_BASE_URL.
|
||||||
|
# See docs/agents-llama.md for details.
|
||||||
|
ENABLE_LLAMA_AGENT=0 # [CONFIG] 1 = enable agents-llama service
|
||||||
|
ANTHROPIC_BASE_URL= # [CONFIG] e.g. http://host.docker.internal:8081
|
||||||
|
|
||||||
# ── Tuning ────────────────────────────────────────────────────────────────
|
# ── Tuning ────────────────────────────────────────────────────────────────
|
||||||
CLAUDE_TIMEOUT=7200 # [CONFIG] max seconds per Claude invocation
|
CLAUDE_TIMEOUT=7200 # [CONFIG] max seconds per Claude invocation
|
||||||
|
|
||||||
|
# ── Host paths (Nomad-portable) ────────────────────────────────────────────
|
||||||
|
# These env vars externalize host-side bind-mount paths from docker-compose.yml.
|
||||||
|
# At cutover, Nomad jobspecs reference the same vars — no path translation.
|
||||||
|
# Defaults point at current paths so an empty .env override still works.
|
||||||
|
CLAUDE_BIN_DIR=/usr/local/bin/claude # [CONFIG] host path to claude CLI binary (resolved by `disinto init`)
|
||||||
|
CLAUDE_CONFIG_FILE=${HOME}/.claude.json # [CONFIG] host path to claude config JSON file
|
||||||
|
CLAUDE_DIR=${HOME}/.claude # [CONFIG] host path to .claude directory (reproduce/edge)
|
||||||
|
AGENT_SSH_DIR=${HOME}/.ssh # [CONFIG] host path to SSH keys directory
|
||||||
|
SOPS_AGE_DIR=${HOME}/.config/sops/age # [CONFIG] host path to SOPS age key directory
|
||||||
|
|
||||||
# ── Claude Code shared OAuth state ─────────────────────────────────────────
|
# ── Claude Code shared OAuth state ─────────────────────────────────────────
|
||||||
# Shared directory used by every factory container so Claude Code's internal
|
# Shared directory used by every factory container so Claude Code's internal
|
||||||
# proper-lockfile-based OAuth refresh lock works across containers. Both
|
# proper-lockfile-based OAuth refresh lock works across containers. Both
|
||||||
|
|
|
||||||
4
.gitignore
vendored
4
.gitignore
vendored
|
|
@ -3,7 +3,6 @@
|
||||||
|
|
||||||
# Encrypted secrets — safe to commit (SOPS-encrypted with age)
|
# Encrypted secrets — safe to commit (SOPS-encrypted with age)
|
||||||
!.env.enc
|
!.env.enc
|
||||||
!.env.vault.enc
|
|
||||||
!.sops.yaml
|
!.sops.yaml
|
||||||
|
|
||||||
# Per-box project config (generated by disinto init)
|
# Per-box project config (generated by disinto init)
|
||||||
|
|
@ -33,6 +32,9 @@ docker/agents/bin/
|
||||||
# Note: This file is now committed to track volume mount configuration
|
# Note: This file is now committed to track volume mount configuration
|
||||||
# docker-compose.yml
|
# docker-compose.yml
|
||||||
|
|
||||||
|
# Generated Caddyfile — single source of truth is generate_caddyfile in lib/generators.sh
|
||||||
|
docker/Caddyfile
|
||||||
|
|
||||||
# Python bytecode
|
# Python bytecode
|
||||||
__pycache__/
|
__pycache__/
|
||||||
*.pyc
|
*.pyc
|
||||||
|
|
|
||||||
|
|
@ -98,50 +98,38 @@ echo "syntax check done"
|
||||||
|
|
||||||
echo "=== 2/2 Function resolution ==="
|
echo "=== 2/2 Function resolution ==="
|
||||||
|
|
||||||
# Required lib files for LIB_FUNS construction. Missing any of these means the
|
# Enumerate ALL lib/*.sh files in stable lexicographic order (#742).
|
||||||
# checkout is incomplete or the test is misconfigured — fail loudly, do NOT
|
# Previous approach used a hand-maintained REQUIRED_LIBS list, which silently
|
||||||
# silently produce a partial LIB_FUNS list (that masquerades as "undef" errors
|
# became incomplete as new libs were added, producing partial LIB_FUNS that
|
||||||
# in unrelated scripts; see #600).
|
# caused non-deterministic "undef" failures.
|
||||||
REQUIRED_LIBS=(
|
|
||||||
lib/agent-sdk.sh lib/env.sh lib/ci-helpers.sh lib/load-project.sh
|
|
||||||
lib/secret-scan.sh lib/formula-session.sh lib/mirrors.sh lib/guard.sh
|
|
||||||
lib/pr-lifecycle.sh lib/issue-lifecycle.sh lib/worktree.sh
|
|
||||||
)
|
|
||||||
|
|
||||||
for f in "${REQUIRED_LIBS[@]}"; do
|
|
||||||
if [ ! -f "$f" ]; then
|
|
||||||
printf 'FAIL [missing-lib] expected %s but it is not present at smoke time\n' "$f" >&2
|
|
||||||
printf ' pwd=%s\n' "$(pwd)" >&2
|
|
||||||
printf ' ls lib/=%s\n' "$(ls lib/ 2>&1 | tr '\n' ' ')" >&2
|
|
||||||
echo '=== SMOKE TEST FAILED (precondition) ===' >&2
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Functions provided by shared lib files (available to all agent scripts via source).
|
|
||||||
#
|
#
|
||||||
# Included — these are inline-sourced by agent scripts:
|
# Excluded from LIB_FUNS (not sourced inline by agents):
|
||||||
# lib/env.sh — sourced by every agent (log, forge_api, etc.)
|
|
||||||
# lib/agent-sdk.sh — sourced by SDK agents (agent_run, agent_recover_session)
|
|
||||||
# lib/ci-helpers.sh — sourced by pollers and review (ci_passed, classify_pipeline_failure, etc.)
|
|
||||||
# lib/load-project.sh — sourced by env.sh when PROJECT_TOML is set
|
|
||||||
# lib/secret-scan.sh — standalone CLI tool, run directly (not sourced)
|
|
||||||
# lib/formula-session.sh — sourced by formula-driven agents (acquire_run_lock, check_memory, etc.)
|
|
||||||
# lib/mirrors.sh — sourced by merge sites (mirror_push)
|
|
||||||
# lib/guard.sh — sourced by all polling-loop entry points (check_active)
|
|
||||||
# lib/issue-lifecycle.sh — sourced by agents for issue claim/release/block/deps
|
|
||||||
# lib/worktree.sh — sourced by agents for worktree create/recover/cleanup/preserve
|
|
||||||
#
|
|
||||||
# Excluded — not sourced inline by agents:
|
|
||||||
# lib/tea-helpers.sh — sourced conditionally by env.sh (tea_file_issue, etc.); checked standalone below
|
|
||||||
# lib/ci-debug.sh — standalone CLI tool, run directly (not sourced)
|
# lib/ci-debug.sh — standalone CLI tool, run directly (not sourced)
|
||||||
# lib/parse-deps.sh — executed via `bash lib/parse-deps.sh` (not sourced)
|
# lib/parse-deps.sh — executed via `bash lib/parse-deps.sh` (not sourced)
|
||||||
# lib/hooks/*.sh — Claude Code hook scripts, executed by the harness (not sourced)
|
# lib/hooks/*.sh — Claude Code hook scripts, executed by the harness (not sourced)
|
||||||
#
|
EXCLUDED_LIBS="lib/ci-debug.sh lib/parse-deps.sh"
|
||||||
# If a new lib file is added and sourced by agents, add it to LIB_FUNS below
|
|
||||||
# and add a check_script call for it in the lib files section further down.
|
# Build the list of lib files in deterministic order (LC_ALL=C sort).
|
||||||
|
# Fail loudly if no lib files are found — checkout is broken.
|
||||||
|
mapfile -t ALL_LIBS < <(LC_ALL=C find lib -maxdepth 1 -name '*.sh' -print | LC_ALL=C sort)
|
||||||
|
if [ "${#ALL_LIBS[@]}" -eq 0 ]; then
|
||||||
|
echo 'FAIL [no-libs] no lib/*.sh files found at smoke time' >&2
|
||||||
|
printf ' pwd=%s\n' "$(pwd)" >&2
|
||||||
|
echo '=== SMOKE TEST FAILED (precondition) ===' >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Build LIB_FUNS from all non-excluded lib files.
|
||||||
|
# Use set -e inside the subshell so a failed get_fns aborts loudly
|
||||||
|
# instead of silently shrinking the function list.
|
||||||
LIB_FUNS=$(
|
LIB_FUNS=$(
|
||||||
for f in "${REQUIRED_LIBS[@]}"; do get_fns "$f"; done | sort -u
|
set -e
|
||||||
|
for f in "${ALL_LIBS[@]}"; do
|
||||||
|
# shellcheck disable=SC2086
|
||||||
|
skip=0; for ex in $EXCLUDED_LIBS; do [ "$f" = "$ex" ] && skip=1; done
|
||||||
|
[ "$skip" -eq 1 ] && continue
|
||||||
|
get_fns "$f"
|
||||||
|
done | sort -u
|
||||||
)
|
)
|
||||||
|
|
||||||
# Known external commands and shell builtins — never flag these
|
# Known external commands and shell builtins — never flag these
|
||||||
|
|
@ -192,13 +180,14 @@ check_script() {
|
||||||
while IFS= read -r fn; do
|
while IFS= read -r fn; do
|
||||||
[ -z "$fn" ] && continue
|
[ -z "$fn" ] && continue
|
||||||
is_known_cmd "$fn" && continue
|
is_known_cmd "$fn" && continue
|
||||||
if ! printf '%s\n' "$all_fns" | grep -qxF "$fn"; then
|
# Use here-string (<<<) instead of pipe to avoid SIGPIPE race (#742):
|
||||||
|
# with pipefail, `printf | grep -q` can fail when grep closes the pipe
|
||||||
|
# early after finding a match, causing printf to get SIGPIPE (exit 141).
|
||||||
|
# This produced non-deterministic false "undef" failures.
|
||||||
|
if ! grep -qxF "$fn" <<< "$all_fns"; then
|
||||||
printf 'FAIL [undef] %s: %s\n' "$script" "$fn"
|
printf 'FAIL [undef] %s: %s\n' "$script" "$fn"
|
||||||
# Diagnostic dump (#600): if the function is expected to be in a known lib,
|
printf ' all_fns count: %d\n' "$(grep -c . <<< "$all_fns")"
|
||||||
# print what the actual all_fns set looks like so we can tell whether the
|
printf ' LIB_FUNS contains "%s": %s\n' "$fn" "$(grep -cxF "$fn" <<< "$LIB_FUNS")"
|
||||||
# function is genuinely missing or whether the resolution loop is broken.
|
|
||||||
printf ' all_fns count: %d\n' "$(printf '%s\n' "$all_fns" | wc -l)"
|
|
||||||
printf ' LIB_FUNS contains "%s": %s\n' "$fn" "$(printf '%s\n' "$LIB_FUNS" | grep -cxF "$fn")"
|
|
||||||
printf ' defining lib (if any): %s\n' "$(grep -l "^[[:space:]]*${fn}[[:space:]]*()" lib/*.sh 2>/dev/null | tr '\n' ' ')"
|
printf ' defining lib (if any): %s\n' "$(grep -l "^[[:space:]]*${fn}[[:space:]]*()" lib/*.sh 2>/dev/null | tr '\n' ' ')"
|
||||||
FAILED=1
|
FAILED=1
|
||||||
fi
|
fi
|
||||||
|
|
@ -224,6 +213,7 @@ check_script lib/issue-lifecycle.sh lib/secret-scan.sh
|
||||||
# Still checked for function resolution against LIB_FUNS + own definitions.
|
# Still checked for function resolution against LIB_FUNS + own definitions.
|
||||||
check_script lib/ci-debug.sh
|
check_script lib/ci-debug.sh
|
||||||
check_script lib/parse-deps.sh
|
check_script lib/parse-deps.sh
|
||||||
|
check_script lib/sprint-filer.sh
|
||||||
|
|
||||||
# Agent scripts — list cross-sourced files where function scope flows across files.
|
# Agent scripts — list cross-sourced files where function scope flows across files.
|
||||||
check_script dev/dev-agent.sh
|
check_script dev/dev-agent.sh
|
||||||
|
|
|
||||||
|
|
@ -292,6 +292,8 @@ def main() -> int:
|
||||||
"21aec56a99d5252b23fb9a38b895e8e8": "Verification helper: check body for Decomposed from pattern",
|
"21aec56a99d5252b23fb9a38b895e8e8": "Verification helper: check body for Decomposed from pattern",
|
||||||
"60ea98b3604557d539193b2a6624e232": "Verification helper: append sub-issue number",
|
"60ea98b3604557d539193b2a6624e232": "Verification helper: append sub-issue number",
|
||||||
"9f6ae8e7811575b964279d8820494eb0": "Verification helper: for loop done pattern",
|
"9f6ae8e7811575b964279d8820494eb0": "Verification helper: for loop done pattern",
|
||||||
|
# Standard lib source block shared across formula-driven agent run scripts
|
||||||
|
"330e5809a00b95ade1a5fce2d749b94b": "Standard lib source block (env.sh, formula-session.sh, worktree.sh, guard.sh, agent-sdk.sh)",
|
||||||
}
|
}
|
||||||
|
|
||||||
if not sh_files:
|
if not sh_files:
|
||||||
|
|
|
||||||
64
.woodpecker/publish-images.yml
Normal file
64
.woodpecker/publish-images.yml
Normal file
|
|
@ -0,0 +1,64 @@
|
||||||
|
# .woodpecker/publish-images.yml — Build and push versioned container images
|
||||||
|
# Triggered on tag pushes (e.g. v1.2.3). Builds and pushes:
|
||||||
|
# - ghcr.io/disinto/agents:<tag>
|
||||||
|
# - ghcr.io/disinto/reproduce:<tag>
|
||||||
|
# - ghcr.io/disinto/edge:<tag>
|
||||||
|
#
|
||||||
|
# Requires GHCR_TOKEN secret configured in Woodpecker with push access
|
||||||
|
# to ghcr.io/disinto.
|
||||||
|
|
||||||
|
when:
|
||||||
|
event: tag
|
||||||
|
ref: refs/tags/v*
|
||||||
|
|
||||||
|
clone:
|
||||||
|
git:
|
||||||
|
image: alpine/git
|
||||||
|
commands:
|
||||||
|
- AUTH_URL=$(printf '%s' "$CI_REPO_CLONE_URL" | sed "s|://|://token:$FORGE_TOKEN@|")
|
||||||
|
- git clone --depth 1 "$AUTH_URL" .
|
||||||
|
- git fetch --depth 1 origin "$CI_COMMIT_REF"
|
||||||
|
- git checkout FETCH_HEAD
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: build-and-push-agents
|
||||||
|
image: plugins/docker
|
||||||
|
settings:
|
||||||
|
repo: ghcr.io/disinto/agents
|
||||||
|
registry: ghcr.io
|
||||||
|
dockerfile: docker/agents/Dockerfile
|
||||||
|
context: .
|
||||||
|
tags:
|
||||||
|
- ${CI_COMMIT_TAG}
|
||||||
|
- latest
|
||||||
|
username: disinto
|
||||||
|
password:
|
||||||
|
from_secret: GHCR_TOKEN
|
||||||
|
|
||||||
|
- name: build-and-push-reproduce
|
||||||
|
image: plugins/docker
|
||||||
|
settings:
|
||||||
|
repo: ghcr.io/disinto/reproduce
|
||||||
|
registry: ghcr.io
|
||||||
|
dockerfile: docker/reproduce/Dockerfile
|
||||||
|
context: .
|
||||||
|
tags:
|
||||||
|
- ${CI_COMMIT_TAG}
|
||||||
|
- latest
|
||||||
|
username: disinto
|
||||||
|
password:
|
||||||
|
from_secret: GHCR_TOKEN
|
||||||
|
|
||||||
|
- name: build-and-push-edge
|
||||||
|
image: plugins/docker
|
||||||
|
settings:
|
||||||
|
repo: ghcr.io/disinto/edge
|
||||||
|
registry: ghcr.io
|
||||||
|
dockerfile: docker/edge/Dockerfile
|
||||||
|
context: docker/edge
|
||||||
|
tags:
|
||||||
|
- ${CI_COMMIT_TAG}
|
||||||
|
- latest
|
||||||
|
username: disinto
|
||||||
|
password:
|
||||||
|
from_secret: GHCR_TOKEN
|
||||||
68
.woodpecker/run-secret-scan.sh
Normal file
68
.woodpecker/run-secret-scan.sh
Normal file
|
|
@ -0,0 +1,68 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
# run-secret-scan.sh — CI wrapper for lib/secret-scan.sh
|
||||||
|
#
|
||||||
|
# Scans files changed in this PR for plaintext secrets.
|
||||||
|
# Exits non-zero if any secret is detected.
|
||||||
|
|
||||||
|
# shellcheck source=../lib/secret-scan.sh
|
||||||
|
source lib/secret-scan.sh
|
||||||
|
|
||||||
|
# Path patterns considered secret-adjacent
|
||||||
|
SECRET_PATH_PATTERNS=(
|
||||||
|
'\.env'
|
||||||
|
'tools/vault-.*\.sh'
|
||||||
|
'nomad/'
|
||||||
|
'vault/'
|
||||||
|
'action-vault/'
|
||||||
|
'lib/hvault\.sh'
|
||||||
|
'lib/action-vault\.sh'
|
||||||
|
)
|
||||||
|
|
||||||
|
# Build a single regex from patterns
|
||||||
|
path_regex=$(printf '%s|' "${SECRET_PATH_PATTERNS[@]}")
|
||||||
|
path_regex="${path_regex%|}"
|
||||||
|
|
||||||
|
# Get files changed in this PR vs target branch.
|
||||||
|
# Note: shallow clone (depth 50) may lack the merge base for very large PRs,
|
||||||
|
# causing git diff to fail — || true means the gate skips rather than blocks.
|
||||||
|
changed_files=$(git diff --name-only --diff-filter=ACMR "origin/${CI_COMMIT_TARGET_BRANCH}...HEAD" || true)
|
||||||
|
|
||||||
|
if [ -z "$changed_files" ]; then
|
||||||
|
echo "secret-scan: no changed files found, skipping"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Filter to secret-adjacent paths only
|
||||||
|
target_files=$(printf '%s\n' "$changed_files" | grep -E "$path_regex" || true)
|
||||||
|
|
||||||
|
if [ -z "$target_files" ]; then
|
||||||
|
echo "secret-scan: no secret-adjacent files changed, skipping"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "secret-scan: scanning $(printf '%s\n' "$target_files" | wc -l) file(s):"
|
||||||
|
printf ' %s\n' "$target_files"
|
||||||
|
|
||||||
|
failures=0
|
||||||
|
while IFS= read -r file; do
|
||||||
|
# Skip deleted files / non-existent
|
||||||
|
[ -f "$file" ] || continue
|
||||||
|
# Skip binary files
|
||||||
|
file -b --mime-encoding "$file" 2>/dev/null | grep -q binary && continue
|
||||||
|
|
||||||
|
content=$(cat "$file")
|
||||||
|
if ! scan_for_secrets "$content"; then
|
||||||
|
echo "FAIL: secret detected in $file"
|
||||||
|
failures=$((failures + 1))
|
||||||
|
fi
|
||||||
|
done <<< "$target_files"
|
||||||
|
|
||||||
|
if [ "$failures" -gt 0 ]; then
|
||||||
|
echo ""
|
||||||
|
echo "secret-scan: $failures file(s) contain potential secrets — merge blocked"
|
||||||
|
echo "If these are false positives, verify patterns in lib/secret-scan.sh"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "secret-scan: all files clean"
|
||||||
32
.woodpecker/secret-scan.yml
Normal file
32
.woodpecker/secret-scan.yml
Normal file
|
|
@ -0,0 +1,32 @@
|
||||||
|
# .woodpecker/secret-scan.yml — Block PRs that leak plaintext secrets
|
||||||
|
#
|
||||||
|
# Triggers on pull requests touching secret-adjacent paths.
|
||||||
|
# Sources lib/secret-scan.sh and scans each changed file's content.
|
||||||
|
# Exits non-zero if any potential secret is detected.
|
||||||
|
|
||||||
|
when:
|
||||||
|
- event: pull_request
|
||||||
|
path:
|
||||||
|
- ".env*"
|
||||||
|
- "tools/vault-*.sh"
|
||||||
|
- "nomad/**/*"
|
||||||
|
- "vault/**/*"
|
||||||
|
- "action-vault/**/*"
|
||||||
|
- "lib/hvault.sh"
|
||||||
|
- "lib/action-vault.sh"
|
||||||
|
|
||||||
|
clone:
|
||||||
|
git:
|
||||||
|
image: alpine/git
|
||||||
|
commands:
|
||||||
|
- AUTH_URL=$(printf '%s' "$CI_REPO_CLONE_URL" | sed "s|://|://token:$FORGE_TOKEN@|")
|
||||||
|
- git clone --depth 50 "$AUTH_URL" .
|
||||||
|
- git fetch --depth 50 origin "$CI_COMMIT_REF" "$CI_COMMIT_TARGET_BRANCH"
|
||||||
|
- git checkout FETCH_HEAD
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: secret-scan
|
||||||
|
image: alpine:3
|
||||||
|
commands:
|
||||||
|
- apk add --no-cache bash git grep file
|
||||||
|
- bash .woodpecker/run-secret-scan.sh
|
||||||
28
AGENTS.md
28
AGENTS.md
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Disinto — Agent Instructions
|
# Disinto — Agent Instructions
|
||||||
|
|
||||||
## What this repo is
|
## What this repo is
|
||||||
|
|
@ -31,19 +31,19 @@ disinto/ (code repo)
|
||||||
├── supervisor/ supervisor-run.sh — formula-driven health monitoring (polling-loop executor)
|
├── supervisor/ supervisor-run.sh — formula-driven health monitoring (polling-loop executor)
|
||||||
│ preflight.sh — pre-flight data collection for supervisor formula
|
│ preflight.sh — pre-flight data collection for supervisor formula
|
||||||
├── architect/ architect-run.sh — strategic decomposition of vision into sprints
|
├── architect/ architect-run.sh — strategic decomposition of vision into sprints
|
||||||
├── vault/ vault-env.sh — shared env setup (vault redesign in progress, see #73-#77)
|
├── action-vault/ vault-env.sh — shared env setup (vault redesign in progress, see #73-#77)
|
||||||
│ SCHEMA.md — vault item schema documentation
|
│ SCHEMA.md — vault item schema documentation
|
||||||
│ validate.sh — vault item validator
|
│ validate.sh — vault item validator
|
||||||
│ examples/ — example vault action TOMLs (promote, publish, release, webhook-call)
|
│ examples/ — example vault action TOMLs (promote, publish, release, webhook-call)
|
||||||
├── lib/ env.sh, agent-sdk.sh, ci-helpers.sh, ci-debug.sh, load-project.sh, parse-deps.sh, guard.sh, mirrors.sh, pr-lifecycle.sh, issue-lifecycle.sh, worktree.sh, formula-session.sh, stack-lock.sh, forge-setup.sh, forge-push.sh, ops-setup.sh, ci-setup.sh, generators.sh, hire-agent.sh, release.sh, build-graph.py,
|
├── lib/ env.sh, agent-sdk.sh, ci-helpers.sh, ci-debug.sh, load-project.sh, parse-deps.sh, guard.sh, mirrors.sh, pr-lifecycle.sh, issue-lifecycle.sh, worktree.sh, formula-session.sh, stack-lock.sh, forge-setup.sh, forge-push.sh, ops-setup.sh, ci-setup.sh, generators.sh, hire-agent.sh, release.sh, build-graph.py, branch-protection.sh, secret-scan.sh, tea-helpers.sh, action-vault.sh, ci-log-reader.py, git-creds.sh, sprint-filer.sh, hvault.sh
|
||||||
│ branch-protection.sh, secret-scan.sh, tea-helpers.sh, vault.sh, ci-log-reader.py, git-creds.sh
|
|
||||||
│ hooks/ — Claude Code session hooks (on-compact-reinject, on-idle-stop, on-phase-change, on-pretooluse-guard, on-session-end, on-stop-failure)
|
│ hooks/ — Claude Code session hooks (on-compact-reinject, on-idle-stop, on-phase-change, on-pretooluse-guard, on-session-end, on-stop-failure)
|
||||||
├── projects/ *.toml.example — templates; *.toml — local per-box config (gitignored)
|
├── projects/ *.toml.example — templates; *.toml — local per-box config (gitignored)
|
||||||
├── formulas/ Issue templates (TOML specs for multi-step agent tasks)
|
├── formulas/ Issue templates (TOML specs for multi-step agent tasks)
|
||||||
├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge dispatcher, chat (server.py, Dockerfile, ui/)
|
├── docker/ Dockerfiles and entrypoints: reproduce, triage, edge dispatcher, chat (server.py, entrypoint-chat.sh, Dockerfile, ui/)
|
||||||
|
├── tools/ Operational tools: edge-control/ (register.sh, install.sh, verify-chat-sandbox.sh)
|
||||||
├── docs/ Protocol docs (PHASE-PROTOCOL.md, EVIDENCE-ARCHITECTURE.md)
|
├── docs/ Protocol docs (PHASE-PROTOCOL.md, EVIDENCE-ARCHITECTURE.md)
|
||||||
├── site/ disinto.ai website content
|
├── site/ disinto.ai website content
|
||||||
├── tests/ Test files (mock-forgejo.py, smoke-init.sh)
|
├── tests/ Test files (mock-forgejo.py, smoke-init.sh, lib-hvault.bats)
|
||||||
├── templates/ Issue templates
|
├── templates/ Issue templates
|
||||||
├── bin/ The `disinto` CLI script
|
├── bin/ The `disinto` CLI script
|
||||||
├── disinto-factory/ Setup documentation and skill
|
├── disinto-factory/ Setup documentation and skill
|
||||||
|
|
@ -86,7 +86,7 @@ Each agent has a `.profile` repository on Forgejo storing `knowledge/lessons-lea
|
||||||
- All scripts start with `#!/usr/bin/env bash` and `set -euo pipefail`
|
- All scripts start with `#!/usr/bin/env bash` and `set -euo pipefail`
|
||||||
- Source shared environment: `source "$(dirname "$0")/../lib/env.sh"`
|
- Source shared environment: `source "$(dirname "$0")/../lib/env.sh"`
|
||||||
- Log to `$LOGFILE` using the `log()` function from env.sh or defined locally
|
- Log to `$LOGFILE` using the `log()` function from env.sh or defined locally
|
||||||
- Never hardcode secrets — agent secrets come from `.env.enc`, vault secrets from `.env.vault.enc` (or `.env`/`.env.vault` fallback)
|
- Never hardcode secrets — agent secrets come from `.env.enc`, vault secrets from `secrets/<NAME>.enc` (age-encrypted, one file per key)
|
||||||
- Never embed secrets in issue bodies, PR descriptions, or comments — use env var references (e.g. `$BASE_RPC_URL`)
|
- Never embed secrets in issue bodies, PR descriptions, or comments — use env var references (e.g. `$BASE_RPC_URL`)
|
||||||
- ShellCheck must pass (CI runs `shellcheck` on all `.sh` files)
|
- ShellCheck must pass (CI runs `shellcheck` on all `.sh` files)
|
||||||
- Avoid duplicate code — shared helpers go in `lib/`
|
- Avoid duplicate code — shared helpers go in `lib/`
|
||||||
|
|
@ -113,10 +113,13 @@ bash dev/phase-test.sh
|
||||||
| Supervisor | `supervisor/` | Health monitoring | [supervisor/AGENTS.md](supervisor/AGENTS.md) |
|
| Supervisor | `supervisor/` | Health monitoring | [supervisor/AGENTS.md](supervisor/AGENTS.md) |
|
||||||
| Planner | `planner/` | Strategic planning | [planner/AGENTS.md](planner/AGENTS.md) |
|
| Planner | `planner/` | Strategic planning | [planner/AGENTS.md](planner/AGENTS.md) |
|
||||||
| Predictor | `predictor/` | Infrastructure pattern detection | [predictor/AGENTS.md](predictor/AGENTS.md) |
|
| Predictor | `predictor/` | Infrastructure pattern detection | [predictor/AGENTS.md](predictor/AGENTS.md) |
|
||||||
| Architect | `architect/` | Strategic decomposition | [architect/AGENTS.md](architect/AGENTS.md) |
|
| Architect | `architect/` | Strategic decomposition (read-only on project repo) | [architect/AGENTS.md](architect/AGENTS.md) |
|
||||||
|
| Filer | `lib/sprint-filer.sh` | Sub-issue filing from merged sprint PRs | ops repo pipeline (deferred, see #779) |
|
||||||
| Reproduce | `docker/reproduce/` | Bug reproduction using Playwright MCP | `formulas/reproduce.toml` |
|
| Reproduce | `docker/reproduce/` | Bug reproduction using Playwright MCP | `formulas/reproduce.toml` |
|
||||||
| Triage | `docker/reproduce/` | Deep root cause analysis | `formulas/triage.toml` |
|
| Triage | `docker/reproduce/` | Deep root cause analysis | `formulas/triage.toml` |
|
||||||
| Edge dispatcher | `docker/edge/` | Polls ops repo for vault actions, executes via Claude sessions | `docker/edge/dispatcher.sh` |
|
| Edge dispatcher | `docker/edge/` | Polls ops repo for vault actions, executes via Claude sessions | `docker/edge/dispatcher.sh` |
|
||||||
|
| agents-llama | `docker/agents/` (same image) | Local-Qwen dev agent (`AGENT_ROLES=dev`), gated on `ENABLE_LLAMA_AGENT=1` | [docs/agents-llama.md](docs/agents-llama.md) |
|
||||||
|
| agents-llama-all | `docker/agents/` (same image) | Local-Qwen all-roles agent (all 7 roles), profile `agents-llama-all` | [docs/agents-llama.md](docs/agents-llama.md) |
|
||||||
|
|
||||||
> **Vault:** Being redesigned as a PR-based approval workflow (issues #73-#77).
|
> **Vault:** Being redesigned as a PR-based approval workflow (issues #73-#77).
|
||||||
> See [docs/VAULT.md](docs/VAULT.md) for the vault PR workflow details.
|
> See [docs/VAULT.md](docs/VAULT.md) for the vault PR workflow details.
|
||||||
|
|
@ -135,7 +138,7 @@ Issues flow: `backlog` → `in-progress` → PR → CI → review → merge →
|
||||||
|---|---|---|
|
|---|---|---|
|
||||||
| `backlog` | Issue is queued for implementation. Dev-poll picks the first ready one. | Planner, gardener, humans |
|
| `backlog` | Issue is queued for implementation. Dev-poll picks the first ready one. | Planner, gardener, humans |
|
||||||
| `priority` | Queue tier above plain backlog. Issues with both `priority` and `backlog` are picked before plain `backlog` issues. FIFO within each tier. | Planner, humans |
|
| `priority` | Queue tier above plain backlog. Issues with both `priority` and `backlog` are picked before plain `backlog` issues. FIFO within each tier. | Planner, humans |
|
||||||
| `in-progress` | Dev-agent is actively working on this issue. Only one issue per project is in-progress at a time. | dev-agent.sh (claims issue) |
|
| `in-progress` | Dev-agent is actively working on this issue. Only one issue per project is in-progress at a time. Also set on vision issues by filer-bot when sub-issues are filed (#764). | dev-agent.sh (claims issue), filer-bot (vision issues) |
|
||||||
| `blocked` | Issue is stuck — agent session failed, crashed, timed out, or CI exhausted. Diagnostic comment on the issue has details. Also used for unmet dependencies. | dev-agent.sh, dev-poll.sh (on failure) |
|
| `blocked` | Issue is stuck — agent session failed, crashed, timed out, or CI exhausted. Diagnostic comment on the issue has details. Also used for unmet dependencies. | dev-agent.sh, dev-poll.sh (on failure) |
|
||||||
| `tech-debt` | Pre-existing issue flagged by AI reviewer, not introduced by a PR. | review-pr.sh (auto-created follow-ups) |
|
| `tech-debt` | Pre-existing issue flagged by AI reviewer, not introduced by a PR. | review-pr.sh (auto-created follow-ups) |
|
||||||
| `underspecified` | Dev-agent refused the issue as too large or vague. | dev-poll.sh (on preflight `too_large`), dev-agent.sh (on mid-run `too_large` refusal) |
|
| `underspecified` | Dev-agent refused the issue as too large or vague. | dev-poll.sh (on preflight `too_large`), dev-agent.sh (on mid-run `too_large` refusal) |
|
||||||
|
|
@ -177,8 +180,8 @@ Humans write these. Agents read and enforce them.
|
||||||
| AD-002 | **Concurrency is bounded per LLM backend, not per project.** One concurrent Claude session per OAuth credential pool; one concurrent session per llama-server instance. Containers with disjoint backends may run in parallel. | The single-thread invariant is about *backends*, not pipelines. **(a) Anthropic OAuth credentials race on token refresh** — each container uses a per-session `CLAUDE_CONFIG_DIR`, so Claude Code's native lockfile-based OAuth refresh handles contention automatically without external serialization. (Legacy: set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old `flock session.lock` wrapper for rollback.) **(b) llama-server has finite VRAM and one KV cache** — parallel inference thrashes the cache and risks OOM. All llama-backed agents serialize on the same lock. **(c) Disjoint backends are free to parallelize.** Today `disinto-agents` (Anthropic OAuth, runs `review,gardener`) runs concurrently with `disinto-agents-llama` (llama, runs `dev`) on the same project — they share neither OAuth state nor llama VRAM. **(d) Per-project work-conflict safety** (no duplicate dev work, no merge conflicts on the same branch) is enforced by `issue_claim` (assignee + `in-progress` label) and per-issue worktrees — that's a separate guard that does NOT depend on this AD. |
|
| AD-002 | **Concurrency is bounded per LLM backend, not per project.** One concurrent Claude session per OAuth credential pool; one concurrent session per llama-server instance. Containers with disjoint backends may run in parallel. | The single-thread invariant is about *backends*, not pipelines. **(a) Anthropic OAuth credentials race on token refresh** — each container uses a per-session `CLAUDE_CONFIG_DIR`, so Claude Code's native lockfile-based OAuth refresh handles contention automatically without external serialization. (Legacy: set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old `flock session.lock` wrapper for rollback.) **(b) llama-server has finite VRAM and one KV cache** — parallel inference thrashes the cache and risks OOM. All llama-backed agents serialize on the same lock. **(c) Disjoint backends are free to parallelize.** Today `disinto-agents` (Anthropic OAuth, runs `review,gardener`) runs concurrently with `disinto-agents-llama` (llama, runs `dev`) on the same project — they share neither OAuth state nor llama VRAM. **(d) Per-project work-conflict safety** (no duplicate dev work, no merge conflicts on the same branch) is enforced by `issue_claim` (assignee + `in-progress` label) and per-issue worktrees — that's a separate guard that does NOT depend on this AD. |
|
||||||
| AD-003 | The runtime creates and destroys, the formula preserves. | Runtime manages worktrees/sessions/temp. Formulas commit knowledge to git before signaling done. |
|
| AD-003 | The runtime creates and destroys, the formula preserves. | Runtime manages worktrees/sessions/temp. Formulas commit knowledge to git before signaling done. |
|
||||||
| AD-004 | Event-driven > polling > fixed delays. | Never `waitForTimeout` or hardcoded sleep. Use phase files, webhooks, or poll loops with backoff. |
|
| AD-004 | Event-driven > polling > fixed delays. | Never `waitForTimeout` or hardcoded sleep. Use phase files, webhooks, or poll loops with backoff. |
|
||||||
| AD-005 | Secrets via env var indirection, never in issue bodies. | Issue bodies become code. Agent secrets go in `.env.enc`, vault secrets in `.env.vault.enc` (SOPS-encrypted when available; plaintext `.env`/`.env.vault` fallback supported). Referenced as `$VAR_NAME`. Runner gets only vault secrets; agents get only agent secrets. |
|
| AD-005 | Secrets via env var indirection, never in issue bodies. | Issue bodies become code. Agent secrets go in `.env.enc` (SOPS-encrypted), vault secrets in `secrets/<NAME>.enc` (age-encrypted, one file per key). Referenced as `$VAR_NAME`. Runner gets only vault secrets; agents get only agent secrets. |
|
||||||
| AD-006 | External actions go through vault dispatch, never direct. | Agents build addressables; only the vault exercises them (publishes, deploys, posts). Tokens for external systems (`GITHUB_TOKEN`, `CLAWHUB_TOKEN`, deploy keys) live only in `.env.vault.enc` and are injected into the ephemeral runner container. `lib/env.sh` unsets them so agents never hold them. PRs with direct external actions without vault dispatch get REQUEST_CHANGES. (Vault redesign in progress: PR-based approval on ops repo, see #73-#77) |
|
| AD-006 | External actions go through vault dispatch, never direct. | Agents build addressables; only the vault exercises them (publishes, deploys, posts). Tokens for external systems (`GITHUB_TOKEN`, `CLAWHUB_TOKEN`, deploy keys) live only in `secrets/<NAME>.enc` and are decrypted into the ephemeral runner container. `lib/env.sh` unsets them so agents never hold them. PRs with direct external actions without vault dispatch get REQUEST_CHANGES. (Vault redesign in progress: PR-based approval on ops repo, see #73-#77) |
|
||||||
|
|
||||||
**Who enforces what:**
|
**Who enforces what:**
|
||||||
- **Gardener** checks open backlog issues against ADs during grooming; closes violations with a comment referencing the AD number.
|
- **Gardener** checks open backlog issues against ADs during grooming; closes violations with a comment referencing the AD number.
|
||||||
|
|
@ -186,8 +189,6 @@ Humans write these. Agents read and enforce them.
|
||||||
- **Dev-agent** reads AGENTS.md before implementing; refuses work that violates ADs.
|
- **Dev-agent** reads AGENTS.md before implementing; refuses work that violates ADs.
|
||||||
- **AD-002 is a runtime invariant; nothing for the gardener to check at issue-groom time.** OAuth concurrency is handled by per-session `CLAUDE_CONFIG_DIR` isolation (with `CLAUDE_EXTERNAL_LOCK` as a rollback flag). Per-issue work is enforced by `issue_claim`. A violation manifests as a 401 or VRAM OOM in agent logs, not as a malformed issue.
|
- **AD-002 is a runtime invariant; nothing for the gardener to check at issue-groom time.** OAuth concurrency is handled by per-session `CLAUDE_CONFIG_DIR` isolation (with `CLAUDE_EXTERNAL_LOCK` as a rollback flag). Per-issue work is enforced by `issue_claim`. A violation manifests as a 401 or VRAM OOM in agent logs, not as a malformed issue.
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Phase-Signaling Protocol
|
## Phase-Signaling Protocol
|
||||||
|
|
||||||
When running as a persistent tmux session, Claude must signal the orchestrator
|
When running as a persistent tmux session, Claude must signal the orchestrator
|
||||||
|
|
@ -196,5 +197,4 @@ at each phase boundary by writing to a phase file (e.g.
|
||||||
|
|
||||||
Key phases: `PHASE:awaiting_ci` → `PHASE:awaiting_review` → `PHASE:done`.
|
Key phases: `PHASE:awaiting_ci` → `PHASE:awaiting_review` → `PHASE:done`.
|
||||||
Also: `PHASE:escalate` (needs human input), `PHASE:failed`.
|
Also: `PHASE:escalate` (needs human input), `PHASE:failed`.
|
||||||
|
|
||||||
See [docs/PHASE-PROTOCOL.md](docs/PHASE-PROTOCOL.md) for the complete spec, orchestrator reaction matrix, sequence diagram, and crash recovery.
|
See [docs/PHASE-PROTOCOL.md](docs/PHASE-PROTOCOL.md) for the complete spec, orchestrator reaction matrix, sequence diagram, and crash recovery.
|
||||||
|
|
|
||||||
|
|
@ -50,7 +50,7 @@ blast_radius = "low" # optional: overrides policy.toml tier ("low"|"medium
|
||||||
|
|
||||||
## Secret Names
|
## Secret Names
|
||||||
|
|
||||||
Secret names must be defined in `.env.vault.enc` on the ops repo. The vault validates that requested secrets exist in the allowlist before execution.
|
Secret names must have a corresponding `secrets/<NAME>.enc` file (age-encrypted). The vault validates that requested secrets exist in the allowlist before execution.
|
||||||
|
|
||||||
Common secret names:
|
Common secret names:
|
||||||
- `CLAWHUB_TOKEN` - Token for ClawHub skill publishing
|
- `CLAWHUB_TOKEN` - Token for ClawHub skill publishing
|
||||||
|
|
@ -28,7 +28,7 @@ fi
|
||||||
# VAULT ACTION VALIDATION
|
# VAULT ACTION VALIDATION
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
|
|
||||||
# Allowed secret names - must match keys in .env.vault.enc
|
# Allowed secret names - must match files in secrets/<NAME>.enc
|
||||||
VAULT_ALLOWED_SECRETS="CLAWHUB_TOKEN GITHUB_TOKEN CODEBERG_TOKEN DEPLOY_KEY NPM_TOKEN DOCKER_HUB_TOKEN"
|
VAULT_ALLOWED_SECRETS="CLAWHUB_TOKEN GITHUB_TOKEN CODEBERG_TOKEN DEPLOY_KEY NPM_TOKEN DOCKER_HUB_TOKEN"
|
||||||
|
|
||||||
# Allowed mount aliases — well-known file-based credential directories
|
# Allowed mount aliases — well-known file-based credential directories
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Architect — Agent Instructions
|
# Architect — Agent Instructions
|
||||||
|
|
||||||
## What this agent is
|
## What this agent is
|
||||||
|
|
@ -10,9 +10,9 @@ converses with humans through PR comments.
|
||||||
## Role
|
## Role
|
||||||
|
|
||||||
- **Input**: Vision issues from VISION.md, prerequisite tree from ops repo
|
- **Input**: Vision issues from VISION.md, prerequisite tree from ops repo
|
||||||
- **Output**: Sprint proposals as PRs on the ops repo, sub-issue files
|
- **Output**: Sprint proposals as PRs on the ops repo (with embedded `## Sub-issues` blocks)
|
||||||
- **Mechanism**: Bash-driven orchestration in `architect-run.sh`, pitching formula via `formulas/run-architect.toml`
|
- **Mechanism**: Bash-driven orchestration in `architect-run.sh`, pitching formula via `formulas/run-architect.toml`
|
||||||
- **Identity**: `architect-bot` on Forgejo
|
- **Identity**: `architect-bot` on Forgejo (READ-ONLY on project repo, write on ops repo only — #764)
|
||||||
|
|
||||||
## Responsibilities
|
## Responsibilities
|
||||||
|
|
||||||
|
|
@ -24,16 +24,17 @@ converses with humans through PR comments.
|
||||||
acceptance criteria and dependencies
|
acceptance criteria and dependencies
|
||||||
4. **Human conversation**: Respond to PR comments, refine sprint proposals based
|
4. **Human conversation**: Respond to PR comments, refine sprint proposals based
|
||||||
on human feedback
|
on human feedback
|
||||||
5. **Sub-issue filing**: After design forks are resolved, file concrete sub-issues
|
5. **Sub-issue definition**: Define concrete sub-issues in the `## Sub-issues`
|
||||||
for implementation
|
block of the sprint spec. Filing is handled by `filer-bot` after sprint PR
|
||||||
|
merge (#764)
|
||||||
|
|
||||||
## Formula
|
## Formula
|
||||||
|
|
||||||
The architect pitching is driven by `formulas/run-architect.toml`. This formula defines
|
The architect pitching is driven by `formulas/run-architect.toml`. This formula defines
|
||||||
the steps for:
|
the steps for:
|
||||||
- Research: analyzing vision items and prerequisite tree
|
- Research: analyzing vision items and prerequisite tree
|
||||||
- Pitch: creating structured sprint PRs
|
- Pitch: creating structured sprint PRs with embedded `## Sub-issues` blocks
|
||||||
- Sub-issue filing: creating concrete implementation issues
|
- Design Q&A: refining the sprint via PR comments after human ACCEPT
|
||||||
|
|
||||||
## Bash-driven orchestration
|
## Bash-driven orchestration
|
||||||
|
|
||||||
|
|
@ -57,21 +58,31 @@ APPROVED review → start design questions (model posts Q1:, adds Design forks s
|
||||||
↓
|
↓
|
||||||
Answers received → continue Q&A (model processes answers, posts follow-ups)
|
Answers received → continue Q&A (model processes answers, posts follow-ups)
|
||||||
↓
|
↓
|
||||||
All forks resolved → sub-issue filing (model files implementation issues)
|
All forks resolved → finalize ## Sub-issues section in sprint spec
|
||||||
|
↓
|
||||||
|
Sprint PR merged → filer-bot files sub-issues on project repo (#764)
|
||||||
↓
|
↓
|
||||||
REJECT review → close PR + journal (model processes rejection, bash merges PR)
|
REJECT review → close PR + journal (model processes rejection, bash merges PR)
|
||||||
```
|
```
|
||||||
|
|
||||||
### Vision issue lifecycle
|
### Vision issue lifecycle
|
||||||
|
|
||||||
Vision issues decompose into sprint sub-issues tracked via "Decomposed from #N" in sub-issue bodies. The architect automatically closes vision issues when all sub-issues are closed:
|
Vision issues decompose into sprint sub-issues. Sub-issues are defined in the
|
||||||
|
`## Sub-issues` block of the sprint spec (between `<!-- filer:begin -->` and
|
||||||
|
`<!-- filer:end -->` markers) and filed by `filer-bot` after the sprint PR merges
|
||||||
|
on the ops repo (#764).
|
||||||
|
|
||||||
1. Before picking new vision issues, the architect checks each open vision issue
|
Each filer-created sub-issue carries a `<!-- decomposed-from: #<vision>, sprint: <slug>, id: <id> -->`
|
||||||
2. For each, it queries for sub-issues with "Decomposed from #N" in their body (regardless of state)
|
marker in its body for idempotency and traceability.
|
||||||
3. If all sub-issues are closed, it posts a summary comment listing completed sub-issues
|
|
||||||
4. The vision issue is then closed automatically
|
|
||||||
|
|
||||||
This ensures vision issues transition from `open` → `closed` once their work is complete, without manual intervention.
|
The filer-bot (via `lib/sprint-filer.sh`) handles vision lifecycle:
|
||||||
|
1. After filing sub-issues, adds `in-progress` label to the vision issue
|
||||||
|
2. On each run, checks if all sub-issues for a vision are closed
|
||||||
|
3. If all closed, posts a summary comment and closes the vision issue
|
||||||
|
|
||||||
|
The architect no longer writes to the project repo — it is read-only (#764).
|
||||||
|
All project-repo writes (issue filing, label management, vision closure) are
|
||||||
|
handled by filer-bot with its narrowly-scoped `FORGE_FILER_TOKEN`.
|
||||||
|
|
||||||
### Session management
|
### Session management
|
||||||
|
|
||||||
|
|
@ -85,6 +96,7 @@ Run via `architect/architect-run.sh`, which:
|
||||||
- Acquires a poll-loop lock (via `acquire_lock`) and checks available memory
|
- Acquires a poll-loop lock (via `acquire_lock`) and checks available memory
|
||||||
- Cleans up per-issue scratch files from previous runs (`/tmp/architect-{project}-scratch-*.md`)
|
- Cleans up per-issue scratch files from previous runs (`/tmp/architect-{project}-scratch-*.md`)
|
||||||
- Sources shared libraries (env.sh, formula-session.sh)
|
- Sources shared libraries (env.sh, formula-session.sh)
|
||||||
|
- Exports `FORGE_TOKEN_OVERRIDE="${FORGE_ARCHITECT_TOKEN}"` BEFORE sourcing env.sh, ensuring architect-bot identity survives re-sourcing (#762)
|
||||||
- Uses FORGE_ARCHITECT_TOKEN for authentication
|
- Uses FORGE_ARCHITECT_TOKEN for authentication
|
||||||
- Processes existing architect PRs via bash-driven design phase
|
- Processes existing architect PRs via bash-driven design phase
|
||||||
- Loads the formula and builds context from VISION.md, AGENTS.md, and ops repo
|
- Loads the formula and builds context from VISION.md, AGENTS.md, and ops repo
|
||||||
|
|
@ -94,7 +106,9 @@ Run via `architect/architect-run.sh`, which:
|
||||||
- Selects up to `pitch_budget` (3 - open architect PRs) remaining vision issues
|
- Selects up to `pitch_budget` (3 - open architect PRs) remaining vision issues
|
||||||
- For each selected issue, invokes stateless `claude -p` with issue body + context
|
- For each selected issue, invokes stateless `claude -p` with issue body + context
|
||||||
- Creates PRs directly from pitch content (no scratch files)
|
- Creates PRs directly from pitch content (no scratch files)
|
||||||
- Agent is invoked only for response processing (ACCEPT/REJECT handling)
|
- Agent is invoked for stateless pitch generation and response processing (ACCEPT/REJECT handling)
|
||||||
|
- NOTE: architect-bot is read-only on the project repo (#764) — sub-issue filing
|
||||||
|
and in-progress label management are handled by filer-bot after sprint PR merge
|
||||||
|
|
||||||
**Multi-sprint pitching**: The architect pitches up to 3 sprints per run. Bash handles all state management:
|
**Multi-sprint pitching**: The architect pitches up to 3 sprints per run. Bash handles all state management:
|
||||||
- Fetches Forgejo API data (vision issues, open PRs, merged PRs)
|
- Fetches Forgejo API data (vision issues, open PRs, merged PRs)
|
||||||
|
|
@ -119,4 +133,5 @@ empty file not created, just document it).
|
||||||
- #100: Architect formula — research + design fork identification
|
- #100: Architect formula — research + design fork identification
|
||||||
- #101: Architect formula — sprint PR creation with questions
|
- #101: Architect formula — sprint PR creation with questions
|
||||||
- #102: Architect formula — answer parsing + sub-issue filing
|
- #102: Architect formula — answer parsing + sub-issue filing
|
||||||
|
- #764: Permission scoping — architect read-only on project repo, filer-bot files sub-issues
|
||||||
- #491: Refactor — bash-driven design phase with stateful session resumption
|
- #491: Refactor — bash-driven design phase with stateful session resumption
|
||||||
|
|
|
||||||
|
|
@ -34,10 +34,11 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
|
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
||||||
|
# env.sh from nested shells / claude -p tools (#762, #747)
|
||||||
|
export FORGE_TOKEN_OVERRIDE="${FORGE_ARCHITECT_TOKEN:-}"
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
# Override FORGE_TOKEN with architect-bot's token (#747)
|
|
||||||
FORGE_TOKEN="${FORGE_ARCHITECT_TOKEN:-${FORGE_TOKEN}}"
|
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
@ -116,8 +117,8 @@ build_architect_prompt() {
|
||||||
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
||||||
|
|
||||||
Your role: strategic decomposition of vision issues into development sprints.
|
Your role: strategic decomposition of vision issues into development sprints.
|
||||||
Propose sprints via PRs on the ops repo, converse with humans through PR comments,
|
Propose sprints via PRs on the ops repo, converse with humans through PR comments.
|
||||||
and file sub-issues after design forks are resolved.
|
You are READ-ONLY on the project repo — sub-issues are filed by filer-bot after sprint PR merge (#764).
|
||||||
|
|
||||||
## Project context
|
## Project context
|
||||||
${CONTEXT_BLOCK}
|
${CONTEXT_BLOCK}
|
||||||
|
|
@ -144,8 +145,8 @@ build_architect_prompt_for_mode() {
|
||||||
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
||||||
|
|
||||||
Your role: strategic decomposition of vision issues into development sprints.
|
Your role: strategic decomposition of vision issues into development sprints.
|
||||||
Propose sprints via PRs on the ops repo, converse with humans through PR comments,
|
Propose sprints via PRs on the ops repo, converse with humans through PR comments.
|
||||||
and file sub-issues after design forks are resolved.
|
You are READ-ONLY on the project repo — sub-issues are filed by filer-bot after sprint PR merge (#764).
|
||||||
|
|
||||||
## CURRENT STATE: Approved PR awaiting initial design questions
|
## CURRENT STATE: Approved PR awaiting initial design questions
|
||||||
|
|
||||||
|
|
@ -156,10 +157,10 @@ design conversation has not yet started. Your task is to:
|
||||||
2. Identify the key design decisions that need human input
|
2. Identify the key design decisions that need human input
|
||||||
3. Post initial design questions (Q1:, Q2:, etc.) as comments on the PR
|
3. Post initial design questions (Q1:, Q2:, etc.) as comments on the PR
|
||||||
4. Add a `## Design forks` section to the PR body documenting the design decisions
|
4. Add a `## Design forks` section to the PR body documenting the design decisions
|
||||||
5. File sub-issues for each design fork path if applicable
|
5. Update the ## Sub-issues section in the sprint spec if design decisions affect decomposition
|
||||||
|
|
||||||
This is NOT a pitch phase — the pitch is already approved. This is the START
|
This is NOT a pitch phase — the pitch is already approved. This is the START
|
||||||
of the design Q&A phase.
|
of the design Q&A phase. Sub-issues are filed by filer-bot after sprint PR merge (#764).
|
||||||
|
|
||||||
## Project context
|
## Project context
|
||||||
${CONTEXT_BLOCK}
|
${CONTEXT_BLOCK}
|
||||||
|
|
@ -178,8 +179,8 @@ _PROMPT_EOF_
|
||||||
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
You are the architect agent for ${FORGE_REPO}. Work through the formula below.
|
||||||
|
|
||||||
Your role: strategic decomposition of vision issues into development sprints.
|
Your role: strategic decomposition of vision issues into development sprints.
|
||||||
Propose sprints via PRs on the ops repo, converse with humans through PR comments,
|
Propose sprints via PRs on the ops repo, converse with humans through PR comments.
|
||||||
and file sub-issues after design forks are resolved.
|
You are READ-ONLY on the project repo — sub-issues are filed by filer-bot after sprint PR merge (#764).
|
||||||
|
|
||||||
## CURRENT STATE: Design Q&A in progress
|
## CURRENT STATE: Design Q&A in progress
|
||||||
|
|
||||||
|
|
@ -193,7 +194,7 @@ Your task is to:
|
||||||
2. Read human answers from PR comments
|
2. Read human answers from PR comments
|
||||||
3. Parse the answers and determine next steps
|
3. Parse the answers and determine next steps
|
||||||
4. Post follow-up questions if needed (Q3:, Q4:, etc.)
|
4. Post follow-up questions if needed (Q3:, Q4:, etc.)
|
||||||
5. If all design forks are resolved, file sub-issues for each path
|
5. If all design forks are resolved, finalize the ## Sub-issues section in the sprint spec
|
||||||
6. Update the `## Design forks` section as you progress
|
6. Update the `## Design forks` section as you progress
|
||||||
|
|
||||||
## Project context
|
## Project context
|
||||||
|
|
@ -417,205 +418,10 @@ fetch_vision_issues() {
|
||||||
"${FORGE_API}/issues?labels=vision&state=open&limit=100" 2>/dev/null || echo '[]'
|
"${FORGE_API}/issues?labels=vision&state=open&limit=100" 2>/dev/null || echo '[]'
|
||||||
}
|
}
|
||||||
|
|
||||||
# ── Helper: Fetch all sub-issues for a vision issue ───────────────────────
|
# NOTE: get_vision_subissues, all_subissues_closed, close_vision_issue,
|
||||||
# Sub-issues are identified by:
|
# check_and_close_completed_visions removed (#764) — architect-bot is read-only
|
||||||
# 1. Issues whose body contains "Decomposed from #N" pattern
|
# on the project repo. Vision lifecycle (closing completed visions, adding
|
||||||
# 2. Issues referenced in merged sprint PR bodies
|
# in-progress labels) is now handled by filer-bot via lib/sprint-filer.sh.
|
||||||
# Returns: newline-separated list of sub-issue numbers (empty if none)
|
|
||||||
# Args: vision_issue_number
|
|
||||||
get_vision_subissues() {
|
|
||||||
local vision_issue="$1"
|
|
||||||
local subissues=()
|
|
||||||
|
|
||||||
# Method 1: Find issues with "Decomposed from #N" in body
|
|
||||||
local issues_json
|
|
||||||
issues_json=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API}/issues?limit=100" 2>/dev/null) || true
|
|
||||||
|
|
||||||
if [ -n "$issues_json" ] && [ "$issues_json" != "null" ]; then
|
|
||||||
while IFS= read -r subissue_num; do
|
|
||||||
[ -z "$subissue_num" ] && continue
|
|
||||||
subissues+=("$subissue_num")
|
|
||||||
done <<< "$(printf '%s' "$issues_json" | jq -r --arg vid "$vision_issue" \
|
|
||||||
'[.[] | select(.number != ($vid | tonumber)) | select(.body // "" | contains("Decomposed from #" + $vid))] | .[].number' 2>/dev/null)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Method 2: Find issues referenced in merged sprint PR bodies
|
|
||||||
local prs_json
|
|
||||||
prs_json=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API_BASE}/repos/${FORGE_OPS_REPO}/pulls?state=closed&limit=100" 2>/dev/null) || true
|
|
||||||
|
|
||||||
if [ -n "$prs_json" ] && [ "$prs_json" != "null" ]; then
|
|
||||||
while IFS= read -r pr_num; do
|
|
||||||
[ -z "$pr_num" ] && continue
|
|
||||||
|
|
||||||
# Check if PR is merged and references the vision issue
|
|
||||||
local pr_details pr_body
|
|
||||||
pr_details=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API_BASE}/repos/${FORGE_OPS_REPO}/pulls/${pr_num}" 2>/dev/null) || continue
|
|
||||||
|
|
||||||
local is_merged
|
|
||||||
is_merged=$(printf '%s' "$pr_details" | jq -r '.merged // false') || continue
|
|
||||||
|
|
||||||
if [ "$is_merged" != "true" ]; then
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
pr_body=$(printf '%s' "$pr_details" | jq -r '.body // ""') || continue
|
|
||||||
|
|
||||||
# Extract all issue numbers from PR body
|
|
||||||
while IFS= read -r ref_issue; do
|
|
||||||
[ -z "$ref_issue" ] && continue
|
|
||||||
# Skip if already in list
|
|
||||||
local found=false
|
|
||||||
for existing in "${subissues[@]+"${subissues[@]}"}"; do
|
|
||||||
[ "$existing" = "$ref_issue" ] && found=true && break
|
|
||||||
done
|
|
||||||
if [ "$found" = false ]; then
|
|
||||||
subissues+=("$ref_issue")
|
|
||||||
fi
|
|
||||||
done <<< "$(printf '%s' "$pr_body" | grep -oE '#[0-9]+' | tr -d '#' | sort -u)"
|
|
||||||
done <<< "$(printf '%s' "$prs_json" | jq -r '.[] | select(.title | contains("architect:")) | .number')"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Output unique sub-issues
|
|
||||||
printf '%s\n' "${subissues[@]}" | sort -u | grep -v '^$' || true
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Helper: Check if all sub-issues of a vision issue are closed ───────────
|
|
||||||
# Returns: 0 if all sub-issues are closed, 1 if any are still open
|
|
||||||
# Args: vision_issue_number
|
|
||||||
all_subissues_closed() {
|
|
||||||
local vision_issue="$1"
|
|
||||||
local subissues
|
|
||||||
subissues=$(get_vision_subissues "$vision_issue")
|
|
||||||
|
|
||||||
# If no sub-issues found, parent cannot be considered complete
|
|
||||||
if [ -z "$subissues" ]; then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check each sub-issue state
|
|
||||||
while IFS= read -r subissue_num; do
|
|
||||||
[ -z "$subissue_num" ] && continue
|
|
||||||
|
|
||||||
local sub_state
|
|
||||||
sub_state=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API}/issues/${subissue_num}" 2>/dev/null | jq -r '.state // "unknown"') || true
|
|
||||||
|
|
||||||
if [ "$sub_state" != "closed" ]; then
|
|
||||||
log "Sub-issue #${subissue_num} is ${sub_state} — vision issue #${vision_issue} not ready to close"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
done <<< "$subissues"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Helper: Close vision issue with summary comment ────────────────────────
|
|
||||||
# Posts a comment listing all completed sub-issues before closing.
|
|
||||||
# Returns: 0 on success, 1 on failure
|
|
||||||
# Args: vision_issue_number
|
|
||||||
close_vision_issue() {
|
|
||||||
local vision_issue="$1"
|
|
||||||
local subissues
|
|
||||||
subissues=$(get_vision_subissues "$vision_issue")
|
|
||||||
|
|
||||||
# Build summary comment
|
|
||||||
local summary=""
|
|
||||||
local count=0
|
|
||||||
while IFS= read -r subissue_num; do
|
|
||||||
[ -z "$subissue_num" ] && continue
|
|
||||||
local sub_title
|
|
||||||
sub_title=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API}/issues/${subissue_num}" 2>/dev/null | jq -r '.title // "Untitled"') || sub_title="Untitled"
|
|
||||||
summary+="- #${subissue_num}: ${sub_title}"$'\n'
|
|
||||||
count=$((count + 1))
|
|
||||||
done <<< "$subissues"
|
|
||||||
|
|
||||||
local comment
|
|
||||||
comment=$(cat <<EOF
|
|
||||||
## Vision Issue Completed
|
|
||||||
|
|
||||||
All sub-issues have been implemented and merged. This vision issue is now closed.
|
|
||||||
|
|
||||||
### Completed sub-issues (${count}):
|
|
||||||
${summary}
|
|
||||||
---
|
|
||||||
*Automated closure by architect · $(date -u '+%Y-%m-%d %H:%M UTC')*
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
# Post comment before closing
|
|
||||||
local tmpfile tmpjson
|
|
||||||
tmpfile=$(mktemp /tmp/vision-close-XXXXXX.md)
|
|
||||||
tmpjson="${tmpfile}.json"
|
|
||||||
printf '%s' "$comment" > "$tmpfile"
|
|
||||||
jq -Rs '{body:.}' < "$tmpfile" > "$tmpjson"
|
|
||||||
|
|
||||||
if ! curl -sf -X POST \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_API}/issues/${vision_issue}/comments" \
|
|
||||||
--data-binary @"$tmpjson" >/dev/null 2>&1; then
|
|
||||||
log "WARNING: failed to post closure comment on vision issue #${vision_issue}"
|
|
||||||
rm -f "$tmpfile" "$tmpjson"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
rm -f "$tmpfile" "$tmpjson"
|
|
||||||
|
|
||||||
# Clear assignee and close the issue
|
|
||||||
curl -sf -X PATCH \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_API}/issues/${vision_issue}" \
|
|
||||||
-d '{"assignees":[]}' >/dev/null 2>&1 || true
|
|
||||||
|
|
||||||
curl -sf -X PATCH \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_API}/issues/${vision_issue}" \
|
|
||||||
-d '{"state":"closed"}' >/dev/null 2>&1 || true
|
|
||||||
|
|
||||||
log "Closed vision issue #${vision_issue} — all ${count} sub-issue(s) complete"
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Lifecycle check: Close vision issues with all sub-issues complete ──────
|
|
||||||
# Runs before picking new vision issues for decomposition.
|
|
||||||
# Checks each open vision issue and closes it if all sub-issues are closed.
|
|
||||||
check_and_close_completed_visions() {
|
|
||||||
log "Checking for vision issues with all sub-issues complete..."
|
|
||||||
|
|
||||||
local vision_issues_json
|
|
||||||
vision_issues_json=$(fetch_vision_issues)
|
|
||||||
|
|
||||||
if [ -z "$vision_issues_json" ] || [ "$vision_issues_json" = "null" ]; then
|
|
||||||
log "No open vision issues found"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Get all vision issue numbers
|
|
||||||
local vision_issue_nums
|
|
||||||
vision_issue_nums=$(printf '%s' "$vision_issues_json" | jq -r '.[].number' 2>/dev/null) || vision_issue_nums=""
|
|
||||||
|
|
||||||
local closed_count=0
|
|
||||||
while IFS= read -r vision_issue; do
|
|
||||||
[ -z "$vision_issue" ] && continue
|
|
||||||
|
|
||||||
if all_subissues_closed "$vision_issue"; then
|
|
||||||
if close_vision_issue "$vision_issue"; then
|
|
||||||
closed_count=$((closed_count + 1))
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done <<< "$vision_issue_nums"
|
|
||||||
|
|
||||||
if [ "$closed_count" -gt 0 ]; then
|
|
||||||
log "Closed ${closed_count} vision issue(s) with all sub-issues complete"
|
|
||||||
else
|
|
||||||
log "No vision issues ready for closure"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Helper: Fetch open architect PRs from ops repo Forgejo API ───────────
|
# ── Helper: Fetch open architect PRs from ops repo Forgejo API ───────────
|
||||||
# Returns: JSON array of architect PR objects
|
# Returns: JSON array of architect PR objects
|
||||||
|
|
@ -707,7 +513,23 @@ Instructions:
|
||||||
## Recommendation
|
## Recommendation
|
||||||
<architect's assessment: worth it / defer / alternative approach>
|
<architect's assessment: worth it / defer / alternative approach>
|
||||||
|
|
||||||
|
## Sub-issues
|
||||||
|
|
||||||
|
<!-- filer:begin -->
|
||||||
|
- id: <kebab-case-id>
|
||||||
|
title: \"vision(#${issue_num}): <concise sub-issue title>\"
|
||||||
|
labels: [backlog]
|
||||||
|
depends_on: []
|
||||||
|
body: |
|
||||||
|
## Goal
|
||||||
|
<what this sub-issue accomplishes>
|
||||||
|
## Acceptance criteria
|
||||||
|
- [ ] <criterion>
|
||||||
|
<!-- filer:end -->
|
||||||
|
|
||||||
IMPORTANT: Do NOT include design forks or questions. This is a go/no-go pitch.
|
IMPORTANT: Do NOT include design forks or questions. This is a go/no-go pitch.
|
||||||
|
The ## Sub-issues block is parsed by the filer-bot pipeline after sprint PR merge.
|
||||||
|
Each sub-issue between filer:begin/end markers becomes a Forgejo issue.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
@ -816,37 +638,8 @@ post_pr_footer() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# ── Helper: Add in-progress label to vision issue ────────────────────────
|
# NOTE: add_inprogress_label removed (#764) — architect-bot is read-only on
|
||||||
# Args: vision_issue_number
|
# project repo. in-progress label is now added by filer-bot via sprint-filer.sh.
|
||||||
add_inprogress_label() {
|
|
||||||
local issue_num="$1"
|
|
||||||
|
|
||||||
# Get label ID for 'in-progress'
|
|
||||||
local labels_json
|
|
||||||
labels_json=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
"${FORGE_API}/labels" 2>/dev/null) || return 1
|
|
||||||
|
|
||||||
local inprogress_label_id
|
|
||||||
inprogress_label_id=$(printf '%s' "$labels_json" | jq -r --arg label "in-progress" '.[] | select(.name == $label) | .id' 2>/dev/null) || true
|
|
||||||
|
|
||||||
if [ -z "$inprogress_label_id" ]; then
|
|
||||||
log "WARNING: in-progress label not found"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Add label to issue
|
|
||||||
if curl -sf -X POST \
|
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${FORGE_API}/issues/${issue_num}/labels" \
|
|
||||||
-d "{\"labels\": [${inprogress_label_id}]}" >/dev/null 2>&1; then
|
|
||||||
log "Added in-progress label to vision issue #${issue_num}"
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
log "WARNING: failed to add in-progress label to vision issue #${issue_num}"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# ── Precondition checks in bash before invoking the model ─────────────────
|
# ── Precondition checks in bash before invoking the model ─────────────────
|
||||||
|
|
||||||
|
|
@ -896,9 +689,7 @@ if [ "${open_arch_prs:-0}" -ge 3 ]; then
|
||||||
log "3 open architect PRs found but responses detected — processing"
|
log "3 open architect PRs found but responses detected — processing"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ── Lifecycle check: Close vision issues with all sub-issues complete ──────
|
# NOTE: Vision lifecycle check (close completed visions) moved to filer-bot (#764)
|
||||||
# Run before picking new vision issues for decomposition
|
|
||||||
check_and_close_completed_visions
|
|
||||||
|
|
||||||
# ── Bash-driven state management: Select vision issues for pitching ───────
|
# ── Bash-driven state management: Select vision issues for pitching ───────
|
||||||
# This logic is also documented in formulas/run-architect.toml preflight step
|
# This logic is also documented in formulas/run-architect.toml preflight step
|
||||||
|
|
@ -1034,8 +825,7 @@ for vision_issue in "${ARCHITECT_TARGET_ISSUES[@]}"; do
|
||||||
# Post footer comment
|
# Post footer comment
|
||||||
post_pr_footer "$pr_number"
|
post_pr_footer "$pr_number"
|
||||||
|
|
||||||
# Add in-progress label to vision issue
|
# NOTE: in-progress label is added by filer-bot after sprint PR merge (#764)
|
||||||
add_inprogress_label "$vision_issue"
|
|
||||||
|
|
||||||
pitch_count=$((pitch_count + 1))
|
pitch_count=$((pitch_count + 1))
|
||||||
log "Completed pitch for vision issue #${vision_issue} — PR #${pr_number}"
|
log "Completed pitch for vision issue #${vision_issue} — PR #${pr_number}"
|
||||||
|
|
|
||||||
458
bin/disinto
458
bin/disinto
|
|
@ -81,9 +81,12 @@ Init options:
|
||||||
--repo-root <path> Local clone path (default: ~/name)
|
--repo-root <path> Local clone path (default: ~/name)
|
||||||
--ci-id <n> Woodpecker CI repo ID (default: 0 = no CI)
|
--ci-id <n> Woodpecker CI repo ID (default: 0 = no CI)
|
||||||
--forge-url <url> Forge base URL (default: http://localhost:3000)
|
--forge-url <url> Forge base URL (default: http://localhost:3000)
|
||||||
|
--backend <value> Orchestration backend: docker (default) | nomad (stub, S0.1)
|
||||||
--bare Skip compose generation (bare-metal setup)
|
--bare Skip compose generation (bare-metal setup)
|
||||||
|
--build Use local docker build instead of registry images (dev mode)
|
||||||
--yes Skip confirmation prompts
|
--yes Skip confirmation prompts
|
||||||
--rotate-tokens Force regeneration of all bot tokens/passwords (idempotent by default)
|
--rotate-tokens Force regeneration of all bot tokens/passwords (idempotent by default)
|
||||||
|
--dry-run Print every intended action without executing
|
||||||
|
|
||||||
Hire an agent options:
|
Hire an agent options:
|
||||||
--formula <path> Path to role formula TOML (default: formulas/<role>.toml)
|
--formula <path> Path to role formula TOML (default: formulas/<role>.toml)
|
||||||
|
|
@ -642,6 +645,19 @@ prompt_admin_password() {
|
||||||
|
|
||||||
# ── init command ─────────────────────────────────────────────────────────────
|
# ── init command ─────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
# Nomad backend init — stub for the Nomad+Vault migration (issue #821, S0.1).
|
||||||
|
# Real implementation lands across S0.2–S0.5. Exists so --backend=nomad fails
|
||||||
|
# loud instead of silently routing through the docker path.
|
||||||
|
_disinto_init_nomad() {
|
||||||
|
local dry_run="${1:-false}"
|
||||||
|
if [ "$dry_run" = "true" ]; then
|
||||||
|
echo "nomad backend: stub — will be implemented by S0.2–S0.5"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
echo "ERROR: nomad backend not yet implemented (stub)" >&2
|
||||||
|
exit 99
|
||||||
|
}
|
||||||
|
|
||||||
disinto_init() {
|
disinto_init() {
|
||||||
local repo_url="${1:-}"
|
local repo_url="${1:-}"
|
||||||
if [ -z "$repo_url" ]; then
|
if [ -z "$repo_url" ]; then
|
||||||
|
|
@ -652,20 +668,40 @@ disinto_init() {
|
||||||
shift
|
shift
|
||||||
|
|
||||||
# Parse flags
|
# Parse flags
|
||||||
local branch="" repo_root="" ci_id="0" auto_yes=false forge_url_flag="" bare=false rotate_tokens=false
|
local branch="" repo_root="" ci_id="0" auto_yes=false forge_url_flag="" bare=false rotate_tokens=false use_build=false dry_run=false backend="docker"
|
||||||
while [ $# -gt 0 ]; do
|
while [ $# -gt 0 ]; do
|
||||||
case "$1" in
|
case "$1" in
|
||||||
--branch) branch="$2"; shift 2 ;;
|
--branch) branch="$2"; shift 2 ;;
|
||||||
--repo-root) repo_root="$2"; shift 2 ;;
|
--repo-root) repo_root="$2"; shift 2 ;;
|
||||||
--ci-id) ci_id="$2"; shift 2 ;;
|
--ci-id) ci_id="$2"; shift 2 ;;
|
||||||
--forge-url) forge_url_flag="$2"; shift 2 ;;
|
--forge-url) forge_url_flag="$2"; shift 2 ;;
|
||||||
|
--backend) backend="$2"; shift 2 ;;
|
||||||
|
--backend=*) backend="${1#--backend=}"; shift ;;
|
||||||
--bare) bare=true; shift ;;
|
--bare) bare=true; shift ;;
|
||||||
|
--build) use_build=true; shift ;;
|
||||||
--yes) auto_yes=true; shift ;;
|
--yes) auto_yes=true; shift ;;
|
||||||
--rotate-tokens) rotate_tokens=true; shift ;;
|
--rotate-tokens) rotate_tokens=true; shift ;;
|
||||||
|
--dry-run) dry_run=true; shift ;;
|
||||||
*) echo "Unknown option: $1" >&2; exit 1 ;;
|
*) echo "Unknown option: $1" >&2; exit 1 ;;
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
|
|
||||||
|
# Validate backend
|
||||||
|
case "$backend" in
|
||||||
|
docker|nomad) ;;
|
||||||
|
*) echo "Error: invalid --backend value '${backend}' (expected: docker|nomad)" >&2; exit 1 ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Dispatch on backend — nomad path is a stub for now (issue #821, S0.1).
|
||||||
|
# Subsequent S0.x issues will replace _disinto_init_nomad with real logic
|
||||||
|
# without touching flag parsing or this dispatch.
|
||||||
|
if [ "$backend" = "nomad" ]; then
|
||||||
|
_disinto_init_nomad "$dry_run"
|
||||||
|
# shellcheck disable=SC2317 # _disinto_init_nomad always exits today;
|
||||||
|
# `return` is defensive against future refactors.
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
# Export bare-metal flag for setup_forge
|
# Export bare-metal flag for setup_forge
|
||||||
export DISINTO_BARE="$bare"
|
export DISINTO_BARE="$bare"
|
||||||
|
|
||||||
|
|
@ -738,12 +774,92 @@ p.write_text(text)
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# ── Dry-run mode: report intended actions and exit ─────────────────────────
|
||||||
|
if [ "$dry_run" = true ]; then
|
||||||
|
echo ""
|
||||||
|
echo "── Dry-run: intended actions ────────────────────────────"
|
||||||
|
local env_file="${FACTORY_ROOT}/.env"
|
||||||
|
local rr="${repo_root:-/home/${USER}/${project_name}}"
|
||||||
|
|
||||||
|
if [ "$bare" = false ]; then
|
||||||
|
[ -f "${FACTORY_ROOT}/docker-compose.yml" ] \
|
||||||
|
&& echo "[skip] docker-compose.yml (exists)" \
|
||||||
|
|| echo "[create] docker-compose.yml"
|
||||||
|
fi
|
||||||
|
|
||||||
|
[ -f "$env_file" ] \
|
||||||
|
&& echo "[exists] .env" \
|
||||||
|
|| echo "[create] .env"
|
||||||
|
|
||||||
|
# Report token state from .env
|
||||||
|
if [ -f "$env_file" ]; then
|
||||||
|
local _var
|
||||||
|
for _var in FORGE_ADMIN_TOKEN HUMAN_TOKEN FORGE_TOKEN FORGE_REVIEW_TOKEN \
|
||||||
|
FORGE_PLANNER_TOKEN FORGE_GARDENER_TOKEN FORGE_VAULT_TOKEN \
|
||||||
|
FORGE_SUPERVISOR_TOKEN FORGE_PREDICTOR_TOKEN FORGE_ARCHITECT_TOKEN; do
|
||||||
|
if grep -q "^${_var}=" "$env_file" 2>/dev/null; then
|
||||||
|
echo "[keep] ${_var} (preserved)"
|
||||||
|
else
|
||||||
|
echo "[create] ${_var}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
else
|
||||||
|
echo "[create] all tokens and passwords"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[ensure] Forgejo admin user 'disinto-admin'"
|
||||||
|
echo "[ensure] 8 bot users: dev-bot, review-bot, planner-bot, gardener-bot, vault-bot, supervisor-bot, predictor-bot, architect-bot"
|
||||||
|
echo "[ensure] 2 llama bot users: dev-qwen, dev-qwen-nightly"
|
||||||
|
echo "[ensure] .profile repos for all bots"
|
||||||
|
echo "[ensure] repo ${forge_repo} on Forgejo with collaborators"
|
||||||
|
echo "[run] preflight checks"
|
||||||
|
|
||||||
|
[ -d "${rr}/.git" ] \
|
||||||
|
&& echo "[skip] clone ${rr} (exists)" \
|
||||||
|
|| echo "[clone] ${repo_url} -> ${rr}"
|
||||||
|
|
||||||
|
echo "[push] to local Forgejo"
|
||||||
|
echo "[ensure] ops repo disinto-admin/${project_name}-ops"
|
||||||
|
echo "[ensure] branch protection on ${forge_repo}"
|
||||||
|
|
||||||
|
[ "$toml_exists" = true ] \
|
||||||
|
&& echo "[skip] ${toml_path} (exists)" \
|
||||||
|
|| echo "[create] ${toml_path}"
|
||||||
|
|
||||||
|
if [ "$bare" = false ]; then
|
||||||
|
echo "[ensure] Woodpecker OAuth2 app"
|
||||||
|
echo "[ensure] Chat OAuth2 app"
|
||||||
|
echo "[ensure] WOODPECKER_AGENT_SECRET in .env"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "[ensure] labels on ${forge_repo}"
|
||||||
|
|
||||||
|
[ -f "${rr}/VISION.md" ] \
|
||||||
|
&& echo "[skip] VISION.md (exists)" \
|
||||||
|
|| echo "[create] VISION.md"
|
||||||
|
|
||||||
|
echo "[copy] issue templates"
|
||||||
|
echo "[ensure] scheduling (cron or compose polling)"
|
||||||
|
|
||||||
|
if [ "$bare" = false ]; then
|
||||||
|
echo "[start] docker compose stack"
|
||||||
|
echo "[ensure] Woodpecker token + repo activation"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "[ensure] CLAUDE_CONFIG_DIR"
|
||||||
|
echo "[ensure] state files (.dev-active, .reviewer-active, .gardener-active)"
|
||||||
|
echo ""
|
||||||
|
echo "Dry run complete — no changes made."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
# Generate compose files (unless --bare)
|
# Generate compose files (unless --bare)
|
||||||
if [ "$bare" = false ]; then
|
if [ "$bare" = false ]; then
|
||||||
local forge_port
|
local forge_port
|
||||||
forge_port=$(printf '%s' "$forge_url" | sed -E 's|.*:([0-9]+)/?$|\1|')
|
forge_port=$(printf '%s' "$forge_url" | sed -E 's|.*:([0-9]+)/?$|\1|')
|
||||||
forge_port="${forge_port:-3000}"
|
forge_port="${forge_port:-3000}"
|
||||||
generate_compose "$forge_port"
|
generate_compose "$forge_port" "$use_build"
|
||||||
generate_agent_docker
|
generate_agent_docker
|
||||||
generate_caddyfile
|
generate_caddyfile
|
||||||
generate_staging_index
|
generate_staging_index
|
||||||
|
|
@ -890,6 +1006,19 @@ p.write_text(text)
|
||||||
echo "Config: CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 saved to .env"
|
echo "Config: CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 saved to .env"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Write local-Qwen dev agent env keys with safe defaults (#769)
|
||||||
|
if ! grep -q '^ENABLE_LLAMA_AGENT=' "$env_file" 2>/dev/null; then
|
||||||
|
cat >> "$env_file" <<'LLAMAENVEOF'
|
||||||
|
|
||||||
|
# Local Qwen dev agent (optional) — set to 1 to enable
|
||||||
|
ENABLE_LLAMA_AGENT=0
|
||||||
|
FORGE_TOKEN_LLAMA=
|
||||||
|
FORGE_PASS_LLAMA=
|
||||||
|
ANTHROPIC_BASE_URL=
|
||||||
|
LLAMAENVEOF
|
||||||
|
echo "Config: ENABLE_LLAMA_AGENT keys written to .env (disabled by default)"
|
||||||
|
fi
|
||||||
|
|
||||||
# Create labels on remote
|
# Create labels on remote
|
||||||
create_labels "$forge_repo" "$forge_url"
|
create_labels "$forge_repo" "$forge_url"
|
||||||
|
|
||||||
|
|
@ -1118,8 +1247,6 @@ disinto_secrets() {
|
||||||
local subcmd="${1:-}"
|
local subcmd="${1:-}"
|
||||||
local enc_file="${FACTORY_ROOT}/.env.enc"
|
local enc_file="${FACTORY_ROOT}/.env.enc"
|
||||||
local env_file="${FACTORY_ROOT}/.env"
|
local env_file="${FACTORY_ROOT}/.env"
|
||||||
local vault_enc_file="${FACTORY_ROOT}/.env.vault.enc"
|
|
||||||
local vault_env_file="${FACTORY_ROOT}/.env.vault"
|
|
||||||
|
|
||||||
# Shared helper: ensure sops+age and .sops.yaml exist
|
# Shared helper: ensure sops+age and .sops.yaml exist
|
||||||
_secrets_ensure_sops() {
|
_secrets_ensure_sops() {
|
||||||
|
|
@ -1165,30 +1292,51 @@ disinto_secrets() {
|
||||||
|
|
||||||
case "$subcmd" in
|
case "$subcmd" in
|
||||||
add)
|
add)
|
||||||
local name="${2:-}"
|
# Parse flags
|
||||||
|
local force=false
|
||||||
|
shift # consume 'add'
|
||||||
|
while [ $# -gt 0 ]; do
|
||||||
|
case "$1" in
|
||||||
|
-f|--force) force=true; shift ;;
|
||||||
|
-*) echo "Unknown flag: $1" >&2; exit 1 ;;
|
||||||
|
*) break ;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
local name="${1:-}"
|
||||||
if [ -z "$name" ]; then
|
if [ -z "$name" ]; then
|
||||||
echo "Usage: disinto secrets add <NAME>" >&2
|
echo "Usage: disinto secrets add [-f|--force] <NAME>" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
_secrets_ensure_age_key
|
_secrets_ensure_age_key
|
||||||
mkdir -p "$secrets_dir"
|
mkdir -p "$secrets_dir"
|
||||||
|
|
||||||
printf 'Enter value for %s: ' "$name" >&2
|
|
||||||
local value
|
local value
|
||||||
IFS= read -rs value
|
if [ -t 0 ]; then
|
||||||
echo >&2
|
# Interactive TTY — prompt with hidden input (original behavior)
|
||||||
|
printf 'Enter value for %s: ' "$name" >&2
|
||||||
|
IFS= read -rs value
|
||||||
|
echo >&2
|
||||||
|
else
|
||||||
|
# Piped/redirected stdin — read raw bytes verbatim
|
||||||
|
IFS= read -r -d '' value || true
|
||||||
|
fi
|
||||||
if [ -z "$value" ]; then
|
if [ -z "$value" ]; then
|
||||||
echo "Error: empty value" >&2
|
echo "Error: empty value" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
local enc_path="${secrets_dir}/${name}.enc"
|
local enc_path="${secrets_dir}/${name}.enc"
|
||||||
if [ -f "$enc_path" ]; then
|
if [ -f "$enc_path" ] && [ "$force" = false ]; then
|
||||||
printf 'Secret %s already exists. Overwrite? [y/N] ' "$name" >&2
|
if [ -t 0 ]; then
|
||||||
local confirm
|
printf 'Secret %s already exists. Overwrite? [y/N] ' "$name" >&2
|
||||||
read -r confirm
|
local confirm
|
||||||
if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then
|
read -r confirm
|
||||||
echo "Aborted." >&2
|
if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then
|
||||||
|
echo "Aborted." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Error: secret ${name} already exists (use -f to overwrite)" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
@ -1221,6 +1369,37 @@ disinto_secrets() {
|
||||||
sops -d "$enc_file"
|
sops -d "$enc_file"
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
remove)
|
||||||
|
local name="${2:-}"
|
||||||
|
if [ -z "$name" ]; then
|
||||||
|
echo "Usage: disinto secrets remove <NAME>" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
local enc_path="${secrets_dir}/${name}.enc"
|
||||||
|
if [ ! -f "$enc_path" ]; then
|
||||||
|
echo "Error: ${enc_path} not found" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
rm -f "$enc_path"
|
||||||
|
echo "Removed: ${enc_path}"
|
||||||
|
;;
|
||||||
|
list)
|
||||||
|
if [ ! -d "$secrets_dir" ]; then
|
||||||
|
echo "No secrets directory found." >&2
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
local found=false
|
||||||
|
for enc_file_path in "${secrets_dir}"/*.enc; do
|
||||||
|
[ -f "$enc_file_path" ] || continue
|
||||||
|
found=true
|
||||||
|
local secret_name
|
||||||
|
secret_name=$(basename "$enc_file_path" .enc)
|
||||||
|
echo "$secret_name"
|
||||||
|
done
|
||||||
|
if [ "$found" = false ]; then
|
||||||
|
echo "No secrets stored." >&2
|
||||||
|
fi
|
||||||
|
;;
|
||||||
edit)
|
edit)
|
||||||
if [ ! -f "$enc_file" ]; then
|
if [ ! -f "$enc_file" ]; then
|
||||||
echo "Error: ${enc_file} not found. Run 'disinto secrets migrate' first." >&2
|
echo "Error: ${enc_file} not found. Run 'disinto secrets migrate' first." >&2
|
||||||
|
|
@ -1244,54 +1423,100 @@ disinto_secrets() {
|
||||||
rm -f "$env_file"
|
rm -f "$env_file"
|
||||||
echo "Migrated: .env -> .env.enc (plaintext removed)"
|
echo "Migrated: .env -> .env.enc (plaintext removed)"
|
||||||
;;
|
;;
|
||||||
edit-vault)
|
migrate-from-vault)
|
||||||
if [ ! -f "$vault_enc_file" ]; then
|
# One-shot migration: split .env.vault.enc into secrets/<KEY>.enc files (#777)
|
||||||
echo "Error: ${vault_enc_file} not found. Run 'disinto secrets migrate-vault' first." >&2
|
local vault_enc_file="${FACTORY_ROOT}/.env.vault.enc"
|
||||||
|
local vault_env_file="${FACTORY_ROOT}/.env.vault"
|
||||||
|
local source_file=""
|
||||||
|
|
||||||
|
if [ -f "$vault_enc_file" ] && command -v sops &>/dev/null; then
|
||||||
|
source_file="$vault_enc_file"
|
||||||
|
elif [ -f "$vault_env_file" ]; then
|
||||||
|
source_file="$vault_env_file"
|
||||||
|
else
|
||||||
|
echo "Error: neither .env.vault.enc nor .env.vault found — nothing to migrate." >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sops "$vault_enc_file"
|
|
||||||
;;
|
_secrets_ensure_age_key
|
||||||
show-vault)
|
mkdir -p "$secrets_dir"
|
||||||
if [ ! -f "$vault_enc_file" ]; then
|
|
||||||
echo "Error: ${vault_enc_file} not found." >&2
|
# Decrypt vault to temp dotenv
|
||||||
|
local tmp_dotenv
|
||||||
|
tmp_dotenv=$(mktemp /tmp/disinto-vault-migrate-XXXXXX)
|
||||||
|
trap 'rm -f "$tmp_dotenv"' RETURN
|
||||||
|
|
||||||
|
if [ "$source_file" = "$vault_enc_file" ]; then
|
||||||
|
if ! sops -d --output-type dotenv "$vault_enc_file" > "$tmp_dotenv" 2>/dev/null; then
|
||||||
|
rm -f "$tmp_dotenv"
|
||||||
|
echo "Error: failed to decrypt .env.vault.enc" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
cp "$vault_env_file" "$tmp_dotenv"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Parse each KEY=VALUE and encrypt into secrets/<KEY>.enc
|
||||||
|
local count=0
|
||||||
|
local failed=0
|
||||||
|
while IFS='=' read -r key value; do
|
||||||
|
# Skip empty lines and comments
|
||||||
|
[[ -z "$key" || "$key" =~ ^[[:space:]]*# ]] && continue
|
||||||
|
# Trim whitespace from key
|
||||||
|
key=$(echo "$key" | xargs)
|
||||||
|
[ -z "$key" ] && continue
|
||||||
|
|
||||||
|
local enc_path="${secrets_dir}/${key}.enc"
|
||||||
|
if printf '%s' "$value" | age -r "$AGE_PUBLIC_KEY" -o "$enc_path" 2>/dev/null; then
|
||||||
|
# Verify round-trip
|
||||||
|
local check
|
||||||
|
check=$(age -d -i "$age_key_file" "$enc_path" 2>/dev/null) || { failed=$((failed + 1)); echo " FAIL (verify): ${key}" >&2; continue; }
|
||||||
|
if [ "$check" = "$value" ]; then
|
||||||
|
echo " OK: ${key} -> secrets/${key}.enc"
|
||||||
|
count=$((count + 1))
|
||||||
|
else
|
||||||
|
echo " FAIL (mismatch): ${key}" >&2
|
||||||
|
failed=$((failed + 1))
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo " FAIL (encrypt): ${key}" >&2
|
||||||
|
failed=$((failed + 1))
|
||||||
|
fi
|
||||||
|
done < "$tmp_dotenv"
|
||||||
|
|
||||||
|
rm -f "$tmp_dotenv"
|
||||||
|
|
||||||
|
if [ "$failed" -gt 0 ]; then
|
||||||
|
echo "Error: ${failed} secret(s) failed migration. Vault files NOT removed." >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sops -d "$vault_enc_file"
|
|
||||||
;;
|
if [ "$count" -eq 0 ]; then
|
||||||
migrate-vault)
|
echo "Warning: no secrets found in vault file." >&2
|
||||||
if [ ! -f "$vault_env_file" ]; then
|
else
|
||||||
echo "Error: ${vault_env_file} not found — nothing to migrate." >&2
|
echo "Migrated ${count} secret(s) to secrets/*.enc"
|
||||||
echo " Create .env.vault with vault secrets (GITHUB_TOKEN, deploy keys, etc.)" >&2
|
# Remove old vault files on success
|
||||||
exit 1
|
rm -f "$vault_enc_file" "$vault_env_file"
|
||||||
|
echo "Removed: .env.vault.enc / .env.vault"
|
||||||
fi
|
fi
|
||||||
_secrets_ensure_sops
|
|
||||||
encrypt_env_file "$vault_env_file" "$vault_enc_file"
|
|
||||||
# Verify decryption works before removing plaintext
|
|
||||||
if ! sops -d "$vault_enc_file" >/dev/null 2>&1; then
|
|
||||||
echo "Error: failed to verify .env.vault.enc decryption" >&2
|
|
||||||
rm -f "$vault_enc_file"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
rm -f "$vault_env_file"
|
|
||||||
echo "Migrated: .env.vault -> .env.vault.enc (plaintext removed)"
|
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
cat <<EOF >&2
|
cat <<EOF >&2
|
||||||
Usage: disinto secrets <subcommand>
|
Usage: disinto secrets <subcommand>
|
||||||
|
|
||||||
Individual secrets (secrets/<NAME>.enc):
|
Secrets (secrets/<NAME>.enc — age-encrypted, one file per key):
|
||||||
add <NAME> Prompt for value, encrypt, store in secrets/<NAME>.enc
|
add <NAME> Prompt for value, encrypt, store in secrets/<NAME>.enc
|
||||||
show <NAME> Decrypt and print an individual secret
|
show <NAME> Decrypt and print a secret
|
||||||
|
remove <NAME> Remove a secret
|
||||||
|
list List all stored secrets
|
||||||
|
|
||||||
Agent secrets (.env.enc):
|
Agent secrets (.env.enc — sops-encrypted dotenv):
|
||||||
edit Edit agent secrets (FORGE_TOKEN, CLAUDE_API_KEY, etc.)
|
edit Edit agent secrets (FORGE_TOKEN, CLAUDE_API_KEY, etc.)
|
||||||
show Show decrypted agent secrets (no argument)
|
show Show decrypted agent secrets (no argument)
|
||||||
migrate Encrypt .env -> .env.enc
|
migrate Encrypt .env -> .env.enc
|
||||||
|
|
||||||
Vault secrets (.env.vault.enc):
|
Migration:
|
||||||
edit-vault Edit vault secrets (GITHUB_TOKEN, deploy keys, etc.)
|
migrate-from-vault Split .env.vault.enc into secrets/<KEY>.enc (one-shot)
|
||||||
show-vault Show decrypted vault secrets
|
|
||||||
migrate-vault Encrypt .env.vault -> .env.vault.enc
|
|
||||||
EOF
|
EOF
|
||||||
exit 1
|
exit 1
|
||||||
;;
|
;;
|
||||||
|
|
@ -1303,7 +1528,8 @@ EOF
|
||||||
disinto_run() {
|
disinto_run() {
|
||||||
local action_id="${1:?Usage: disinto run <action-id>}"
|
local action_id="${1:?Usage: disinto run <action-id>}"
|
||||||
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
||||||
local vault_enc="${FACTORY_ROOT}/.env.vault.enc"
|
local secrets_dir="${FACTORY_ROOT}/secrets"
|
||||||
|
local age_key_file="${HOME}/.config/sops/age/keys.txt"
|
||||||
|
|
||||||
if [ ! -f "$compose_file" ]; then
|
if [ ! -f "$compose_file" ]; then
|
||||||
echo "Error: docker-compose.yml not found" >&2
|
echo "Error: docker-compose.yml not found" >&2
|
||||||
|
|
@ -1311,29 +1537,42 @@ disinto_run() {
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f "$vault_enc" ]; then
|
if [ ! -d "$secrets_dir" ]; then
|
||||||
echo "Error: .env.vault.enc not found — create vault secrets first" >&2
|
echo "Error: secrets/ directory not found — create secrets first" >&2
|
||||||
echo " Run 'disinto secrets migrate-vault' after creating .env.vault" >&2
|
echo " Run 'disinto secrets add <NAME>' to add secrets" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ! command -v sops &>/dev/null; then
|
if ! command -v age &>/dev/null; then
|
||||||
echo "Error: sops not found — required to decrypt vault secrets" >&2
|
echo "Error: age not found — required to decrypt secrets" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Decrypt vault secrets to temp file
|
if [ ! -f "$age_key_file" ]; then
|
||||||
|
echo "Error: age key not found at ${age_key_file}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Decrypt all secrets/*.enc into a temp env file for the runner
|
||||||
local tmp_env
|
local tmp_env
|
||||||
tmp_env=$(mktemp /tmp/disinto-vault-XXXXXX)
|
tmp_env=$(mktemp /tmp/disinto-secrets-XXXXXX)
|
||||||
trap 'rm -f "$tmp_env"' EXIT
|
trap 'rm -f "$tmp_env"' EXIT
|
||||||
|
|
||||||
if ! sops -d --output-type dotenv "$vault_enc" > "$tmp_env" 2>/dev/null; then
|
local count=0
|
||||||
rm -f "$tmp_env"
|
for enc_path in "${secrets_dir}"/*.enc; do
|
||||||
echo "Error: failed to decrypt .env.vault.enc" >&2
|
[ -f "$enc_path" ] || continue
|
||||||
exit 1
|
local key
|
||||||
fi
|
key=$(basename "$enc_path" .enc)
|
||||||
|
local val
|
||||||
|
val=$(age -d -i "$age_key_file" "$enc_path" 2>/dev/null) || {
|
||||||
|
echo "Warning: failed to decrypt ${enc_path}" >&2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
printf '%s=%s\n' "$key" "$val" >> "$tmp_env"
|
||||||
|
count=$((count + 1))
|
||||||
|
done
|
||||||
|
|
||||||
echo "Vault secrets decrypted to tmpfile"
|
echo "Decrypted ${count} secret(s) to tmpfile"
|
||||||
|
|
||||||
# Run action in ephemeral runner container
|
# Run action in ephemeral runner container
|
||||||
local rc=0
|
local rc=0
|
||||||
|
|
@ -1404,21 +1643,96 @@ download_agent_binaries() {
|
||||||
|
|
||||||
# ── up command ────────────────────────────────────────────────────────────────
|
# ── up command ────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
# Regenerate a file idempotently: run the generator, compare output, backup if changed.
|
||||||
|
# Usage: _regen_file <target_file> <generator_fn> [args...]
|
||||||
|
_regen_file() {
|
||||||
|
local target="$1"; shift
|
||||||
|
local generator="$1"; shift
|
||||||
|
local basename
|
||||||
|
basename=$(basename "$target")
|
||||||
|
|
||||||
|
# Move existing file aside so the generator (which skips if file exists)
|
||||||
|
# produces a fresh copy.
|
||||||
|
local stashed=""
|
||||||
|
if [ -f "$target" ]; then
|
||||||
|
stashed=$(mktemp "${target}.stash.XXXXXX")
|
||||||
|
mv "$target" "$stashed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Run the generator — it writes $target from scratch.
|
||||||
|
# If the generator fails, restore the stashed original so it is not stranded.
|
||||||
|
if ! "$generator" "$@"; then
|
||||||
|
if [ -n "$stashed" ]; then
|
||||||
|
mv "$stashed" "$target"
|
||||||
|
fi
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$stashed" ]; then
|
||||||
|
# No previous file — first generation
|
||||||
|
echo "regenerated: ${basename} (new)"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
if cmp -s "$stashed" "$target"; then
|
||||||
|
# Content unchanged — restore original to preserve mtime
|
||||||
|
mv "$stashed" "$target"
|
||||||
|
echo "unchanged: ${basename}"
|
||||||
|
else
|
||||||
|
# Content changed — keep new, save old as .prev
|
||||||
|
mv "$stashed" "${target}.prev"
|
||||||
|
echo "regenerated: ${basename} (previous saved as ${basename}.prev)"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
disinto_up() {
|
disinto_up() {
|
||||||
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
||||||
|
local caddyfile="${FACTORY_ROOT}/docker/Caddyfile"
|
||||||
if [ ! -f "$compose_file" ]; then
|
if [ ! -f "$compose_file" ]; then
|
||||||
echo "Error: docker-compose.yml not found" >&2
|
echo "Error: docker-compose.yml not found" >&2
|
||||||
echo " Run 'disinto init <repo-url>' first (without --bare)" >&2
|
echo " Run 'disinto init <repo-url>' first (without --bare)" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Pre-build: download binaries to docker/agents/bin/ to avoid network calls during docker build
|
# Parse --no-regen flag; remaining args pass through to docker compose
|
||||||
echo "── Pre-build: downloading agent binaries ────────────────────────"
|
local no_regen=false
|
||||||
if ! download_agent_binaries; then
|
local -a compose_args=()
|
||||||
echo "Error: failed to download agent binaries" >&2
|
for arg in "$@"; do
|
||||||
exit 1
|
case "$arg" in
|
||||||
|
--no-regen) no_regen=true ;;
|
||||||
|
*) compose_args+=("$arg") ;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# ── Regenerate compose & Caddyfile from generators ──────────────────────
|
||||||
|
if [ "$no_regen" = true ]; then
|
||||||
|
echo "Warning: running with unmanaged compose — hand-edits will drift" >&2
|
||||||
|
else
|
||||||
|
# Determine forge_port from FORGE_URL (same logic as init)
|
||||||
|
local forge_url="${FORGE_URL:-http://localhost:3000}"
|
||||||
|
local forge_port
|
||||||
|
forge_port=$(printf '%s' "$forge_url" | sed -E 's|.*:([0-9]+)/?$|\1|')
|
||||||
|
forge_port="${forge_port:-3000}"
|
||||||
|
|
||||||
|
# Detect build mode from existing compose
|
||||||
|
local use_build=false
|
||||||
|
if grep -q '^\s*build:' "$compose_file"; then
|
||||||
|
use_build=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
_regen_file "$compose_file" generate_compose "$forge_port" "$use_build"
|
||||||
|
_regen_file "$caddyfile" generate_caddyfile
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Pre-build: download binaries only when compose uses local build
|
||||||
|
if grep -q '^\s*build:' "$compose_file"; then
|
||||||
|
echo "── Pre-build: downloading agent binaries ────────────────────────"
|
||||||
|
if ! download_agent_binaries; then
|
||||||
|
echo "Error: failed to download agent binaries" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
fi
|
fi
|
||||||
echo ""
|
|
||||||
|
|
||||||
# Decrypt secrets to temp .env if SOPS available and .env.enc exists
|
# Decrypt secrets to temp .env if SOPS available and .env.enc exists
|
||||||
local tmp_env=""
|
local tmp_env=""
|
||||||
|
|
@ -1431,7 +1745,7 @@ disinto_up() {
|
||||||
echo "Decrypted secrets for compose"
|
echo "Decrypted secrets for compose"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
docker compose -f "$compose_file" up -d "$@"
|
docker compose -f "$compose_file" up -d --build --remove-orphans ${compose_args[@]+"${compose_args[@]}"}
|
||||||
echo "Stack is up"
|
echo "Stack is up"
|
||||||
|
|
||||||
# Clean up temp .env (also handled by EXIT trap if compose fails)
|
# Clean up temp .env (also handled by EXIT trap if compose fails)
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Dev Agent
|
# Dev Agent
|
||||||
|
|
||||||
**Role**: Implement issues autonomously — write code, push branches, address
|
**Role**: Implement issues autonomously — write code, push branches, address
|
||||||
|
|
@ -29,7 +29,11 @@ stale checks (vision issues are managed by the architect). If the issue is assig
|
||||||
`REQUEST_CHANGES`, spawns the dev-agent to address it before setting `BLOCKED_BY_INPROGRESS=true`;
|
`REQUEST_CHANGES`, spawns the dev-agent to address it before setting `BLOCKED_BY_INPROGRESS=true`;
|
||||||
otherwise just sets blocked. If assigned to another agent, logs and falls through (does not
|
otherwise just sets blocked. If assigned to another agent, logs and falls through (does not
|
||||||
block). If no assignee, no open PR, and no agent lock file — removes `in-progress`, adds
|
block). If no assignee, no open PR, and no agent lock file — removes `in-progress`, adds
|
||||||
`blocked` with a human-triage comment. **Per-agent open-PR gate**: before starting new work,
|
`blocked` with a human-triage comment. **Post-crash self-assigned recovery (#749)**: when the
|
||||||
|
issue is self-assigned (this bot) but there is no open PR, dev-poll now checks for a lock
|
||||||
|
file (`/tmp/dev-impl-summary-$PROJECT_NAME-$ISSUE_NUM.txt`) AND a remote branch
|
||||||
|
(`fix/issue-$ISSUE_NUM`) before declaring "my thread is busy". If neither exists after a cold
|
||||||
|
boot, it spawns a fresh dev-agent for recovery instead of looping forever. **Per-agent open-PR gate**: before starting new work,
|
||||||
filters open waiting PRs to only those assigned to this agent (`$BOT_USER`). Other agents'
|
filters open waiting PRs to only those assigned to this agent (`$BOT_USER`). Other agents'
|
||||||
PRs do not block this agent's pipeline (#358, #369). **Pre-lock merge scan own-PRs only**:
|
PRs do not block this agent's pipeline (#358, #369). **Pre-lock merge scan own-PRs only**:
|
||||||
the direct-merge scan only merges PRs whose linked issue is assigned to this agent — skips
|
the direct-merge scan only merges PRs whose linked issue is assigned to this agent — skips
|
||||||
|
|
@ -51,6 +55,12 @@ PRs owned by other bot users (#374).
|
||||||
|
|
||||||
**Crash recovery**: on `PHASE:crashed` or non-zero exit, the worktree is **preserved** (not destroyed) for debugging. Location logged. Supervisor housekeeping removes stale crashed worktrees older than 24h.
|
**Crash recovery**: on `PHASE:crashed` or non-zero exit, the worktree is **preserved** (not destroyed) for debugging. Location logged. Supervisor housekeeping removes stale crashed worktrees older than 24h.
|
||||||
|
|
||||||
|
**Polling loop isolation (#753)**: `docker/agents/entrypoint.sh` now tracks fast-poll PIDs
|
||||||
|
(`FAST_PIDS`) and calls `wait "${FAST_PIDS[@]}"` instead of `wait` (no-args). This means
|
||||||
|
long-running dev-agent sessions no longer block the loop from launching the next iteration's
|
||||||
|
fast polls — the loop only waits for review-poll and dev-poll (the fast agents), never for
|
||||||
|
the dev-agent subprocess itself.
|
||||||
|
|
||||||
**Lifecycle**: dev-poll.sh (invoked by polling loop, `check_active dev`) → dev-agent.sh →
|
**Lifecycle**: dev-poll.sh (invoked by polling loop, `check_active dev`) → dev-agent.sh →
|
||||||
tmux session → phase file drives CI/review loop → merge + `mirror_push()` → close issue.
|
tmux session → phase file drives CI/review loop → merge + `mirror_push()` → close issue.
|
||||||
On respawn after `PHASE:escalate`, the stale phase file is cleared first so the session
|
On respawn after `PHASE:escalate`, the stale phase file is cleared first so the session
|
||||||
|
|
|
||||||
|
|
@ -476,8 +476,19 @@ if [ "$ORPHAN_COUNT" -gt 0 ]; then
|
||||||
BLOCKED_BY_INPROGRESS=true
|
BLOCKED_BY_INPROGRESS=true
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
log "issue #${ISSUE_NUM} assigned to me — my thread is busy"
|
# No open PR — check if a thread is actually alive (lock file or remote branch)
|
||||||
BLOCKED_BY_INPROGRESS=true
|
LOCK_FILE="/tmp/dev-impl-summary-${PROJECT_NAME}-${ISSUE_NUM}.txt"
|
||||||
|
REMOTE_BRANCH_EXISTS=$(git ls-remote --exit-code origin "fix/issue-${ISSUE_NUM}" >/dev/null 2>&1 && echo yes || echo no)
|
||||||
|
|
||||||
|
if [ -f "$LOCK_FILE" ] || [ "$REMOTE_BRANCH_EXISTS" = "yes" ]; then
|
||||||
|
log "issue #${ISSUE_NUM} assigned to me — my thread is busy (lock=$([ -f "$LOCK_FILE" ] && echo y || echo n) remote_branch=$REMOTE_BRANCH_EXISTS)"
|
||||||
|
BLOCKED_BY_INPROGRESS=true
|
||||||
|
else
|
||||||
|
log "issue #${ISSUE_NUM} self-assigned but orphaned (no lock, no branch, no PR) — recovering"
|
||||||
|
nohup "${SCRIPT_DIR}/dev-agent.sh" "$ISSUE_NUM" >> "$LOGFILE" 2>&1 &
|
||||||
|
log "started dev-agent PID $! for issue #${ISSUE_NUM} (post-crash recovery)"
|
||||||
|
BLOCKED_BY_INPROGRESS=true
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
log "issue #${ISSUE_NUM} assigned to ${assignee} — their thread, not blocking"
|
log "issue #${ISSUE_NUM} assigned to ${assignee} — their thread, not blocking"
|
||||||
|
|
|
||||||
|
|
@ -14,10 +14,10 @@ services:
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
- ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
|
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
||||||
- woodpecker-data:/woodpecker-data:ro
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
environment:
|
environment:
|
||||||
- FORGE_URL=http://forgejo:3000
|
- FORGE_URL=http://forgejo:3000
|
||||||
|
|
@ -30,6 +30,7 @@ services:
|
||||||
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
||||||
- FORGE_PREDICTOR_TOKEN=${FORGE_PREDICTOR_TOKEN:-}
|
- FORGE_PREDICTOR_TOKEN=${FORGE_PREDICTOR_TOKEN:-}
|
||||||
- FORGE_ARCHITECT_TOKEN=${FORGE_ARCHITECT_TOKEN:-}
|
- FORGE_ARCHITECT_TOKEN=${FORGE_ARCHITECT_TOKEN:-}
|
||||||
|
- FORGE_FILER_TOKEN=${FORGE_FILER_TOKEN:-}
|
||||||
- FORGE_BOT_USERNAMES=${FORGE_BOT_USERNAMES:-}
|
- FORGE_BOT_USERNAMES=${FORGE_BOT_USERNAMES:-}
|
||||||
- WOODPECKER_TOKEN=${WOODPECKER_TOKEN:-}
|
- WOODPECKER_TOKEN=${WOODPECKER_TOKEN:-}
|
||||||
- CLAUDE_TIMEOUT=${CLAUDE_TIMEOUT:-7200}
|
- CLAUDE_TIMEOUT=${CLAUDE_TIMEOUT:-7200}
|
||||||
|
|
@ -48,9 +49,18 @@ services:
|
||||||
- GARDENER_INTERVAL=${GARDENER_INTERVAL:-21600}
|
- GARDENER_INTERVAL=${GARDENER_INTERVAL:-21600}
|
||||||
- ARCHITECT_INTERVAL=${ARCHITECT_INTERVAL:-21600}
|
- ARCHITECT_INTERVAL=${ARCHITECT_INTERVAL:-21600}
|
||||||
- PLANNER_INTERVAL=${PLANNER_INTERVAL:-43200}
|
- PLANNER_INTERVAL=${PLANNER_INTERVAL:-43200}
|
||||||
|
- SUPERVISOR_INTERVAL=${SUPERVISOR_INTERVAL:-1200}
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
||||||
|
interval: 60s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
depends_on:
|
depends_on:
|
||||||
- forgejo
|
forgejo:
|
||||||
- woodpecker
|
condition: service_healthy
|
||||||
|
woodpecker:
|
||||||
|
condition: service_started
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
|
|
@ -67,10 +77,10 @@ services:
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
- ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
|
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
||||||
- woodpecker-data:/woodpecker-data:ro
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
environment:
|
environment:
|
||||||
- FORGE_URL=http://forgejo:3000
|
- FORGE_URL=http://forgejo:3000
|
||||||
|
|
@ -100,9 +110,85 @@ services:
|
||||||
- CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
- CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
||||||
- POLL_INTERVAL=${POLL_INTERVAL:-300}
|
- POLL_INTERVAL=${POLL_INTERVAL:-300}
|
||||||
- AGENT_ROLES=dev
|
- AGENT_ROLES=dev
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
||||||
|
interval: 60s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
depends_on:
|
depends_on:
|
||||||
- forgejo
|
forgejo:
|
||||||
- woodpecker
|
condition: service_healthy
|
||||||
|
woodpecker:
|
||||||
|
condition: service_started
|
||||||
|
networks:
|
||||||
|
- disinto-net
|
||||||
|
|
||||||
|
agents-llama-all:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: docker/agents/Dockerfile
|
||||||
|
image: disinto/agents-llama:latest
|
||||||
|
container_name: disinto-agents-llama-all
|
||||||
|
restart: unless-stopped
|
||||||
|
profiles: ["agents-llama-all"]
|
||||||
|
security_opt:
|
||||||
|
- apparmor=unconfined
|
||||||
|
volumes:
|
||||||
|
- agent-data:/home/agent/data
|
||||||
|
- project-repos:/home/agent/repos
|
||||||
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
|
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
|
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
|
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
||||||
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
|
environment:
|
||||||
|
- FORGE_URL=http://forgejo:3000
|
||||||
|
- FORGE_REPO=${FORGE_REPO:-disinto-admin/disinto}
|
||||||
|
- FORGE_TOKEN=${FORGE_TOKEN_LLAMA:-}
|
||||||
|
- FORGE_PASS=${FORGE_PASS_LLAMA:-}
|
||||||
|
- FORGE_REVIEW_TOKEN=${FORGE_REVIEW_TOKEN:-}
|
||||||
|
- FORGE_PLANNER_TOKEN=${FORGE_PLANNER_TOKEN:-}
|
||||||
|
- FORGE_GARDENER_TOKEN=${FORGE_GARDENER_TOKEN:-}
|
||||||
|
- FORGE_VAULT_TOKEN=${FORGE_VAULT_TOKEN:-}
|
||||||
|
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
||||||
|
- FORGE_PREDICTOR_TOKEN=${FORGE_PREDICTOR_TOKEN:-}
|
||||||
|
- FORGE_ARCHITECT_TOKEN=${FORGE_ARCHITECT_TOKEN:-}
|
||||||
|
- FORGE_FILER_TOKEN=${FORGE_FILER_TOKEN:-}
|
||||||
|
- FORGE_BOT_USERNAMES=${FORGE_BOT_USERNAMES:-}
|
||||||
|
- WOODPECKER_TOKEN=${WOODPECKER_TOKEN:-}
|
||||||
|
- CLAUDE_TIMEOUT=${CLAUDE_TIMEOUT:-7200}
|
||||||
|
- CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1}
|
||||||
|
- CLAUDE_AUTOCOMPACT_PCT_OVERRIDE=60
|
||||||
|
- CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS=1
|
||||||
|
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||||
|
- ANTHROPIC_BASE_URL=${ANTHROPIC_BASE_URL:-}
|
||||||
|
- FORGE_ADMIN_PASS=${FORGE_ADMIN_PASS:-}
|
||||||
|
- DISINTO_CONTAINER=1
|
||||||
|
- PROJECT_TOML=projects/disinto.toml
|
||||||
|
- PROJECT_NAME=${PROJECT_NAME:-project}
|
||||||
|
- PROJECT_REPO_ROOT=/home/agent/repos/${PROJECT_NAME:-project}
|
||||||
|
- WOODPECKER_DATA_DIR=/woodpecker-data
|
||||||
|
- WOODPECKER_REPO_ID=${WOODPECKER_REPO_ID:-}
|
||||||
|
- CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
||||||
|
- POLL_INTERVAL=${POLL_INTERVAL:-300}
|
||||||
|
- GARDENER_INTERVAL=${GARDENER_INTERVAL:-21600}
|
||||||
|
- ARCHITECT_INTERVAL=${ARCHITECT_INTERVAL:-21600}
|
||||||
|
- PLANNER_INTERVAL=${PLANNER_INTERVAL:-43200}
|
||||||
|
- SUPERVISOR_INTERVAL=${SUPERVISOR_INTERVAL:-1200}
|
||||||
|
- AGENT_ROLES=review,dev,gardener,architect,planner,predictor,supervisor
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
||||||
|
interval: 60s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
|
depends_on:
|
||||||
|
forgejo:
|
||||||
|
condition: service_healthy
|
||||||
|
woodpecker:
|
||||||
|
condition: service_started
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
|
|
@ -117,9 +203,9 @@ services:
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${HOME}/.claude:/home/agent/.claude
|
- ${CLAUDE_DIR:-${HOME}/.claude}:/home/agent/.claude
|
||||||
- /usr/local/bin/claude:/usr/local/bin/claude:ro
|
- ${CLAUDE_BIN_DIR:-/usr/local/bin/claude}:/usr/local/bin/claude:ro
|
||||||
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
env_file:
|
env_file:
|
||||||
- .env
|
- .env
|
||||||
|
|
||||||
|
|
@ -133,9 +219,9 @@ services:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
volumes:
|
volumes:
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
- /usr/local/bin/claude:/usr/local/bin/claude:ro
|
- ${CLAUDE_BIN_DIR:-/usr/local/bin/claude}:/usr/local/bin/claude:ro
|
||||||
- ${HOME}/.claude.json:/root/.claude.json:ro
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/root/.claude.json:ro
|
||||||
- ${HOME}/.claude:/root/.claude:ro
|
- ${CLAUDE_DIR:-${HOME}/.claude}:/root/.claude:ro
|
||||||
- disinto-logs:/opt/disinto-logs
|
- disinto-logs:/opt/disinto-logs
|
||||||
environment:
|
environment:
|
||||||
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
- FORGE_SUPERVISOR_TOKEN=${FORGE_SUPERVISOR_TOKEN:-}
|
||||||
|
|
@ -151,6 +237,12 @@ services:
|
||||||
ports:
|
ports:
|
||||||
- "80:80"
|
- "80:80"
|
||||||
- "443:443"
|
- "443:443"
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-fsS", "http://localhost:2019/config/"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 15s
|
||||||
depends_on:
|
depends_on:
|
||||||
- forgejo
|
- forgejo
|
||||||
networks:
|
networks:
|
||||||
|
|
@ -171,6 +263,12 @@ services:
|
||||||
- FORGEJO__security__INSTALL_LOCK=true
|
- FORGEJO__security__INSTALL_LOCK=true
|
||||||
- FORGEJO__service__DISABLE_REGISTRATION=true
|
- FORGEJO__service__DISABLE_REGISTRATION=true
|
||||||
- FORGEJO__webhook__ALLOWED_HOST_LIST=private
|
- FORGEJO__webhook__ALLOWED_HOST_LIST=private
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-sf", "http://localhost:3000/api/v1/version"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 3s
|
||||||
|
retries: 30
|
||||||
|
start_period: 30s
|
||||||
ports:
|
ports:
|
||||||
- "3000:3000"
|
- "3000:3000"
|
||||||
networks:
|
networks:
|
||||||
|
|
|
||||||
|
|
@ -28,6 +28,9 @@ RUN chmod +x /entrypoint.sh
|
||||||
|
|
||||||
# Entrypoint runs polling loop directly, dropping to agent user via gosu.
|
# Entrypoint runs polling loop directly, dropping to agent user via gosu.
|
||||||
# All scripts execute as the agent user (UID 1000) while preserving env vars.
|
# All scripts execute as the agent user (UID 1000) while preserving env vars.
|
||||||
|
VOLUME /home/agent/data
|
||||||
|
VOLUME /home/agent/repos
|
||||||
|
|
||||||
WORKDIR /home/agent/disinto
|
WORKDIR /home/agent/disinto
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
ENTRYPOINT ["/entrypoint.sh"]
|
||||||
|
|
|
||||||
|
|
@ -7,14 +7,15 @@ set -euo pipefail
|
||||||
# poll scripts. All Docker Compose env vars are inherited (PATH, FORGE_TOKEN,
|
# poll scripts. All Docker Compose env vars are inherited (PATH, FORGE_TOKEN,
|
||||||
# ANTHROPIC_API_KEY, etc.).
|
# ANTHROPIC_API_KEY, etc.).
|
||||||
#
|
#
|
||||||
# AGENT_ROLES env var controls which scripts run: "review,dev,gardener,architect,planner,predictor"
|
# AGENT_ROLES env var controls which scripts run: "review,dev,gardener,architect,planner,predictor,supervisor"
|
||||||
# (default: all six). Uses while-true loop with staggered intervals:
|
# (default: all seven). Uses while-true loop with staggered intervals:
|
||||||
# - review-poll: every 5 minutes (offset by 0s)
|
# - review-poll: every 5 minutes (offset by 0s)
|
||||||
# - dev-poll: every 5 minutes (offset by 2 minutes)
|
# - dev-poll: every 5 minutes (offset by 2 minutes)
|
||||||
# - gardener: every GARDENER_INTERVAL seconds (default: 21600 = 6 hours)
|
# - gardener: every GARDENER_INTERVAL seconds (default: 21600 = 6 hours)
|
||||||
# - architect: every ARCHITECT_INTERVAL seconds (default: 21600 = 6 hours)
|
# - architect: every ARCHITECT_INTERVAL seconds (default: 21600 = 6 hours)
|
||||||
# - planner: every PLANNER_INTERVAL seconds (default: 43200 = 12 hours)
|
# - planner: every PLANNER_INTERVAL seconds (default: 43200 = 12 hours)
|
||||||
# - predictor: every 24 hours (288 iterations * 5 min)
|
# - predictor: every 24 hours (288 iterations * 5 min)
|
||||||
|
# - supervisor: every SUPERVISOR_INTERVAL seconds (default: 1200 = 20 min)
|
||||||
|
|
||||||
DISINTO_BAKED="/home/agent/disinto"
|
DISINTO_BAKED="/home/agent/disinto"
|
||||||
DISINTO_LIVE="/home/agent/repos/_factory"
|
DISINTO_LIVE="/home/agent/repos/_factory"
|
||||||
|
|
@ -49,7 +50,7 @@ source "${DISINTO_BAKED}/lib/git-creds.sh"
|
||||||
# Wrapper that calls the shared configure_git_creds with agent-specific paths,
|
# Wrapper that calls the shared configure_git_creds with agent-specific paths,
|
||||||
# then repairs any legacy baked-credential URLs in existing clones.
|
# then repairs any legacy baked-credential URLs in existing clones.
|
||||||
_setup_git_creds() {
|
_setup_git_creds() {
|
||||||
configure_git_creds "/home/agent" "gosu agent"
|
_GIT_CREDS_LOG_FN=log configure_git_creds "/home/agent" "gosu agent"
|
||||||
if [ -n "${FORGE_PASS:-}" ] && [ -n "${FORGE_URL:-}" ]; then
|
if [ -n "${FORGE_PASS:-}" ] && [ -n "${FORGE_URL:-}" ]; then
|
||||||
log "Git credential helper configured (password auth)"
|
log "Git credential helper configured (password auth)"
|
||||||
fi
|
fi
|
||||||
|
|
@ -61,16 +62,21 @@ _setup_git_creds() {
|
||||||
# Configure git author identity for commits made by this container.
|
# Configure git author identity for commits made by this container.
|
||||||
# Derives identity from the resolved bot user (BOT_USER) to ensure commits
|
# Derives identity from the resolved bot user (BOT_USER) to ensure commits
|
||||||
# are visibly attributable to the correct bot in the forge timeline.
|
# are visibly attributable to the correct bot in the forge timeline.
|
||||||
|
# BOT_USER is normally set by configure_git_creds() (#741); this function
|
||||||
|
# only falls back to its own API call if BOT_USER was not already resolved.
|
||||||
configure_git_identity() {
|
configure_git_identity() {
|
||||||
# Resolve BOT_USER from FORGE_TOKEN if not already set
|
# Resolve BOT_USER from FORGE_TOKEN if not already set (configure_git_creds
|
||||||
|
# exports BOT_USER on success, so this is a fallback for edge cases only).
|
||||||
if [ -z "${BOT_USER:-}" ] && [ -n "${FORGE_TOKEN:-}" ]; then
|
if [ -z "${BOT_USER:-}" ] && [ -n "${FORGE_TOKEN:-}" ]; then
|
||||||
BOT_USER=$(curl -sf --max-time 10 \
|
BOT_USER=$(curl -sf --max-time 10 \
|
||||||
-H "Authorization: token ${FORGE_TOKEN}" \
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
"${FORGE_URL:-http://localhost:3000}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || true
|
"${FORGE_URL:-http://localhost:3000}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || true
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Default to dev-bot if resolution fails
|
if [ -z "${BOT_USER:-}" ]; then
|
||||||
BOT_USER="${BOT_USER:-dev-bot}"
|
log "WARNING: Could not resolve bot username for git identity — commits will use fallback"
|
||||||
|
BOT_USER="agent"
|
||||||
|
fi
|
||||||
|
|
||||||
# Configure git identity for all repositories
|
# Configure git identity for all repositories
|
||||||
gosu agent git config --global user.name "${BOT_USER}"
|
gosu agent git config --global user.name "${BOT_USER}"
|
||||||
|
|
@ -323,7 +329,7 @@ init_state_dir
|
||||||
|
|
||||||
# Parse AGENT_ROLES env var (default: all agents)
|
# Parse AGENT_ROLES env var (default: all agents)
|
||||||
# Expected format: comma-separated list like "review,dev,gardener"
|
# Expected format: comma-separated list like "review,dev,gardener"
|
||||||
AGENT_ROLES="${AGENT_ROLES:-review,dev,gardener,architect,planner,predictor}"
|
AGENT_ROLES="${AGENT_ROLES:-review,dev,gardener,architect,planner,predictor,supervisor}"
|
||||||
log "Agent roles configured: ${AGENT_ROLES}"
|
log "Agent roles configured: ${AGENT_ROLES}"
|
||||||
|
|
||||||
# Poll interval in seconds (5 minutes default)
|
# Poll interval in seconds (5 minutes default)
|
||||||
|
|
@ -333,9 +339,10 @@ POLL_INTERVAL="${POLL_INTERVAL:-300}"
|
||||||
GARDENER_INTERVAL="${GARDENER_INTERVAL:-21600}"
|
GARDENER_INTERVAL="${GARDENER_INTERVAL:-21600}"
|
||||||
ARCHITECT_INTERVAL="${ARCHITECT_INTERVAL:-21600}"
|
ARCHITECT_INTERVAL="${ARCHITECT_INTERVAL:-21600}"
|
||||||
PLANNER_INTERVAL="${PLANNER_INTERVAL:-43200}"
|
PLANNER_INTERVAL="${PLANNER_INTERVAL:-43200}"
|
||||||
|
SUPERVISOR_INTERVAL="${SUPERVISOR_INTERVAL:-1200}"
|
||||||
|
|
||||||
log "Entering polling loop (interval: ${POLL_INTERVAL}s, roles: ${AGENT_ROLES})"
|
log "Entering polling loop (interval: ${POLL_INTERVAL}s, roles: ${AGENT_ROLES})"
|
||||||
log "Gardener interval: ${GARDENER_INTERVAL}s, Architect interval: ${ARCHITECT_INTERVAL}s, Planner interval: ${PLANNER_INTERVAL}s"
|
log "Gardener interval: ${GARDENER_INTERVAL}s, Architect interval: ${ARCHITECT_INTERVAL}s, Planner interval: ${PLANNER_INTERVAL}s, Supervisor interval: ${SUPERVISOR_INTERVAL}s"
|
||||||
|
|
||||||
# Main polling loop using iteration counter for gardener scheduling
|
# Main polling loop using iteration counter for gardener scheduling
|
||||||
iteration=0
|
iteration=0
|
||||||
|
|
@ -380,11 +387,13 @@ print(cfg.get('primary_branch', 'main'))
|
||||||
log "Processing project TOML: ${toml}"
|
log "Processing project TOML: ${toml}"
|
||||||
|
|
||||||
# --- Fast agents: run in background, wait before slow agents ---
|
# --- Fast agents: run in background, wait before slow agents ---
|
||||||
|
FAST_PIDS=()
|
||||||
|
|
||||||
# Review poll (every iteration)
|
# Review poll (every iteration)
|
||||||
if [[ ",${AGENT_ROLES}," == *",review,"* ]]; then
|
if [[ ",${AGENT_ROLES}," == *",review,"* ]]; then
|
||||||
log "Running review-poll (iteration ${iteration}) for ${toml}"
|
log "Running review-poll (iteration ${iteration}) for ${toml}"
|
||||||
gosu agent bash -c "cd ${DISINTO_DIR} && bash review/review-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/review-poll.log" 2>&1 &
|
gosu agent bash -c "cd ${DISINTO_DIR} && bash review/review-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/review-poll.log" 2>&1 &
|
||||||
|
FAST_PIDS+=($!)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
sleep 2 # stagger fast polls
|
sleep 2 # stagger fast polls
|
||||||
|
|
@ -393,10 +402,14 @@ print(cfg.get('primary_branch', 'main'))
|
||||||
if [[ ",${AGENT_ROLES}," == *",dev,"* ]]; then
|
if [[ ",${AGENT_ROLES}," == *",dev,"* ]]; then
|
||||||
log "Running dev-poll (iteration ${iteration}) for ${toml}"
|
log "Running dev-poll (iteration ${iteration}) for ${toml}"
|
||||||
gosu agent bash -c "cd ${DISINTO_DIR} && bash dev/dev-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/dev-poll.log" 2>&1 &
|
gosu agent bash -c "cd ${DISINTO_DIR} && bash dev/dev-poll.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/dev-poll.log" 2>&1 &
|
||||||
|
FAST_PIDS+=($!)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Wait for fast polls to finish before launching slow agents
|
# Wait only for THIS iteration's fast polls — long-running gardener/dev-agent
|
||||||
wait
|
# from prior iterations must not block us.
|
||||||
|
if [ ${#FAST_PIDS[@]} -gt 0 ]; then
|
||||||
|
wait "${FAST_PIDS[@]}"
|
||||||
|
fi
|
||||||
|
|
||||||
# --- Slow agents: run in background with pgrep guard ---
|
# --- Slow agents: run in background with pgrep guard ---
|
||||||
|
|
||||||
|
|
@ -452,6 +465,19 @@ print(cfg.get('primary_branch', 'main'))
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Supervisor (interval configurable via SUPERVISOR_INTERVAL env var, default 20 min)
|
||||||
|
if [[ ",${AGENT_ROLES}," == *",supervisor,"* ]]; then
|
||||||
|
supervisor_iteration=$((iteration * POLL_INTERVAL))
|
||||||
|
if [ $((supervisor_iteration % SUPERVISOR_INTERVAL)) -eq 0 ] && [ "$now" -ge "$supervisor_iteration" ]; then
|
||||||
|
if ! pgrep -f "supervisor-run.sh" >/dev/null; then
|
||||||
|
log "Running supervisor (iteration ${iteration}, ${SUPERVISOR_INTERVAL}s interval) for ${toml}"
|
||||||
|
gosu agent bash -c "cd ${DISINTO_DIR} && bash supervisor/supervisor-run.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/supervisor.log" 2>&1 &
|
||||||
|
else
|
||||||
|
log "Skipping supervisor — already running"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
sleep "${POLL_INTERVAL}"
|
sleep "${POLL_INTERVAL}"
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,6 @@ WORKDIR /var/chat
|
||||||
|
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
|
||||||
CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8080/')" || exit 1
|
CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')" || exit 1
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint-chat.sh"]
|
ENTRYPOINT ["/entrypoint-chat.sh"]
|
||||||
|
|
|
||||||
|
|
@ -3,15 +3,16 @@
|
||||||
disinto-chat server — minimal HTTP backend for Claude chat UI.
|
disinto-chat server — minimal HTTP backend for Claude chat UI.
|
||||||
|
|
||||||
Routes:
|
Routes:
|
||||||
GET /chat/login → 302 to Forgejo OAuth authorize
|
GET /chat/auth/verify -> Caddy forward_auth callback (returns 200+X-Forwarded-User or 401)
|
||||||
GET /chat/oauth/callback → exchange code for token, validate user, set session
|
GET /chat/login -> 302 to Forgejo OAuth authorize
|
||||||
GET /chat/ → serves index.html (session required)
|
GET /chat/oauth/callback -> exchange code for token, validate user, set session
|
||||||
GET /chat/static/* → serves static assets (session required)
|
GET /chat/ -> serves index.html (session required)
|
||||||
POST /chat → spawns `claude --print` with user message (session required)
|
GET /chat/static/* -> serves static assets (session required)
|
||||||
GET /ws → reserved for future streaming upgrade (returns 501)
|
POST /chat -> spawns `claude --print` with user message (session required)
|
||||||
|
GET /ws -> reserved for future streaming upgrade (returns 501)
|
||||||
|
|
||||||
OAuth flow:
|
OAuth flow:
|
||||||
1. User hits any /chat/* route without a valid session cookie → 302 /chat/login
|
1. User hits any /chat/* route without a valid session cookie -> 302 /chat/login
|
||||||
2. /chat/login redirects to Forgejo /login/oauth/authorize
|
2. /chat/login redirects to Forgejo /login/oauth/authorize
|
||||||
3. Forgejo redirects back to /chat/oauth/callback with ?code=...&state=...
|
3. Forgejo redirects back to /chat/oauth/callback with ?code=...&state=...
|
||||||
4. Server exchanges code for access token, fetches /api/v1/user
|
4. Server exchanges code for access token, fetches /api/v1/user
|
||||||
|
|
@ -21,8 +22,10 @@ OAuth flow:
|
||||||
The claude binary is expected to be mounted from the host at /usr/local/bin/claude.
|
The claude binary is expected to be mounted from the host at /usr/local/bin/claude.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import datetime
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import secrets
|
import secrets
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
|
@ -43,7 +46,18 @@ CHAT_OAUTH_CLIENT_ID = os.environ.get("CHAT_OAUTH_CLIENT_ID", "")
|
||||||
CHAT_OAUTH_CLIENT_SECRET = os.environ.get("CHAT_OAUTH_CLIENT_SECRET", "")
|
CHAT_OAUTH_CLIENT_SECRET = os.environ.get("CHAT_OAUTH_CLIENT_SECRET", "")
|
||||||
EDGE_TUNNEL_FQDN = os.environ.get("EDGE_TUNNEL_FQDN", "")
|
EDGE_TUNNEL_FQDN = os.environ.get("EDGE_TUNNEL_FQDN", "")
|
||||||
|
|
||||||
# Allowed users — disinto-admin always allowed; CSV allowlist extends it
|
# Shared secret for Caddy forward_auth verify endpoint (#709).
|
||||||
|
# When set, only requests carrying this value in X-Forward-Auth-Secret are
|
||||||
|
# allowed to call /chat/auth/verify. When empty the endpoint is unrestricted
|
||||||
|
# (acceptable during local dev; production MUST set this).
|
||||||
|
FORWARD_AUTH_SECRET = os.environ.get("FORWARD_AUTH_SECRET", "")
|
||||||
|
|
||||||
|
# Rate limiting / cost caps (#711)
|
||||||
|
CHAT_MAX_REQUESTS_PER_HOUR = int(os.environ.get("CHAT_MAX_REQUESTS_PER_HOUR", 60))
|
||||||
|
CHAT_MAX_REQUESTS_PER_DAY = int(os.environ.get("CHAT_MAX_REQUESTS_PER_DAY", 500))
|
||||||
|
CHAT_MAX_TOKENS_PER_DAY = int(os.environ.get("CHAT_MAX_TOKENS_PER_DAY", 1000000))
|
||||||
|
|
||||||
|
# Allowed users - disinto-admin always allowed; CSV allowlist extends it
|
||||||
_allowed_csv = os.environ.get("DISINTO_CHAT_ALLOWED_USERS", "")
|
_allowed_csv = os.environ.get("DISINTO_CHAT_ALLOWED_USERS", "")
|
||||||
ALLOWED_USERS = {"disinto-admin"}
|
ALLOWED_USERS = {"disinto-admin"}
|
||||||
if _allowed_csv:
|
if _allowed_csv:
|
||||||
|
|
@ -55,12 +69,24 @@ SESSION_COOKIE = "disinto_chat_session"
|
||||||
# Session TTL: 24 hours
|
# Session TTL: 24 hours
|
||||||
SESSION_TTL = 24 * 60 * 60
|
SESSION_TTL = 24 * 60 * 60
|
||||||
|
|
||||||
# In-memory session store: token → {"user": str, "expires": float}
|
# Chat history directory (bind-mounted from host)
|
||||||
|
CHAT_HISTORY_DIR = os.environ.get("CHAT_HISTORY_DIR", "/var/lib/chat/history")
|
||||||
|
|
||||||
|
# Regex for valid conversation_id (12-char hex, no slashes)
|
||||||
|
CONVERSATION_ID_PATTERN = re.compile(r"^[0-9a-f]{12}$")
|
||||||
|
|
||||||
|
# In-memory session store: token -> {"user": str, "expires": float}
|
||||||
_sessions = {}
|
_sessions = {}
|
||||||
|
|
||||||
# Pending OAuth state tokens: state → expires (float)
|
# Pending OAuth state tokens: state -> expires (float)
|
||||||
_oauth_states = {}
|
_oauth_states = {}
|
||||||
|
|
||||||
|
# Per-user rate limiting state (#711)
|
||||||
|
# user -> list of request timestamps (for sliding-window hourly/daily caps)
|
||||||
|
_request_log = {}
|
||||||
|
# user -> {"tokens": int, "date": "YYYY-MM-DD"}
|
||||||
|
_daily_tokens = {}
|
||||||
|
|
||||||
# MIME types for static files
|
# MIME types for static files
|
||||||
MIME_TYPES = {
|
MIME_TYPES = {
|
||||||
".html": "text/html; charset=utf-8",
|
".html": "text/html; charset=utf-8",
|
||||||
|
|
@ -100,7 +126,7 @@ def _validate_session(cookie_header):
|
||||||
session = _sessions.get(token)
|
session = _sessions.get(token)
|
||||||
if session and session["expires"] > time.time():
|
if session and session["expires"] > time.time():
|
||||||
return session["user"]
|
return session["user"]
|
||||||
# Expired — clean up
|
# Expired - clean up
|
||||||
_sessions.pop(token, None)
|
_sessions.pop(token, None)
|
||||||
return None
|
return None
|
||||||
return None
|
return None
|
||||||
|
|
@ -161,6 +187,242 @@ def _fetch_user(access_token):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# Rate Limiting Functions (#711)
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def _check_rate_limit(user):
|
||||||
|
"""Check per-user rate limits. Returns (allowed, retry_after, reason) (#711).
|
||||||
|
|
||||||
|
Checks hourly request cap, daily request cap, and daily token cap.
|
||||||
|
"""
|
||||||
|
now = time.time()
|
||||||
|
one_hour_ago = now - 3600
|
||||||
|
today = datetime.date.today().isoformat()
|
||||||
|
|
||||||
|
# Prune old entries from request log
|
||||||
|
timestamps = _request_log.get(user, [])
|
||||||
|
timestamps = [t for t in timestamps if t > now - 86400]
|
||||||
|
_request_log[user] = timestamps
|
||||||
|
|
||||||
|
# Hourly request cap
|
||||||
|
hourly = [t for t in timestamps if t > one_hour_ago]
|
||||||
|
if len(hourly) >= CHAT_MAX_REQUESTS_PER_HOUR:
|
||||||
|
oldest_in_window = min(hourly)
|
||||||
|
retry_after = int(oldest_in_window + 3600 - now) + 1
|
||||||
|
return False, max(retry_after, 1), "hourly request limit"
|
||||||
|
|
||||||
|
# Daily request cap
|
||||||
|
start_of_day = time.mktime(datetime.date.today().timetuple())
|
||||||
|
daily = [t for t in timestamps if t >= start_of_day]
|
||||||
|
if len(daily) >= CHAT_MAX_REQUESTS_PER_DAY:
|
||||||
|
next_day = start_of_day + 86400
|
||||||
|
retry_after = int(next_day - now) + 1
|
||||||
|
return False, max(retry_after, 1), "daily request limit"
|
||||||
|
|
||||||
|
# Daily token cap
|
||||||
|
token_info = _daily_tokens.get(user, {"tokens": 0, "date": today})
|
||||||
|
if token_info["date"] != today:
|
||||||
|
token_info = {"tokens": 0, "date": today}
|
||||||
|
_daily_tokens[user] = token_info
|
||||||
|
if token_info["tokens"] >= CHAT_MAX_TOKENS_PER_DAY:
|
||||||
|
next_day = start_of_day + 86400
|
||||||
|
retry_after = int(next_day - now) + 1
|
||||||
|
return False, max(retry_after, 1), "daily token limit"
|
||||||
|
|
||||||
|
return True, 0, ""
|
||||||
|
|
||||||
|
|
||||||
|
def _record_request(user):
|
||||||
|
"""Record a request timestamp for the user (#711)."""
|
||||||
|
_request_log.setdefault(user, []).append(time.time())
|
||||||
|
|
||||||
|
|
||||||
|
def _record_tokens(user, tokens):
|
||||||
|
"""Record token usage for the user (#711)."""
|
||||||
|
today = datetime.date.today().isoformat()
|
||||||
|
token_info = _daily_tokens.get(user, {"tokens": 0, "date": today})
|
||||||
|
if token_info["date"] != today:
|
||||||
|
token_info = {"tokens": 0, "date": today}
|
||||||
|
token_info["tokens"] += tokens
|
||||||
|
_daily_tokens[user] = token_info
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_stream_json(output):
|
||||||
|
"""Parse stream-json output from claude --print (#711).
|
||||||
|
|
||||||
|
Returns (text_content, total_tokens). Falls back gracefully if the
|
||||||
|
usage event is absent or malformed.
|
||||||
|
"""
|
||||||
|
text_parts = []
|
||||||
|
total_tokens = 0
|
||||||
|
|
||||||
|
for line in output.splitlines():
|
||||||
|
line = line.strip()
|
||||||
|
if not line:
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
event = json.loads(line)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
continue
|
||||||
|
|
||||||
|
etype = event.get("type", "")
|
||||||
|
|
||||||
|
# Collect assistant text
|
||||||
|
if etype == "content_block_delta":
|
||||||
|
delta = event.get("delta", {})
|
||||||
|
if delta.get("type") == "text_delta":
|
||||||
|
text_parts.append(delta.get("text", ""))
|
||||||
|
elif etype == "assistant":
|
||||||
|
# Full assistant message (non-streaming)
|
||||||
|
content = event.get("content", "")
|
||||||
|
if isinstance(content, str) and content:
|
||||||
|
text_parts.append(content)
|
||||||
|
elif isinstance(content, list):
|
||||||
|
for block in content:
|
||||||
|
if isinstance(block, dict) and block.get("text"):
|
||||||
|
text_parts.append(block["text"])
|
||||||
|
|
||||||
|
# Parse usage from result event
|
||||||
|
if etype == "result":
|
||||||
|
usage = event.get("usage", {})
|
||||||
|
total_tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
|
||||||
|
elif "usage" in event:
|
||||||
|
usage = event["usage"]
|
||||||
|
if isinstance(usage, dict):
|
||||||
|
total_tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
|
||||||
|
|
||||||
|
return "".join(text_parts), total_tokens
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# Conversation History Functions (#710)
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def _generate_conversation_id():
|
||||||
|
"""Generate a new conversation ID (12-char hex string)."""
|
||||||
|
return secrets.token_hex(6)
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_conversation_id(conv_id):
|
||||||
|
"""Validate that conversation_id matches the required format."""
|
||||||
|
return bool(CONVERSATION_ID_PATTERN.match(conv_id))
|
||||||
|
|
||||||
|
|
||||||
|
def _get_user_history_dir(user):
|
||||||
|
"""Get the history directory path for a user."""
|
||||||
|
return os.path.join(CHAT_HISTORY_DIR, user)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_conversation_path(user, conv_id):
|
||||||
|
"""Get the full path to a conversation file."""
|
||||||
|
user_dir = _get_user_history_dir(user)
|
||||||
|
return os.path.join(user_dir, f"{conv_id}.ndjson")
|
||||||
|
|
||||||
|
|
||||||
|
def _ensure_user_dir(user):
|
||||||
|
"""Ensure the user's history directory exists."""
|
||||||
|
user_dir = _get_user_history_dir(user)
|
||||||
|
os.makedirs(user_dir, exist_ok=True)
|
||||||
|
return user_dir
|
||||||
|
|
||||||
|
|
||||||
|
def _write_message(user, conv_id, role, content):
|
||||||
|
"""Append a message to a conversation file in NDJSON format."""
|
||||||
|
conv_path = _get_conversation_path(user, conv_id)
|
||||||
|
_ensure_user_dir(user)
|
||||||
|
|
||||||
|
record = {
|
||||||
|
"ts": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
|
||||||
|
"user": user,
|
||||||
|
"role": role,
|
||||||
|
"content": content,
|
||||||
|
}
|
||||||
|
|
||||||
|
with open(conv_path, "a", encoding="utf-8") as f:
|
||||||
|
f.write(json.dumps(record, ensure_ascii=False) + "\n")
|
||||||
|
|
||||||
|
|
||||||
|
def _read_conversation(user, conv_id):
|
||||||
|
"""Read all messages from a conversation file."""
|
||||||
|
conv_path = _get_conversation_path(user, conv_id)
|
||||||
|
messages = []
|
||||||
|
|
||||||
|
if not os.path.exists(conv_path):
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(conv_path, "r", encoding="utf-8") as f:
|
||||||
|
for line in f:
|
||||||
|
line = line.strip()
|
||||||
|
if line:
|
||||||
|
try:
|
||||||
|
messages.append(json.loads(line))
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
# Skip malformed lines
|
||||||
|
continue
|
||||||
|
except IOError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return messages
|
||||||
|
|
||||||
|
|
||||||
|
def _list_user_conversations(user):
|
||||||
|
"""List all conversation files for a user with first message preview."""
|
||||||
|
user_dir = _get_user_history_dir(user)
|
||||||
|
conversations = []
|
||||||
|
|
||||||
|
if not os.path.exists(user_dir):
|
||||||
|
return conversations
|
||||||
|
|
||||||
|
try:
|
||||||
|
for filename in os.listdir(user_dir):
|
||||||
|
if not filename.endswith(".ndjson"):
|
||||||
|
continue
|
||||||
|
|
||||||
|
conv_id = filename[:-7] # Remove .ndjson extension
|
||||||
|
if not _validate_conversation_id(conv_id):
|
||||||
|
continue
|
||||||
|
|
||||||
|
conv_path = os.path.join(user_dir, filename)
|
||||||
|
messages = _read_conversation(user, conv_id)
|
||||||
|
|
||||||
|
if messages:
|
||||||
|
first_msg = messages[0]
|
||||||
|
preview = first_msg.get("content", "")[:50]
|
||||||
|
if len(first_msg.get("content", "")) > 50:
|
||||||
|
preview += "..."
|
||||||
|
conversations.append({
|
||||||
|
"id": conv_id,
|
||||||
|
"created_at": first_msg.get("ts", ""),
|
||||||
|
"preview": preview,
|
||||||
|
"message_count": len(messages),
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
# Empty conversation file
|
||||||
|
conversations.append({
|
||||||
|
"id": conv_id,
|
||||||
|
"created_at": "",
|
||||||
|
"preview": "(empty)",
|
||||||
|
"message_count": 0,
|
||||||
|
})
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Sort by created_at descending
|
||||||
|
conversations.sort(key=lambda x: x["created_at"] or "", reverse=True)
|
||||||
|
return conversations
|
||||||
|
|
||||||
|
|
||||||
|
def _delete_conversation(user, conv_id):
|
||||||
|
"""Delete a conversation file."""
|
||||||
|
conv_path = _get_conversation_path(user, conv_id)
|
||||||
|
if os.path.exists(conv_path):
|
||||||
|
os.remove(conv_path)
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
class ChatHandler(BaseHTTPRequestHandler):
|
class ChatHandler(BaseHTTPRequestHandler):
|
||||||
"""HTTP request handler for disinto-chat with Forgejo OAuth."""
|
"""HTTP request handler for disinto-chat with Forgejo OAuth."""
|
||||||
|
|
||||||
|
|
@ -186,11 +448,52 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
self.end_headers()
|
self.end_headers()
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
def _check_forwarded_user(self, session_user):
|
||||||
|
"""Defense-in-depth: verify X-Forwarded-User matches session user (#709).
|
||||||
|
|
||||||
|
Returns True if the request may proceed, False if a 403 was sent.
|
||||||
|
When X-Forwarded-User is absent (forward_auth removed from Caddy),
|
||||||
|
the request is rejected - fail-closed by design.
|
||||||
|
"""
|
||||||
|
forwarded = self.headers.get("X-Forwarded-User")
|
||||||
|
if not forwarded:
|
||||||
|
rid = self.headers.get("X-Request-Id", "-")
|
||||||
|
print(
|
||||||
|
f"WARN: missing X-Forwarded-User for session_user={session_user} "
|
||||||
|
f"req_id={rid} - fail-closed (#709)",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
self.send_error_page(403, "Forbidden: missing forwarded-user header")
|
||||||
|
return False
|
||||||
|
if forwarded != session_user:
|
||||||
|
rid = self.headers.get("X-Request-Id", "-")
|
||||||
|
print(
|
||||||
|
f"WARN: X-Forwarded-User mismatch: header={forwarded} "
|
||||||
|
f"session={session_user} req_id={rid} (#709)",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
self.send_error_page(403, "Forbidden: user identity mismatch")
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
def do_GET(self):
|
def do_GET(self):
|
||||||
"""Handle GET requests."""
|
"""Handle GET requests."""
|
||||||
parsed = urlparse(self.path)
|
parsed = urlparse(self.path)
|
||||||
path = parsed.path
|
path = parsed.path
|
||||||
|
|
||||||
|
# Health endpoint (no auth required) — used by Docker healthcheck
|
||||||
|
if path == "/health":
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header("Content-Type", "text/plain")
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(b"ok\n")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Verify endpoint for Caddy forward_auth (#709)
|
||||||
|
if path == "/chat/auth/verify":
|
||||||
|
self.handle_auth_verify()
|
||||||
|
return
|
||||||
|
|
||||||
# OAuth routes (no session required)
|
# OAuth routes (no session required)
|
||||||
if path == "/chat/login":
|
if path == "/chat/login":
|
||||||
self.handle_login()
|
self.handle_login()
|
||||||
|
|
@ -200,16 +503,43 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
self.handle_oauth_callback(parsed.query)
|
self.handle_oauth_callback(parsed.query)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Conversation list endpoint: GET /chat/history
|
||||||
|
if path == "/chat/history":
|
||||||
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
|
return
|
||||||
|
if not self._check_forwarded_user(user):
|
||||||
|
return
|
||||||
|
self.handle_conversation_list(user)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Single conversation endpoint: GET /chat/history/<id>
|
||||||
|
if path.startswith("/chat/history/"):
|
||||||
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
|
return
|
||||||
|
if not self._check_forwarded_user(user):
|
||||||
|
return
|
||||||
|
conv_id = path[len("/chat/history/"):]
|
||||||
|
self.handle_conversation_get(user, conv_id)
|
||||||
|
return
|
||||||
|
|
||||||
# Serve index.html at root
|
# Serve index.html at root
|
||||||
if path in ("/", "/chat", "/chat/"):
|
if path in ("/", "/chat", "/chat/"):
|
||||||
if not self._require_session():
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
|
return
|
||||||
|
if not self._check_forwarded_user(user):
|
||||||
return
|
return
|
||||||
self.serve_index()
|
self.serve_index()
|
||||||
return
|
return
|
||||||
|
|
||||||
# Serve static files
|
# Serve static files
|
||||||
if path.startswith("/chat/static/") or path.startswith("/static/"):
|
if path.startswith("/chat/static/") or path.startswith("/static/"):
|
||||||
if not self._require_session():
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
|
return
|
||||||
|
if not self._check_forwarded_user(user):
|
||||||
return
|
return
|
||||||
self.serve_static(path)
|
self.serve_static(path)
|
||||||
return
|
return
|
||||||
|
|
@ -227,16 +557,59 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
parsed = urlparse(self.path)
|
parsed = urlparse(self.path)
|
||||||
path = parsed.path
|
path = parsed.path
|
||||||
|
|
||||||
|
# New conversation endpoint (session required)
|
||||||
|
if path == "/chat/new":
|
||||||
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
|
return
|
||||||
|
if not self._check_forwarded_user(user):
|
||||||
|
return
|
||||||
|
self.handle_new_conversation(user)
|
||||||
|
return
|
||||||
|
|
||||||
# Chat endpoint (session required)
|
# Chat endpoint (session required)
|
||||||
if path in ("/chat", "/chat/"):
|
if path in ("/chat", "/chat/"):
|
||||||
if not self._require_session():
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
return
|
return
|
||||||
self.handle_chat()
|
if not self._check_forwarded_user(user):
|
||||||
|
return
|
||||||
|
self.handle_chat(user)
|
||||||
return
|
return
|
||||||
|
|
||||||
# 404 for unknown paths
|
# 404 for unknown paths
|
||||||
self.send_error_page(404, "Not found")
|
self.send_error_page(404, "Not found")
|
||||||
|
|
||||||
|
def handle_auth_verify(self):
|
||||||
|
"""Caddy forward_auth callback - validate session and return X-Forwarded-User (#709).
|
||||||
|
|
||||||
|
Caddy calls this endpoint for every /chat/* request. If the session
|
||||||
|
cookie is valid the endpoint returns 200 with the X-Forwarded-User
|
||||||
|
header set to the session username. Otherwise it returns 401 so Caddy
|
||||||
|
knows the request is unauthenticated.
|
||||||
|
|
||||||
|
Access control: when FORWARD_AUTH_SECRET is configured, the request must
|
||||||
|
carry a matching X-Forward-Auth-Secret header (shared secret between
|
||||||
|
Caddy and the chat backend).
|
||||||
|
"""
|
||||||
|
# Shared-secret gate
|
||||||
|
if FORWARD_AUTH_SECRET:
|
||||||
|
provided = self.headers.get("X-Forward-Auth-Secret", "")
|
||||||
|
if not secrets.compare_digest(provided, FORWARD_AUTH_SECRET):
|
||||||
|
self.send_error_page(403, "Forbidden: invalid forward-auth secret")
|
||||||
|
return
|
||||||
|
|
||||||
|
user = _validate_session(self.headers.get("Cookie"))
|
||||||
|
if not user:
|
||||||
|
self.send_error_page(401, "Unauthorized: no valid session")
|
||||||
|
return
|
||||||
|
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header("X-Forwarded-User", user)
|
||||||
|
self.send_header("Content-Type", "text/plain; charset=utf-8")
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(b"ok")
|
||||||
|
|
||||||
def handle_login(self):
|
def handle_login(self):
|
||||||
"""Redirect to Forgejo OAuth authorize endpoint."""
|
"""Redirect to Forgejo OAuth authorize endpoint."""
|
||||||
_gc_sessions()
|
_gc_sessions()
|
||||||
|
|
@ -363,11 +736,33 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
except IOError as e:
|
except IOError as e:
|
||||||
self.send_error_page(500, f"Error reading file: {e}")
|
self.send_error_page(500, f"Error reading file: {e}")
|
||||||
|
|
||||||
def handle_chat(self):
|
def _send_rate_limit_response(self, retry_after, reason):
|
||||||
|
"""Send a 429 response with Retry-After header and HTMX fragment (#711)."""
|
||||||
|
body = (
|
||||||
|
f'<div class="rate-limit-error">'
|
||||||
|
f"Rate limit exceeded: {reason}. "
|
||||||
|
f"Please try again in {retry_after} seconds."
|
||||||
|
f"</div>"
|
||||||
|
)
|
||||||
|
self.send_response(429)
|
||||||
|
self.send_header("Retry-After", str(retry_after))
|
||||||
|
self.send_header("Content-Type", "text/html; charset=utf-8")
|
||||||
|
self.send_header("Content-Length", str(len(body.encode("utf-8"))))
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(body.encode("utf-8"))
|
||||||
|
|
||||||
|
def handle_chat(self, user):
|
||||||
"""
|
"""
|
||||||
Handle chat requests by spawning `claude --print` with the user message.
|
Handle chat requests by spawning `claude --print` with the user message.
|
||||||
Returns the response as plain text.
|
Enforces per-user rate limits and tracks token usage (#711).
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# Check rate limits before processing (#711)
|
||||||
|
allowed, retry_after, reason = _check_rate_limit(user)
|
||||||
|
if not allowed:
|
||||||
|
self._send_rate_limit_response(retry_after, reason)
|
||||||
|
return
|
||||||
|
|
||||||
# Read request body
|
# Read request body
|
||||||
content_length = int(self.headers.get("Content-Length", 0))
|
content_length = int(self.headers.get("Content-Length", 0))
|
||||||
if content_length == 0:
|
if content_length == 0:
|
||||||
|
|
@ -380,6 +775,7 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
body_str = body.decode("utf-8")
|
body_str = body.decode("utf-8")
|
||||||
params = parse_qs(body_str)
|
params = parse_qs(body_str)
|
||||||
message = params.get("message", [""])[0]
|
message = params.get("message", [""])[0]
|
||||||
|
conv_id = params.get("conversation_id", [None])[0]
|
||||||
except (UnicodeDecodeError, KeyError):
|
except (UnicodeDecodeError, KeyError):
|
||||||
self.send_error_page(400, "Invalid message format")
|
self.send_error_page(400, "Invalid message format")
|
||||||
return
|
return
|
||||||
|
|
@ -388,46 +784,150 @@ class ChatHandler(BaseHTTPRequestHandler):
|
||||||
self.send_error_page(400, "Empty message")
|
self.send_error_page(400, "Empty message")
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Get user from session
|
||||||
|
user = _validate_session(self.headers.get("Cookie"))
|
||||||
|
if not user:
|
||||||
|
self.send_error_page(401, "Unauthorized")
|
||||||
|
return
|
||||||
|
|
||||||
# Validate Claude binary exists
|
# Validate Claude binary exists
|
||||||
if not os.path.exists(CLAUDE_BIN):
|
if not os.path.exists(CLAUDE_BIN):
|
||||||
self.send_error_page(500, "Claude CLI not found")
|
self.send_error_page(500, "Claude CLI not found")
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Generate new conversation ID if not provided
|
||||||
|
if not conv_id or not _validate_conversation_id(conv_id):
|
||||||
|
conv_id = _generate_conversation_id()
|
||||||
|
|
||||||
|
# Record request for rate limiting (#711)
|
||||||
|
_record_request(user)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Spawn claude --print with text output format
|
# Save user message to history
|
||||||
|
_write_message(user, conv_id, "user", message)
|
||||||
|
|
||||||
|
# Spawn claude --print with stream-json for token tracking (#711)
|
||||||
proc = subprocess.Popen(
|
proc = subprocess.Popen(
|
||||||
[CLAUDE_BIN, "--print", message],
|
[CLAUDE_BIN, "--print", "--output-format", "stream-json", message],
|
||||||
stdout=subprocess.PIPE,
|
stdout=subprocess.PIPE,
|
||||||
stderr=subprocess.PIPE,
|
stderr=subprocess.PIPE,
|
||||||
text=True,
|
text=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Read response as text (Claude outputs plain text when not using stream-json)
|
raw_output = proc.stdout.read()
|
||||||
response = proc.stdout.read()
|
|
||||||
|
|
||||||
# Read stderr (should be minimal, mostly for debugging)
|
|
||||||
error_output = proc.stderr.read()
|
error_output = proc.stderr.read()
|
||||||
if error_output:
|
if error_output:
|
||||||
print(f"Claude stderr: {error_output}", file=sys.stderr)
|
print(f"Claude stderr: {error_output}", file=sys.stderr)
|
||||||
|
|
||||||
# Wait for process to complete
|
|
||||||
proc.wait()
|
proc.wait()
|
||||||
|
|
||||||
# Check for errors
|
|
||||||
if proc.returncode != 0:
|
if proc.returncode != 0:
|
||||||
self.send_error_page(500, f"Claude CLI failed with exit code {proc.returncode}")
|
self.send_error_page(500, f"Claude CLI failed with exit code {proc.returncode}")
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Parse stream-json for text and token usage (#711)
|
||||||
|
response, total_tokens = _parse_stream_json(raw_output)
|
||||||
|
|
||||||
|
# Track token usage - does not block *this* request (#711)
|
||||||
|
if total_tokens > 0:
|
||||||
|
_record_tokens(user, total_tokens)
|
||||||
|
print(
|
||||||
|
f"Token usage: user={user} tokens={total_tokens}",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Fall back to raw output if stream-json parsing yielded no text
|
||||||
|
if not response:
|
||||||
|
response = raw_output
|
||||||
|
|
||||||
|
# Save assistant response to history
|
||||||
|
_write_message(user, conv_id, "assistant", response)
|
||||||
|
|
||||||
self.send_response(200)
|
self.send_response(200)
|
||||||
self.send_header("Content-Type", "text/plain; charset=utf-8")
|
self.send_header("Content-Type", "application/json; charset=utf-8")
|
||||||
self.send_header("Content-Length", len(response.encode("utf-8")))
|
|
||||||
self.end_headers()
|
self.end_headers()
|
||||||
self.wfile.write(response.encode("utf-8"))
|
self.wfile.write(json.dumps({
|
||||||
|
"response": response,
|
||||||
|
"conversation_id": conv_id,
|
||||||
|
}, ensure_ascii=False).encode("utf-8"))
|
||||||
|
|
||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
self.send_error_page(500, "Claude CLI not found")
|
self.send_error_page(500, "Claude CLI not found")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.send_error_page(500, f"Error: {e}")
|
self.send_error_page(500, f"Error: {e}")
|
||||||
|
|
||||||
|
# =======================================================================
|
||||||
|
# Conversation History Handlers
|
||||||
|
# =======================================================================
|
||||||
|
|
||||||
|
def handle_conversation_list(self, user):
|
||||||
|
"""List all conversations for the logged-in user."""
|
||||||
|
conversations = _list_user_conversations(user)
|
||||||
|
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header("Content-Type", "application/json; charset=utf-8")
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(json.dumps(conversations, ensure_ascii=False).encode("utf-8"))
|
||||||
|
|
||||||
|
def handle_conversation_get(self, user, conv_id):
|
||||||
|
"""Get a specific conversation for the logged-in user."""
|
||||||
|
# Validate conversation_id format
|
||||||
|
if not _validate_conversation_id(conv_id):
|
||||||
|
self.send_error_page(400, "Invalid conversation ID")
|
||||||
|
return
|
||||||
|
|
||||||
|
messages = _read_conversation(user, conv_id)
|
||||||
|
|
||||||
|
if messages is None:
|
||||||
|
self.send_error_page(404, "Conversation not found")
|
||||||
|
return
|
||||||
|
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header("Content-Type", "application/json; charset=utf-8")
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(json.dumps(messages, ensure_ascii=False).encode("utf-8"))
|
||||||
|
|
||||||
|
def handle_conversation_delete(self, user, conv_id):
|
||||||
|
"""Delete a specific conversation for the logged-in user."""
|
||||||
|
# Validate conversation_id format
|
||||||
|
if not _validate_conversation_id(conv_id):
|
||||||
|
self.send_error_page(400, "Invalid conversation ID")
|
||||||
|
return
|
||||||
|
|
||||||
|
if _delete_conversation(user, conv_id):
|
||||||
|
self.send_response(204) # No Content
|
||||||
|
self.end_headers()
|
||||||
|
else:
|
||||||
|
self.send_error_page(404, "Conversation not found")
|
||||||
|
|
||||||
|
def handle_new_conversation(self, user):
|
||||||
|
"""Create a new conversation and return its ID."""
|
||||||
|
conv_id = _generate_conversation_id()
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header("Content-Type", "application/json; charset=utf-8")
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(json.dumps({"conversation_id": conv_id}, ensure_ascii=False).encode("utf-8"))
|
||||||
|
|
||||||
|
def do_DELETE(self):
|
||||||
|
"""Handle DELETE requests."""
|
||||||
|
parsed = urlparse(self.path)
|
||||||
|
path = parsed.path
|
||||||
|
|
||||||
|
# Delete conversation endpoint
|
||||||
|
if path.startswith("/chat/history/"):
|
||||||
|
user = self._require_session()
|
||||||
|
if not user:
|
||||||
|
return
|
||||||
|
if not self._check_forwarded_user(user):
|
||||||
|
return
|
||||||
|
conv_id = path[len("/chat/history/"):]
|
||||||
|
self.handle_conversation_delete(user, conv_id)
|
||||||
|
return
|
||||||
|
|
||||||
|
# 404 for unknown paths
|
||||||
|
self.send_error_page(404, "Not found")
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Start the HTTP server."""
|
"""Start the HTTP server."""
|
||||||
|
|
@ -439,7 +939,17 @@ def main():
|
||||||
print(f"OAuth enabled (client_id={CHAT_OAUTH_CLIENT_ID[:8]}...)", file=sys.stderr)
|
print(f"OAuth enabled (client_id={CHAT_OAUTH_CLIENT_ID[:8]}...)", file=sys.stderr)
|
||||||
print(f"Allowed users: {', '.join(sorted(ALLOWED_USERS))}", file=sys.stderr)
|
print(f"Allowed users: {', '.join(sorted(ALLOWED_USERS))}", file=sys.stderr)
|
||||||
else:
|
else:
|
||||||
print("WARNING: CHAT_OAUTH_CLIENT_ID not set — OAuth disabled", file=sys.stderr)
|
print("WARNING: CHAT_OAUTH_CLIENT_ID not set - OAuth disabled", file=sys.stderr)
|
||||||
|
if FORWARD_AUTH_SECRET:
|
||||||
|
print("forward_auth secret configured (#709)", file=sys.stderr)
|
||||||
|
else:
|
||||||
|
print("WARNING: FORWARD_AUTH_SECRET not set - verify endpoint unrestricted", file=sys.stderr)
|
||||||
|
print(
|
||||||
|
f"Rate limits (#711): {CHAT_MAX_REQUESTS_PER_HOUR}/hr, "
|
||||||
|
f"{CHAT_MAX_REQUESTS_PER_DAY}/day, "
|
||||||
|
f"{CHAT_MAX_TOKENS_PER_DAY} tokens/day",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
httpd.serve_forever()
|
httpd.serve_forever()
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,93 @@
|
||||||
color: #eaeaea;
|
color: #eaeaea;
|
||||||
min-height: 100vh;
|
min-height: 100vh;
|
||||||
display: flex;
|
display: flex;
|
||||||
|
}
|
||||||
|
/* Sidebar styles */
|
||||||
|
.sidebar {
|
||||||
|
width: 280px;
|
||||||
|
background: #16213e;
|
||||||
|
border-right: 1px solid #0f3460;
|
||||||
|
display: flex;
|
||||||
flex-direction: column;
|
flex-direction: column;
|
||||||
|
height: 100vh;
|
||||||
|
position: fixed;
|
||||||
|
left: 0;
|
||||||
|
top: 0;
|
||||||
|
z-index: 100;
|
||||||
|
}
|
||||||
|
.sidebar-header {
|
||||||
|
padding: 1rem;
|
||||||
|
border-bottom: 1px solid #0f3460;
|
||||||
|
}
|
||||||
|
.sidebar-header h1 {
|
||||||
|
font-size: 1.25rem;
|
||||||
|
font-weight: 600;
|
||||||
|
margin-bottom: 0.5rem;
|
||||||
|
}
|
||||||
|
.new-chat-btn {
|
||||||
|
width: 100%;
|
||||||
|
background: #e94560;
|
||||||
|
color: white;
|
||||||
|
border: none;
|
||||||
|
border-radius: 6px;
|
||||||
|
padding: 0.75rem 1rem;
|
||||||
|
font-size: 0.9rem;
|
||||||
|
font-weight: 600;
|
||||||
|
cursor: pointer;
|
||||||
|
transition: background 0.2s;
|
||||||
|
}
|
||||||
|
.new-chat-btn:hover {
|
||||||
|
background: #d63447;
|
||||||
|
}
|
||||||
|
.new-chat-btn:disabled {
|
||||||
|
background: #555;
|
||||||
|
cursor: not-allowed;
|
||||||
|
}
|
||||||
|
.conversations-list {
|
||||||
|
flex: 1;
|
||||||
|
overflow-y: auto;
|
||||||
|
padding: 0.5rem;
|
||||||
|
}
|
||||||
|
.conversation-item {
|
||||||
|
padding: 0.75rem 1rem;
|
||||||
|
border-radius: 6px;
|
||||||
|
cursor: pointer;
|
||||||
|
margin-bottom: 0.25rem;
|
||||||
|
transition: background 0.2s;
|
||||||
|
border: 1px solid transparent;
|
||||||
|
}
|
||||||
|
.conversation-item:hover {
|
||||||
|
background: #1a1a2e;
|
||||||
|
}
|
||||||
|
.conversation-item.active {
|
||||||
|
background: #0f3460;
|
||||||
|
border-color: #e94560;
|
||||||
|
}
|
||||||
|
.conversation-item .preview {
|
||||||
|
font-size: 0.875rem;
|
||||||
|
white-space: nowrap;
|
||||||
|
overflow: hidden;
|
||||||
|
text-overflow: ellipsis;
|
||||||
|
opacity: 0.9;
|
||||||
|
}
|
||||||
|
.conversation-item .meta {
|
||||||
|
font-size: 0.75rem;
|
||||||
|
opacity: 0.6;
|
||||||
|
margin-top: 0.25rem;
|
||||||
|
}
|
||||||
|
.conversation-item .message-count {
|
||||||
|
float: right;
|
||||||
|
font-size: 0.7rem;
|
||||||
|
background: #0f3460;
|
||||||
|
padding: 0.125rem 0.5rem;
|
||||||
|
border-radius: 10px;
|
||||||
|
}
|
||||||
|
.main-content {
|
||||||
|
margin-left: 280px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
width: 100%;
|
||||||
|
height: 100vh;
|
||||||
}
|
}
|
||||||
header {
|
header {
|
||||||
background: #16213e;
|
background: #16213e;
|
||||||
|
|
@ -119,30 +205,202 @@
|
||||||
.loading {
|
.loading {
|
||||||
opacity: 0.6;
|
opacity: 0.6;
|
||||||
}
|
}
|
||||||
|
.empty-state {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
height: 100%;
|
||||||
|
color: #888;
|
||||||
|
text-align: center;
|
||||||
|
}
|
||||||
|
.empty-state p {
|
||||||
|
margin-top: 1rem;
|
||||||
|
}
|
||||||
|
/* Responsive sidebar toggle */
|
||||||
|
.sidebar-toggle {
|
||||||
|
display: none;
|
||||||
|
position: fixed;
|
||||||
|
top: 1rem;
|
||||||
|
left: 1rem;
|
||||||
|
z-index: 200;
|
||||||
|
background: #e94560;
|
||||||
|
color: white;
|
||||||
|
border: none;
|
||||||
|
border-radius: 6px;
|
||||||
|
padding: 0.5rem;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
@media (max-width: 768px) {
|
||||||
|
.sidebar {
|
||||||
|
transform: translateX(-100%);
|
||||||
|
transition: transform 0.3s;
|
||||||
|
}
|
||||||
|
.sidebar.open {
|
||||||
|
transform: translateX(0);
|
||||||
|
}
|
||||||
|
.sidebar-toggle {
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
.main-content {
|
||||||
|
margin-left: 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
</style>
|
</style>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
<header>
|
<button class="sidebar-toggle" id="sidebar-toggle">☰</button>
|
||||||
<h1>disinto-chat</h1>
|
<aside class="sidebar" id="sidebar">
|
||||||
</header>
|
<div class="sidebar-header">
|
||||||
<main>
|
<h1>disinto-chat</h1>
|
||||||
<div id="messages">
|
<button class="new-chat-btn" id="new-chat-btn">+ New Chat</button>
|
||||||
<div class="message system">
|
|
||||||
<div class="role">system</div>
|
|
||||||
<div class="content">Welcome to disinto-chat. Type a message to start chatting with Claude.</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
</div>
|
||||||
<form class="input-area">
|
<div class="conversations-list" id="conversations-list">
|
||||||
<textarea name="message" placeholder="Type your message..." required></textarea>
|
<!-- Conversations will be loaded here -->
|
||||||
<button type="submit" id="send-btn">Send</button>
|
</div>
|
||||||
</form>
|
</aside>
|
||||||
</main>
|
<div class="main-content">
|
||||||
|
<header>
|
||||||
|
<h1>disinto-chat</h1>
|
||||||
|
</header>
|
||||||
|
<main>
|
||||||
|
<div id="messages">
|
||||||
|
<div class="message system">
|
||||||
|
<div class="role">system</div>
|
||||||
|
<div class="content">Welcome to disinto-chat. Type a message to start chatting with Claude.</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<form class="input-area" id="chat-form">
|
||||||
|
<textarea name="message" placeholder="Type your message..." required></textarea>
|
||||||
|
<button type="submit" id="send-btn">Send</button>
|
||||||
|
</form>
|
||||||
|
</main>
|
||||||
|
</div>
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
|
// State
|
||||||
|
let currentConversationId = null;
|
||||||
|
let conversations = [];
|
||||||
|
|
||||||
|
// DOM elements
|
||||||
const messagesDiv = document.getElementById('messages');
|
const messagesDiv = document.getElementById('messages');
|
||||||
const sendBtn = document.getElementById('send-btn');
|
const sendBtn = document.getElementById('send-btn');
|
||||||
const textarea = document.querySelector('textarea');
|
const textarea = document.querySelector('textarea');
|
||||||
|
const conversationsList = document.getElementById('conversations-list');
|
||||||
|
const newChatBtn = document.getElementById('new-chat-btn');
|
||||||
|
const sidebar = document.getElementById('sidebar');
|
||||||
|
const sidebarToggle = document.getElementById('sidebar-toggle');
|
||||||
|
|
||||||
|
// Load conversations list
|
||||||
|
async function loadConversations() {
|
||||||
|
try {
|
||||||
|
const response = await fetch('/chat/history');
|
||||||
|
if (response.ok) {
|
||||||
|
conversations = await response.json();
|
||||||
|
renderConversationsList();
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to load conversations:', error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render conversations list
|
||||||
|
function renderConversationsList() {
|
||||||
|
conversationsList.innerHTML = '';
|
||||||
|
|
||||||
|
if (conversations.length === 0) {
|
||||||
|
conversationsList.innerHTML = '<div style="padding: 1rem; color: #888; text-align: center; font-size: 0.875rem;">No conversations yet</div>';
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
conversations.forEach(conv => {
|
||||||
|
const item = document.createElement('div');
|
||||||
|
item.className = 'conversation-item';
|
||||||
|
if (conv.id === currentConversationId) {
|
||||||
|
item.classList.add('active');
|
||||||
|
}
|
||||||
|
item.dataset.conversationId = conv.id;
|
||||||
|
|
||||||
|
const previewDiv = document.createElement('div');
|
||||||
|
previewDiv.className = 'preview';
|
||||||
|
previewDiv.textContent = conv.preview || '(empty)';
|
||||||
|
|
||||||
|
const metaDiv = document.createElement('div');
|
||||||
|
metaDiv.className = 'meta';
|
||||||
|
const date = conv.created_at ? new Date(conv.created_at).toLocaleDateString() : '';
|
||||||
|
metaDiv.innerHTML = `${date} <span class="message-count">${conv.message_count || 0} msg${conv.message_count !== 1 ? 's' : ''}</span>`;
|
||||||
|
|
||||||
|
item.appendChild(previewDiv);
|
||||||
|
item.appendChild(metaDiv);
|
||||||
|
|
||||||
|
item.addEventListener('click', () => loadConversation(conv.id));
|
||||||
|
conversationsList.appendChild(item);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load a specific conversation
|
||||||
|
async function loadConversation(convId) {
|
||||||
|
// Early-return if already showing this conversation
|
||||||
|
if (convId === currentConversationId) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear messages
|
||||||
|
messagesDiv.innerHTML = '';
|
||||||
|
|
||||||
|
// Update active state in sidebar
|
||||||
|
document.querySelectorAll('.conversation-item').forEach(item => {
|
||||||
|
item.classList.remove('active');
|
||||||
|
});
|
||||||
|
document.querySelector(`[data-conversation-id="${convId}"]`)?.classList.add('active');
|
||||||
|
|
||||||
|
currentConversationId = convId;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await fetch(`/chat/history/${convId}`);
|
||||||
|
if (response.ok) {
|
||||||
|
const messages = await response.json();
|
||||||
|
if (messages && messages.length > 0) {
|
||||||
|
messages.forEach(msg => {
|
||||||
|
addMessage(msg.role, msg.content);
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
addSystemMessage('This conversation is empty');
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
addSystemMessage('Failed to load conversation');
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to load conversation:', error);
|
||||||
|
addSystemMessage('Error loading conversation');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close sidebar on mobile
|
||||||
|
if (window.innerWidth <= 768) {
|
||||||
|
sidebar.classList.remove('open');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new conversation
|
||||||
|
async function createNewConversation() {
|
||||||
|
try {
|
||||||
|
const response = await fetch('/chat/new', { method: 'POST' });
|
||||||
|
if (response.ok) {
|
||||||
|
const data = await response.json();
|
||||||
|
currentConversationId = data.conversation_id;
|
||||||
|
messagesDiv.innerHTML = '';
|
||||||
|
addSystemMessage('New conversation started');
|
||||||
|
await loadConversations();
|
||||||
|
} else {
|
||||||
|
addSystemMessage('Failed to create new conversation');
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to create new conversation:', error);
|
||||||
|
addSystemMessage('Error creating new conversation');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add message to display
|
||||||
function addMessage(role, content, streaming = false) {
|
function addMessage(role, content, streaming = false) {
|
||||||
const msgDiv = document.createElement('div');
|
const msgDiv = document.createElement('div');
|
||||||
msgDiv.className = `message ${role}`;
|
msgDiv.className = `message ${role}`;
|
||||||
|
|
@ -155,6 +413,17 @@
|
||||||
return msgDiv.querySelector('.content');
|
return msgDiv.querySelector('.content');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function addSystemMessage(content) {
|
||||||
|
const msgDiv = document.createElement('div');
|
||||||
|
msgDiv.className = 'message system';
|
||||||
|
msgDiv.innerHTML = `
|
||||||
|
<div class="role">system</div>
|
||||||
|
<div class="content">${escapeHtml(content)}</div>
|
||||||
|
`;
|
||||||
|
messagesDiv.appendChild(msgDiv);
|
||||||
|
messagesDiv.scrollTop = messagesDiv.scrollHeight;
|
||||||
|
}
|
||||||
|
|
||||||
function escapeHtml(text) {
|
function escapeHtml(text) {
|
||||||
const div = document.createElement('div');
|
const div = document.createElement('div');
|
||||||
div.textContent = text;
|
div.textContent = text;
|
||||||
|
|
@ -162,7 +431,7 @@
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send message handler
|
// Send message handler
|
||||||
sendBtn.addEventListener('click', async () => {
|
async function sendMessage() {
|
||||||
const message = textarea.value.trim();
|
const message = textarea.value.trim();
|
||||||
if (!message) return;
|
if (!message) return;
|
||||||
|
|
||||||
|
|
@ -175,10 +444,16 @@
|
||||||
addMessage('user', message);
|
addMessage('user', message);
|
||||||
textarea.value = '';
|
textarea.value = '';
|
||||||
|
|
||||||
|
// If no conversation ID, create one
|
||||||
|
if (!currentConversationId) {
|
||||||
|
await createNewConversation();
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Use fetch with URLSearchParams for application/x-www-form-urlencoded
|
// Use fetch with URLSearchParams for application/x-www-form-urlencoded
|
||||||
const params = new URLSearchParams();
|
const params = new URLSearchParams();
|
||||||
params.append('message', message);
|
params.append('message', message);
|
||||||
|
params.append('conversation_id', currentConversationId);
|
||||||
|
|
||||||
const response = await fetch('/chat', {
|
const response = await fetch('/chat', {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
|
|
@ -192,31 +467,55 @@
|
||||||
throw new Error(`HTTP ${response.status}`);
|
throw new Error(`HTTP ${response.status}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read the response as text and add assistant message
|
// Read the response as JSON (now returns JSON with response and conversation_id)
|
||||||
const content = await response.text();
|
const data = await response.json();
|
||||||
addMessage('assistant', content);
|
addMessage('assistant', data.response);
|
||||||
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
addMessage('system', `Error: ${error.message}`);
|
addSystemMessage(`Error: ${error.message}`);
|
||||||
} finally {
|
} finally {
|
||||||
textarea.disabled = false;
|
textarea.disabled = false;
|
||||||
sendBtn.disabled = false;
|
sendBtn.disabled = false;
|
||||||
sendBtn.textContent = 'Send';
|
sendBtn.textContent = 'Send';
|
||||||
textarea.focus();
|
textarea.focus();
|
||||||
messagesDiv.scrollTop = messagesDiv.scrollHeight;
|
messagesDiv.scrollTop = messagesDiv.scrollHeight;
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// Handle Enter key in textarea
|
// Refresh conversations list
|
||||||
|
await loadConversations();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Event listeners
|
||||||
|
sendBtn.addEventListener('click', sendMessage);
|
||||||
|
|
||||||
|
newChatBtn.addEventListener('click', createNewConversation);
|
||||||
|
|
||||||
textarea.addEventListener('keydown', (e) => {
|
textarea.addEventListener('keydown', (e) => {
|
||||||
if (e.key === 'Enter' && !e.shiftKey) {
|
if (e.key === 'Enter' && !e.shiftKey) {
|
||||||
e.preventDefault();
|
e.preventDefault();
|
||||||
sendBtn.click();
|
sendMessage();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Sidebar toggle for mobile
|
||||||
|
sidebarToggle.addEventListener('click', () => {
|
||||||
|
sidebar.classList.toggle('open');
|
||||||
|
});
|
||||||
|
|
||||||
|
// Close sidebar when clicking outside on mobile
|
||||||
|
document.addEventListener('click', (e) => {
|
||||||
|
if (window.innerWidth <= 768) {
|
||||||
|
if (!sidebar.contains(e.target) && !sidebarToggle.contains(e.target)) {
|
||||||
|
sidebar.classList.remove('open');
|
||||||
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
// Initial focus
|
// Initial focus
|
||||||
textarea.focus();
|
textarea.focus();
|
||||||
|
|
||||||
|
// Load conversations on page load
|
||||||
|
loadConversations();
|
||||||
</script>
|
</script>
|
||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,7 @@
|
||||||
FROM caddy:latest
|
FROM caddy:latest
|
||||||
RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh
|
RUN apk add --no-cache bash jq curl git docker-cli python3 openssh-client autossh
|
||||||
COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh
|
COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
|
||||||
ENTRYPOINT ["bash", "/usr/local/bin/entrypoint-edge.sh"]
|
ENTRYPOINT ["bash", "/usr/local/bin/entrypoint-edge.sh"]
|
||||||
|
|
|
||||||
|
|
@ -8,8 +8,8 @@
|
||||||
# 2. Scan vault/actions/ for TOML files without .result.json
|
# 2. Scan vault/actions/ for TOML files without .result.json
|
||||||
# 3. Verify TOML arrived via merged PR with admin merger (Forgejo API)
|
# 3. Verify TOML arrived via merged PR with admin merger (Forgejo API)
|
||||||
# 4. Validate TOML using vault-env.sh validator
|
# 4. Validate TOML using vault-env.sh validator
|
||||||
# 5. Decrypt .env.vault.enc and extract only declared secrets
|
# 5. Decrypt declared secrets via load_secret (lib/env.sh)
|
||||||
# 6. Launch: docker run --rm disinto/agents:latest <action-id>
|
# 6. Launch: delegate to _launch_runner_{docker,nomad} backend
|
||||||
# 7. Write <action-id>.result.json with exit code, timestamp, logs summary
|
# 7. Write <action-id>.result.json with exit code, timestamp, logs summary
|
||||||
#
|
#
|
||||||
# Part of #76.
|
# Part of #76.
|
||||||
|
|
@ -19,7 +19,7 @@ set -euo pipefail
|
||||||
# Resolve script root (parent of lib/)
|
# Resolve script root (parent of lib/)
|
||||||
SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
|
||||||
# Source shared environment
|
# Source shared environment (provides load_secret, log helpers, etc.)
|
||||||
source "${SCRIPT_ROOT}/../lib/env.sh"
|
source "${SCRIPT_ROOT}/../lib/env.sh"
|
||||||
|
|
||||||
# Project TOML location: prefer mounted path, fall back to cloned path
|
# Project TOML location: prefer mounted path, fall back to cloned path
|
||||||
|
|
@ -27,26 +27,18 @@ source "${SCRIPT_ROOT}/../lib/env.sh"
|
||||||
# the shallow clone only has .toml.example files.
|
# the shallow clone only has .toml.example files.
|
||||||
PROJECTS_DIR="${PROJECTS_DIR:-${FACTORY_ROOT:-/opt/disinto}-projects}"
|
PROJECTS_DIR="${PROJECTS_DIR:-${FACTORY_ROOT:-/opt/disinto}-projects}"
|
||||||
|
|
||||||
# Load vault secrets after env.sh (env.sh unsets them for agent security)
|
# -----------------------------------------------------------------------------
|
||||||
# Vault secrets must be available to the dispatcher
|
# Backend selection: DISPATCHER_BACKEND={docker,nomad}
|
||||||
if [ -f "$FACTORY_ROOT/.env.vault.enc" ] && command -v sops &>/dev/null; then
|
# Default: docker. nomad lands as a pure addition during migration Step 5.
|
||||||
set -a
|
# -----------------------------------------------------------------------------
|
||||||
eval "$(sops -d --output-type dotenv "$FACTORY_ROOT/.env.vault.enc" 2>/dev/null)" \
|
DISPATCHER_BACKEND="${DISPATCHER_BACKEND:-docker}"
|
||||||
|| echo "Warning: failed to decrypt .env.vault.enc — vault secrets not loaded" >&2
|
|
||||||
set +a
|
|
||||||
elif [ -f "$FACTORY_ROOT/.env.vault" ]; then
|
|
||||||
set -a
|
|
||||||
# shellcheck source=/dev/null
|
|
||||||
source "$FACTORY_ROOT/.env.vault"
|
|
||||||
set +a
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Ops repo location (vault/actions directory)
|
# Ops repo location (vault/actions directory)
|
||||||
OPS_REPO_ROOT="${OPS_REPO_ROOT:-/home/debian/disinto-ops}"
|
OPS_REPO_ROOT="${OPS_REPO_ROOT:-/home/debian/disinto-ops}"
|
||||||
VAULT_ACTIONS_DIR="${OPS_REPO_ROOT}/vault/actions"
|
VAULT_ACTIONS_DIR="${OPS_REPO_ROOT}/vault/actions"
|
||||||
|
|
||||||
# Vault action validation
|
# Vault action validation
|
||||||
VAULT_ENV="${SCRIPT_ROOT}/../vault/vault-env.sh"
|
VAULT_ENV="${SCRIPT_ROOT}/../action-vault/vault-env.sh"
|
||||||
|
|
||||||
# Admin users who can merge vault PRs (from issue #77)
|
# Admin users who can merge vault PRs (from issue #77)
|
||||||
# Comma-separated list of Forgejo usernames with admin role
|
# Comma-separated list of Forgejo usernames with admin role
|
||||||
|
|
@ -350,73 +342,113 @@ get_dispatch_mode() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# Write result file for an action
|
# Commit result.json to the ops repo via git push (portable, no bind-mount).
|
||||||
# Usage: write_result <action_id> <exit_code> <logs>
|
#
|
||||||
write_result() {
|
# Clones the ops repo into a scratch directory, writes the result file,
|
||||||
|
# commits as vault-bot, and pushes to the primary branch.
|
||||||
|
# Idempotent: skips if result.json already exists upstream.
|
||||||
|
# Retries on push conflict with rebase-and-push (handles concurrent merges).
|
||||||
|
#
|
||||||
|
# Usage: commit_result_via_git <action_id> <exit_code> <logs>
|
||||||
|
commit_result_via_git() {
|
||||||
local action_id="$1"
|
local action_id="$1"
|
||||||
local exit_code="$2"
|
local exit_code="$2"
|
||||||
local logs="$3"
|
local logs="$3"
|
||||||
|
|
||||||
local result_file="${VAULT_ACTIONS_DIR}/${action_id}.result.json"
|
local result_relpath="vault/actions/${action_id}.result.json"
|
||||||
|
local ops_clone_url="${FORGE_URL}/${FORGE_OPS_REPO}.git"
|
||||||
|
local branch="${PRIMARY_BRANCH:-main}"
|
||||||
|
local scratch_dir
|
||||||
|
scratch_dir=$(mktemp -d /tmp/dispatcher-result-XXXXXX)
|
||||||
|
# shellcheck disable=SC2064
|
||||||
|
trap "rm -rf '${scratch_dir}'" RETURN
|
||||||
|
|
||||||
|
# Shallow clone of the ops repo — only the primary branch
|
||||||
|
if ! git clone --depth 1 --branch "$branch" \
|
||||||
|
"$ops_clone_url" "$scratch_dir" 2>/dev/null; then
|
||||||
|
log "ERROR: Failed to clone ops repo for result commit (action ${action_id})"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Idempotency: skip if result.json already exists upstream
|
||||||
|
if [ -f "${scratch_dir}/${result_relpath}" ]; then
|
||||||
|
log "Result already exists upstream for ${action_id} — skipping commit"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Configure git identity as vault-bot
|
||||||
|
git -C "$scratch_dir" config user.name "vault-bot"
|
||||||
|
git -C "$scratch_dir" config user.email "vault-bot@disinto.local"
|
||||||
|
|
||||||
# Truncate logs if too long (keep last 1000 chars)
|
# Truncate logs if too long (keep last 1000 chars)
|
||||||
if [ ${#logs} -gt 1000 ]; then
|
if [ ${#logs} -gt 1000 ]; then
|
||||||
logs="${logs: -1000}"
|
logs="${logs: -1000}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Write result JSON
|
# Write result JSON via jq (never string-interpolate into JSON)
|
||||||
|
mkdir -p "$(dirname "${scratch_dir}/${result_relpath}")"
|
||||||
jq -n \
|
jq -n \
|
||||||
--arg id "$action_id" \
|
--arg id "$action_id" \
|
||||||
--argjson exit_code "$exit_code" \
|
--argjson exit_code "$exit_code" \
|
||||||
--arg timestamp "$(date -u '+%Y-%m-%dT%H:%M:%SZ')" \
|
--arg timestamp "$(date -u '+%Y-%m-%dT%H:%M:%SZ')" \
|
||||||
--arg logs "$logs" \
|
--arg logs "$logs" \
|
||||||
'{id: $id, exit_code: $exit_code, timestamp: $timestamp, logs: $logs}' \
|
'{id: $id, exit_code: $exit_code, timestamp: $timestamp, logs: $logs}' \
|
||||||
> "$result_file"
|
> "${scratch_dir}/${result_relpath}"
|
||||||
|
|
||||||
log "Result written: ${result_file}"
|
git -C "$scratch_dir" add "$result_relpath"
|
||||||
|
git -C "$scratch_dir" commit -q -m "vault: result for ${action_id}"
|
||||||
|
|
||||||
|
# Push with retry on conflict (rebase-and-push pattern).
|
||||||
|
# Common case: admin merges another action PR between our clone and push.
|
||||||
|
local attempt
|
||||||
|
for attempt in 1 2 3; do
|
||||||
|
if git -C "$scratch_dir" push origin "$branch" 2>/dev/null; then
|
||||||
|
log "Result committed and pushed for ${action_id} (attempt ${attempt})"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Push conflict for ${action_id} (attempt ${attempt}/3) — rebasing"
|
||||||
|
|
||||||
|
if ! git -C "$scratch_dir" pull --rebase origin "$branch" 2>/dev/null; then
|
||||||
|
# Rebase conflict — check if result was pushed by another process
|
||||||
|
git -C "$scratch_dir" rebase --abort 2>/dev/null || true
|
||||||
|
if git -C "$scratch_dir" fetch origin "$branch" 2>/dev/null && \
|
||||||
|
git -C "$scratch_dir" show "origin/${branch}:${result_relpath}" >/dev/null 2>&1; then
|
||||||
|
log "Result already exists upstream for ${action_id} (pushed by another process)"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
log "ERROR: Failed to push result for ${action_id} after 3 attempts"
|
||||||
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
# Launch runner for the given action
|
# Write result file for an action via git push to the ops repo.
|
||||||
# Usage: launch_runner <toml_file>
|
# Usage: write_result <action_id> <exit_code> <logs>
|
||||||
launch_runner() {
|
write_result() {
|
||||||
local toml_file="$1"
|
local action_id="$1"
|
||||||
local action_id
|
local exit_code="$2"
|
||||||
action_id=$(basename "$toml_file" .toml)
|
local logs="$3"
|
||||||
|
|
||||||
log "Launching runner for action: ${action_id}"
|
commit_result_via_git "$action_id" "$exit_code" "$logs"
|
||||||
|
}
|
||||||
|
|
||||||
# Validate TOML
|
# -----------------------------------------------------------------------------
|
||||||
if ! validate_action "$toml_file"; then
|
# Pluggable launcher backends
|
||||||
log "ERROR: Action validation failed for ${action_id}"
|
# -----------------------------------------------------------------------------
|
||||||
write_result "$action_id" 1 "Validation failed: see logs above"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check dispatch mode to determine if admin verification is needed
|
# _launch_runner_docker ACTION_ID SECRETS_CSV MOUNTS_CSV
|
||||||
local dispatch_mode
|
#
|
||||||
dispatch_mode=$(get_dispatch_mode "$toml_file")
|
# Builds and executes a `docker run` command for the vault runner.
|
||||||
|
# Secrets are resolved via load_secret (lib/env.sh).
|
||||||
|
# Returns: exit code of the docker run. Stdout/stderr are captured to a temp
|
||||||
|
# log file whose path is printed to stdout (caller reads it).
|
||||||
|
_launch_runner_docker() {
|
||||||
|
local action_id="$1"
|
||||||
|
local secrets_csv="$2"
|
||||||
|
local mounts_csv="$3"
|
||||||
|
|
||||||
if [ "$dispatch_mode" = "direct" ]; then
|
|
||||||
log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — skipping admin merge verification (direct commit)"
|
|
||||||
else
|
|
||||||
# Verify admin merge for PR-based actions
|
|
||||||
log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — verifying admin merge"
|
|
||||||
if ! verify_admin_merged "$toml_file"; then
|
|
||||||
log "ERROR: Admin merge verification failed for ${action_id}"
|
|
||||||
write_result "$action_id" 1 "Admin merge verification failed: see logs above"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
log "Action ${action_id}: admin merge verified"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Extract secrets from validated action
|
|
||||||
local secrets_array
|
|
||||||
secrets_array="${VAULT_ACTION_SECRETS:-}"
|
|
||||||
|
|
||||||
# Build docker run command (self-contained, no compose context needed).
|
|
||||||
# The edge container has the Docker socket but not the host's compose project,
|
|
||||||
# so docker compose run would fail with exit 125. docker run is self-contained:
|
|
||||||
# the dispatcher knows the image, network, env vars, and entrypoint.
|
|
||||||
local -a cmd=(docker run --rm
|
local -a cmd=(docker run --rm
|
||||||
--name "vault-runner-${action_id}"
|
--name "vault-runner-${action_id}"
|
||||||
--network host
|
--network host
|
||||||
|
|
@ -451,29 +483,27 @@ launch_runner() {
|
||||||
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Add environment variables for secrets (if any declared)
|
# Add environment variables for secrets (resolved via load_secret)
|
||||||
if [ -n "$secrets_array" ]; then
|
if [ -n "$secrets_csv" ]; then
|
||||||
for secret in $secrets_array; do
|
local secret
|
||||||
|
for secret in $(echo "$secrets_csv" | tr ',' ' '); do
|
||||||
secret=$(echo "$secret" | xargs)
|
secret=$(echo "$secret" | xargs)
|
||||||
if [ -n "$secret" ]; then
|
[ -n "$secret" ] || continue
|
||||||
# Verify secret exists in vault
|
local secret_val
|
||||||
if [ -z "${!secret:-}" ]; then
|
secret_val=$(load_secret "$secret") || true
|
||||||
log "ERROR: Secret '${secret}' not found in vault for action ${action_id}"
|
if [ -z "$secret_val" ]; then
|
||||||
write_result "$action_id" 1 "Secret not found in vault: ${secret}"
|
log "ERROR: Secret '${secret}' could not be resolved for action ${action_id}"
|
||||||
return 1
|
write_result "$action_id" 1 "Secret not found: ${secret}"
|
||||||
fi
|
return 1
|
||||||
cmd+=(-e "${secret}=${!secret}")
|
|
||||||
fi
|
fi
|
||||||
|
cmd+=(-e "${secret}=${secret_val}")
|
||||||
done
|
done
|
||||||
else
|
|
||||||
log "Action ${action_id} has no secrets declared — runner will execute without extra env vars"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Add volume mounts for file-based credentials (if any declared)
|
# Add volume mounts for file-based credentials
|
||||||
local mounts_array
|
if [ -n "$mounts_csv" ]; then
|
||||||
mounts_array="${VAULT_ACTION_MOUNTS:-}"
|
local mount_alias
|
||||||
if [ -n "$mounts_array" ]; then
|
for mount_alias in $(echo "$mounts_csv" | tr ',' ' '); do
|
||||||
for mount_alias in $mounts_array; do
|
|
||||||
mount_alias=$(echo "$mount_alias" | xargs)
|
mount_alias=$(echo "$mount_alias" | xargs)
|
||||||
[ -n "$mount_alias" ] || continue
|
[ -n "$mount_alias" ] || continue
|
||||||
case "$mount_alias" in
|
case "$mount_alias" in
|
||||||
|
|
@ -501,7 +531,7 @@ launch_runner() {
|
||||||
# Image and entrypoint arguments: runner entrypoint + action-id
|
# Image and entrypoint arguments: runner entrypoint + action-id
|
||||||
cmd+=(disinto/agents:latest /home/agent/disinto/docker/runner/entrypoint-runner.sh "$action_id")
|
cmd+=(disinto/agents:latest /home/agent/disinto/docker/runner/entrypoint-runner.sh "$action_id")
|
||||||
|
|
||||||
log "Running: docker run --rm vault-runner-${action_id} (secrets: ${secrets_array:-none}, mounts: ${mounts_array:-none})"
|
log "Running: docker run --rm vault-runner-${action_id} (secrets: ${secrets_csv:-none}, mounts: ${mounts_csv:-none})"
|
||||||
|
|
||||||
# Create temp file for logs
|
# Create temp file for logs
|
||||||
local log_file
|
local log_file
|
||||||
|
|
@ -509,7 +539,6 @@ launch_runner() {
|
||||||
trap 'rm -f "$log_file"' RETURN
|
trap 'rm -f "$log_file"' RETURN
|
||||||
|
|
||||||
# Execute with array expansion (safe from shell injection)
|
# Execute with array expansion (safe from shell injection)
|
||||||
# Capture stdout and stderr to log file
|
|
||||||
"${cmd[@]}" > "$log_file" 2>&1
|
"${cmd[@]}" > "$log_file" 2>&1
|
||||||
local exit_code=$?
|
local exit_code=$?
|
||||||
|
|
||||||
|
|
@ -529,6 +558,137 @@ launch_runner() {
|
||||||
return $exit_code
|
return $exit_code
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# _launch_runner_nomad ACTION_ID SECRETS_CSV MOUNTS_CSV
|
||||||
|
#
|
||||||
|
# Nomad backend stub — will be implemented in migration Step 5.
|
||||||
|
# _launch_runner_nomad ACTION_ID SECRETS_CSV MOUNTS_CSV
#
# Placeholder for the Nomad runner backend (planned for migration Step 5).
# Always fails: reports the missing implementation on stderr and returns a
# non-zero status so the orchestrator records the dispatch as failed.
_launch_runner_nomad() {
    printf '%s\n' "nomad backend not yet implemented" >&2
    return 1
}
|
||||||
|
|
||||||
|
# Launch runner for the given action (backend-agnostic orchestrator)
|
||||||
|
# Usage: launch_runner <toml_file>
|
||||||
|
# launch_runner TOML_FILE
#
# Backend-agnostic runner orchestrator. Validates the action TOML, enforces
# admin-merge verification for PR-based actions (skipped for direct-commit
# dispatch), converts the validated VAULT_ACTION_SECRETS / VAULT_ACTION_MOUNTS
# metadata into comma-separated lists, and delegates the actual launch to
# _launch_runner_${DISPATCHER_BACKEND}.
#
# Returns: non-zero on validation/verification failure (after writing a
# result record), otherwise the backend's exit status.
launch_runner() {
    local toml_file="$1"
    local action_id
    action_id=$(basename "$toml_file" .toml)

    log "Launching runner for action: ${action_id}"

    # Schema validation comes first — never dispatch a malformed action.
    if ! validate_action "$toml_file"; then
        log "ERROR: Action validation failed for ${action_id}"
        write_result "$action_id" 1 "Validation failed: see logs above"
        return 1
    fi

    # dispatch_mode decides whether an admin-verified merge is required.
    local dispatch_mode
    dispatch_mode=$(get_dispatch_mode "$toml_file")

    if [ "$dispatch_mode" != "direct" ]; then
        # PR-based actions must have been merged by an admin before running.
        log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — verifying admin merge"
        if ! verify_admin_merged "$toml_file"; then
            log "ERROR: Admin merge verification failed for ${action_id}"
            write_result "$action_id" 1 "Admin merge verification failed: see logs above"
            return 1
        fi
        log "Action ${action_id}: admin merge verified"
    else
        log "Action ${action_id}: tier=${VAULT_TIER:-unknown}, dispatch_mode=${dispatch_mode} — skipping admin merge verification (direct commit)"
    fi

    # Normalise the validated metadata (space-separated) into CSV so the
    # backend receives plain strings rather than shell word lists.
    local secrets_csv="" mounts_csv=""
    [ -n "${VAULT_ACTION_SECRETS:-}" ] && secrets_csv=$(echo "${VAULT_ACTION_SECRETS}" | xargs | tr ' ' ',')
    [ -n "${VAULT_ACTION_MOUNTS:-}" ] && mounts_csv=$(echo "${VAULT_ACTION_MOUNTS}" | xargs | tr ' ' ',')

    # Hand off to the configured backend implementation.
    "_launch_runner_${DISPATCHER_BACKEND}" "$action_id" "$secrets_csv" "$mounts_csv"
}
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
# Pluggable sidecar launcher (reproduce / triage / verify)
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# _dispatch_sidecar_docker CONTAINER_NAME ISSUE_NUM PROJECT_TOML IMAGE [FORMULA]
|
||||||
|
#
|
||||||
|
# Launches a sidecar container via docker run (background, pid-tracked).
|
||||||
|
# Prints the background PID to stdout.
|
||||||
|
# _dispatch_sidecar_docker CONTAINER_NAME ISSUE_NUM PROJECT_TOML IMAGE [FORMULA]
#
# Docker backend for sidecar agents (reproduce / triage / verify).
# Assembles the `docker run` invocation as an argv array (values are never
# re-parsed by a shell, so they cannot inject options), starts it in the
# background, and prints the background job's PID on stdout so the caller
# can record it in a lockfile.
_dispatch_sidecar_docker() {
    local container_name="$1"
    local issue_number="$2"
    local project_toml="$3"
    local image="$4"
    local formula="${5:-}"

    # Base invocation: host networking, Docker socket, the shared data/repo
    # volumes, and the forge credentials every sidecar needs.
    local -a docker_argv=(
        docker run --rm
        --name "${container_name}"
        --network host
        --security-opt apparmor=unconfined
        -v /var/run/docker.sock:/var/run/docker.sock
        -v agent-data:/home/agent/data
        -v project-repos:/home/agent/repos
        -e "FORGE_URL=${FORGE_URL}"
        -e "FORGE_TOKEN=${FORGE_TOKEN}"
        -e "FORGE_REPO=${FORGE_REPO}"
        -e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
        -e DISINTO_CONTAINER=1
    )

    # Optional formula selects the agent behaviour inside the container.
    [ -n "$formula" ] && docker_argv+=(-e "DISINTO_FORMULA=${formula}")

    # Forward the Anthropic key only when the host actually has one set.
    [ -n "${ANTHROPIC_API_KEY:-}" ] && docker_argv+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")

    # Share the Claude config dir, ~/.claude.json, ~/.ssh and the claude CLI
    # from the runtime user when present; read-only wherever possible.
    local runtime_home="${HOME:-/home/debian}"
    local claude_shared="${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}"
    if [ -d "$claude_shared" ]; then
        docker_argv+=(-v "${claude_shared}:${claude_shared}")
        docker_argv+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
    fi
    [ -f "${runtime_home}/.claude.json" ] && docker_argv+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
    [ -d "${runtime_home}/.ssh" ] && docker_argv+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
    [ -f /usr/local/bin/claude ] && docker_argv+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)

    # The project TOML is mounted read-only at a stable in-container path
    # and passed to the image entrypoint together with the issue number.
    local container_toml="/home/agent/project.toml"
    docker_argv+=(-v "${project_toml}:${container_toml}:ro")
    docker_argv+=("${image}" "$container_toml" "$issue_number")

    # Launch detached; the printed PID is the contract with the caller.
    "${docker_argv[@]}" &
    echo $!
}
|
||||||
|
|
||||||
|
# _dispatch_sidecar_nomad CONTAINER_NAME ISSUE_NUM PROJECT_TOML IMAGE [FORMULA]
|
||||||
|
#
|
||||||
|
# Nomad sidecar backend stub — will be implemented in migration Step 5.
|
||||||
|
# _dispatch_sidecar_nomad CONTAINER_NAME ISSUE_NUM PROJECT_TOML IMAGE [FORMULA]
#
# Placeholder for the Nomad sidecar backend (planned for migration Step 5).
# Always fails: reports the missing implementation on stderr and returns a
# non-zero status so dispatch callers treat the launch as failed.
_dispatch_sidecar_nomad() {
    printf '%s\n' "nomad backend not yet implemented" >&2
    return 1
}
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
# Reproduce dispatch — launch sidecar for bug-report issues
|
# Reproduce dispatch — launch sidecar for bug-report issues
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
|
|
@ -607,52 +767,13 @@ dispatch_reproduce() {
|
||||||
|
|
||||||
log "Dispatching reproduce-agent for issue #${issue_number} (project: ${project_toml})"
|
log "Dispatching reproduce-agent for issue #${issue_number} (project: ${project_toml})"
|
||||||
|
|
||||||
# Build docker run command using array (safe from injection)
|
local bg_pid
|
||||||
local -a cmd=(docker run --rm
|
bg_pid=$("_dispatch_sidecar_${DISPATCHER_BACKEND}" \
|
||||||
--name "disinto-reproduce-${issue_number}"
|
"disinto-reproduce-${issue_number}" \
|
||||||
--network host
|
"$issue_number" \
|
||||||
--security-opt apparmor=unconfined
|
"$project_toml" \
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock
|
"disinto-reproduce:latest")
|
||||||
-v agent-data:/home/agent/data
|
|
||||||
-v project-repos:/home/agent/repos
|
|
||||||
-e "FORGE_URL=${FORGE_URL}"
|
|
||||||
-e "FORGE_TOKEN=${FORGE_TOKEN}"
|
|
||||||
-e "FORGE_REPO=${FORGE_REPO}"
|
|
||||||
-e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
|
|
||||||
-e DISINTO_CONTAINER=1
|
|
||||||
)
|
|
||||||
|
|
||||||
# Pass through ANTHROPIC_API_KEY if set
|
|
||||||
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
|
||||||
cmd+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount shared Claude config dir and ~/.ssh from the runtime user's home if available
|
|
||||||
local runtime_home="${HOME:-/home/debian}"
|
|
||||||
if [ -d "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}" ]; then
|
|
||||||
cmd+=(-v "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}")
|
|
||||||
cmd+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
|
|
||||||
fi
|
|
||||||
if [ -f "${runtime_home}/.claude.json" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
|
||||||
fi
|
|
||||||
if [ -d "${runtime_home}/.ssh" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
|
|
||||||
fi
|
|
||||||
# Mount claude CLI binary if present on host
|
|
||||||
if [ -f /usr/local/bin/claude ]; then
|
|
||||||
cmd+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount the project TOML into the container at a stable path
|
|
||||||
local container_toml="/home/agent/project.toml"
|
|
||||||
cmd+=(-v "${project_toml}:${container_toml}:ro")
|
|
||||||
|
|
||||||
cmd+=(disinto-reproduce:latest "$container_toml" "$issue_number")
|
|
||||||
|
|
||||||
# Launch in background; write pid-file so we don't double-launch
|
|
||||||
"${cmd[@]}" &
|
|
||||||
local bg_pid=$!
|
|
||||||
echo "$bg_pid" > "$(_reproduce_lockfile "$issue_number")"
|
echo "$bg_pid" > "$(_reproduce_lockfile "$issue_number")"
|
||||||
log "Reproduce container launched (pid ${bg_pid}) for issue #${issue_number}"
|
log "Reproduce container launched (pid ${bg_pid}) for issue #${issue_number}"
|
||||||
}
|
}
|
||||||
|
|
@ -732,53 +853,14 @@ dispatch_triage() {
|
||||||
|
|
||||||
log "Dispatching triage-agent for issue #${issue_number} (project: ${project_toml})"
|
log "Dispatching triage-agent for issue #${issue_number} (project: ${project_toml})"
|
||||||
|
|
||||||
# Build docker run command using array (safe from injection)
|
local bg_pid
|
||||||
local -a cmd=(docker run --rm
|
bg_pid=$("_dispatch_sidecar_${DISPATCHER_BACKEND}" \
|
||||||
--name "disinto-triage-${issue_number}"
|
"disinto-triage-${issue_number}" \
|
||||||
--network host
|
"$issue_number" \
|
||||||
--security-opt apparmor=unconfined
|
"$project_toml" \
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock
|
"disinto-reproduce:latest" \
|
||||||
-v agent-data:/home/agent/data
|
"triage")
|
||||||
-v project-repos:/home/agent/repos
|
|
||||||
-e "FORGE_URL=${FORGE_URL}"
|
|
||||||
-e "FORGE_TOKEN=${FORGE_TOKEN}"
|
|
||||||
-e "FORGE_REPO=${FORGE_REPO}"
|
|
||||||
-e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
|
|
||||||
-e DISINTO_CONTAINER=1
|
|
||||||
-e DISINTO_FORMULA=triage
|
|
||||||
)
|
|
||||||
|
|
||||||
# Pass through ANTHROPIC_API_KEY if set
|
|
||||||
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
|
||||||
cmd+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount shared Claude config dir and ~/.ssh from the runtime user's home if available
|
|
||||||
local runtime_home="${HOME:-/home/debian}"
|
|
||||||
if [ -d "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}" ]; then
|
|
||||||
cmd+=(-v "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}")
|
|
||||||
cmd+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
|
|
||||||
fi
|
|
||||||
if [ -f "${runtime_home}/.claude.json" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
|
||||||
fi
|
|
||||||
if [ -d "${runtime_home}/.ssh" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
|
|
||||||
fi
|
|
||||||
# Mount claude CLI binary if present on host
|
|
||||||
if [ -f /usr/local/bin/claude ]; then
|
|
||||||
cmd+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount the project TOML into the container at a stable path
|
|
||||||
local container_toml="/home/agent/project.toml"
|
|
||||||
cmd+=(-v "${project_toml}:${container_toml}:ro")
|
|
||||||
|
|
||||||
cmd+=(disinto-reproduce:latest "$container_toml" "$issue_number")
|
|
||||||
|
|
||||||
# Launch in background; write pid-file so we don't double-launch
|
|
||||||
"${cmd[@]}" &
|
|
||||||
local bg_pid=$!
|
|
||||||
echo "$bg_pid" > "$(_triage_lockfile "$issue_number")"
|
echo "$bg_pid" > "$(_triage_lockfile "$issue_number")"
|
||||||
log "Triage container launched (pid ${bg_pid}) for issue #${issue_number}"
|
log "Triage container launched (pid ${bg_pid}) for issue #${issue_number}"
|
||||||
}
|
}
|
||||||
|
|
@ -934,53 +1016,14 @@ dispatch_verify() {
|
||||||
|
|
||||||
log "Dispatching verification-agent for issue #${issue_number} (project: ${project_toml})"
|
log "Dispatching verification-agent for issue #${issue_number} (project: ${project_toml})"
|
||||||
|
|
||||||
# Build docker run command using array (safe from injection)
|
local bg_pid
|
||||||
local -a cmd=(docker run --rm
|
bg_pid=$("_dispatch_sidecar_${DISPATCHER_BACKEND}" \
|
||||||
--name "disinto-verify-${issue_number}"
|
"disinto-verify-${issue_number}" \
|
||||||
--network host
|
"$issue_number" \
|
||||||
--security-opt apparmor=unconfined
|
"$project_toml" \
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock
|
"disinto-reproduce:latest" \
|
||||||
-v agent-data:/home/agent/data
|
"verify")
|
||||||
-v project-repos:/home/agent/repos
|
|
||||||
-e "FORGE_URL=${FORGE_URL}"
|
|
||||||
-e "FORGE_TOKEN=${FORGE_TOKEN}"
|
|
||||||
-e "FORGE_REPO=${FORGE_REPO}"
|
|
||||||
-e "PRIMARY_BRANCH=${PRIMARY_BRANCH:-main}"
|
|
||||||
-e DISINTO_CONTAINER=1
|
|
||||||
-e DISINTO_FORMULA=verify
|
|
||||||
)
|
|
||||||
|
|
||||||
# Pass through ANTHROPIC_API_KEY if set
|
|
||||||
if [ -n "${ANTHROPIC_API_KEY:-}" ]; then
|
|
||||||
cmd+=(-e "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount shared Claude config dir and ~/.ssh from the runtime user's home if available
|
|
||||||
local runtime_home="${HOME:-/home/debian}"
|
|
||||||
if [ -d "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}" ]; then
|
|
||||||
cmd+=(-v "${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}")
|
|
||||||
cmd+=(-e "CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}")
|
|
||||||
fi
|
|
||||||
if [ -f "${runtime_home}/.claude.json" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.claude.json:/home/agent/.claude.json:ro")
|
|
||||||
fi
|
|
||||||
if [ -d "${runtime_home}/.ssh" ]; then
|
|
||||||
cmd+=(-v "${runtime_home}/.ssh:/home/agent/.ssh:ro")
|
|
||||||
fi
|
|
||||||
# Mount claude CLI binary if present on host
|
|
||||||
if [ -f /usr/local/bin/claude ]; then
|
|
||||||
cmd+=(-v /usr/local/bin/claude:/usr/local/bin/claude:ro)
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Mount the project TOML into the container at a stable path
|
|
||||||
local container_toml="/home/agent/project.toml"
|
|
||||||
cmd+=(-v "${project_toml}:${container_toml}:ro")
|
|
||||||
|
|
||||||
cmd+=(disinto-reproduce:latest "$container_toml" "$issue_number")
|
|
||||||
|
|
||||||
# Launch in background; write pid-file so we don't double-launch
|
|
||||||
"${cmd[@]}" &
|
|
||||||
local bg_pid=$!
|
|
||||||
echo "$bg_pid" > "$(_verify_lockfile "$issue_number")"
|
echo "$bg_pid" > "$(_verify_lockfile "$issue_number")"
|
||||||
log "Verification container launched (pid ${bg_pid}) for issue #${issue_number}"
|
log "Verification container launched (pid ${bg_pid}) for issue #${issue_number}"
|
||||||
}
|
}
|
||||||
|
|
@ -1002,10 +1045,25 @@ ensure_ops_repo() {
|
||||||
|
|
||||||
# Main dispatcher loop
|
# Main dispatcher loop
|
||||||
main() {
|
main() {
|
||||||
log "Starting dispatcher..."
|
log "Starting dispatcher (backend=${DISPATCHER_BACKEND})..."
|
||||||
log "Polling ops repo: ${VAULT_ACTIONS_DIR}"
|
log "Polling ops repo: ${VAULT_ACTIONS_DIR}"
|
||||||
log "Admin users: ${ADMIN_USERS}"
|
log "Admin users: ${ADMIN_USERS}"
|
||||||
|
|
||||||
|
# Validate backend selection at startup
|
||||||
|
case "$DISPATCHER_BACKEND" in
|
||||||
|
docker) ;;
|
||||||
|
nomad)
|
||||||
|
log "ERROR: nomad backend not yet implemented"
|
||||||
|
echo "nomad backend not yet implemented" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
log "ERROR: unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND}"
|
||||||
|
echo "unknown DISPATCHER_BACKEND=${DISPATCHER_BACKEND} (expected: docker, nomad)" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
while true; do
|
while true; do
|
||||||
# Refresh ops repo at the start of each poll cycle
|
# Refresh ops repo at the start of each poll cycle
|
||||||
ensure_ops_repo
|
ensure_ops_repo
|
||||||
|
|
|
||||||
|
|
@ -173,6 +173,67 @@ PROJECT_TOML="${PROJECT_TOML:-projects/disinto.toml}"
|
||||||
sleep 1200 # 20 minutes
|
sleep 1200 # 20 minutes
|
||||||
done) &
|
done) &
|
||||||
|
|
||||||
|
# ── Load required secrets from secrets/*.enc (#777) ────────────────────
|
||||||
|
# Edge container declares its required secrets; missing ones cause a hard fail.
|
||||||
|
_AGE_KEY_FILE="${HOME}/.config/sops/age/keys.txt"
|
||||||
|
_SECRETS_DIR="/opt/disinto/secrets"
|
||||||
|
EDGE_REQUIRED_SECRETS="CADDY_SSH_KEY CADDY_SSH_HOST CADDY_SSH_USER CADDY_ACCESS_LOG"
|
||||||
|
|
||||||
|
# _edge_decrypt_secret NAME
#
# Decrypts ${_SECRETS_DIR}/NAME.enc with the host age identity file and
# writes the plaintext to stdout. Returns non-zero when the encrypted file
# does not exist; age's own diagnostics are suppressed so callers see a
# clean failure status only.
_edge_decrypt_secret() {
    local encrypted_file="${_SECRETS_DIR}/${1}.enc"
    if [ ! -f "$encrypted_file" ]; then
        return 1
    fi
    age -d -i "$_AGE_KEY_FILE" "$encrypted_file" 2>/dev/null
}
|
||||||
|
|
||||||
|
# Fail-fast secret bootstrap (#777): decrypt every secret named in
# EDGE_REQUIRED_SECRETS from secrets/*.enc and export it into the
# environment. Any missing secret — or a missing age key / secrets dir —
# aborts container startup with actionable remediation hints.
if [ -f "$_AGE_KEY_FILE" ] && [ -d "$_SECRETS_DIR" ]; then
    # Collect the names that fail to decrypt instead of dying on the first,
    # so the operator sees the complete list in one pass.
    _missing=""
    for _secret_name in $EDGE_REQUIRED_SECRETS; do
        _val=$(_edge_decrypt_secret "$_secret_name") || { _missing="${_missing} ${_secret_name}"; continue; }
        # export NAME=VALUE via a single quoted word keeps the value intact.
        export "$_secret_name=$_val"
    done
    if [ -n "$_missing" ]; then
        echo "FATAL: required secrets missing from secrets/*.enc:${_missing}" >&2
        echo " Run 'disinto secrets add <NAME>' for each missing secret." >&2
        echo " If migrating from .env.vault.enc, run 'disinto secrets migrate-from-vault' first." >&2
        exit 1
    fi
    echo "edge: loaded required secrets: ${EDGE_REQUIRED_SECRETS}" >&2
else
    # No key or no secrets dir at all — nothing can be decrypted; hard fail.
    echo "FATAL: age key (${_AGE_KEY_FILE}) or secrets dir (${_SECRETS_DIR}) not found — cannot load required secrets" >&2
    echo " Ensure age is installed and secrets/*.enc files are present." >&2
    exit 1
fi
|
||||||
|
|
||||||
|
# Start daily engagement collection cron loop in background (#745)
|
||||||
|
# Runs collect-engagement.sh daily at ~23:50 UTC via a sleep loop that
|
||||||
|
# calculates seconds until the next 23:50 window. SSH key from secrets/*.enc (#777).
|
||||||
|
# Daily engagement-collection loop (#745), run as a backgrounded subshell:
# sleeps until the next 23:50 UTC window, fetches the Caddy access log over
# SSH using CADDY_SSH_* secrets, then feeds it to collect-engagement.sh.
(while true; do
    # Calculate seconds until next 23:50 UTC
    _now=$(date -u +%s)
    # Two `date -d` spellings for GNU/BusyBox compatibility; falls back to 0
    # when neither parses.
    # NOTE(review): if both `date -d` forms fail, _target becomes 0, then
    # 86400, and `_sleep_secs` goes negative — `sleep` would error and the
    # loop would spin through fetch attempts. Confirm `date -d` support on
    # the target image.
    _target=$(date -u -d "today 23:50" +%s 2>/dev/null || date -u -d "23:50" +%s 2>/dev/null || echo 0)
    if [ "$_target" -le "$_now" ]; then
        # Already past today's window — schedule for tomorrow.
        _target=$(( _target + 86400 ))
    fi
    _sleep_secs=$(( _target - _now ))
    echo "edge: collect-engagement scheduled in ${_sleep_secs}s (next 23:50 UTC)" >&2
    sleep "$_sleep_secs"
    _fetch_log="/tmp/caddy-access-log-fetch.log"
    # Write the SSH key material to a 0600 temp file for scp's -i flag;
    # removed immediately after the transfer regardless of outcome.
    _ssh_key_file=$(mktemp)
    printf '%s\n' "$CADDY_SSH_KEY" > "$_ssh_key_file"
    chmod 0600 "$_ssh_key_file"
    # Best-effort fetch: `|| true` keeps the loop alive on transfer failure;
    # transfer diagnostics are appended to the collect-engagement log.
    scp -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new -o ConnectTimeout=10 -o BatchMode=yes \
        "${CADDY_SSH_USER}@${CADDY_SSH_HOST}:${CADDY_ACCESS_LOG}" \
        "$_fetch_log" 2>&1 | tee -a /opt/disinto-logs/collect-engagement.log || true
    rm -f "$_ssh_key_file"
    # Only parse when the fetched file exists and is non-empty (-s covers
    # both the failed-transfer and empty-log cases).
    if [ -s "$_fetch_log" ]; then
        CADDY_ACCESS_LOG="$_fetch_log" bash /opt/disinto/site/collect-engagement.sh 2>&1 \
            | tee -a /opt/disinto-logs/collect-engagement.log || true
    else
        echo "edge: collect-engagement: fetched log is empty, skipping parse" >&2
    fi
    rm -f "$_fetch_log"
done) &
|
||||||
|
|
||||||
# Caddy as main process — run in foreground via wait so background jobs survive
|
# Caddy as main process — run in foreground via wait so background jobs survive
|
||||||
# (exec replaces the shell, which can orphan backgrounded subshells)
|
# (exec replaces the shell, which can orphan backgrounded subshells)
|
||||||
caddy run --config /etc/caddy/Caddyfile --adapter caddyfile &
|
caddy run --config /etc/caddy/Caddyfile --adapter caddyfile &
|
||||||
|
|
|
||||||
|
|
@ -7,5 +7,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
RUN useradd -m -u 1000 -s /bin/bash agent
|
RUN useradd -m -u 1000 -s /bin/bash agent
|
||||||
COPY docker/reproduce/entrypoint-reproduce.sh /entrypoint-reproduce.sh
|
COPY docker/reproduce/entrypoint-reproduce.sh /entrypoint-reproduce.sh
|
||||||
RUN chmod +x /entrypoint-reproduce.sh
|
RUN chmod +x /entrypoint-reproduce.sh
|
||||||
|
VOLUME /home/agent/data
|
||||||
|
VOLUME /home/agent/repos
|
||||||
|
|
||||||
WORKDIR /home/agent
|
WORKDIR /home/agent
|
||||||
ENTRYPOINT ["/entrypoint-reproduce.sh"]
|
ENTRYPOINT ["/entrypoint-reproduce.sh"]
|
||||||
|
|
|
||||||
|
|
@ -26,8 +26,8 @@ The `main` branch on the ops repo (`johba/disinto-ops`) is protected via Forgejo
|
||||||
|
|
||||||
## Vault PR Lifecycle
|
## Vault PR Lifecycle
|
||||||
|
|
||||||
1. **Request** — Agent calls `lib/vault.sh:vault_request()` with action TOML content
|
1. **Request** — Agent calls `lib/action-vault.sh:vault_request()` with action TOML content
|
||||||
2. **Validation** — TOML is validated against the schema in `vault/vault-env.sh`
|
2. **Validation** — TOML is validated against the schema in `action-vault/vault-env.sh`
|
||||||
3. **PR Creation** — A PR is created on `disinto-ops` with:
|
3. **PR Creation** — A PR is created on `disinto-ops` with:
|
||||||
- Branch: `vault/<action-id>`
|
- Branch: `vault/<action-id>`
|
||||||
- Title: `vault: <action-id>`
|
- Title: `vault: <action-id>`
|
||||||
|
|
@ -90,12 +90,12 @@ To verify the protection is working:
|
||||||
|
|
||||||
- #73 — Vault redesign proposal
|
- #73 — Vault redesign proposal
|
||||||
- #74 — Vault action TOML schema
|
- #74 — Vault action TOML schema
|
||||||
- #75 — Vault PR creation helper (`lib/vault.sh`)
|
- #75 — Vault PR creation helper (`lib/action-vault.sh`)
|
||||||
- #76 — Dispatcher rewrite (poll for merged vault PRs)
|
- #76 — Dispatcher rewrite (poll for merged vault PRs)
|
||||||
- #77 — Branch protection on ops repo (this issue)
|
- #77 — Branch protection on ops repo (this issue)
|
||||||
|
|
||||||
## See Also
|
## See Also
|
||||||
|
|
||||||
- [`lib/vault.sh`](../lib/vault.sh) — Vault PR creation helper
|
- [`lib/action-vault.sh`](../lib/action-vault.sh) — Vault PR creation helper
|
||||||
- [`vault/vault-env.sh`](../vault/vault-env.sh) — TOML validation
|
- [`action-vault/vault-env.sh`](../action-vault/vault-env.sh) — TOML validation
|
||||||
- [`lib/branch-protection.sh`](../lib/branch-protection.sh) — Branch protection helper
|
- [`lib/branch-protection.sh`](../lib/branch-protection.sh) — Branch protection helper
|
||||||
|
|
|
||||||
59
docs/agents-llama.md
Normal file
59
docs/agents-llama.md
Normal file
|
|
@ -0,0 +1,59 @@
|
||||||
|
# agents-llama — Local-Qwen Agents
|
||||||
|
|
||||||
|
The `agents-llama` service is an optional compose service that runs agents
|
||||||
|
backed by a local llama-server instance (e.g. Qwen) instead of the Anthropic
|
||||||
|
API. It uses the same Docker image as the main `agents` service but connects to
|
||||||
|
a local inference endpoint via `ANTHROPIC_BASE_URL`.
|
||||||
|
|
||||||
|
Two profiles are available:
|
||||||
|
|
||||||
|
| Profile | Service | Roles | Use case |
|
||||||
|
|---------|---------|-------|----------|
|
||||||
|
| _(default)_ | `agents-llama` | `dev` only | Conservative: single-role soak test |
|
||||||
|
| `agents-llama-all` | `agents-llama-all` | all 7 (review, dev, gardener, architect, planner, predictor, supervisor) | Pre-migration: validate every role on llama before Nomad cutover |
|
||||||
|
|
||||||
|
## Enabling
|
||||||
|
|
||||||
|
Set `ENABLE_LLAMA_AGENT=1` in `.env` (or `.env.enc`) and provide the required
|
||||||
|
credentials:
|
||||||
|
|
||||||
|
```env
|
||||||
|
ENABLE_LLAMA_AGENT=1
|
||||||
|
FORGE_TOKEN_LLAMA=<dev-qwen API token>
|
||||||
|
FORGE_PASS_LLAMA=<dev-qwen password>
|
||||||
|
ANTHROPIC_BASE_URL=http://host.docker.internal:8081 # llama-server endpoint
|
||||||
|
```
|
||||||
|
|
||||||
|
Then regenerate the compose file (`disinto init ...`) and bring the stack up.
|
||||||
|
|
||||||
|
### Running all 7 roles (agents-llama-all)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose --profile agents-llama-all up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
This starts the `agents-llama-all` container with all 7 bot roles against the
|
||||||
|
local llama endpoint. The per-role forge tokens (`FORGE_REVIEW_TOKEN`,
|
||||||
|
`FORGE_GARDENER_TOKEN`, etc.) must be set in `.env` — they are the same tokens
|
||||||
|
used by the Claude-backed `agents` container.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- **llama-server** (or compatible OpenAI-API endpoint) running on the host,
|
||||||
|
reachable from inside Docker at the URL set in `ANTHROPIC_BASE_URL`.
|
||||||
|
- A Forgejo bot user (e.g. `dev-qwen`) with its own API token and password,
|
||||||
|
stored as `FORGE_TOKEN_LLAMA` / `FORGE_PASS_LLAMA`.
|
||||||
|
|
||||||
|
## Behaviour
|
||||||
|
|
||||||
|
- `agents-llama`: `AGENT_ROLES=dev` — only picks up dev work.
|
||||||
|
- `agents-llama-all`: `AGENT_ROLES=review,dev,gardener,architect,planner,predictor,supervisor` — runs all 7 roles.
|
||||||
|
- `CLAUDE_AUTOCOMPACT_PCT_OVERRIDE=60` — more aggressive compaction for smaller
|
||||||
|
context windows.
|
||||||
|
- Serialises on the llama-server's single KV cache (AD-002).
|
||||||
|
|
||||||
|
## Disabling
|
||||||
|
|
||||||
|
Set `ENABLE_LLAMA_AGENT=0` (or leave it unset) and regenerate. The service
|
||||||
|
block is omitted entirely from `docker-compose.yml`; the stack starts cleanly
|
||||||
|
without it.
|
||||||
149
docs/edge-routing-fallback.md
Normal file
149
docs/edge-routing-fallback.md
Normal file
|
|
@ -0,0 +1,149 @@
|
||||||
|
# Edge Routing Fallback: Per-Project Subdomains
|
||||||
|
|
||||||
|
> **Status:** Contingency plan. Only implement if subpath routing (#704 / #708)
|
||||||
|
> proves unworkable.
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
The primary approach routes services under subpaths of `<project>.disinto.ai`:
|
||||||
|
|
||||||
|
| Service | Primary (subpath) |
|
||||||
|
|------------|--------------------------------------------|
|
||||||
|
| Forgejo | `<project>.disinto.ai/forge/` |
|
||||||
|
| Woodpecker | `<project>.disinto.ai/ci/` |
|
||||||
|
| Chat | `<project>.disinto.ai/chat/` |
|
||||||
|
| Staging | `<project>.disinto.ai/staging/` |
|
||||||
|
|
||||||
|
The fallback uses per-service subdomains instead:
|
||||||
|
|
||||||
|
| Service | Fallback (subdomain) |
|
||||||
|
|------------|--------------------------------------------|
|
||||||
|
| Forgejo | `forge.<project>.disinto.ai/` |
|
||||||
|
| Woodpecker | `ci.<project>.disinto.ai/` |
|
||||||
|
| Chat | `chat.<project>.disinto.ai/` |
|
||||||
|
| Staging | `<project>.disinto.ai/` (root) |
|
||||||
|
|
||||||
|
The wildcard cert from #621 already covers `*.<project>.disinto.ai`, so no
|
||||||
|
new certs are needed for sub-subdomains. DNS is a separate matter: a
|
||||||
|
`*.disinto.ai` wildcard record only matches one level deep, so for sub-subdomains like `forge.<project>.disinto.ai`
|
||||||
|
we would need to add a second wildcard (`*.*.disinto.ai`) or explicit DNS
|
||||||
|
records per project. Both are straightforward with the existing Gandi DNS-01
|
||||||
|
setup.
|
||||||
|
|
||||||
|
## Pivot Decision Criteria
|
||||||
|
|
||||||
|
**Pivot if:**
|
||||||
|
|
||||||
|
- Forgejo `ROOT_URL` under a subpath (`/forge/`) causes redirect loops that
|
||||||
|
cannot be fixed with `X-Forwarded-Prefix` or Caddy `uri strip_prefix`.
|
||||||
|
- Woodpecker's `WOODPECKER_HOST` does not honour subpath prefixes, causing
|
||||||
|
OAuth callback mismatches that persist after adjusting redirect URIs.
|
||||||
|
- Forward-auth on `/chat/*` conflicts with Forgejo's own OAuth flow when both
|
||||||
|
share the same origin (cookie collision, CSRF token mismatch).
|
||||||
|
|
||||||
|
**Do NOT pivot if:**
|
||||||
|
|
||||||
|
- Forgejo login redirects to `/` instead of `/forge/` — fixable with Caddy
|
||||||
|
`handle_path` + `uri prefix` rewrite.
|
||||||
|
- Woodpecker UI assets 404 under `/ci/` — fixable with asset prefix config
|
||||||
|
(`WOODPECKER_ROOT_PATH`).
|
||||||
|
- A single OAuth app needs a second redirect URI — Forgejo supports multiple
|
||||||
|
`redirect_uris` in the same app.
|
||||||
|
|
||||||
|
## Fallback Topology
|
||||||
|
|
||||||
|
### Caddyfile
|
||||||
|
|
||||||
|
Replace the single `:80` block with four host blocks:
|
||||||
|
|
||||||
|
```caddy
|
||||||
|
# Main project domain — staging / landing
|
||||||
|
<project>.disinto.ai {
|
||||||
|
reverse_proxy staging:80
|
||||||
|
}
|
||||||
|
|
||||||
|
# Forgejo — root path, no subpath rewrite needed
|
||||||
|
forge.<project>.disinto.ai {
|
||||||
|
reverse_proxy forgejo:3000
|
||||||
|
}
|
||||||
|
|
||||||
|
# Woodpecker CI — root path
|
||||||
|
ci.<project>.disinto.ai {
|
||||||
|
reverse_proxy woodpecker:8000
|
||||||
|
}
|
||||||
|
|
||||||
|
# Chat — with forward_auth (same as #709, but on its own host)
|
||||||
|
chat.<project>.disinto.ai {
|
||||||
|
handle /login {
|
||||||
|
reverse_proxy chat:8080
|
||||||
|
}
|
||||||
|
handle /oauth/callback {
|
||||||
|
reverse_proxy chat:8080
|
||||||
|
}
|
||||||
|
handle /* {
|
||||||
|
forward_auth chat:8080 {
|
||||||
|
uri /auth/verify
|
||||||
|
copy_headers X-Forwarded-User
|
||||||
|
header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET}
|
||||||
|
}
|
||||||
|
reverse_proxy chat:8080
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Current file:** `docker/Caddyfile` (generated by `lib/generators.sh:_generate_caddyfile_impl`, line ~596).
|
||||||
|
|
||||||
|
### Service Configuration Changes
|
||||||
|
|
||||||
|
| Variable / Setting | Current (subpath) | Fallback (subdomain) | File |
|
||||||
|
|----------------------------|------------------------------------------------|-------------------------------------------------|-----------------------------|
|
||||||
|
| Forgejo `ROOT_URL` | `https://<project>.disinto.ai/forge/` | `https://forge.<project>.disinto.ai/` | forgejo `app.ini` |
|
||||||
|
| `WOODPECKER_HOST` | `http://localhost:8000` (subpath via proxy) | `https://ci.<project>.disinto.ai` | `lib/ci-setup.sh` line ~164 |
|
||||||
|
| Woodpecker OAuth redirect | `https://<project>.disinto.ai/ci/authorize` | `https://ci.<project>.disinto.ai/authorize` | `lib/ci-setup.sh` line ~153 |
|
||||||
|
| Chat OAuth redirect | `https://<project>.disinto.ai/chat/oauth/callback` | `https://chat.<project>.disinto.ai/oauth/callback` | `lib/ci-setup.sh` line ~188 |
|
||||||
|
| `EDGE_TUNNEL_FQDN` | `<project>.disinto.ai` | unchanged (main domain) | `lib/generators.sh` line ~432 |
|
||||||
|
|
||||||
|
### New Environment Variables (pivot only)
|
||||||
|
|
||||||
|
These would be added to `lib/generators.sh` `_generate_compose_impl()` in the
|
||||||
|
edge service environment block (currently line ~415):
|
||||||
|
|
||||||
|
| Variable | Value |
|
||||||
|
|------------------------------|----------------------------------------|
|
||||||
|
| `EDGE_TUNNEL_FQDN_FORGE` | `forge.<project>.disinto.ai` |
|
||||||
|
| `EDGE_TUNNEL_FQDN_CI` | `ci.<project>.disinto.ai` |
|
||||||
|
| `EDGE_TUNNEL_FQDN_CHAT` | `chat.<project>.disinto.ai` |
|
||||||
|
|
||||||
|
### DNS
|
||||||
|
|
||||||
|
No new records are needed if the DNS provider supports `*.*.disinto.ai` wildcards.
|
||||||
|
Otherwise, add explicit A/CNAME records per project:
|
||||||
|
|
||||||
|
```
|
||||||
|
forge.<project>.disinto.ai → edge server IP
|
||||||
|
ci.<project>.disinto.ai → edge server IP
|
||||||
|
chat.<project>.disinto.ai → edge server IP
|
||||||
|
```
|
||||||
|
|
||||||
|
The edge server already handles TLS via Caddy's automatic HTTPS with the
|
||||||
|
existing ACME / DNS-01 challenge.
|
||||||
|
|
||||||
|
### Edge Control (`tools/edge-control/register.sh`)
|
||||||
|
|
||||||
|
Currently `do_register()` creates a single route for `<project>.disinto.ai`.
|
||||||
|
The fallback would need to register four routes (or accept a `--subdomain`
|
||||||
|
parameter). See the TODO in `register.sh`.
|
||||||
|
|
||||||
|
## Files to Change on Pivot
|
||||||
|
|
||||||
|
| File | What changes |
|
||||||
|
|-----------------------------------|-----------------------------------------------------------------|
|
||||||
|
| `docker/Caddyfile` | Replace single host block → four host blocks (see above) |
|
||||||
|
| `lib/generators.sh` | Add `EDGE_TUNNEL_FQDN_{FORGE,CI,CHAT}` env vars to compose |
|
||||||
|
| `lib/ci-setup.sh` ~line 153 | Woodpecker OAuth redirect URI → `ci.<project>` subdomain |
|
||||||
|
| `lib/ci-setup.sh` ~line 188 | Chat OAuth redirect URI → `chat.<project>` subdomain |
|
||||||
|
| `tools/edge-control/register.sh` | Register four routes per project instead of one |
|
||||||
|
| `tools/edge-control/lib/caddy.sh`| `add_route()` gains subdomain support |
|
||||||
|
| forgejo `app.ini` | `ROOT_URL` → `https://forge.<project>.disinto.ai/` |
|
||||||
|
|
||||||
|
Estimated effort for a full pivot: **under one day** given this plan.
|
||||||
59
docs/mirror-bootstrap.md
Normal file
59
docs/mirror-bootstrap.md
Normal file
|
|
@ -0,0 +1,59 @@
|
||||||
|
# Mirror Bootstrap — Pull-Mirror Cutover Path
|
||||||
|
|
||||||
|
How to populate an empty Forgejo repo from an external source using
|
||||||
|
`lib/mirrors.sh`'s `mirror_pull_register()`.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
| Variable | Example | Purpose |
|
||||||
|
|---|---|---|
|
||||||
|
| `FORGE_URL` | `http://forgejo:3000` | Forgejo instance base URL |
|
||||||
|
| `FORGE_API_BASE` | `${FORGE_URL}/api/v1` | Global API base (set by `lib/env.sh`) |
|
||||||
|
| `FORGE_TOKEN` | (admin or org-owner token) | Must have `repo:create` scope |
|
||||||
|
|
||||||
|
The target org/user must already exist on the Forgejo instance.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
source lib/env.sh
|
||||||
|
source lib/mirrors.sh
|
||||||
|
|
||||||
|
# Register a pull mirror — creates the repo and starts the first sync.
|
||||||
|
mirror_pull_register \
|
||||||
|
"https://codeberg.org/johba/disinto.git" \ # source URL
|
||||||
|
"disinto-admin" \ # target owner
|
||||||
|
"disinto" \ # target repo name
|
||||||
|
"8h0m0s" # sync interval (optional, default 8h)
|
||||||
|
```
|
||||||
|
|
||||||
|
The function calls `POST /api/v1/repos/migrate` with `mirror: true`.
|
||||||
|
Forgejo creates the repo and immediately queues the first sync.
|
||||||
|
|
||||||
|
## Verifying the sync
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check mirror status via API
|
||||||
|
forge_api GET "/repos/disinto-admin/disinto" | jq '.mirror, .mirror_interval'
|
||||||
|
|
||||||
|
# Confirm content arrived — should list branches
|
||||||
|
forge_api GET "/repos/disinto-admin/disinto/branches" | jq '.[].name'
|
||||||
|
```
|
||||||
|
|
||||||
|
The first sync typically completes within a few seconds for small-to-medium
|
||||||
|
repos. For large repos, poll the branches endpoint until content appears.
|
||||||
|
|
||||||
|
## Cutover scenario (Nomad migration)
|
||||||
|
|
||||||
|
At cutover to the Nomad box:
|
||||||
|
|
||||||
|
1. Stand up fresh Forgejo on the Nomad cluster (empty instance).
|
||||||
|
2. Create the `disinto-admin` org via `disinto init` or API.
|
||||||
|
3. Run `mirror_pull_register` pointing at the Codeberg source.
|
||||||
|
4. Wait for sync to complete (check branches endpoint).
|
||||||
|
5. Once content is confirmed, proceed with `disinto init` against the
|
||||||
|
now-populated repo — all subsequent `mirror_push` calls will push
|
||||||
|
to any additional mirrors configured in `projects/*.toml`.
|
||||||
|
|
||||||
|
No manual `git clone` + `git push` step is needed. The Forgejo pull-mirror
|
||||||
|
handles the entire transfer.
|
||||||
172
formulas/collect-engagement.toml
Normal file
172
formulas/collect-engagement.toml
Normal file
|
|
@ -0,0 +1,172 @@
|
||||||
|
# formulas/collect-engagement.toml — Collect website engagement data
|
||||||
|
#
|
||||||
|
# Daily formula: SSH into Caddy host, fetch access log, parse locally,
|
||||||
|
# commit evidence JSON to ops repo via Forgejo API.
|
||||||
|
#
|
||||||
|
# Triggered by cron in the edge container entrypoint (daily at 23:50 UTC).
|
||||||
|
# Design choices from #426: Q1=A (fetch raw log, process locally),
|
||||||
|
# Q2=A (direct cron in edge container), Q3=B (dedicated purpose-limited SSH key).
|
||||||
|
#
|
||||||
|
# Steps: fetch-log → parse-engagement → commit-evidence
|
||||||
|
|
||||||
|
name = "collect-engagement"
|
||||||
|
description = "SSH-fetch Caddy access log, parse engagement metrics, commit evidence"
|
||||||
|
version = 1
|
||||||
|
|
||||||
|
[context]
|
||||||
|
files = ["AGENTS.md"]
|
||||||
|
|
||||||
|
[vars.caddy_host]
|
||||||
|
description = "SSH host for the Caddy server"
|
||||||
|
required = false
|
||||||
|
default = "${CADDY_SSH_HOST:-disinto.ai}"
|
||||||
|
|
||||||
|
[vars.caddy_user]
|
||||||
|
description = "SSH user on the Caddy host"
|
||||||
|
required = false
|
||||||
|
default = "${CADDY_SSH_USER:-debian}"
|
||||||
|
|
||||||
|
[vars.caddy_log_path]
|
||||||
|
description = "Path to Caddy access log on the remote host"
|
||||||
|
required = false
|
||||||
|
default = "${CADDY_ACCESS_LOG:-/var/log/caddy/access.log}"
|
||||||
|
|
||||||
|
[vars.local_log_path]
|
||||||
|
description = "Local path to store fetched access log"
|
||||||
|
required = false
|
||||||
|
default = "/tmp/caddy-access-log-fetch.log"
|
||||||
|
|
||||||
|
[vars.evidence_dir]
|
||||||
|
description = "Evidence output directory in the ops repo"
|
||||||
|
required = false
|
||||||
|
default = "evidence/engagement"
|
||||||
|
|
||||||
|
# ── Step 1: SSH fetch ────────────────────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "fetch-log"
|
||||||
|
title = "Fetch Caddy access log from remote host via SSH"
|
||||||
|
description = """
|
||||||
|
Fetch today's Caddy access log segment from the remote host using SCP.
|
||||||
|
|
||||||
|
The SSH key is read from the environment (CADDY_SSH_KEY), which is
|
||||||
|
decrypted from secrets/CADDY_SSH_KEY.enc by the edge entrypoint. It is NEVER hardcoded.
|
||||||
|
|
||||||
|
1. Write the SSH key to a temporary file with restricted permissions:
|
||||||
|
_ssh_key_file=$(mktemp)
|
||||||
|
trap 'rm -f "$_ssh_key_file"' EXIT
|
||||||
|
printf '%s\n' "$CADDY_SSH_KEY" > "$_ssh_key_file"
|
||||||
|
chmod 0600 "$_ssh_key_file"
|
||||||
|
|
||||||
|
2. Verify connectivity:
|
||||||
|
ssh -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new \
|
||||||
|
-o ConnectTimeout=10 -o BatchMode=yes \
|
||||||
|
{{caddy_user}}@{{caddy_host}} 'echo ok'
|
||||||
|
|
||||||
|
3. Fetch the access log via scp:
|
||||||
|
scp -i "$_ssh_key_file" -o StrictHostKeyChecking=accept-new \
|
||||||
|
-o ConnectTimeout=10 -o BatchMode=yes \
|
||||||
|
"{{caddy_user}}@{{caddy_host}}:{{caddy_log_path}}" \
|
||||||
|
"{{local_log_path}}"
|
||||||
|
|
||||||
|
4. Verify the fetched file is non-empty:
|
||||||
|
if [ ! -s "{{local_log_path}}" ]; then
|
||||||
|
echo "WARNING: fetched access log is empty — site may have no traffic"
|
||||||
|
else
|
||||||
|
echo "Fetched $(wc -l < "{{local_log_path}}") lines from {{caddy_host}}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
5. Clean up the temporary key file:
|
||||||
|
rm -f "$_ssh_key_file"
|
||||||
|
"""
|
||||||
|
|
||||||
|
# ── Step 2: Parse engagement ─────────────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "parse-engagement"
|
||||||
|
title = "Run collect-engagement.sh against the local log copy"
|
||||||
|
description = """
|
||||||
|
Run the engagement parser against the locally fetched access log.
|
||||||
|
|
||||||
|
1. Set CADDY_ACCESS_LOG to point at the local copy so collect-engagement.sh
|
||||||
|
reads from it instead of the default path:
|
||||||
|
export CADDY_ACCESS_LOG="{{local_log_path}}"
|
||||||
|
|
||||||
|
2. Run the parser:
|
||||||
|
bash "$FACTORY_ROOT/site/collect-engagement.sh"
|
||||||
|
|
||||||
|
3. Verify the evidence JSON was written:
|
||||||
|
REPORT_DATE=$(date -u +%Y-%m-%d)
|
||||||
|
EVIDENCE_FILE="${OPS_REPO_ROOT}/{{evidence_dir}}/${REPORT_DATE}.json"
|
||||||
|
if [ -f "$EVIDENCE_FILE" ]; then
|
||||||
|
echo "Evidence written: $EVIDENCE_FILE"
|
||||||
|
jq . "$EVIDENCE_FILE"
|
||||||
|
else
|
||||||
|
echo "ERROR: evidence file not found at $EVIDENCE_FILE"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
4. Clean up the fetched log:
|
||||||
|
rm -f "{{local_log_path}}"
|
||||||
|
"""
|
||||||
|
needs = ["fetch-log"]
|
||||||
|
|
||||||
|
# ── Step 3: Commit evidence ──────────────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "commit-evidence"
|
||||||
|
title = "Commit evidence JSON to ops repo via Forgejo API"
|
||||||
|
description = """
|
||||||
|
Commit the dated evidence JSON to the ops repo so the planner can
|
||||||
|
consume it during gap analysis.
|
||||||
|
|
||||||
|
1. Read the evidence file:
|
||||||
|
REPORT_DATE=$(date -u +%Y-%m-%d)
|
||||||
|
EVIDENCE_FILE="${OPS_REPO_ROOT}/{{evidence_dir}}/${REPORT_DATE}.json"
|
||||||
|
CONTENT=$(base64 < "$EVIDENCE_FILE")
|
||||||
|
|
||||||
|
2. Check if the file already exists in the ops repo (update vs create):
|
||||||
|
OPS_OWNER="${OPS_FORGE_OWNER:-${FORGE_REPO%%/*}}"
|
||||||
|
OPS_REPO="${OPS_FORGE_REPO:-${PROJECT_NAME:-disinto}-ops}"
|
||||||
|
FILE_PATH="{{evidence_dir}}/${REPORT_DATE}.json"
|
||||||
|
|
||||||
|
EXISTING=$(curl -sf \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
||||||
|
2>/dev/null || echo "")
|
||||||
|
|
||||||
|
3. Create or update the file via Forgejo API:
|
||||||
|
if [ -n "$EXISTING" ] && printf '%s' "$EXISTING" | jq -e '.sha' >/dev/null 2>&1; then
|
||||||
|
# Update existing file
|
||||||
|
SHA=$(printf '%s' "$EXISTING" | jq -r '.sha')
|
||||||
|
curl -sf -X PUT \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
||||||
|
-d "$(jq -nc --arg content "$CONTENT" --arg sha "$SHA" --arg msg "evidence: engagement ${REPORT_DATE}" \
|
||||||
|
'{message: $msg, content: $content, sha: $sha}')"
|
||||||
|
echo "Updated existing evidence file in ops repo"
|
||||||
|
else
|
||||||
|
# Create new file
|
||||||
|
curl -sf -X POST \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
||||||
|
-d "$(jq -nc --arg content "$CONTENT" --arg msg "evidence: engagement ${REPORT_DATE}" \
|
||||||
|
'{message: $msg, content: $content}')"
|
||||||
|
echo "Created evidence file in ops repo"
|
||||||
|
fi
|
||||||
|
|
||||||
|
4. Verify the commit landed:
|
||||||
|
VERIFY=$(curl -sf \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_URL}/api/v1/repos/${OPS_OWNER}/${OPS_REPO}/contents/${FILE_PATH}" \
|
||||||
|
| jq -r '.name // empty')
|
||||||
|
if [ "$VERIFY" = "${REPORT_DATE}.json" ]; then
|
||||||
|
echo "Evidence committed: ${FILE_PATH}"
|
||||||
|
else
|
||||||
|
echo "ERROR: could not verify evidence commit"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
"""
|
||||||
|
needs = ["parse-engagement"]
|
||||||
161
formulas/rent-a-human-caddy-ssh.toml
Normal file
161
formulas/rent-a-human-caddy-ssh.toml
Normal file
|
|
@ -0,0 +1,161 @@
|
||||||
|
# formulas/rent-a-human-caddy-ssh.toml — Provision SSH key for Caddy log collection
|
||||||
|
#
|
||||||
|
# "Rent a Human" — walk the operator through provisioning a purpose-limited
|
||||||
|
# SSH keypair so collect-engagement.sh can fetch Caddy access logs remotely.
|
||||||
|
#
|
||||||
|
# The key uses a `command=` restriction so it can ONLY cat the access log.
|
||||||
|
# No interactive shell, no port forwarding, no agent forwarding.
|
||||||
|
#
|
||||||
|
# Parent vision issue: #426
|
||||||
|
# Sprint: website-observability-wire-up (ops PR #10)
|
||||||
|
# Consumed by: site/collect-engagement.sh (issue #745)
|
||||||
|
|
||||||
|
name = "rent-a-human-caddy-ssh"
|
||||||
|
description = "Provision a purpose-limited SSH keypair for remote Caddy log collection"
|
||||||
|
version = 1
|
||||||
|
|
||||||
|
# ── Step 1: Generate keypair ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "generate-keypair"
|
||||||
|
title = "Generate a dedicated ed25519 keypair"
|
||||||
|
description = """
|
||||||
|
Generate a purpose-limited SSH keypair for Caddy log collection.
|
||||||
|
|
||||||
|
Run on your local machine (NOT the Caddy host):
|
||||||
|
|
||||||
|
```
|
||||||
|
ssh-keygen -t ed25519 -f caddy-collect -N '' -C 'disinto-collect-engagement'
|
||||||
|
```
|
||||||
|
|
||||||
|
This produces two files:
|
||||||
|
- caddy-collect (private key — goes into the vault)
|
||||||
|
- caddy-collect.pub (public key — goes onto the Caddy host)
|
||||||
|
|
||||||
|
Do NOT set a passphrase (-N '') — the factory runs unattended.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# ── Step 2: Install public key on Caddy host ─────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "install-public-key"
|
||||||
|
title = "Install the public key on the Caddy host with command= restriction"
|
||||||
|
needs = ["generate-keypair"]
|
||||||
|
description = """
|
||||||
|
Install the public key on the Caddy host with a strict command= restriction
|
||||||
|
so this key can ONLY read the access log.
|
||||||
|
|
||||||
|
1. SSH into the Caddy host as the user who owns /var/log/caddy/access.log.
|
||||||
|
|
||||||
|
2. Open (or create) ~/.ssh/authorized_keys:
|
||||||
|
mkdir -p ~/.ssh && chmod 700 ~/.ssh
|
||||||
|
nano ~/.ssh/authorized_keys
|
||||||
|
|
||||||
|
3. Add this line (all on ONE line — do not wrap):
|
||||||
|
|
||||||
|
command="cat /var/log/caddy/access.log",no-port-forwarding,no-X11-forwarding,no-agent-forwarding ssh-ed25519 AAAA... disinto-collect-engagement
|
||||||
|
|
||||||
|
Replace "AAAA..." with the contents of caddy-collect.pub.
|
||||||
|
|
||||||
|
To build the line automatically:
|
||||||
|
echo "command=\"cat /var/log/caddy/access.log\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding $(cat caddy-collect.pub)"
|
||||||
|
|
||||||
|
4. Set permissions:
|
||||||
|
chmod 600 ~/.ssh/authorized_keys
|
||||||
|
|
||||||
|
What the restrictions do:
|
||||||
|
- command="cat /var/log/caddy/access.log"
|
||||||
|
Forces this key to only execute `cat /var/log/caddy/access.log`,
|
||||||
|
regardless of what the client requests.
|
||||||
|
- no-port-forwarding — blocks SSH tunnels
|
||||||
|
- no-X11-forwarding — blocks X11
|
||||||
|
- no-agent-forwarding — blocks agent forwarding
|
||||||
|
|
||||||
|
If the access log is at a different path, update the command= restriction
|
||||||
|
AND set CADDY_ACCESS_LOG in the factory environment to match.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# ── Step 3: Add private key to vault secrets ─────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "store-private-key"
|
||||||
|
title = "Add the private key as CADDY_SSH_KEY secret"
|
||||||
|
needs = ["generate-keypair"]
|
||||||
|
description = """
|
||||||
|
Store the private key in the factory's encrypted secrets store.
|
||||||
|
|
||||||
|
1. Add the private key using `disinto secrets add`:
|
||||||
|
|
||||||
|
cat caddy-collect | disinto secrets add CADDY_SSH_KEY
|
||||||
|
|
||||||
|
This encrypts the key with age and stores it as secrets/CADDY_SSH_KEY.enc.
|
||||||
|
|
||||||
|
2. IMPORTANT: After storing, securely delete the local private key file:
|
||||||
|
shred -u caddy-collect 2>/dev/null || rm -f caddy-collect
|
||||||
|
rm -f caddy-collect.pub
|
||||||
|
|
||||||
|
The public key is already installed on the Caddy host; the private key
|
||||||
|
now lives only in secrets/CADDY_SSH_KEY.enc.
|
||||||
|
|
||||||
|
Never commit the private key to any git repository.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# ── Step 4: Configure Caddy host address ─────────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "store-caddy-host"
|
||||||
|
title = "Add the Caddy host details as secrets"
|
||||||
|
needs = ["install-public-key"]
|
||||||
|
description = """
|
||||||
|
Store the Caddy connection details so collect-engagement.sh knows
|
||||||
|
where to SSH.
|
||||||
|
|
||||||
|
1. Add each value using `disinto secrets add`:
|
||||||
|
|
||||||
|
echo 'disinto.ai' | disinto secrets add CADDY_SSH_HOST
|
||||||
|
echo 'debian' | disinto secrets add CADDY_SSH_USER
|
||||||
|
echo '/var/log/caddy/access.log' | disinto secrets add CADDY_ACCESS_LOG
|
||||||
|
|
||||||
|
Replace values with the actual SSH host, user, and log path for your setup.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# ── Step 5: Test the connection ──────────────────────────────────────────────
|
||||||
|
|
||||||
|
[[steps]]
|
||||||
|
id = "test-connection"
|
||||||
|
title = "Verify the SSH key works and returns the access log"
|
||||||
|
needs = ["install-public-key", "store-private-key", "store-caddy-host"]
|
||||||
|
description = """
|
||||||
|
Test the end-to-end connection before the factory tries to use it.
|
||||||
|
|
||||||
|
1. From the factory host (or anywhere with the private key), run:
|
||||||
|
|
||||||
|
ssh -i caddy-collect -o StrictHostKeyChecking=accept-new user@caddy-host
|
||||||
|
|
||||||
|
Expected behavior:
|
||||||
|
- Outputs the contents of /var/log/caddy/access.log
|
||||||
|
- Disconnects immediately (command= restriction forces this)
|
||||||
|
|
||||||
|
If you already shredded the local key, recreate it from the vault secret (CADDY_SSH_KEY holds the raw key text, not base64):
|
||||||
|
       printf '%s\n' "$CADDY_SSH_KEY" > /tmp/caddy-collect-test
|
||||||
|
       chmod 600 /tmp/caddy-collect-test
|
||||||
|
       ssh -i /tmp/caddy-collect-test -o StrictHostKeyChecking=accept-new user@caddy-host
|
||||||
|
       rm -f /tmp/caddy-collect-test   # defer this until after step 2 below, which reuses the key file
|
||||||
|
|
||||||
|
2. Verify the output is Caddy structured JSON (one JSON object per line):
|
||||||
|
ssh -i /tmp/caddy-collect-test user@caddy-host | head -1 | jq .
|
||||||
|
|
||||||
|
You should see fields like: ts, request, status, duration.
|
||||||
|
|
||||||
|
3. If the connection fails:
|
||||||
|
- Permission denied → check authorized_keys format (must be one line)
|
||||||
|
- Connection refused → check sshd is running on the Caddy host
|
||||||
|
- Empty output → check /var/log/caddy/access.log exists and is readable
|
||||||
|
by the SSH user
|
||||||
|
- "jq: error" → Caddy may be using Combined Log Format instead of
|
||||||
|
structured JSON; check Caddy's log configuration
|
||||||
|
|
||||||
|
4. Once verified, the factory's collect-engagement.sh can use this key
|
||||||
|
to fetch logs remotely via:
|
||||||
|
       ssh -i <decoded-key-path> "${CADDY_SSH_USER}@${CADDY_SSH_HOST}"
|
||||||
|
"""
|
||||||
|
|
@ -213,7 +213,7 @@ should file a vault item instead of executing directly.
|
||||||
**Exceptions** (do NOT flag these):
|
**Exceptions** (do NOT flag these):
|
||||||
- Code inside `vault/` — the vault system itself is allowed to handle secrets
|
- Code inside `vault/` — the vault system itself is allowed to handle secrets
|
||||||
- References in comments or documentation explaining the architecture
|
- References in comments or documentation explaining the architecture
|
||||||
- `bin/disinto` setup commands that manage `.env.vault.enc` and the `run` subcommand
|
- `bin/disinto` setup commands that manage `secrets/*.enc` and the `run` subcommand
|
||||||
- Local operations (git push to forge, forge API calls with `FORGE_TOKEN`)
|
- Local operations (git push to forge, forge API calls with `FORGE_TOKEN`)
|
||||||
|
|
||||||
## 6. Re-review (if previous review is provided)
|
## 6. Re-review (if previous review is provided)
|
||||||
|
|
|
||||||
|
|
@ -16,7 +16,14 @@
|
||||||
# - Bash creates the ops PR with pitch content
|
# - Bash creates the ops PR with pitch content
|
||||||
# - Bash posts the ACCEPT/REJECT footer comment
|
# - Bash posts the ACCEPT/REJECT footer comment
|
||||||
# Step 3: Sprint PR creation with questions (issue #101) (one PR per pitch)
|
# Step 3: Sprint PR creation with questions (issue #101) (one PR per pitch)
|
||||||
# Step 4: Answer parsing + sub-issue filing (issue #102)
|
# Step 4: Post-merge sub-issue filing via filer-bot (#764)
|
||||||
|
#
|
||||||
|
# Permission model (#764):
|
||||||
|
# architect-bot: READ-ONLY on project repo (GET issues/PRs/labels for context).
|
||||||
|
# Cannot POST/PUT/PATCH/DELETE any project-repo resource.
|
||||||
|
# Write access ONLY on ops repo (branches, PRs, comments).
|
||||||
|
# filer-bot: issues:write on project repo. Files sub-issues from merged sprint
|
||||||
|
# PRs via ops-filer pipeline. Adds in-progress label to vision issues.
|
||||||
#
|
#
|
||||||
# Architecture:
|
# Architecture:
|
||||||
# - Bash script (architect-run.sh) handles ALL state management
|
# - Bash script (architect-run.sh) handles ALL state management
|
||||||
|
|
@ -146,15 +153,32 @@ For each issue in ARCHITECT_TARGET_ISSUES, bash performs:
|
||||||
## Recommendation
|
## Recommendation
|
||||||
<architect's assessment: worth it / defer / alternative approach>
|
<architect's assessment: worth it / defer / alternative approach>
|
||||||
|
|
||||||
|
## Sub-issues
|
||||||
|
|
||||||
|
<!-- filer:begin -->
|
||||||
|
- id: <kebab-case-id>
|
||||||
|
title: "vision(#N): <concise sub-issue title>"
|
||||||
|
labels: [backlog]
|
||||||
|
depends_on: []
|
||||||
|
body: |
|
||||||
|
## Goal
|
||||||
|
<what this sub-issue accomplishes>
|
||||||
|
## Acceptance criteria
|
||||||
|
- [ ] <criterion>
|
||||||
|
<!-- filer:end -->
|
||||||
|
|
||||||
IMPORTANT: Do NOT include design forks or questions yet. The pitch is a go/no-go
|
IMPORTANT: Do NOT include design forks or questions yet. The pitch is a go/no-go
|
||||||
decision for the human. Questions come only after acceptance.
|
decision for the human. Questions come only after acceptance.
|
||||||
|
The ## Sub-issues block is parsed by the filer-bot pipeline after sprint PR merge.
|
||||||
|
Each sub-issue between filer:begin/end markers becomes a Forgejo issue on the
|
||||||
|
project repo. The filer appends a decomposed-from marker to each body automatically.
|
||||||
|
|
||||||
4. Bash creates PR:
|
4. Bash creates PR:
|
||||||
- Create branch: architect/sprint-{pitch-number}
|
- Create branch: architect/sprint-{pitch-number}
|
||||||
- Write sprint spec to sprints/{sprint-slug}.md
|
- Write sprint spec to sprints/{sprint-slug}.md
|
||||||
- Create PR with pitch content as body
|
- Create PR with pitch content as body
|
||||||
- Post footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
- Post footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
||||||
- Add in-progress label to vision issue
|
- NOTE: in-progress label is added by filer-bot after sprint PR merge (#764)
|
||||||
|
|
||||||
Output:
|
Output:
|
||||||
- One PR per vision issue (up to 3 per run)
|
- One PR per vision issue (up to 3 per run)
|
||||||
|
|
@ -185,6 +209,9 @@ This ensures approved PRs don't sit indefinitely without design conversation.
|
||||||
Architecture:
|
Architecture:
|
||||||
- Bash creates PRs during stateless pitch generation (step 2)
|
- Bash creates PRs during stateless pitch generation (step 2)
|
||||||
- Model has no role in PR creation — no Forgejo API access
|
- Model has no role in PR creation — no Forgejo API access
|
||||||
|
- architect-bot is READ-ONLY on the project repo (#764) — all project-repo
|
||||||
|
writes (sub-issue filing, in-progress label) are handled by filer-bot
|
||||||
|
via the ops-filer pipeline after sprint PR merge
|
||||||
- This step describes the PR format for reference
|
- This step describes the PR format for reference
|
||||||
|
|
||||||
PR Format (created by bash):
|
PR Format (created by bash):
|
||||||
|
|
@ -201,64 +228,29 @@ PR Format (created by bash):
|
||||||
- Head: architect/sprint-{pitch-number}
|
- Head: architect/sprint-{pitch-number}
|
||||||
- Footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
- Footer comment: "Reply ACCEPT to proceed with design questions, or REJECT: <reason> to decline."
|
||||||
|
|
||||||
4. Add in-progress label to vision issue:
|
|
||||||
- Look up label ID: GET /repos/{owner}/{repo}/labels
|
|
||||||
- Add label: POST /repos/{owner}/{repo}/issues/{issue_number}/labels
|
|
||||||
|
|
||||||
After creating all PRs, signal PHASE:done.
|
After creating all PRs, signal PHASE:done.
|
||||||
|
NOTE: in-progress label on the vision issue is added by filer-bot after sprint PR merge (#764).
|
||||||
|
|
||||||
## Forgejo API Reference
|
## Forgejo API Reference (ops repo only)
|
||||||
|
|
||||||
All operations use the Forgejo API with Authorization: token ${FORGE_TOKEN} header.
|
All operations use the ops repo Forgejo API with `Authorization: token ${FORGE_TOKEN}` header.
|
||||||
|
architect-bot is READ-ONLY on the project repo — cannot POST/PUT/PATCH/DELETE project-repo resources (#764).
|
||||||
|
|
||||||
### Create branch
|
### Create branch (ops repo)
|
||||||
```
|
```
|
||||||
POST /repos/{owner}/{repo}/branches
|
POST /repos/{owner}/{repo-ops}/branches
|
||||||
Body: {"new_branch_name": "architect/<sprint-slug>", "old_branch_name": "main"}
|
Body: {"new_branch_name": "architect/<sprint-slug>", "old_branch_name": "main"}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create/update file
|
### Create/update file (ops repo)
|
||||||
```
|
```
|
||||||
PUT /repos/{owner}/{repo}/contents/<path>
|
PUT /repos/{owner}/{repo-ops}/contents/<path>
|
||||||
Body: {"message": "sprint: add <sprint-slug>.md", "content": "<base64-encoded-content>", "branch": "architect/<sprint-slug>"}
|
Body: {"message": "sprint: add <sprint-slug>.md", "content": "<base64-encoded-content>", "branch": "architect/<sprint-slug>"}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create PR
|
### Create PR (ops repo)
|
||||||
```
|
```
|
||||||
POST /repos/{owner}/{repo}/pulls
|
POST /repos/{owner}/{repo-ops}/pulls
|
||||||
Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head": "architect/<sprint-slug>", "base": "main"}
|
|
||||||
```
|
|
||||||
|
|
||||||
**Important: PR body format**
|
|
||||||
- The body field must contain plain markdown text (the raw content from the model)
|
|
||||||
- Do NOT JSON-encode or escape the body — pass it as a JSON string value
|
|
||||||
- Newlines and markdown formatting (headings, lists, etc.) must be preserved as-is
|
|
||||||
|
|
||||||
### Add label to issue
|
|
||||||
```
|
|
||||||
POST /repos/{owner}/{repo}/issues/{index}/labels
|
|
||||||
Body: {"labels": [<label-id>]}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Forgejo API Reference
|
|
||||||
|
|
||||||
All operations use the Forgejo API with `Authorization: token ${FORGE_TOKEN}` header.
|
|
||||||
|
|
||||||
### Create branch
|
|
||||||
```
|
|
||||||
POST /repos/{owner}/{repo}/branches
|
|
||||||
Body: {"new_branch_name": "architect/<sprint-slug>", "old_branch_name": "main"}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Create/update file
|
|
||||||
```
|
|
||||||
PUT /repos/{owner}/{repo}/contents/<path>
|
|
||||||
Body: {"message": "sprint: add <sprint-slug>.md", "content": "<base64-encoded-content>", "branch": "architect/<sprint-slug>"}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Create PR
|
|
||||||
```
|
|
||||||
POST /repos/{owner}/{repo}/pulls
|
|
||||||
Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head": "architect/<sprint-slug>", "base": "main"}
|
Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head": "architect/<sprint-slug>", "base": "main"}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
@ -267,30 +259,22 @@ Body: {"title": "architect: <sprint summary>", "body": "<markdown-text>", "head"
|
||||||
- Do NOT JSON-encode or escape the body — pass it as a JSON string value
|
- Do NOT JSON-encode or escape the body — pass it as a JSON string value
|
||||||
- Newlines and markdown formatting (headings, lists, etc.) must be preserved as-is
|
- Newlines and markdown formatting (headings, lists, etc.) must be preserved as-is
|
||||||
|
|
||||||
### Close PR
|
### Close PR (ops repo)
|
||||||
```
|
```
|
||||||
PATCH /repos/{owner}/{repo}/pulls/{index}
|
PATCH /repos/{owner}/{repo-ops}/pulls/{index}
|
||||||
Body: {"state": "closed"}
|
Body: {"state": "closed"}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Delete branch
|
### Delete branch (ops repo)
|
||||||
```
|
```
|
||||||
DELETE /repos/{owner}/{repo}/git/branches/<branch-name>
|
DELETE /repos/{owner}/{repo-ops}/git/branches/<branch-name>
|
||||||
```
|
```
|
||||||
|
|
||||||
### Get labels (look up label IDs by name)
|
### Read-only on project repo (context gathering)
|
||||||
```
|
```
|
||||||
GET /repos/{owner}/{repo}/labels
|
GET /repos/{owner}/{repo}/issues — list issues
|
||||||
```
|
GET /repos/{owner}/{repo}/issues/{number} — read issue details
|
||||||
|
GET /repos/{owner}/{repo}/labels — list labels
|
||||||
### Add label to issue (for in-progress on vision issue)
|
GET /repos/{owner}/{repo}/pulls — list PRs
|
||||||
```
|
|
||||||
POST /repos/{owner}/{repo}/issues/{index}/labels
|
|
||||||
Body: {"labels": [<label-id>]}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Remove label from issue (for in-progress removal on REJECT)
|
|
||||||
```
|
|
||||||
DELETE /repos/{owner}/{repo}/issues/{index}/labels/{label-id}
|
|
||||||
```
|
```
|
||||||
"""
|
"""
|
||||||
|
|
|
||||||
|
|
@ -177,7 +177,7 @@ DUST (trivial — single-line edit, rename, comment, style, whitespace):
|
||||||
|
|
||||||
VAULT (needs human decision or external resource):
|
VAULT (needs human decision or external resource):
|
||||||
File a vault procurement item using vault_request():
|
File a vault procurement item using vault_request():
|
||||||
source "$(dirname "$0")/../lib/vault.sh"
|
source "$(dirname "$0")/../lib/action-vault.sh"
|
||||||
TOML_CONTENT="# Vault action: <action_id>
|
TOML_CONTENT="# Vault action: <action_id>
|
||||||
context = \"<description of what decision/resource is needed>\"
|
context = \"<description of what decision/resource is needed>\"
|
||||||
unblocks = [\"#NNN\"]
|
unblocks = [\"#NNN\"]
|
||||||
|
|
|
||||||
|
|
@ -243,7 +243,7 @@ needs = ["preflight"]
|
||||||
|
|
||||||
[[steps]]
|
[[steps]]
|
||||||
id = "commit-ops-changes"
|
id = "commit-ops-changes"
|
||||||
title = "Write tree, memory, and journal; commit and push"
|
title = "Write tree, memory, and journal; commit and push branch"
|
||||||
description = """
|
description = """
|
||||||
### 1. Write prerequisite tree
|
### 1. Write prerequisite tree
|
||||||
Write to: $OPS_REPO_ROOT/prerequisites.md
|
Write to: $OPS_REPO_ROOT/prerequisites.md
|
||||||
|
|
@ -256,14 +256,16 @@ If (count - N) >= 5 or planner-memory.md missing, write to:
|
||||||
Include: run counter marker, date, constraint focus, patterns, direction.
|
Include: run counter marker, date, constraint focus, patterns, direction.
|
||||||
Keep under 100 lines. Replace entire file.
|
Keep under 100 lines. Replace entire file.
|
||||||
|
|
||||||
### 3. Commit ops repo changes
|
### 3. Commit ops repo changes to the planner branch
|
||||||
Commit the ops repo changes (prerequisites, memory, vault items):
|
Commit the ops repo changes (prerequisites, memory, vault items) and push the
|
||||||
|
branch. Do NOT push directly to $PRIMARY_BRANCH — planner-run.sh will create a
|
||||||
|
PR and walk it to merge via review-bot.
|
||||||
cd "$OPS_REPO_ROOT"
|
cd "$OPS_REPO_ROOT"
|
||||||
git add prerequisites.md knowledge/planner-memory.md vault/pending/
|
git add prerequisites.md knowledge/planner-memory.md vault/pending/
|
||||||
git add -u
|
git add -u
|
||||||
if ! git diff --cached --quiet; then
|
if ! git diff --cached --quiet; then
|
||||||
git commit -m "chore: planner run $(date -u +%Y-%m-%d)"
|
git commit -m "chore: planner run $(date -u +%Y-%m-%d)"
|
||||||
git push origin "$PRIMARY_BRANCH"
|
git push origin HEAD
|
||||||
fi
|
fi
|
||||||
cd "$PROJECT_REPO_ROOT"
|
cd "$PROJECT_REPO_ROOT"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -125,8 +125,8 @@ For each weakness you identify, choose one:
|
||||||
The prediction explains the theory. The vault PR triggers the proof
|
The prediction explains the theory. The vault PR triggers the proof
|
||||||
after human approval. When the planner runs next, evidence is already there.
|
after human approval. When the planner runs next, evidence is already there.
|
||||||
|
|
||||||
Vault dispatch (requires lib/vault.sh):
|
Vault dispatch (requires lib/action-vault.sh):
|
||||||
source "$PROJECT_REPO_ROOT/lib/vault.sh"
|
source "$PROJECT_REPO_ROOT/lib/action-vault.sh"
|
||||||
|
|
||||||
TOML_CONTENT="id = \"predict-<prediction_number>-<formula>\"
|
TOML_CONTENT="id = \"predict-<prediction_number>-<formula>\"
|
||||||
context = \"Test prediction #<prediction_number>: <theory summary> — focus: <specific test>\"
|
context = \"Test prediction #<prediction_number>: <theory summary> — focus: <specific test>\"
|
||||||
|
|
@ -154,7 +154,7 @@ tea is pre-configured with login "$TEA_LOGIN" and repo "$FORGE_REPO".
|
||||||
--title "<title>" --body "<body>" --labels "prediction/unreviewed"
|
--title "<title>" --body "<body>" --labels "prediction/unreviewed"
|
||||||
|
|
||||||
2. Dispatch formula via vault (if exploiting):
|
2. Dispatch formula via vault (if exploiting):
|
||||||
source "$PROJECT_REPO_ROOT/lib/vault.sh"
|
source "$PROJECT_REPO_ROOT/lib/action-vault.sh"
|
||||||
PR_NUM=$(vault_request "predict-NNN-<formula>" "$TOML_CONTENT")
|
PR_NUM=$(vault_request "predict-NNN-<formula>" "$TOML_CONTENT")
|
||||||
# See EXPLOIT section above for TOML_CONTENT format
|
# See EXPLOIT section above for TOML_CONTENT format
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: c51cc9dba649ed543b910b561231a5c8bd2130bc -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Gardener Agent
|
# Gardener Agent
|
||||||
|
|
||||||
**Role**: Backlog grooming — detect duplicate issues, missing acceptance
|
**Role**: Backlog grooming — detect duplicate issues, missing acceptance
|
||||||
|
|
@ -32,7 +32,7 @@ the gardener runs as part of the polling loop alongside the planner, predictor,
|
||||||
PR, reviewed alongside AGENTS.md changes, executed by gardener-run.sh after merge.
|
PR, reviewed alongside AGENTS.md changes, executed by gardener-run.sh after merge.
|
||||||
|
|
||||||
**Environment variables consumed**:
|
**Environment variables consumed**:
|
||||||
- `FORGE_TOKEN`, `FORGE_GARDENER_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`
|
- `FORGE_TOKEN`, `FORGE_GARDENER_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`. `FORGE_TOKEN_OVERRIDE` is exported to `$FORGE_GARDENER_TOKEN` before sourcing env.sh so the gardener-bot identity survives re-sourcing (#762).
|
||||||
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by gardener-run.sh)
|
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by gardener-run.sh)
|
||||||
|
|
||||||
**Lifecycle**: gardener-run.sh (invoked by polling loop every 6h, `check_active gardener`) →
|
**Lifecycle**: gardener-run.sh (invoked by polling loop every 6h, `check_active gardener`) →
|
||||||
|
|
|
||||||
|
|
@ -26,10 +26,11 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
|
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
||||||
|
# env.sh from nested shells / claude -p tools (#762, #747)
|
||||||
|
export FORGE_TOKEN_OVERRIDE="${FORGE_GARDENER_TOKEN:-}"
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
# Use gardener-bot's own Forgejo identity (#747)
|
|
||||||
FORGE_TOKEN="${FORGE_GARDENER_TOKEN:-${FORGE_TOKEN}}"
|
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
|
||||||
|
|
@ -1,37 +1,12 @@
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"action": "edit_body",
|
"action": "comment",
|
||||||
"issue": 707,
|
"issue": 623,
|
||||||
"body": "## Goal\n\nGive `disinto-chat` its own Claude identity mount so its OAuth refresh races cannot corrupt the factory agents' shared `~/.claude` credentials. Default to a separate `~/.claude-chat/` on the host; support `ANTHROPIC_API_KEY` as a fallback that skips OAuth entirely.\n\n## Why\n\n- #623 root-caused this: Claude Code's internal refresh lock in `~/.claude.lock` operates outside bind-mounted directories, so two containers sharing `~/.claude` can race during token refresh and invalidate each other. The factory has already had OAuth expiry incidents traced to multiple agents sharing credentials.\n- Scoping chat to its own identity dir means chat can be logged in as a different Anthropic account, or pinned to an API key, without touching agent credentials.\n\n## Scope\n\n### Files to touch\n\n- `lib/generators.sh` chat service block (from #705):\n - Replace the throwaway named volume with `${CHAT_CLAUDE_DIR:-${HOME}/.claude-chat}:/home/chat/.claude-chat`.\n - Env: `CLAUDE_CONFIG_DIR=/home/chat/.claude-chat/config`, `CLAUDE_CREDENTIALS_DIR=/home/chat/.claude-chat/config/credentials`.\n - Conditional: if `ANTHROPIC_API_KEY` is set in `.env`, pass it through and **do not** mount `~/.claude-chat` at all (no credentials on disk in that mode).\n- `bin/disinto disinto_init()` — after #620's admin password prompt, add an optional prompt: `Use separate Anthropic identity for chat? (y/N)`. On yes, create `~/.claude-chat/` and invoke `claude login` in a subshell with `CLAUDE_CONFIG_DIR=~/.claude-chat/config`.\n- `lib/claude-config.sh` — factor out the existing `~/.claude` setup logic so a non-default `CLAUDE_CONFIG_DIR` is a first-class parameter. 
If it is already parameterised, just document it; if not, extract a helper `setup_claude_dir <dir>` and have the existing path call it with the default dir.\n- `docker/chat/Dockerfile` — declare `VOLUME /home/chat/.claude-chat`, set owner to the non-root chat user introduced in #706.\n\n### Out of scope\n\n- Cross-session lock coherence for multiple concurrent chat containers (single-chat-container assumption is fine for MVP).\n- Anthropic team / workspace support — single identity is enough.\n\n## Acceptance\n\n- [ ] Fresh `disinto init` with \"use separate chat identity\" answered yes creates `~/.claude-chat/` and logs in successfully.\n- [ ] With `ANTHROPIC_API_KEY=sk-ant-...` set in `.env`, chat starts without any `~/.claude-chat` mount (verified via `docker inspect disinto-chat`) and successfully completes a test prompt.\n- [ ] Running the factory agents AND chat simultaneously for 24h does not produce any OAuth refresh failures on either side (manual soak test — document result in PR).\n- [ ] `CLAUDE_CONFIG_DIR` and `CLAUDE_CREDENTIALS_DIR` inside the chat container resolve to `/home/chat/.claude-chat/config*`, not the shared factory path.\n\n## Depends on\n\n- #705 (chat scaffold).\n- #620 (admin password prompt — same init flow this adds a step to).\n\n## Notes\n\n- The factory's existing shared mount is `/var/lib/disinto/claude-shared` (see `lib/generators.sh:113,327,381,426`). Chat must NOT use this path.\n- `flock(\"${HOME}/.claude/session.lock\")` logic mentioned in #623 is load-bearing, not redundant — do not \"simplify\" it.\n- Prefer the API-key path for anyone running the factory on shared hardware; call this out in README updates.\n\n## Boundaries for dev-agent\n\n- Do not try to make chat share `~/.claude` with the agents \"just for convenience\". The whole point of this chunk is the opposite.\n- Do not add a third claude config dir. 
One for agents, one for chat, done.\n- Do not refactor `lib/claude-config.sh` beyond extracting a parameterised helper if needed.\n- Parent vision: #623.\n\n## Affected files\n- `lib/generators.sh` — chat service bind mount and env vars for CLAUDE_CONFIG_DIR\n- `bin/disinto` — disinto_init() prompt for separate chat identity\n- `lib/claude-config.sh` — factor out claude dir setup (new helper setup_claude_dir)\n- `docker/chat/Dockerfile` — declare VOLUME /home/chat/.claude-chat"
|
"body": "**Dependency check:** All blocking dependencies are now closed:\n- #620 ✓ closed\n- #621 ✓ closed \n- #622 ✓ closed\n\nPer the issue description: *\"Once #620/#621/#622 are green, this issue should fork into at least three backlog children: subpath routing + Forgejo ROOT_URL / Woodpecker HOST, disinto-chat container scaffold with OAuth gate, and Claude Code sandbox envelope + working-dir scoping.\"*\n\nThis vision issue is ready for the planner to decompose into backlog children."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"action": "edit_body",
|
"action": "comment",
|
||||||
"issue": 708,
|
"issue": 758,
|
||||||
"body": "## Goal\n\nGate `/chat/*` behind Forgejo OAuth. Register a second OAuth2 app on the internal Forgejo (alongside the existing Woodpecker one), implement the server-side authorization-code flow in the chat backend, and enforce that the logged-in user is `disinto-admin` (or a member of a configured allowlist).\n\n## Why\n\n- #623: the single `disinto-admin` password (bootstrap secret from #620) is the only auth credential; chat must reuse it via Forgejo OAuth, not invent a second password.\n- Keeps attack surface flat: exactly one identity provider for forge, CI, and chat.\n\n## Scope\n\n### Files to touch\n\n- `lib/ci-setup.sh` — generalise the Woodpecker OAuth-app creation helper `_create_woodpecker_oauth_impl()` (at `lib/ci-setup.sh:96`) into `_create_forgejo_oauth_app <name> <redirect_uri>` that both Woodpecker and chat can call. The existing Woodpecker callsite becomes a thin wrapper; no behaviour change for Woodpecker.\n- `bin/disinto disinto_init()` — after Woodpecker OAuth creation (around `bin/disinto:847`), add a call to create the chat OAuth app with redirect URI `https://${EDGE_TUNNEL_FQDN}/chat/oauth/callback`. 
Write `CHAT_OAUTH_CLIENT_ID` / `CHAT_OAUTH_CLIENT_SECRET` to `.env`.\n- `docker/chat/server.{py,go}` — new routes:\n - `GET /chat/login` → 302 to Forgejo `/login/oauth/authorize?client_id=...&state=...`.\n - `GET /chat/oauth/callback` → exchange code for token, fetch `/api/v1/user`, assert `login == \"disinto-admin\"` (or `DISINTO_CHAT_ALLOWED_USERS` CSV if set), set an `HttpOnly` session cookie, 302 to `/chat/`.\n - Any other route with no valid session → 302 to `/chat/login`.\n - Session store: in-memory map keyed by random token; TTL 24h; no persistence across container restarts (by design — forces re-auth after deploy).\n- `lib/generators.sh` chat service env: pass `FORGE_URL`, `CHAT_OAUTH_CLIENT_ID`, `CHAT_OAUTH_CLIENT_SECRET`, `EDGE_TUNNEL_FQDN`, `DISINTO_CHAT_ALLOWED_USERS`.\n\n### Out of scope\n\n- Defense-in-depth `Remote-User` header check — #709.\n- Team membership lookup via Forgejo API — start with a plain user allowlist, add teams later.\n- CSRF protection on `POST /chat` beyond the session cookie — add only if soak testing reveals a need.\n\n## Acceptance\n\n- [ ] `disinto init` on a fresh project creates TWO OAuth apps on Forgejo (woodpecker + chat), both visible in Forgejo admin UI.\n- [ ] `curl -c cookies -L http://edge/chat/` follows through the OAuth flow when seeded with `disinto-admin` credentials and lands on the chat UI.\n- [ ] Same flow as a non-admin Forgejo user lands on a \"not authorised\" page with a 403.\n- [ ] Expired / missing session cookie redirects to `/chat/login`.\n- [ ] `DISINTO_CHAT_ALLOWED_USERS=alice,bob` permits those users in addition to `disinto-admin`.\n\n## Depends on\n\n- #705 (chat scaffold).\n- #620 (admin password prompt — the password this auth leans on).\n\n## Notes\n\n- The existing Woodpecker OAuth exchange at `lib/ci-setup.sh:273-294` is the reference for the server-side code→token exchange. 
Read it before implementing; do not guess the Forgejo OAuth flow from docs.\n- Forgejo OAuth2 endpoints are `/login/oauth/authorize` and `/login/oauth/access_token`. Do not hit `/api/v1/...` for OAuth — that is the REST API, not the OAuth endpoints.\n- Session cookie must be `Secure; HttpOnly; SameSite=Lax` in prod; permit non-secure in local dev when `EDGE_TUNNEL_FQDN` is unset.\n\n## Boundaries for dev-agent\n\n- Do not add a new OAuth library dependency if the standard library of the chosen backend language has HTTP client + JSON — the flow is two HTTP calls.\n- Do not reuse the Woodpecker OAuth app. Create a second one with a distinct name and redirect URI. They are different principals.\n- Do not persist sessions to disk. In-memory is correct for MVP; persistence is a separate conversation.\n- Do not implement team membership this chunk. Static allowlist first; teams later.\n- Parent vision: #623.\n\n## Affected files\n- `lib/ci-setup.sh` — generalise _create_woodpecker_oauth_impl() into _create_forgejo_oauth_app helper\n- `bin/disinto` — disinto_init() to create chat OAuth app and write CHAT_OAUTH_CLIENT_ID/SECRET to .env\n- `docker/chat/server.{py,go}` — new OAuth routes: /chat/login, /chat/oauth/callback, session middleware\n- `lib/generators.sh` — chat service env vars: FORGE_URL, CHAT_OAUTH_CLIENT_ID, CHAT_OAUTH_CLIENT_SECRET, EDGE_TUNNEL_FQDN, DISINTO_CHAT_ALLOWED_USERS"
|
"body": "**Gardener flag:** This issue requires human admin action on Forgejo to resolve — changing branch protection settings on the ops repo. No automated formula can fix Forgejo admin settings.\n\nProposed options (from issue body):\n1. Add `planner-bot` to the merge whitelist in ops repo branch protection\n2. Remove branch protection from the ops repo (agents are primary writers)\n3. Create an admin-level service token for agents\n\nThis is blocking all ops repo writes (planner knowledge, sprint artifacts, vault items)."
|
||||||
},
|
|
||||||
{
|
|
||||||
"action": "edit_body",
|
|
||||||
"issue": 709,
|
|
||||||
"body": "## Goal\n\nAdd a second, independent auth check on top of #708: Caddy injects an `X-Forwarded-User` header from the validated Forgejo session, and the chat backend refuses any request whose session cookie disagrees with the header. This is the belt to #708's braces.\n\n## Why\n\n- #623 explicitly calls this out as defense-in-depth. If the chat backend session logic has a bug (forged cookie, state confusion), a correctly-configured Caddy `forward_auth` layer catches it — and vice versa.\n- Cheap to add on top of #704 and #708; expensive to bolt on after an incident.\n\n## Scope\n\n### Files to touch\n\n- `docker/Caddyfile` — the `/chat/*` block:\n - Add `forward_auth chat:8080 { uri /chat/auth/verify; copy_headers X-Forwarded-User }`.\n - Requests without a valid session are forwarded to `/chat/login` by chat itself; `forward_auth` just stamps the header when there is one.\n- `docker/chat/server.{py,go}`:\n - New route `GET /chat/auth/verify` — reads the session cookie, returns 200 + `X-Forwarded-User: <login>` if valid, 401 otherwise.\n - On `POST /chat` and other authenticated routes: read `X-Forwarded-User`, read the session cookie, assert both resolve to the same user. On mismatch: log a warning with the request ID and return 403.\n\n### Out of scope\n\n- Rewriting the session store. 
The verify endpoint reads the same in-memory map #708 introduced.\n\n## Acceptance\n\n- [ ] `curl http://edge/chat/` with a valid session cookie still works; chat backend logs show `X-Forwarded-User` matching the cookie user.\n- [ ] Editing the session cookie client-side to impersonate another user while keeping the forged cookie valid triggers a 403 with a clear log line (simulate by swapping cookies mid-session).\n- [ ] Removing the `forward_auth` block from Caddyfile and restarting causes the chat backend to fail-closed (all authenticated routes 403) — documented as the intended failure mode.\n- [ ] The verify endpoint does not accept arbitrary external requests from outside Caddy: the chat backend rejects calls to `/chat/auth/verify` that lack a shared-secret header (or whose origin IP is not the edge container).\n\n## Depends on\n\n- #704 (Caddy subpath routing).\n- #708 (chat OAuth gate — provides the session store this chunk reads).\n\n## Notes\n\n- Caddy `forward_auth` reference: https://caddyserver.com/docs/caddyfile/directives/forward_auth — stick to the documented directives, do not hand-roll header passing.\n- If network-level origin validation on `/chat/auth/verify` is fiddly, a shared-secret header between Caddy and chat is acceptable — but prefer network-level if possible.\n\n## Boundaries for dev-agent\n\n- Do not replace the #708 session store with something new. Read it, do not rewrite it.\n- Do not push the entire auth decision into Caddy. The chat backend is still the source of truth; Caddy adds a redundant check.\n- Parent vision: #623.\n\n## Affected files\n- `docker/Caddyfile` — add forward_auth block to /chat/* with X-Forwarded-User header copy\n- `docker/chat/server.{py,go}` — new GET /chat/auth/verify route; X-Forwarded-User vs session cookie check on authenticated routes"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"action": "edit_body",
|
|
||||||
"issue": 710,
|
|
||||||
"body": "## Goal\n\nDecide and implement a conversation history persistence model for `disinto-chat`. MVP target: append-only per-user NDJSON files on a bind-mounted host volume, one file per conversation, with a simple history list endpoint and sidebar in the UI.\n\n## Why\n\n- Without history, every page refresh loses context. Claude is stateless per invocation; the chat UI is what makes it feel like a conversation.\n- A full database with search is overkill for a personal / small-team factory (#623 security posture). Flat files are enough and recoverable by `cat`.\n\n## Scope\n\n### Files to touch\n\n- `lib/generators.sh` chat service:\n - Add a writable bind mount `${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history` (one per-project host path; compose already pins the project root).\n - Must coexist with #706's read-only rootfs (this is a separate mount, not part of rootfs — sanity-check the sandbox verify script still passes).\n- `docker/chat/server.{py,go}`:\n - On each `POST /chat`, append one NDJSON line `{ts, user, role, content}` to `/var/lib/chat/history/<user>/<conversation_id>.ndjson`.\n - `GET /chat/history` → returns the list of conversation ids and first-message previews for the logged-in user.\n - `GET /chat/history/<id>` → returns the full conversation for the logged-in user; 404 if the file belongs to another user.\n - New conversation: `POST /chat/new` → generates a fresh conversation_id (random 12-char hex) and returns it.\n - UI: sidebar with conversation list, \"new chat\" button, load history into the log on click.\n- File naming: `<user>/<conversation_id>.ndjson` — user-scoped directory prevents cross-user leakage even if a bug leaks ids. 
`conversation_id` must match `^[0-9a-f]{12}$`, no slashes allowed.\n\n### Out of scope\n\n- Full-text search.\n- Database / SQLite.\n- History retention / rotation — unbounded for now.\n\n### In scope explicitly\n\n- Replaying prior turns back into the `claude --print` subprocess for follow-up turns: the backend must feed the prior NDJSON lines back into claude via whatever convention the agent code uses. Cross-check `docker/agents/entrypoint.sh` for how agents pass conversation state.\n\n## Acceptance\n\n- [ ] Sending 3 messages, refreshing the page, and clicking the conversation in the sidebar re-loads all 3 messages.\n- [ ] A new conversation starts with an empty context and does not see prior messages.\n- [ ] `ls state/chat-history/disinto-admin/` on the host shows one NDJSON file per conversation, each line is valid JSON.\n- [ ] A second user logging in via the #708 allowlist sees only their own conversations.\n- [ ] History endpoints are blocked for unauthenticated requests (inherits #708 / #709 auth).\n\n## Depends on\n\n- #705 (chat scaffold).\n\n## Notes\n\n- NDJSON, not JSON-array: append is O(1) and partial writes never corrupt prior lines. Mirrors the factory's CI log format at `lib/ci-log-reader.py`.\n- Per-user directory, not a single shared dir — path traversal via a crafted `conversation_id` is the main risk. The strict regex above is the mitigation.\n\n## Boundaries for dev-agent\n\n- Do not add SQLite, Postgres, or any database. Files.\n- Do not invent a conversation replay system. Whatever `claude --print` / the agents already do for context is the baseline — match it.\n- Do not store history inside the container's tmpfs — it has to survive container restarts.\n- Parent vision: #623.\n\n## Affected files\n- `lib/generators.sh` — chat service bind mount CHAT_HISTORY_DIR:/var/lib/chat/history\n- `docker/chat/server.{py,go}` — NDJSON append on POST /chat; GET /chat/history; GET /chat/history/<id>; POST /chat/new"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"action": "edit_body",
|
|
||||||
"issue": 711,
|
|
||||||
"body": "## Goal\n\nAdd per-user cost and request caps to `disinto-chat` so a compromised session (or a wedged browser tab firing requests in a loop) cannot run up an unbounded Anthropic bill or starve the agents' token budget.\n\n## Why\n\n- #623 \"Open questions\" explicitly calls this out. Chat is the only user-facing surface that spawns Claude on demand; no other factory surface does.\n- Cheap to enforce (counter + bash-style dict), expensive to forget.\n\n## Scope\n\n### Files to touch\n\n- `docker/chat/server.{py,go}`:\n - Per-user sliding-window request counter: `CHAT_MAX_REQUESTS_PER_HOUR` (default `60`), `CHAT_MAX_REQUESTS_PER_DAY` (default `500`).\n - Per-user token-cost counter: after each `claude --print`, parse the final `usage` event from `--output-format stream-json` if present; track cumulative tokens per day; reject if over `CHAT_MAX_TOKENS_PER_DAY` (default `1000000`).\n - Counters stored in-memory; reset on container restart (acceptable for MVP; file-based persistence is a follow-up).\n - Rejection response: 429 with `Retry-After` header and a friendly HTMX fragment explaining which cap was hit.\n- `lib/generators.sh` chat env: expose the three caps as overridable env vars with sane defaults baked in.\n\n### Out of scope\n\n- Billing dashboard.\n- Cross-container token budget coordination with the agents.\n- Cost tracking via Anthropic's billing API (not stable enough to depend on).\n\n## Acceptance\n\n- [ ] Sending 61 requests in an hour trips the hourly cap and returns 429 with `Retry-After: <seconds>`.\n- [ ] A single large completion that pushes daily tokens over the cap blocks the *next* request, not the current one (atomic check-then-consume is OK to skip for MVP).\n- [ ] Resetting the container clears counters (verified manually).\n- [ ] Caps are configurable via `.env` without rebuilding the image.\n\n## Depends on\n\n- #705 (chat scaffold).\n\n## Notes\n\n- Token accounting from `claude --print`: the stream-json mode emits a final 
`usage` event. If that event is absent or its format changes, fall back to a coarse request count only — do not block the user on parsing failures.\n- `Retry-After` must be an integer seconds value, not an HTTP-date, for HTMX to handle it cleanly client-side.\n\n## Boundaries for dev-agent\n\n- Do not add a rate-limiting library. A dict + timestamp list is sufficient for three counters.\n- Do not persist counters to disk this chunk. In-memory is the contract.\n- Do not block requests on Anthropic's own rate limiter. That is retried by `claude` itself; this layer is about *cost*, not throttling.\n- Parent vision: #623.\n\n## Affected files\n- `docker/chat/server.{py,go}` — per-user sliding-window request counter and token-cost counter; 429 rejection with Retry-After header\n- `lib/generators.sh` — chat env: CHAT_MAX_REQUESTS_PER_HOUR, CHAT_MAX_REQUESTS_PER_DAY, CHAT_MAX_TOKENS_PER_DAY"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"action": "edit_body",
|
|
||||||
"issue": 712,
|
|
||||||
"body": "## Goal\n\nLet `disinto-chat` perform scoped write actions against the factory — specifically: trigger a Woodpecker CI run, create a Forgejo issue, create a Forgejo PR — via explicit backend endpoints. The UI surfaces these as buttons the user clicks from a chat turn that proposes an action. The model never holds API tokens directly.\n\n## Why\n\n- #623 lists these escalations as the difference between \"chat that talks about the project\" and \"chat that moves the project forward\".\n- Routing through explicit backend endpoints (instead of giving the sandboxed claude process API tokens) keeps the trust model tight: the *user* authorises each action, not the model.\n\n## Scope\n\n### Files to touch\n\n- `docker/chat/server.{py,go}` — new authenticated endpoints (reuse #708 / #709 session check):\n - `POST /chat/action/ci-run` — body `{repo, branch}` → calls Woodpecker API with `WOODPECKER_TOKEN` (already in `.env` from existing factory setup) to trigger a pipeline.\n - `POST /chat/action/issue-create` — body `{title, body, labels}` → calls Forgejo API `/repos/<owner>/<repo>/issues` with `FORGE_TOKEN`.\n - `POST /chat/action/pr-create` — body `{head, base, title, body}` → calls `/repos/<owner>/<repo>/pulls`.\n - All actions record to #710's NDJSON history as `{role: \"action\", ...}` lines.\n- `docker/chat/ui/index.html` — small HTMX pattern: when claude's response contains a marker like `<action type=\"issue-create\">{...}</action>`, render a clickable button below the message; clicking POSTs to `/chat/action/<type>` with the payload.\n- `lib/generators.sh` chat env: pass `WOODPECKER_TOKEN`, `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OWNER`, `FORGE_REPO`.\n\n### Out of scope\n\n- Destructive actions (branch delete, force push, secret rotation) — deliberately excluded.\n- Multi-step workflows / approval chains.\n- Arbitrary code execution in the chat container (that is what the agents exist for).\n\n## Acceptance\n\n- [ ] A chat turn that emits an `<action 
type=\"issue-create\">{...}</action>` block renders a button; clicking it creates an issue on Forgejo, visible via the API.\n- [ ] CI-trigger action creates a Woodpecker pipeline that can be seen in the CI UI.\n- [ ] PR-create action produces a Forgejo PR with the specified head / base.\n- [ ] All three actions are logged into the #710 history file with role `action` and the response from the API call.\n- [ ] Unauthenticated requests to `/chat/action/*` return 401 (inherits #708 gate).\n\n## Depends on\n\n- #708 (OAuth gate — actions are authorised by the logged-in user).\n- #710 (history — actions need to be logged alongside chat turns).\n\n## Notes\n\n- Forgejo API auth: the factory's `FORGE_TOKEN` is a long-lived admin token. For MVP, reuse it; a follow-up issue can scope it down to per-user Forgejo tokens derived from the OAuth flow.\n- Woodpecker API is at `http://woodpecker:8000/api/...`, reachable via the compose network — no need to go through the edge container.\n- The `<action>` marker is deliberately simple markup the model can emit in its response text. Do not implement tool-calling protocol; do not spin up an MCP server.\n\n## Boundaries for dev-agent\n\n- Do not give the claude subprocess direct API tokens. The chat backend holds them; the model only emits action markers the user clicks.\n- Do not add destructive actions (delete, force-push). Additive only.\n- Do not invent a new markup format beyond `<action type=\"...\">{JSON}</action>`.\n- Parent vision: #623.\n\n## Affected files\n- `docker/chat/server.{py,go}` — new endpoints: POST /chat/action/ci-run, POST /chat/action/issue-create, POST /chat/action/pr-create\n- `docker/chat/ui/index.html` — HTMX pattern for action buttons triggered by <action type=\"...\"> markers\n- `lib/generators.sh` — chat env: WOODPECKER_TOKEN, FORGE_TOKEN, FORGE_URL, FORGE_OWNER, FORGE_REPO"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"action": "edit_body",
|
|
||||||
"issue": 713,
|
|
||||||
"body": "## Goal\n\nContingency track: if the subpath routing + Forgejo OAuth combination from #704 and #708 proves unworkable (redirect loops, Forgejo `ROOT_URL` quirks, etc.), provide a documented fallback using per-service subdomains (`forge.<project>.disinto.ai`, `ci.<project>.disinto.ai`, `chat.<project>.disinto.ai`) under the same wildcard cert.\n\n## Why\n\n- #623 Scope Highlights mentions this as the fallback if subpath OAuth fails.\n- Documenting the fallback up front means we can pivot without a days-long investigation when the subpath approach hits a wall.\n- The wildcard cert from #621 already covers `*.disinto.ai` at no extra cost.\n\n## Scope\n\nThis issue is a **plan + small toggle**, not a full implementation. Implementation only happens if #704 or #708 get stuck.\n\n### Files to touch\n\n- `docs/edge-routing-fallback.md` (new) — documents the fallback topology, diffing concretely against #704 / #708:\n - Caddyfile: four separate host blocks (`<project>.disinto.ai`, `forge.<project>...`, `ci.<project>...`, `chat.<project>...`), each a single `reverse_proxy` to the container.\n - Forgejo `ROOT_URL` becomes `https://forge.<project>.disinto.ai/` (root path, not subpath).\n - Woodpecker `WOODPECKER_HOST` becomes `https://ci.<project>.disinto.ai`.\n - OAuth redirect URIs (chat, woodpecker) become sub-subdomain paths.\n - DNS: all handled by the existing wildcard; no new records.\n- `lib/generators.sh` — no code change until pivot; document the env vars that would need to change (`EDGE_TUNNEL_FQDN_FORGE`, etc.) 
in a comment near `generate_compose`.\n- `tools/edge-control/register.sh` (from #621) — leave a TODO comment noting the fallback shape would need an additional subdomain parameter per project.\n\n### Out of scope (unless pivot)\n\n- Actually implementing the fallback — gated on #704 / #708 failing.\n\n## Acceptance\n\n- [ ] `docs/edge-routing-fallback.md` exists and is concrete enough that a follow-up PR to pivot would take under a day.\n- [ ] The doc names exactly which files / lines each pivot would touch (Caddyfile, `lib/generators.sh`, `lib/ci-setup.sh` redirect URI).\n- [ ] A pivot decision criterion is written into the doc: \"pivot if <specific symptom>, not if <symptom with a known fix>\".\n\n## Depends on\n\n- None — can be written in parallel to #704 / #708.\n\n## Notes\n\n- Keep the doc short. This is a pressure-release valve, not a parallel architecture.\n- Whichever chunk is implementing subpaths first should update this doc if they hit a blocker so the pivot decision is informed.\n\n## Boundaries for dev-agent\n\n- This is a documentation chunk. Do not implement the fallback unless someone explicitly says to pivot.\n- Do not make the main chunks \"fallback-ready\" — that is over-engineering for a contingency.\n- Parent vision: #623.\n\n## Affected files\n- `docs/edge-routing-fallback.md` (new) — fallback topology doc with Caddyfile, Forgejo ROOT_URL, Woodpecker HOST, OAuth redirect URI changes\n- `lib/generators.sh` — comment near generate_compose documenting env vars that change on pivot\n- `tools/edge-control/register.sh` — TODO comment noting fallback shape needs additional subdomain parameter"
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Shared Helpers (`lib/`)
|
# Shared Helpers (`lib/`)
|
||||||
|
|
||||||
All agents source `lib/env.sh` as their first action. Additional helpers are
|
All agents source `lib/env.sh` as their first action. Additional helpers are
|
||||||
|
|
@ -6,7 +6,7 @@ sourced as needed.
|
||||||
|
|
||||||
| File | What it provides | Sourced by |
|
| File | What it provides | Sourced by |
|
||||||
|---|---|---|
|
|---|---|---|
|
||||||
| `lib/env.sh` | Loads `.env`, sets `FACTORY_ROOT`, exports project config (`FORGE_REPO`, `PROJECT_NAME`, etc.), defines `log()`, `forge_api()`, `forge_api_all()` (paginates all pages; accepts optional second TOKEN parameter, defaults to `$FORGE_TOKEN`; handles invalid/empty JSON responses gracefully — returns empty on parse error instead of crashing), `woodpecker_api()`, `wpdb()`, `memory_guard()` (skips agent if RAM < threshold). Auto-loads project TOML if `PROJECT_TOML` is set. Exports per-agent tokens (`FORGE_PLANNER_TOKEN`, `FORGE_GARDENER_TOKEN`, `FORGE_VAULT_TOKEN`, `FORGE_SUPERVISOR_TOKEN`, `FORGE_PREDICTOR_TOKEN`) — each falls back to `$FORGE_TOKEN` if not set. **Vault-only token guard (AD-006)**: `unset GITHUB_TOKEN CLAWHUB_TOKEN` so agents never hold external-action tokens — only the runner container receives them. **Container note**: when `DISINTO_CONTAINER=1`, `.env` is NOT re-sourced — compose already injects env vars (including `FORGE_URL=http://forgejo:3000`) and re-sourcing would clobber them. **Save/restore scope (#364)**: only `FORGE_URL` is preserved across `.env` re-sourcing (compose injects `http://forgejo:3000`, `.env` has `http://localhost:3000`). `FORGE_TOKEN` is NOT preserved so refreshed tokens in `.env` take effect immediately. **Required env var**: `FORGE_PASS` — bot password for git HTTP push (Forgejo 11.x rejects API tokens for `git push`, #361). **Hard preconditions (#674)**: `USER` and `HOME` must be exported by the entrypoint before sourcing. When `PROJECT_TOML` is set, `PROJECT_REPO_ROOT`, `PRIMARY_BRANCH`, and `OPS_REPO_ROOT` must also be set (by entrypoint or TOML). | Every agent |
|
| `lib/env.sh` | Loads `.env`, sets `FACTORY_ROOT`, exports project config (`FORGE_REPO`, `PROJECT_NAME`, etc.), defines `log()`, `forge_api()`, `forge_api_all()` (paginates all pages; accepts optional second TOKEN parameter, defaults to `$FORGE_TOKEN`; handles invalid/empty JSON responses gracefully — returns empty on parse error instead of crashing), `woodpecker_api()`, `wpdb()`, `memory_guard()` (skips agent if RAM < threshold), `load_secret()` (secret-source abstraction — see below). Auto-loads project TOML if `PROJECT_TOML` is set. Exports per-agent tokens (`FORGE_PLANNER_TOKEN`, `FORGE_GARDENER_TOKEN`, `FORGE_VAULT_TOKEN`, `FORGE_SUPERVISOR_TOKEN`, `FORGE_PREDICTOR_TOKEN`) — each falls back to `$FORGE_TOKEN` if not set. **Vault-only token guard (AD-006)**: `unset GITHUB_TOKEN CLAWHUB_TOKEN` so agents never hold external-action tokens — only the runner container receives them. **Container note**: when `DISINTO_CONTAINER=1`, `.env` is NOT re-sourced — compose already injects env vars (including `FORGE_URL=http://forgejo:3000`) and re-sourcing would clobber them. **Save/restore scope (#364)**: only `FORGE_URL` is preserved across `.env` re-sourcing (compose injects `http://forgejo:3000`, `.env` has `http://localhost:3000`). `FORGE_TOKEN` is NOT preserved so refreshed tokens in `.env` take effect immediately. **Per-agent token override (#762)**: agent run scripts export `FORGE_TOKEN_OVERRIDE=<agent-specific-token>` BEFORE sourcing `env.sh`; `env.sh` applies this override at lines 98-100, ensuring the correct identity survives any re-sourcing of `env.sh` by nested shells or `claude -p` invocations. **Required env var**: `FORGE_PASS` — bot password for git HTTP push (Forgejo 11.x rejects API tokens for `git push`, #361). **Hard preconditions (#674)**: `USER` and `HOME` must be exported by the entrypoint before sourcing. When `PROJECT_TOML` is set, `PROJECT_REPO_ROOT`, `PRIMARY_BRANCH`, and `OPS_REPO_ROOT` must also be set (by entrypoint or TOML). 
**`load_secret NAME [DEFAULT]` (#793)**: backend-agnostic secret resolution. Precedence: (1) `/secrets/<NAME>.env` — Nomad-rendered template, (2) current environment — already set by `.env.enc` / compose, (3) `secrets/<NAME>.enc` — age-encrypted per-key file (decrypted on demand, cached in process env), (4) DEFAULT or empty. Consumers call `$(load_secret GITHUB_TOKEN)` instead of `${GITHUB_TOKEN}` — identical behavior whether secrets come from Docker compose injection or Nomad Vault templates. | Every agent |
|
||||||
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs <pipeline_number> [--step <name>]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. | dev-poll, review-poll, review-pr |
|
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs <pipeline_number> [--step <name>]` — reads CI logs from Woodpecker SQLite database via `lib/ci-log-reader.py`; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. | dev-poll, review-poll, review-pr |
|
||||||
| `lib/ci-debug.sh` | CLI tool for Woodpecker CI: `list`, `status`, `logs`, `failures` subcommands. Not sourced — run directly. | Humans / dev-agent (tool access) |
|
| `lib/ci-debug.sh` | CLI tool for Woodpecker CI: `list`, `status`, `logs`, `failures` subcommands. Not sourced — run directly. | Humans / dev-agent (tool access) |
|
||||||
| `lib/ci-log-reader.py` | Python tool: reads CI logs from Woodpecker SQLite database. `<pipeline_number> [--step <name>]` — returns last 200 lines from failed steps (or specified step). Used by `ci_get_logs()` in ci-helpers.sh. Requires `WOODPECKER_DATA_DIR` (default: /woodpecker-data). | ci-helpers.sh |
|
| `lib/ci-log-reader.py` | Python tool: reads CI logs from Woodpecker SQLite database. `<pipeline_number> [--step <name>]` — returns last 200 lines from failed steps (or specified step). Used by `ci_get_logs()` in ci-helpers.sh. Requires `WOODPECKER_DATA_DIR` (default: /woodpecker-data). | ci-helpers.sh |
|
||||||
|
|
@ -14,7 +14,7 @@ sourced as needed.
|
||||||
| `lib/parse-deps.sh` | Extracts dependency issue numbers from an issue body (stdin → stdout, one number per line). Matches `## Dependencies` / `## Depends on` / `## Blocked by` sections and inline `depends on #N` / `blocked by #N` patterns. Inline scan skips fenced code blocks to prevent false positives from code examples in issue bodies. Not sourced — executed via `bash lib/parse-deps.sh`. | dev-poll |
|
| `lib/parse-deps.sh` | Extracts dependency issue numbers from an issue body (stdin → stdout, one number per line). Matches `## Dependencies` / `## Depends on` / `## Blocked by` sections and inline `depends on #N` / `blocked by #N` patterns. Inline scan skips fenced code blocks to prevent false positives from code examples in issue bodies. Not sourced — executed via `bash lib/parse-deps.sh`. | dev-poll |
|
||||||
| `lib/formula-session.sh` | `acquire_run_lock()`, `load_formula()`, `load_formula_or_profile()`, `build_context_block()`, `ensure_ops_repo()`, `ops_commit_and_push()`, `build_prompt_footer()`, `build_sdk_prompt_footer()`, `formula_worktree_setup()`, `formula_prepare_profile_context()`, `formula_lessons_block()`, `profile_write_journal()`, `profile_load_lessons()`, `ensure_profile_repo()`, `_profile_has_repo()`, `_count_undigested_journals()`, `_profile_digest_journals()`, `_profile_restore_lessons()`, `_profile_commit_and_push()`, `resolve_agent_identity()`, `build_graph_section()`, `build_scratch_instruction()`, `read_scratch_context()`, `cleanup_stale_crashed_worktrees()` — shared helpers for formula-driven polling-loop agents (lock, .profile repo management, prompt assembly, worktree setup). Memory guard is provided by `memory_guard()` in `lib/env.sh` (not duplicated here). `resolve_agent_identity()` — sets `FORGE_TOKEN`, `AGENT_IDENTITY`, `FORGE_REMOTE` from per-agent token env vars and FORGE_URL remote detection. `build_graph_section()` generates the structural-analysis section (runs `lib/build-graph.py`, formats JSON output) — previously duplicated in planner-run.sh and predictor-run.sh, now shared here. `cleanup_stale_crashed_worktrees()` — thin wrapper around `worktree_cleanup_stale()` from `lib/worktree.sh` (kept for backwards compatibility). **Journal digestion guards (#702)**: `_profile_digest_journals()` respects `PROFILE_DIGEST_TIMEOUT` (default 300s) and `PROFILE_DIGEST_MAX_BATCH` (default 5 journals per run); `_profile_restore_lessons()` restores the previous lessons-learned.md on digest failure. | planner-run.sh, predictor-run.sh, gardener-run.sh, supervisor-run.sh, dev-agent.sh |
|
| `lib/formula-session.sh` | `acquire_run_lock()`, `load_formula()`, `load_formula_or_profile()`, `build_context_block()`, `ensure_ops_repo()`, `ops_commit_and_push()`, `build_prompt_footer()`, `build_sdk_prompt_footer()`, `formula_worktree_setup()`, `formula_prepare_profile_context()`, `formula_lessons_block()`, `profile_write_journal()`, `profile_load_lessons()`, `ensure_profile_repo()`, `_profile_has_repo()`, `_count_undigested_journals()`, `_profile_digest_journals()`, `_profile_restore_lessons()`, `_profile_commit_and_push()`, `resolve_agent_identity()`, `build_graph_section()`, `build_scratch_instruction()`, `read_scratch_context()`, `cleanup_stale_crashed_worktrees()` — shared helpers for formula-driven polling-loop agents (lock, .profile repo management, prompt assembly, worktree setup). Memory guard is provided by `memory_guard()` in `lib/env.sh` (not duplicated here). `resolve_agent_identity()` — sets `FORGE_TOKEN`, `AGENT_IDENTITY`, `FORGE_REMOTE` from per-agent token env vars and FORGE_URL remote detection. `build_graph_section()` generates the structural-analysis section (runs `lib/build-graph.py`, formats JSON output) — previously duplicated in planner-run.sh and predictor-run.sh, now shared here. `cleanup_stale_crashed_worktrees()` — thin wrapper around `worktree_cleanup_stale()` from `lib/worktree.sh` (kept for backwards compatibility). **Journal digestion guards (#702)**: `_profile_digest_journals()` respects `PROFILE_DIGEST_TIMEOUT` (default 300s) and `PROFILE_DIGEST_MAX_BATCH` (default 5 journals per run); `_profile_restore_lessons()` restores the previous lessons-learned.md on digest failure. | planner-run.sh, predictor-run.sh, gardener-run.sh, supervisor-run.sh, dev-agent.sh |
|
||||||
| `lib/guard.sh` | `check_active(agent_name)` — reads `$FACTORY_ROOT/state/.{agent_name}-active`; exits 0 (skip) if the file is absent. Factory is off by default — state files must be created to enable each agent. **Logs a message to stderr** when skipping (`[check_active] SKIP: state file not found`), so agent dropout is visible in loop logs. Sourced by dev-poll.sh, review-poll.sh, predictor-run.sh, supervisor-run.sh. | polling-loop entry points |
|
| `lib/guard.sh` | `check_active(agent_name)` — reads `$FACTORY_ROOT/state/.{agent_name}-active`; exits 0 (skip) if the file is absent. Factory is off by default — state files must be created to enable each agent. **Logs a message to stderr** when skipping (`[check_active] SKIP: state file not found`), so agent dropout is visible in loop logs. Sourced by dev-poll.sh, review-poll.sh, predictor-run.sh, supervisor-run.sh. | polling-loop entry points |
|
||||||
| `lib/mirrors.sh` | `mirror_push()` — pushes `$PRIMARY_BRANCH` + tags to all configured mirror remotes (fire-and-forget background pushes). Reads `MIRROR_NAMES` and `MIRROR_*` vars exported by `load-project.sh` from the `[mirrors]` TOML section. Failures are logged but never block the pipeline. Sourced by dev-poll.sh — called after every successful merge. | dev-poll.sh |
|
| `lib/mirrors.sh` | `mirror_push()` — pushes `$PRIMARY_BRANCH` + tags to all configured mirror remotes (fire-and-forget background pushes). Reads `MIRROR_NAMES` and `MIRROR_*` vars exported by `load-project.sh` from the `[mirrors]` TOML section. Failures are logged but never block the pipeline. `mirror_pull_register(clone_url, owner, repo_name, [interval])` — registers a Forgejo pull mirror via `POST /repos/migrate` with `mirror: true`. Creates the target repo and queues the first sync automatically. Works against empty Forgejo instances — no pre-existing content required. Used for Nomad migration cutover: point at Codeberg source, wait for sync, then proceed with `disinto init`. See [docs/mirror-bootstrap.md](../docs/mirror-bootstrap.md) for the full cutover path. Sourced by dev-poll.sh — called after every successful merge. | dev-poll.sh |
|
||||||
| `lib/build-graph.py` | Python tool: parses VISION.md, prerequisites.md (from ops repo), AGENTS.md, formulas/*.toml, evidence/ (from ops repo), and forge issues/labels into a NetworkX DiGraph. Runs structural analyses (orphaned objectives, stale prerequisites, thin evidence, circular deps) and outputs a JSON report. Used by `review-pr.sh` (per-PR changed-file analysis) and `predictor-run.sh` (full-project analysis) to provide structural context to Claude. | review-pr.sh, predictor-run.sh |
|
| `lib/build-graph.py` | Python tool: parses VISION.md, prerequisites.md (from ops repo), AGENTS.md, formulas/*.toml, evidence/ (from ops repo), and forge issues/labels into a NetworkX DiGraph. Runs structural analyses (orphaned objectives, stale prerequisites, thin evidence, circular deps) and outputs a JSON report. Used by `review-pr.sh` (per-PR changed-file analysis) and `predictor-run.sh` (full-project analysis) to provide structural context to Claude. | review-pr.sh, predictor-run.sh |
|
||||||
| `lib/secret-scan.sh` | `scan_for_secrets()` — detects potential secrets (API keys, bearer tokens, private keys, URLs with embedded credentials) in text; returns 1 if secrets found. `redact_secrets()` — replaces detected secret patterns with `[REDACTED]`. | issue-lifecycle.sh |
|
| `lib/secret-scan.sh` | `scan_for_secrets()` — detects potential secrets (API keys, bearer tokens, private keys, URLs with embedded credentials) in text; returns 1 if secrets found. `redact_secrets()` — replaces detected secret patterns with `[REDACTED]`. | issue-lifecycle.sh |
|
||||||
| `lib/stack-lock.sh` | File-based lock protocol for singleton project stack access. `stack_lock_acquire(holder, project)` — polls until free, breaks stale heartbeats (>10 min old), claims lock. `stack_lock_release(project)` — deletes lock file. `stack_lock_check(project)` — inspect current lock state. `stack_lock_heartbeat(project)` — update heartbeat timestamp (callers must call every 2 min while holding). Lock files at `~/data/locks/<project>-stack.lock`. | docker/edge/dispatcher.sh, reproduce formula |
|
| `lib/stack-lock.sh` | File-based lock protocol for singleton project stack access. `stack_lock_acquire(holder, project)` — polls until free, breaks stale heartbeats (>10 min old), claims lock. `stack_lock_release(project)` — deletes lock file. `stack_lock_check(project)` — inspect current lock state. `stack_lock_heartbeat(project)` — update heartbeat timestamp (callers must call every 2 min while holding). Lock files at `~/data/locks/<project>-stack.lock`. | docker/edge/dispatcher.sh, reproduce formula |
|
||||||
|
|
@ -22,14 +22,16 @@ sourced as needed.
|
||||||
| `lib/worktree.sh` | Reusable git worktree management: `worktree_create(path, branch, [base_ref])` — create worktree, checkout base, fetch submodules. `worktree_recover(path, branch, [remote])` — detect existing worktree, reuse if on correct branch (sets `_WORKTREE_REUSED`), otherwise clean and recreate. `worktree_cleanup(path)` — `git worktree remove --force`, clear Claude Code project cache (`~/.claude/projects/` matching path). `worktree_cleanup_stale([max_age_hours])` — scan `/tmp` for orphaned worktrees older than threshold, skip preserved and active tmux worktrees, prune. `worktree_preserve(path, reason)` — mark worktree as preserved for debugging (writes `.worktree-preserved` marker, skipped by stale cleanup). | dev-agent.sh, supervisor-run.sh, planner-run.sh, predictor-run.sh, gardener-run.sh |
|
| `lib/worktree.sh` | Reusable git worktree management: `worktree_create(path, branch, [base_ref])` — create worktree, checkout base, fetch submodules. `worktree_recover(path, branch, [remote])` — detect existing worktree, reuse if on correct branch (sets `_WORKTREE_REUSED`), otherwise clean and recreate. `worktree_cleanup(path)` — `git worktree remove --force`, clear Claude Code project cache (`~/.claude/projects/` matching path). `worktree_cleanup_stale([max_age_hours])` — scan `/tmp` for orphaned worktrees older than threshold, skip preserved and active tmux worktrees, prune. `worktree_preserve(path, reason)` — mark worktree as preserved for debugging (writes `.worktree-preserved` marker, skipped by stale cleanup). | dev-agent.sh, supervisor-run.sh, planner-run.sh, predictor-run.sh, gardener-run.sh |
|
||||||
| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) |
|
| `lib/pr-lifecycle.sh` | Reusable PR lifecycle library: `pr_create()`, `pr_find_by_branch()`, `pr_poll_ci()`, `pr_poll_review()`, `pr_merge()`, `pr_is_merged()`, `pr_walk_to_merge()`, `build_phase_protocol_prompt()`. Requires `lib/ci-helpers.sh`. | dev-agent.sh (future) |
|
||||||
| `lib/issue-lifecycle.sh` | Reusable issue lifecycle library: `issue_claim()` (add in-progress, remove backlog), `issue_release()` (remove in-progress, add backlog), `issue_block()` (post diagnostic comment with secret redaction, add blocked label), `issue_close()`, `issue_check_deps()` (parse deps, check transitive closure; sets `_ISSUE_BLOCKED_BY`, `_ISSUE_SUGGESTION`), `issue_suggest_next()` (find next unblocked backlog issue; sets `_ISSUE_NEXT`), `issue_post_refusal()` (structured refusal comment with dedup). Label IDs cached in globals on first lookup. Sources `lib/secret-scan.sh`. | dev-agent.sh (future) |
|
| `lib/issue-lifecycle.sh` | Reusable issue lifecycle library: `issue_claim()` (add in-progress, remove backlog), `issue_release()` (remove in-progress, add backlog), `issue_block()` (post diagnostic comment with secret redaction, add blocked label), `issue_close()`, `issue_check_deps()` (parse deps, check transitive closure; sets `_ISSUE_BLOCKED_BY`, `_ISSUE_SUGGESTION`), `issue_suggest_next()` (find next unblocked backlog issue; sets `_ISSUE_NEXT`), `issue_post_refusal()` (structured refusal comment with dedup). Label IDs cached in globals on first lookup. Sources `lib/secret-scan.sh`. | dev-agent.sh (future) |
|
||||||
| `lib/vault.sh` | **Vault PR helper** — create vault action PRs on ops repo via Forgejo API (works from containers without SSH). `vault_request <action_id> <toml_content>` validates TOML (using `validate_vault_action` from `vault/vault-env.sh`), creates branch `vault/<action-id>`, writes `vault/actions/<action-id>.toml`, creates PR targeting `main` with title `vault: <action-id>` and body from context field, returns PR number. Idempotent: if PR exists, returns existing number. **Low-tier bypass**: if the action's `blast_radius` classifies as `low` (via `vault/classify.sh`), `vault_request` calls `_vault_commit_direct()` which commits directly to ops `main` using `FORGE_ADMIN_TOKEN` — no PR, no approval wait. Returns `0` (not a PR number) for direct commits. Requires `FORGE_TOKEN`, `FORGE_ADMIN_TOKEN` (low-tier only), `FORGE_URL`, `FORGE_REPO`, `FORGE_OPS_REPO`. Uses the calling agent's own token (saves/restores `FORGE_TOKEN` around sourcing `vault-env.sh`), so approval workflow respects individual agent identities. | dev-agent (vault actions), future vault dispatcher |
|
| `lib/action-vault.sh` | **Vault PR helper** — create vault action PRs on ops repo via Forgejo API (works from containers without SSH). `vault_request <action_id> <toml_content>` validates TOML (using `validate_vault_action` from `action-vault/vault-env.sh`), creates branch `vault/<action-id>`, writes `vault/actions/<action-id>.toml`, creates PR targeting `main` with title `vault: <action-id>` and body from context field, returns PR number. Idempotent: if PR exists, returns existing number. **Low-tier bypass**: if the action's `blast_radius` classifies as `low` (via `action-vault/classify.sh`), `vault_request` calls `_vault_commit_direct()` which commits directly to ops `main` using `FORGE_ADMIN_TOKEN` — no PR, no approval wait. Returns `0` (not a PR number) for direct commits. Requires `FORGE_TOKEN`, `FORGE_ADMIN_TOKEN` (low-tier only), `FORGE_URL`, `FORGE_REPO`, `FORGE_OPS_REPO`. Uses the calling agent's own token (saves/restores `FORGE_TOKEN` around sourcing `vault-env.sh`), so approval workflow respects individual agent identities. | dev-agent (vault actions), future vault dispatcher |
|
||||||
| `lib/branch-protection.sh` | Branch protection helpers for Forgejo repos. `setup_vault_branch_protection()` — configures admin-only merge protection on main (require 1 approval, restrict merge to admin role, block direct pushes). `setup_profile_branch_protection()` — same protection for `.profile` repos. `verify_branch_protection()` — checks protection is correctly configured. `remove_branch_protection()` — removes protection (cleanup/testing). Handles race condition after initial push: retries with backoff if Forgejo hasn't processed the branch yet. Requires `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OPS_REPO`. | bin/disinto (hire-an-agent) |
|
| `lib/branch-protection.sh` | Branch protection helpers for Forgejo repos. `setup_vault_branch_protection()` — configures admin-only merge protection on main (require 1 approval, restrict merge to admin role, block direct pushes). `setup_profile_branch_protection()` — same protection for `.profile` repos. `verify_branch_protection()` — checks protection is correctly configured. `remove_branch_protection()` — removes protection (cleanup/testing). Handles race condition after initial push: retries with backoff if Forgejo hasn't processed the branch yet. Requires `FORGE_TOKEN`, `FORGE_URL`, `FORGE_OPS_REPO`. | bin/disinto (hire-an-agent) |
|
||||||
| `lib/agent-sdk.sh` | `agent_run([--resume SESSION_ID] [--worktree DIR] PROMPT)` — one-shot `claude -p` invocation with session persistence. Saves session ID to `SID_FILE`, reads it back on resume. `agent_recover_session()` — restore previous session ID from `SID_FILE` on startup. **Nudge guard**: skips nudge injection if the worktree is clean and no push is expected, preventing spurious re-invocations. Callers must define `SID_FILE`, `LOGFILE`, and `log()` before sourcing. **Concurrency**: external `flock` on `session.lock` is gated behind `CLAUDE_EXTERNAL_LOCK=1` (default off). When unset, each container's per-session `CLAUDE_CONFIG_DIR` isolation lets Claude Code's native lockfile handle OAuth refresh — no external serialization needed. Set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old flock wrapper as a rollback mechanism. See [`docs/CLAUDE-AUTH-CONCURRENCY.md`](../docs/CLAUDE-AUTH-CONCURRENCY.md) and AD-002 (#647). | formula-driven agents (dev-agent, planner-run, predictor-run, gardener-run) |
|
| `lib/agent-sdk.sh` | `agent_run([--resume SESSION_ID] [--worktree DIR] PROMPT)` — one-shot `claude -p` invocation with session persistence. Saves session ID to `SID_FILE`, reads it back on resume. `agent_recover_session()` — restore previous session ID from `SID_FILE` on startup. **Nudge guard**: skips nudge injection if the worktree is clean and no push is expected, preventing spurious re-invocations. Callers must define `SID_FILE`, `LOGFILE`, and `log()` before sourcing. **Concurrency**: external `flock` on `session.lock` is gated behind `CLAUDE_EXTERNAL_LOCK=1` (default off). When unset, each container's per-session `CLAUDE_CONFIG_DIR` isolation lets Claude Code's native lockfile handle OAuth refresh — no external serialization needed. Set `CLAUDE_EXTERNAL_LOCK=1` to re-enable the old flock wrapper as a rollback mechanism. See [`docs/CLAUDE-AUTH-CONCURRENCY.md`](../docs/CLAUDE-AUTH-CONCURRENCY.md) and AD-002 (#647). | formula-driven agents (dev-agent, planner-run, predictor-run, gardener-run) |
|
||||||
| `lib/forge-setup.sh` | `setup_forge()` — Forgejo instance provisioning: creates admin user, bot accounts, org, repos (code + ops), configures webhooks, sets repo topics. Extracted from `bin/disinto`. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`. **Password storage (#361)**: after creating each bot account, stores its password in `.env` as `FORGE_<BOT>_PASS` (e.g. `FORGE_PASS`, `FORGE_REVIEW_PASS`, etc.) for use by `forge-push.sh`. | bin/disinto (init) |
|
| `lib/forge-setup.sh` | `setup_forge()` — Forgejo instance provisioning: creates admin user, bot accounts, org, repos (code + ops), configures webhooks, sets repo topics. Extracted from `bin/disinto`. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`. **Password storage (#361)**: after creating each bot account, stores its password in `.env` as `FORGE_<BOT>_PASS` (e.g. `FORGE_PASS`, `FORGE_REVIEW_PASS`, etc.) for use by `forge-push.sh`. | bin/disinto (init) |
|
||||||
| `lib/forge-push.sh` | `push_to_forge()` — pushes a local clone to the Forgejo remote and verifies the push. `_assert_forge_push_globals()` validates required env vars before use. Requires `FORGE_URL`, `FORGE_PASS`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. **Auth**: uses `FORGE_PASS` (bot password) for git HTTP push — Forgejo 11.x rejects API tokens for `git push` (#361). | bin/disinto (init) |
|
| `lib/forge-push.sh` | `push_to_forge()` — pushes a local clone to the Forgejo remote and verifies the push. `_assert_forge_push_globals()` validates required env vars before use. Requires `FORGE_URL`, `FORGE_PASS`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. **Auth**: uses `FORGE_PASS` (bot password) for git HTTP push — Forgejo 11.x rejects API tokens for `git push` (#361). | bin/disinto (init) |
|
||||||
| `lib/git-creds.sh` | Shared git credential helper configuration. `configure_git_creds([HOME_DIR] [RUN_AS_CMD])` — writes a static credential helper script and configures git globally to use password-based HTTP auth (Forgejo 11.x rejects API tokens for `git push`, #361). `repair_baked_cred_urls([--as RUN_AS_CMD] DIR ...)` — rewrites any git remote URLs that have credentials baked in to use clean URLs instead; uses `safe.directory` bypass for root-owned repos (#671). Requires `FORGE_PASS`, `FORGE_URL`, `FORGE_TOKEN`. | entrypoints (agents, edge) |
|
| `lib/git-creds.sh` | Shared git credential helper configuration. `configure_git_creds([HOME_DIR] [RUN_AS_CMD])` — writes a static credential helper script and configures git globally to use password-based HTTP auth (Forgejo 11.x rejects API tokens for `git push`, #361). **Retry on cold boot (#741)**: resolves bot username from `FORGE_TOKEN` with 5 retries (exponential backoff 1-5s); fails loudly and returns 1 if Forgejo is unreachable — never falls back to a wrong hardcoded default (exports `BOT_USER` on success). `repair_baked_cred_urls([--as RUN_AS_CMD] DIR ...)` — rewrites any git remote URLs that have credentials baked in to use clean URLs instead; uses `safe.directory` bypass for root-owned repos (#671). Requires `FORGE_PASS`, `FORGE_URL`, `FORGE_TOKEN`. | entrypoints (agents, edge) |
|
||||||
| `lib/ops-setup.sh` | `setup_ops_repo()` — creates ops repo on Forgejo if it doesn't exist, configures bot collaborators, clones/initializes ops repo locally, seeds directory structure (vault, knowledge, evidence, sprints). Evidence subdirectories seeded: engagement/, red-team/, holdout/, evolution/, user-test/. Also seeds sprints/ for architect output. Exports `_ACTUAL_OPS_SLUG`. `migrate_ops_repo(ops_root, [primary_branch])` — idempotent migration helper that seeds missing directories and .gitkeep files on existing ops repos (pre-#407 deployments). | bin/disinto (init) |
|
| `lib/ops-setup.sh` | `setup_ops_repo()` — creates ops repo on Forgejo if it doesn't exist, configures bot collaborators, clones/initializes ops repo locally, seeds directory structure (vault, knowledge, evidence, sprints). Evidence subdirectories seeded: engagement/, red-team/, holdout/, evolution/, user-test/. Also seeds sprints/ for architect output. Exports `_ACTUAL_OPS_SLUG`. `migrate_ops_repo(ops_root, [primary_branch])` — idempotent migration helper that seeds missing directories and .gitkeep files on existing ops repos (pre-#407 deployments). | bin/disinto (init) |
|
||||||
| `lib/ci-setup.sh` | `_install_cron_impl()` — installs crontab entries for bare-metal deployments (compose mode uses polling loop instead). `_create_forgejo_oauth_app()` — generic helper to create an OAuth2 app on Forgejo (shared by Woodpecker and chat). `_create_woodpecker_oauth_impl()` — creates Woodpecker OAuth2 app (thin wrapper). `_create_chat_oauth_impl()` — creates disinto-chat OAuth2 app, writes `CHAT_OAUTH_CLIENT_ID`/`CHAT_OAUTH_CLIENT_SECRET` to `.env` (#708). `_generate_woodpecker_token_impl()` — auto-generates WOODPECKER_TOKEN via OAuth2 flow. `_activate_woodpecker_repo_impl()` — activates repo in Woodpecker. All gated by `_load_ci_context()` which validates required env vars. | bin/disinto (init) |
|
| `lib/ci-setup.sh` | `_install_cron_impl()` — installs crontab entries for bare-metal deployments (compose mode uses polling loop instead). `_create_forgejo_oauth_app()` — generic helper to create an OAuth2 app on Forgejo (shared by Woodpecker and chat). `_create_woodpecker_oauth_impl()` — creates Woodpecker OAuth2 app (thin wrapper). `_create_chat_oauth_impl()` — creates disinto-chat OAuth2 app, writes `CHAT_OAUTH_CLIENT_ID`/`CHAT_OAUTH_CLIENT_SECRET` to `.env` (#708). `_generate_woodpecker_token_impl()` — auto-generates WOODPECKER_TOKEN via OAuth2 flow. `_activate_woodpecker_repo_impl()` — activates repo in Woodpecker. All gated by `_load_ci_context()` which validates required env vars. | bin/disinto (init) |
|
||||||
| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume; all `depends_on` now use `condition: service_healthy/started` instead of bare service names), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80, `/chat/*` → chat:8080; root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. | bin/disinto (init) |
|
| `lib/generators.sh` | Template generation for `disinto init`: `generate_compose()` — docker-compose.yml (uses `codeberg.org/forgejo/forgejo:11.0` tag; adds `security_opt: [apparmor:unconfined]` to all services for rootless container compatibility; Forgejo includes a healthcheck so dependent services use `condition: service_healthy` — fixes cold-start races, #665; adds `chat` service block with isolated `chat-config` named volume and `CHAT_HISTORY_DIR` bind-mount for per-user NDJSON history persistence (#710); injects `FORWARD_AUTH_SECRET` for Caddy↔chat defense-in-depth auth (#709); cost-cap env vars `CHAT_MAX_REQUESTS_PER_HOUR`, `CHAT_MAX_REQUESTS_PER_DAY`, `CHAT_MAX_TOKENS_PER_DAY` (#711); subdomain fallback comment for `EDGE_TUNNEL_FQDN_*` vars (#713); all `depends_on` now use `condition: service_healthy/started` instead of bare service names; all services now include `restart: unless-stopped` including the edge service — #768; agents service now uses `image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}` instead of `build:` (#429); `WOODPECKER_PLUGINS_PRIVILEGED` env var added to woodpecker service (#779); agents-llama conditional block gated on `ENABLE_LLAMA_AGENT=1` (#769); `agents-llama-all` compose service (profile `agents-llama-all`, all 7 roles: review,dev,gardener,architect,planner,predictor,supervisor) added by #801; agents service gains volume mounts for `./projects`, `./.env`, `./state`), `generate_caddyfile()` — Caddyfile (routes: `/forge/*` → forgejo:3000, `/woodpecker/*` → woodpecker:8000, `/staging/*` → staging:80; `/chat/login` and `/chat/oauth/callback` bypass `forward_auth` so unauthenticated users can reach the OAuth flow; `/chat/*` gated by `forward_auth` on `chat:8080/chat/auth/verify` which stamps `X-Forwarded-User` (#709); root `/` redirects to `/forge/`), `generate_staging_index()` — staging index, `generate_deploy_pipelines()` — Woodpecker deployment pipeline configs. Requires `FACTORY_ROOT`, `PROJECT_NAME`, `PRIMARY_BRANCH`. 
| bin/disinto (init) |
|
||||||
|
| `lib/sprint-filer.sh` | Post-merge sub-issue filer for sprint PRs. Invoked by the `.woodpecker/ops-filer.yml` pipeline after a sprint PR merges to ops repo `main`. Parses `<!-- filer:begin --> ... <!-- filer:end -->` blocks from sprint PR bodies to extract sub-issue definitions, creates them on the project repo using `FORGE_FILER_TOKEN` (narrow-scope `filer-bot` identity with `issues:write` only), adds `in-progress` label to the parent vision issue, and handles vision lifecycle closure when all sub-issues are closed. Uses `filer_api_all()` for paginated fetches. Idempotent: uses `<!-- decomposed-from: #<vision>, sprint: <slug>, id: <id> -->` markers to skip already-filed issues. Requires `FORGE_FILER_TOKEN`, `FORGE_API`, `FORGE_API_BASE`, `FORGE_OPS_REPO`. | `.woodpecker/ops-filer.yml` (CI pipeline on ops repo) |
|
||||||
| `lib/hire-agent.sh` | `disinto_hire_an_agent()` — user creation, `.profile` repo setup, formula copying, branch protection, and state marker creation for hiring a new agent. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`, `PROJECT_NAME`. Extracted from `bin/disinto`. | bin/disinto (hire) |
|
| `lib/hire-agent.sh` | `disinto_hire_an_agent()` — user creation, `.profile` repo setup, formula copying, branch protection, and state marker creation for hiring a new agent. Requires `FORGE_URL`, `FORGE_TOKEN`, `FACTORY_ROOT`, `PROJECT_NAME`. Extracted from `bin/disinto`. | bin/disinto (hire) |
|
||||||
| `lib/release.sh` | `disinto_release()` — vault TOML creation, branch setup on ops repo, PR creation, and auto-merge request for a versioned release. `_assert_release_globals()` validates required env vars. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_OPS_REPO`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. Extracted from `bin/disinto`. | bin/disinto (release) |
|
| `lib/release.sh` | `disinto_release()` — vault TOML creation, branch setup on ops repo, PR creation, and auto-merge request for a versioned release. `_assert_release_globals()` validates required env vars. Requires `FORGE_URL`, `FORGE_TOKEN`, `FORGE_OPS_REPO`, `FACTORY_ROOT`, `PRIMARY_BRANCH`. Extracted from `bin/disinto`. | bin/disinto (release) |
|
||||||
|
| `lib/hvault.sh` | HashiCorp Vault helper module. `hvault_kv_get(PATH, [KEY])` — read KV v2 secret, optionally extract one key. `hvault_kv_put(PATH, KEY=VAL ...)` — write KV v2 secret. `hvault_kv_list(PATH)` — list keys at a KV path. `hvault_policy_apply(NAME, FILE)` — idempotent policy upsert. `hvault_jwt_login(ROLE, JWT)` — exchange JWT for short-lived token. `hvault_token_lookup()` — returns TTL/policies/accessor for current token. All functions use `VAULT_ADDR` + `VAULT_TOKEN` from env (fallback: `/etc/vault.d/root.token`), emit structured JSON errors to stderr on failure. Tests: `tests/lib-hvault.bats` (requires `vault server -dev`). | Not sourced at runtime yet — pure scaffolding for Nomad+Vault migration (#799) |
|
||||||
|
|
|
||||||
|
|
@ -1,9 +1,9 @@
|
||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# vault.sh — Helper for agents to create vault PRs on ops repo
|
# action-vault.sh — Helper for agents to create vault PRs on ops repo
|
||||||
#
|
#
|
||||||
# Source after lib/env.sh:
|
# Source after lib/env.sh:
|
||||||
# source "$(dirname "$0")/../lib/env.sh"
|
# source "$(dirname "$0")/../lib/env.sh"
|
||||||
# source "$(dirname "$0")/lib/vault.sh"
|
# source "$(dirname "$0")/lib/action-vault.sh"
|
||||||
#
|
#
|
||||||
# Required globals: FORGE_TOKEN, FORGE_URL, FORGE_REPO, FORGE_OPS_REPO
|
# Required globals: FORGE_TOKEN, FORGE_URL, FORGE_REPO, FORGE_OPS_REPO
|
||||||
# Optional: OPS_REPO_ROOT (local path for ops repo)
|
# Optional: OPS_REPO_ROOT (local path for ops repo)
|
||||||
|
|
@ -12,7 +12,7 @@
|
||||||
# vault_request <action_id> <toml_content> — Create vault PR, return PR number
|
# vault_request <action_id> <toml_content> — Create vault PR, return PR number
|
||||||
#
|
#
|
||||||
# The function:
|
# The function:
|
||||||
# 1. Validates TOML content using validate_vault_action() from vault/vault-env.sh
|
# 1. Validates TOML content using validate_vault_action() from action-vault/vault-env.sh
|
||||||
# 2. Creates a branch on the ops repo: vault/<action-id>
|
# 2. Creates a branch on the ops repo: vault/<action-id>
|
||||||
# 3. Writes TOML to vault/actions/<action-id>.toml on that branch
|
# 3. Writes TOML to vault/actions/<action-id>.toml on that branch
|
||||||
# 4. Creates PR targeting main with title "vault: <action-id>"
|
# 4. Creates PR targeting main with title "vault: <action-id>"
|
||||||
|
|
@ -133,7 +133,7 @@ vault_request() {
|
||||||
printf '%s' "$toml_content" > "$tmp_toml"
|
printf '%s' "$toml_content" > "$tmp_toml"
|
||||||
|
|
||||||
# Source vault-env.sh for validate_vault_action
|
# Source vault-env.sh for validate_vault_action
|
||||||
local vault_env="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/vault/vault-env.sh"
|
local vault_env="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/action-vault/vault-env.sh"
|
||||||
if [ ! -f "$vault_env" ]; then
|
if [ ! -f "$vault_env" ]; then
|
||||||
echo "ERROR: vault-env.sh not found at $vault_env" >&2
|
echo "ERROR: vault-env.sh not found at $vault_env" >&2
|
||||||
return 1
|
return 1
|
||||||
|
|
@ -161,7 +161,7 @@ vault_request() {
|
||||||
ops_api="$(_vault_ops_api)"
|
ops_api="$(_vault_ops_api)"
|
||||||
|
|
||||||
# Classify the action to determine if PR bypass is allowed
|
# Classify the action to determine if PR bypass is allowed
|
||||||
local classify_script="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/vault/classify.sh"
|
local classify_script="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/action-vault/classify.sh"
|
||||||
local vault_tier
|
local vault_tier
|
||||||
vault_tier=$("$classify_script" "${VAULT_ACTION_FORMULA:-}" "${VAULT_BLAST_RADIUS_OVERRIDE:-}") || {
|
vault_tier=$("$classify_script" "${VAULT_ACTION_FORMULA:-}" "${VAULT_BLAST_RADIUS_OVERRIDE:-}") || {
|
||||||
# Classification failed, default to high tier (require PR)
|
# Classification failed, default to high tier (require PR)
|
||||||
69
lib/env.sh
69
lib/env.sh
|
|
@ -121,9 +121,10 @@ export FORGE_VAULT_TOKEN="${FORGE_VAULT_TOKEN:-${FORGE_TOKEN}}"
|
||||||
export FORGE_SUPERVISOR_TOKEN="${FORGE_SUPERVISOR_TOKEN:-${FORGE_TOKEN}}"
|
export FORGE_SUPERVISOR_TOKEN="${FORGE_SUPERVISOR_TOKEN:-${FORGE_TOKEN}}"
|
||||||
export FORGE_PREDICTOR_TOKEN="${FORGE_PREDICTOR_TOKEN:-${FORGE_TOKEN}}"
|
export FORGE_PREDICTOR_TOKEN="${FORGE_PREDICTOR_TOKEN:-${FORGE_TOKEN}}"
|
||||||
export FORGE_ARCHITECT_TOKEN="${FORGE_ARCHITECT_TOKEN:-${FORGE_TOKEN}}"
|
export FORGE_ARCHITECT_TOKEN="${FORGE_ARCHITECT_TOKEN:-${FORGE_TOKEN}}"
|
||||||
|
export FORGE_FILER_TOKEN="${FORGE_FILER_TOKEN:-${FORGE_TOKEN}}"
|
||||||
|
|
||||||
# Bot usernames filter
|
# Bot usernames filter
|
||||||
export FORGE_BOT_USERNAMES="${FORGE_BOT_USERNAMES:-dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot}"
|
export FORGE_BOT_USERNAMES="${FORGE_BOT_USERNAMES:-dev-bot,review-bot,planner-bot,gardener-bot,vault-bot,supervisor-bot,predictor-bot,architect-bot,filer-bot}"
|
||||||
|
|
||||||
# Project config
|
# Project config
|
||||||
export FORGE_REPO="${FORGE_REPO:-}"
|
export FORGE_REPO="${FORGE_REPO:-}"
|
||||||
|
|
@ -157,8 +158,8 @@ export WOODPECKER_SERVER="${WOODPECKER_SERVER:-http://localhost:8000}"
|
||||||
export CLAUDE_TIMEOUT="${CLAUDE_TIMEOUT:-7200}"
|
export CLAUDE_TIMEOUT="${CLAUDE_TIMEOUT:-7200}"
|
||||||
|
|
||||||
# Vault-only token guard (#745): external-action tokens (GITHUB_TOKEN, CLAWHUB_TOKEN)
|
# Vault-only token guard (#745): external-action tokens (GITHUB_TOKEN, CLAWHUB_TOKEN)
|
||||||
# must NEVER be available to agents. They live in .env.vault.enc and are injected
|
# must NEVER be available to agents. They live in secrets/*.enc and are decrypted
|
||||||
# only into the ephemeral runner container at fire time. Unset them here so
|
# only into the ephemeral runner container at fire time (#777). Unset them here so
|
||||||
# even an accidental .env inclusion cannot leak them into agent sessions.
|
# even an accidental .env inclusion cannot leak them into agent sessions.
|
||||||
unset GITHUB_TOKEN 2>/dev/null || true
|
unset GITHUB_TOKEN 2>/dev/null || true
|
||||||
unset CLAWHUB_TOKEN 2>/dev/null || true
|
unset CLAWHUB_TOKEN 2>/dev/null || true
|
||||||
|
|
@ -312,6 +313,68 @@ memory_guard() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# SECRET LOADING ABSTRACTION
|
||||||
|
# =============================================================================
|
||||||
|
# load_secret NAME [DEFAULT]
|
||||||
|
#
|
||||||
|
# Resolves a secret value using the following precedence:
|
||||||
|
# 1. /secrets/<NAME>.env — Nomad-rendered template (future)
|
||||||
|
# 2. Current environment — already set by .env.enc, compose, etc.
|
||||||
|
# 3. secrets/<NAME>.enc — age-encrypted per-key file (decrypted on demand)
|
||||||
|
# 4. DEFAULT (or empty)
|
||||||
|
#
|
||||||
|
# Prints the resolved value to stdout. Caches age-decrypted values in the
|
||||||
|
# process environment so subsequent calls are free.
|
||||||
|
# =============================================================================
|
||||||
|
load_secret() {
|
||||||
|
local name="$1"
|
||||||
|
local default="${2:-}"
|
||||||
|
|
||||||
|
# 1. Nomad-rendered template (future: Nomad writes /secrets/<NAME>.env)
|
||||||
|
local nomad_path="/secrets/${name}.env"
|
||||||
|
if [ -f "$nomad_path" ]; then
|
||||||
|
# Source into a subshell to extract just the value
|
||||||
|
local _nomad_val
|
||||||
|
_nomad_val=$(
|
||||||
|
set -a
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
source "$nomad_path"
|
||||||
|
set +a
|
||||||
|
printf '%s' "${!name:-}"
|
||||||
|
)
|
||||||
|
if [ -n "$_nomad_val" ]; then
|
||||||
|
export "$name=$_nomad_val"
|
||||||
|
printf '%s' "$_nomad_val"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 2. Already in environment (set by .env.enc, compose injection, etc.)
|
||||||
|
if [ -n "${!name:-}" ]; then
|
||||||
|
printf '%s' "${!name}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 3. Age-encrypted per-key file: secrets/<NAME>.enc (#777)
|
||||||
|
local _age_key="${HOME}/.config/sops/age/keys.txt"
|
||||||
|
local _enc_path="${FACTORY_ROOT}/secrets/${name}.enc"
|
||||||
|
if [ -f "$_enc_path" ] && [ -f "$_age_key" ] && command -v age &>/dev/null; then
|
||||||
|
local _dec_val
|
||||||
|
if _dec_val=$(age -d -i "$_age_key" "$_enc_path" 2>/dev/null) && [ -n "$_dec_val" ]; then
|
||||||
|
export "$name=$_dec_val"
|
||||||
|
printf '%s' "$_dec_val"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 4. Default (or empty)
|
||||||
|
if [ -n "$default" ]; then
|
||||||
|
printf '%s' "$default"
|
||||||
|
fi
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
# Source tea helpers (available when tea binary is installed)
|
# Source tea helpers (available when tea binary is installed)
|
||||||
if command -v tea &>/dev/null; then
|
if command -v tea &>/dev/null; then
|
||||||
# shellcheck source=tea-helpers.sh
|
# shellcheck source=tea-helpers.sh
|
||||||
|
|
|
||||||
|
|
@ -31,8 +31,9 @@ _load_init_context() {
|
||||||
# Execute a command in the Forgejo container (for admin operations)
|
# Execute a command in the Forgejo container (for admin operations)
|
||||||
_forgejo_exec() {
|
_forgejo_exec() {
|
||||||
local use_bare="${DISINTO_BARE:-false}"
|
local use_bare="${DISINTO_BARE:-false}"
|
||||||
|
local cname="${FORGEJO_CONTAINER_NAME:-disinto-forgejo}"
|
||||||
if [ "$use_bare" = true ]; then
|
if [ "$use_bare" = true ]; then
|
||||||
docker exec -u git disinto-forgejo "$@"
|
docker exec -u git "$cname" "$@"
|
||||||
else
|
else
|
||||||
docker compose -f "${FACTORY_ROOT}/docker-compose.yml" exec -T -u git forgejo "$@"
|
docker compose -f "${FACTORY_ROOT}/docker-compose.yml" exec -T -u git forgejo "$@"
|
||||||
fi
|
fi
|
||||||
|
|
@ -94,11 +95,12 @@ setup_forge() {
|
||||||
# Bare-metal mode: standalone docker run
|
# Bare-metal mode: standalone docker run
|
||||||
mkdir -p "${FORGEJO_DATA_DIR}"
|
mkdir -p "${FORGEJO_DATA_DIR}"
|
||||||
|
|
||||||
if docker ps -a --format '{{.Names}}' | grep -q '^disinto-forgejo$'; then
|
local cname="${FORGEJO_CONTAINER_NAME:-disinto-forgejo}"
|
||||||
docker start disinto-forgejo >/dev/null 2>&1 || true
|
if docker ps -a --format '{{.Names}}' | grep -q "^${cname}$"; then
|
||||||
|
docker start "$cname" >/dev/null 2>&1 || true
|
||||||
else
|
else
|
||||||
docker run -d \
|
docker run -d \
|
||||||
--name disinto-forgejo \
|
--name "$cname" \
|
||||||
--restart unless-stopped \
|
--restart unless-stopped \
|
||||||
-p "${forge_port}:3000" \
|
-p "${forge_port}:3000" \
|
||||||
-p 2222:22 \
|
-p 2222:22 \
|
||||||
|
|
@ -210,8 +212,8 @@ setup_forge() {
|
||||||
|
|
||||||
# Create human user (disinto-admin) as site admin if it doesn't exist
|
# Create human user (disinto-admin) as site admin if it doesn't exist
|
||||||
local human_user="disinto-admin"
|
local human_user="disinto-admin"
|
||||||
local human_pass
|
# human_user == admin_user; reuse admin_pass for basic-auth operations
|
||||||
human_pass="admin-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
local human_pass="$admin_pass"
|
||||||
|
|
||||||
if ! curl -sf --max-time 5 -H "Authorization: token ${FORGE_TOKEN:-}" "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
|
if ! curl -sf --max-time 5 -H "Authorization: token ${FORGE_TOKEN:-}" "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
|
||||||
echo "Creating human user: ${human_user}"
|
echo "Creating human user: ${human_user}"
|
||||||
|
|
@ -243,63 +245,89 @@ setup_forge() {
|
||||||
echo "Human user: ${human_user} (already exists)"
|
echo "Human user: ${human_user} (already exists)"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Delete existing admin token if present (token sha1 is only returned at creation time)
|
# Preserve admin token if already stored in .env (idempotent re-run)
|
||||||
local existing_token_id
|
local admin_token=""
|
||||||
existing_token_id=$(curl -sf \
|
if _token_exists_in_env "FORGE_ADMIN_TOKEN" "$env_file" && [ "$rotate_tokens" = false ]; then
|
||||||
-u "${admin_user}:${admin_pass}" \
|
admin_token=$(grep '^FORGE_ADMIN_TOKEN=' "$env_file" | head -1 | cut -d= -f2-)
|
||||||
"${forge_url}/api/v1/users/${admin_user}/tokens" 2>/dev/null \
|
[ -n "$admin_token" ] && echo "Admin token: preserved (use --rotate-tokens to force)"
|
||||||
| jq -r '.[] | select(.name == "disinto-admin-token") | .id') || existing_token_id=""
|
|
||||||
if [ -n "$existing_token_id" ]; then
|
|
||||||
curl -sf -X DELETE \
|
|
||||||
-u "${admin_user}:${admin_pass}" \
|
|
||||||
"${forge_url}/api/v1/users/${admin_user}/tokens/${existing_token_id}" >/dev/null 2>&1 || true
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create admin token (fresh, so sha1 is returned)
|
|
||||||
local admin_token
|
|
||||||
admin_token=$(curl -sf -X POST \
|
|
||||||
-u "${admin_user}:${admin_pass}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
"${forge_url}/api/v1/users/${admin_user}/tokens" \
|
|
||||||
-d '{"name":"disinto-admin-token","scopes":["all"]}' 2>/dev/null \
|
|
||||||
| jq -r '.sha1 // empty') || admin_token=""
|
|
||||||
|
|
||||||
if [ -z "$admin_token" ]; then
|
if [ -z "$admin_token" ]; then
|
||||||
echo "Error: failed to obtain admin API token" >&2
|
# Delete existing admin token if present (token sha1 is only returned at creation time)
|
||||||
exit 1
|
local existing_token_id
|
||||||
fi
|
existing_token_id=$(curl -sf \
|
||||||
|
-u "${admin_user}:${admin_pass}" \
|
||||||
# Get or create human user token
|
"${forge_url}/api/v1/users/${admin_user}/tokens" 2>/dev/null \
|
||||||
local human_token=""
|
| jq -r '.[] | select(.name == "disinto-admin-token") | .id') || existing_token_id=""
|
||||||
# Delete existing human token if present (token sha1 is only returned at creation time)
|
if [ -n "$existing_token_id" ]; then
|
||||||
local existing_human_token_id
|
curl -sf -X DELETE \
|
||||||
existing_human_token_id=$(curl -sf \
|
-u "${admin_user}:${admin_pass}" \
|
||||||
-u "${human_user}:${human_pass}" \
|
"${forge_url}/api/v1/users/${admin_user}/tokens/${existing_token_id}" >/dev/null 2>&1 || true
|
||||||
"${forge_url}/api/v1/users/${human_user}/tokens" 2>/dev/null \
|
fi
|
||||||
| jq -r '.[] | select(.name == "disinto-human-token") | .id') || existing_human_token_id=""
|
|
||||||
if [ -n "$existing_human_token_id" ]; then
|
# Create admin token (fresh, so sha1 is returned)
|
||||||
curl -sf -X DELETE \
|
admin_token=$(curl -sf -X POST \
|
||||||
-u "${human_user}:${human_pass}" \
|
-u "${admin_user}:${admin_pass}" \
|
||||||
"${forge_url}/api/v1/users/${human_user}/tokens/${existing_human_token_id}" >/dev/null 2>&1 || true
|
-H "Content-Type: application/json" \
|
||||||
fi
|
"${forge_url}/api/v1/users/${admin_user}/tokens" \
|
||||||
|
-d '{"name":"disinto-admin-token","scopes":["all"]}' 2>/dev/null \
|
||||||
# Create human token (fresh, so sha1 is returned)
|
| jq -r '.sha1 // empty') || admin_token=""
|
||||||
human_token=$(curl -sf -X POST \
|
|
||||||
-u "${human_user}:${human_pass}" \
|
if [ -z "$admin_token" ]; then
|
||||||
-H "Content-Type: application/json" \
|
echo "Error: failed to obtain admin API token" >&2
|
||||||
"${forge_url}/api/v1/users/${human_user}/tokens" \
|
exit 1
|
||||||
-d '{"name":"disinto-human-token","scopes":["all"]}' 2>/dev/null \
|
fi
|
||||||
| jq -r '.sha1 // empty') || human_token=""
|
|
||||||
|
# Store admin token for idempotent re-runs
|
||||||
if [ -n "$human_token" ]; then
|
if grep -q '^FORGE_ADMIN_TOKEN=' "$env_file" 2>/dev/null; then
|
||||||
# Store human token in .env
|
sed -i "s|^FORGE_ADMIN_TOKEN=.*|FORGE_ADMIN_TOKEN=${admin_token}|" "$env_file"
|
||||||
if grep -q '^HUMAN_TOKEN=' "$env_file" 2>/dev/null; then
|
else
|
||||||
sed -i "s|^HUMAN_TOKEN=.*|HUMAN_TOKEN=${human_token}|" "$env_file"
|
printf 'FORGE_ADMIN_TOKEN=%s\n' "$admin_token" >> "$env_file"
|
||||||
else
|
fi
|
||||||
printf 'HUMAN_TOKEN=%s\n' "$human_token" >> "$env_file"
|
echo "Admin token: generated and saved (FORGE_ADMIN_TOKEN)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Get or create human user token (human_user == admin_user; use admin_pass)
|
||||||
|
local human_token=""
|
||||||
|
if _token_exists_in_env "HUMAN_TOKEN" "$env_file" && [ "$rotate_tokens" = false ]; then
|
||||||
|
human_token=$(grep '^HUMAN_TOKEN=' "$env_file" | head -1 | cut -d= -f2-)
|
||||||
|
if [ -n "$human_token" ]; then
|
||||||
|
export HUMAN_TOKEN="$human_token"
|
||||||
|
echo " Human token preserved (use --rotate-tokens to force)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$human_token" ]; then
|
||||||
|
# Delete existing human token if present (token sha1 is only returned at creation time)
|
||||||
|
local existing_human_token_id
|
||||||
|
existing_human_token_id=$(curl -sf \
|
||||||
|
-u "${admin_user}:${admin_pass}" \
|
||||||
|
"${forge_url}/api/v1/users/${human_user}/tokens" 2>/dev/null \
|
||||||
|
| jq -r '.[] | select(.name == "disinto-human-token") | .id') || existing_human_token_id=""
|
||||||
|
if [ -n "$existing_human_token_id" ]; then
|
||||||
|
curl -sf -X DELETE \
|
||||||
|
-u "${admin_user}:${admin_pass}" \
|
||||||
|
"${forge_url}/api/v1/users/${human_user}/tokens/${existing_human_token_id}" >/dev/null 2>&1 || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create human token (use admin_pass since human_user == admin_user)
|
||||||
|
human_token=$(curl -sf -X POST \
|
||||||
|
-u "${admin_user}:${admin_pass}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${forge_url}/api/v1/users/${human_user}/tokens" \
|
||||||
|
-d '{"name":"disinto-human-token","scopes":["all"]}' 2>/dev/null \
|
||||||
|
| jq -r '.sha1 // empty') || human_token=""
|
||||||
|
|
||||||
|
if [ -n "$human_token" ]; then
|
||||||
|
# Store human token in .env
|
||||||
|
if grep -q '^HUMAN_TOKEN=' "$env_file" 2>/dev/null; then
|
||||||
|
sed -i "s|^HUMAN_TOKEN=.*|HUMAN_TOKEN=${human_token}|" "$env_file"
|
||||||
|
else
|
||||||
|
printf 'HUMAN_TOKEN=%s\n' "$human_token" >> "$env_file"
|
||||||
|
fi
|
||||||
|
export HUMAN_TOKEN="$human_token"
|
||||||
|
echo " Human token generated and saved (HUMAN_TOKEN)"
|
||||||
fi
|
fi
|
||||||
export HUMAN_TOKEN="$human_token"
|
|
||||||
echo " Human token saved (HUMAN_TOKEN)"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create bot users and tokens
|
# Create bot users and tokens
|
||||||
|
|
@ -719,7 +747,7 @@ setup_forge() {
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Add all bot users as collaborators with appropriate permissions
|
# Add all bot users as collaborators with appropriate permissions
|
||||||
# dev-bot: write (PR creation via lib/vault.sh)
|
# dev-bot: write (PR creation via lib/action-vault.sh)
|
||||||
# review-bot: read (PR review)
|
# review-bot: read (PR review)
|
||||||
# planner-bot: write (prerequisites.md, memory)
|
# planner-bot: write (prerequisites.md, memory)
|
||||||
# gardener-bot: write (backlog grooming)
|
# gardener-bot: write (backlog grooming)
|
||||||
|
|
|
||||||
|
|
@ -819,8 +819,7 @@ build_prompt_footer() {
|
||||||
Base URL: ${FORGE_API}
|
Base URL: ${FORGE_API}
|
||||||
Auth header: -H \"Authorization: token \${FORGE_TOKEN}\"
|
Auth header: -H \"Authorization: token \${FORGE_TOKEN}\"
|
||||||
Read issue: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/issues/{number}' | jq '.body'
|
Read issue: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/issues/{number}' | jq '.body'
|
||||||
Create issue: curl -sf -X POST -H \"Authorization: token \${FORGE_TOKEN}\" -H 'Content-Type: application/json' '${FORGE_API}/issues' -d '{\"title\":\"...\",\"body\":\"...\",\"labels\":[LABEL_ID]}'${extra_api}
|
List labels: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/labels'${extra_api}
|
||||||
List labels: curl -sf -H \"Authorization: token \${FORGE_TOKEN}\" '${FORGE_API}/labels'
|
|
||||||
NEVER echo or include the actual token value in output — always reference \${FORGE_TOKEN}.
|
NEVER echo or include the actual token value in output — always reference \${FORGE_TOKEN}.
|
||||||
|
|
||||||
## Environment
|
## Environment
|
||||||
|
|
|
||||||
|
|
@ -100,9 +100,7 @@ _generate_local_model_services() {
|
||||||
cat >> "$temp_file" <<EOF
|
cat >> "$temp_file" <<EOF
|
||||||
|
|
||||||
agents-${service_name}:
|
agents-${service_name}:
|
||||||
build:
|
image: ghcr.io/disinto/agents:\${DISINTO_IMAGE_TAG:-latest}
|
||||||
context: .
|
|
||||||
dockerfile: docker/agents/Dockerfile
|
|
||||||
container_name: disinto-agents-${service_name}
|
container_name: disinto-agents-${service_name}
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
security_opt:
|
security_opt:
|
||||||
|
|
@ -111,9 +109,9 @@ _generate_local_model_services() {
|
||||||
- agents-${service_name}-data:/home/agent/data
|
- agents-${service_name}-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- \${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:\${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- \${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:\${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- \${HOME}/.claude.json:/home/agent/.claude.json:ro
|
- \${CLAUDE_CONFIG_FILE:-\${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
- \${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
- \${HOME}/.ssh:/home/agent/.ssh:ro
|
- \${AGENT_SSH_DIR:-\${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
environment:
|
environment:
|
||||||
FORGE_URL: http://forgejo:3000
|
FORGE_URL: http://forgejo:3000
|
||||||
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
||||||
|
|
@ -142,6 +140,7 @@ _generate_local_model_services() {
|
||||||
GARDENER_INTERVAL: "${GARDENER_INTERVAL:-21600}"
|
GARDENER_INTERVAL: "${GARDENER_INTERVAL:-21600}"
|
||||||
ARCHITECT_INTERVAL: "${ARCHITECT_INTERVAL:-21600}"
|
ARCHITECT_INTERVAL: "${ARCHITECT_INTERVAL:-21600}"
|
||||||
PLANNER_INTERVAL: "${PLANNER_INTERVAL:-43200}"
|
PLANNER_INTERVAL: "${PLANNER_INTERVAL:-43200}"
|
||||||
|
SUPERVISOR_INTERVAL: "${SUPERVISOR_INTERVAL:-1200}"
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
|
@ -233,6 +232,7 @@ for name, config in agents.items():
|
||||||
# to materialize a working stack on a fresh checkout.
|
# to materialize a working stack on a fresh checkout.
|
||||||
_generate_compose_impl() {
|
_generate_compose_impl() {
|
||||||
local forge_port="${1:-3000}"
|
local forge_port="${1:-3000}"
|
||||||
|
local use_build="${2:-false}"
|
||||||
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
local compose_file="${FACTORY_ROOT}/docker-compose.yml"
|
||||||
|
|
||||||
# Check if compose file already exists
|
# Check if compose file already exists
|
||||||
|
|
@ -296,6 +296,7 @@ services:
|
||||||
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
||||||
WOODPECKER_DATABASE_DRIVER: sqlite3
|
WOODPECKER_DATABASE_DRIVER: sqlite3
|
||||||
WOODPECKER_DATABASE_DATASOURCE: /var/lib/woodpecker/woodpecker.sqlite
|
WOODPECKER_DATABASE_DATASOURCE: /var/lib/woodpecker/woodpecker.sqlite
|
||||||
|
WOODPECKER_PLUGINS_PRIVILEGED: ${WOODPECKER_PLUGINS_PRIVILEGED:-plugins/docker}
|
||||||
WOODPECKER_ENVIRONMENT: "FORGE_TOKEN:${FORGE_TOKEN}"
|
WOODPECKER_ENVIRONMENT: "FORGE_TOKEN:${FORGE_TOKEN}"
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
|
|
@ -318,15 +319,19 @@ services:
|
||||||
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
|
||||||
WOODPECKER_GRPC_SECURE: "false"
|
WOODPECKER_GRPC_SECURE: "false"
|
||||||
WOODPECKER_HEALTHCHECK_ADDR: ":3333"
|
WOODPECKER_HEALTHCHECK_ADDR: ":3333"
|
||||||
WOODPECKER_BACKEND_DOCKER_NETWORK: disinto_disinto-net
|
WOODPECKER_BACKEND_DOCKER_NETWORK: ${WOODPECKER_CI_NETWORK:-disinto_disinto-net}
|
||||||
WOODPECKER_MAX_WORKFLOWS: 1
|
WOODPECKER_MAX_WORKFLOWS: 1
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3333/healthz"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 15s
|
||||||
depends_on:
|
depends_on:
|
||||||
- woodpecker
|
- woodpecker
|
||||||
|
|
||||||
agents:
|
agents:
|
||||||
build:
|
image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}
|
||||||
context: .
|
|
||||||
dockerfile: docker/agents/Dockerfile
|
|
||||||
container_name: disinto-agents
|
container_name: disinto-agents
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
security_opt:
|
security_opt:
|
||||||
|
|
@ -335,11 +340,14 @@ services:
|
||||||
- agent-data:/home/agent/data
|
- agent-data:/home/agent/data
|
||||||
- project-repos:/home/agent/repos
|
- project-repos:/home/agent/repos
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
- ${HOME}/.ssh:/home/agent/.ssh:ro
|
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
- ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
|
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
||||||
- woodpecker-data:/woodpecker-data:ro
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
|
- ./projects:/home/agent/disinto/projects:ro
|
||||||
|
- ./.env:/home/agent/disinto/.env:ro
|
||||||
|
- ./state:/home/agent/disinto/state
|
||||||
environment:
|
environment:
|
||||||
FORGE_URL: http://forgejo:3000
|
FORGE_URL: http://forgejo:3000
|
||||||
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
||||||
|
|
@ -371,8 +379,14 @@ services:
|
||||||
PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200}
|
PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200}
|
||||||
# IMPORTANT: agents get explicit environment variables (forge tokens, CI tokens, config).
|
# IMPORTANT: agents get explicit environment variables (forge tokens, CI tokens, config).
|
||||||
# Vault-only secrets (GITHUB_TOKEN, CLAWHUB_TOKEN, deploy keys) live in
|
# Vault-only secrets (GITHUB_TOKEN, CLAWHUB_TOKEN, deploy keys) live in
|
||||||
# .env.vault.enc and are NEVER injected here — only the runner
|
# secrets/*.enc and are NEVER injected here — only the runner
|
||||||
# container receives them at fire time (AD-006, #745).
|
# container receives them at fire time (AD-006, #745, #777).
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
||||||
|
interval: 60s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
|
@ -381,10 +395,137 @@ services:
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
runner:
|
COMPOSEEOF
|
||||||
|
|
||||||
|
# ── Conditional agents-llama block (ENABLE_LLAMA_AGENT=1) ──────────────
|
||||||
|
# Local-Qwen dev agent — gated on ENABLE_LLAMA_AGENT so factories without
|
||||||
|
# a local llama endpoint don't try to start it. See docs/agents-llama.md.
|
||||||
|
if [ "${ENABLE_LLAMA_AGENT:-0}" = "1" ]; then
|
||||||
|
cat >> "$compose_file" <<'LLAMAEOF'
|
||||||
|
|
||||||
|
agents-llama:
|
||||||
build:
|
build:
|
||||||
context: .
|
context: .
|
||||||
dockerfile: docker/agents/Dockerfile
|
dockerfile: docker/agents/Dockerfile
|
||||||
|
container_name: disinto-agents-llama
|
||||||
|
restart: unless-stopped
|
||||||
|
security_opt:
|
||||||
|
- apparmor=unconfined
|
||||||
|
volumes:
|
||||||
|
- agent-data:/home/agent/data
|
||||||
|
- project-repos:/home/agent/repos
|
||||||
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
|
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
|
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
|
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
||||||
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
|
environment:
|
||||||
|
FORGE_URL: http://forgejo:3000
|
||||||
|
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
||||||
|
FORGE_TOKEN: ${FORGE_TOKEN_LLAMA:-}
|
||||||
|
FORGE_PASS: ${FORGE_PASS_LLAMA:-}
|
||||||
|
FORGE_BOT_USERNAMES: ${FORGE_BOT_USERNAMES:-}
|
||||||
|
WOODPECKER_TOKEN: ${WOODPECKER_TOKEN:-}
|
||||||
|
CLAUDE_TIMEOUT: ${CLAUDE_TIMEOUT:-7200}
|
||||||
|
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: ${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1}
|
||||||
|
CLAUDE_AUTOCOMPACT_PCT_OVERRIDE: "60"
|
||||||
|
ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-}
|
||||||
|
ANTHROPIC_BASE_URL: ${ANTHROPIC_BASE_URL:-}
|
||||||
|
FORGE_ADMIN_PASS: ${FORGE_ADMIN_PASS:-}
|
||||||
|
DISINTO_CONTAINER: "1"
|
||||||
|
PROJECT_NAME: ${PROJECT_NAME:-project}
|
||||||
|
PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
|
||||||
|
WOODPECKER_DATA_DIR: /woodpecker-data
|
||||||
|
WOODPECKER_REPO_ID: "PLACEHOLDER_WP_REPO_ID"
|
||||||
|
CLAUDE_CONFIG_DIR: ${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
||||||
|
POLL_INTERVAL: ${POLL_INTERVAL:-300}
|
||||||
|
AGENT_ROLES: dev
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
||||||
|
interval: 60s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
|
depends_on:
|
||||||
|
forgejo:
|
||||||
|
condition: service_healthy
|
||||||
|
networks:
|
||||||
|
- disinto-net
|
||||||
|
|
||||||
|
agents-llama-all:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: docker/agents/Dockerfile
|
||||||
|
container_name: disinto-agents-llama-all
|
||||||
|
restart: unless-stopped
|
||||||
|
profiles: ["agents-llama-all"]
|
||||||
|
security_opt:
|
||||||
|
- apparmor=unconfined
|
||||||
|
volumes:
|
||||||
|
- agent-data:/home/agent/data
|
||||||
|
- project-repos:/home/agent/repos
|
||||||
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
|
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
|
- ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro
|
||||||
|
- ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro
|
||||||
|
- woodpecker-data:/woodpecker-data:ro
|
||||||
|
environment:
|
||||||
|
FORGE_URL: http://forgejo:3000
|
||||||
|
FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto}
|
||||||
|
FORGE_TOKEN: ${FORGE_TOKEN_LLAMA:-}
|
||||||
|
FORGE_PASS: ${FORGE_PASS_LLAMA:-}
|
||||||
|
FORGE_REVIEW_TOKEN: ${FORGE_REVIEW_TOKEN:-}
|
||||||
|
FORGE_PLANNER_TOKEN: ${FORGE_PLANNER_TOKEN:-}
|
||||||
|
FORGE_GARDENER_TOKEN: ${FORGE_GARDENER_TOKEN:-}
|
||||||
|
FORGE_VAULT_TOKEN: ${FORGE_VAULT_TOKEN:-}
|
||||||
|
FORGE_SUPERVISOR_TOKEN: ${FORGE_SUPERVISOR_TOKEN:-}
|
||||||
|
FORGE_PREDICTOR_TOKEN: ${FORGE_PREDICTOR_TOKEN:-}
|
||||||
|
FORGE_ARCHITECT_TOKEN: ${FORGE_ARCHITECT_TOKEN:-}
|
||||||
|
FORGE_FILER_TOKEN: ${FORGE_FILER_TOKEN:-}
|
||||||
|
FORGE_BOT_USERNAMES: ${FORGE_BOT_USERNAMES:-}
|
||||||
|
WOODPECKER_TOKEN: ${WOODPECKER_TOKEN:-}
|
||||||
|
CLAUDE_TIMEOUT: ${CLAUDE_TIMEOUT:-7200}
|
||||||
|
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: ${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1}
|
||||||
|
CLAUDE_AUTOCOMPACT_PCT_OVERRIDE: "60"
|
||||||
|
CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS: "1"
|
||||||
|
ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-}
|
||||||
|
ANTHROPIC_BASE_URL: ${ANTHROPIC_BASE_URL:-}
|
||||||
|
FORGE_ADMIN_PASS: ${FORGE_ADMIN_PASS:-}
|
||||||
|
DISINTO_CONTAINER: "1"
|
||||||
|
PROJECT_NAME: ${PROJECT_NAME:-project}
|
||||||
|
PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
|
||||||
|
WOODPECKER_DATA_DIR: /woodpecker-data
|
||||||
|
WOODPECKER_REPO_ID: "PLACEHOLDER_WP_REPO_ID"
|
||||||
|
CLAUDE_CONFIG_DIR: ${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config}
|
||||||
|
POLL_INTERVAL: ${POLL_INTERVAL:-300}
|
||||||
|
GARDENER_INTERVAL: ${GARDENER_INTERVAL:-21600}
|
||||||
|
ARCHITECT_INTERVAL: ${ARCHITECT_INTERVAL:-21600}
|
||||||
|
PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200}
|
||||||
|
SUPERVISOR_INTERVAL: ${SUPERVISOR_INTERVAL:-1200}
|
||||||
|
AGENT_ROLES: review,dev,gardener,architect,planner,predictor,supervisor
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "pgrep", "-f", "entrypoint.sh"]
|
||||||
|
interval: 60s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 30s
|
||||||
|
depends_on:
|
||||||
|
forgejo:
|
||||||
|
condition: service_healthy
|
||||||
|
woodpecker:
|
||||||
|
condition: service_started
|
||||||
|
networks:
|
||||||
|
- disinto-net
|
||||||
|
LLAMAEOF
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Resume the rest of the compose file (runner onward)
|
||||||
|
cat >> "$compose_file" <<'COMPOSEEOF'
|
||||||
|
|
||||||
|
runner:
|
||||||
|
image: ghcr.io/disinto/agents:${DISINTO_IMAGE_TAG:-latest}
|
||||||
profiles: ["vault"]
|
profiles: ["vault"]
|
||||||
security_opt:
|
security_opt:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
|
|
@ -405,8 +546,9 @@ services:
|
||||||
# Edge proxy — reverse proxy to Forgejo, Woodpecker, and staging
|
# Edge proxy — reverse proxy to Forgejo, Woodpecker, and staging
|
||||||
# Serves on ports 80/443, routes based on path
|
# Serves on ports 80/443, routes based on path
|
||||||
edge:
|
edge:
|
||||||
build: ./docker/edge
|
image: ghcr.io/disinto/edge:${DISINTO_IMAGE_TAG:-latest}
|
||||||
container_name: disinto-edge
|
container_name: disinto-edge
|
||||||
|
restart: unless-stopped
|
||||||
security_opt:
|
security_opt:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
ports:
|
ports:
|
||||||
|
|
@ -430,13 +572,24 @@ services:
|
||||||
- EDGE_TUNNEL_USER=${EDGE_TUNNEL_USER:-tunnel}
|
- EDGE_TUNNEL_USER=${EDGE_TUNNEL_USER:-tunnel}
|
||||||
- EDGE_TUNNEL_PORT=${EDGE_TUNNEL_PORT:-}
|
- EDGE_TUNNEL_PORT=${EDGE_TUNNEL_PORT:-}
|
||||||
- EDGE_TUNNEL_FQDN=${EDGE_TUNNEL_FQDN:-}
|
- EDGE_TUNNEL_FQDN=${EDGE_TUNNEL_FQDN:-}
|
||||||
|
# Subdomain fallback (#713): if subpath routing (#704/#708) fails, add:
|
||||||
|
# EDGE_TUNNEL_FQDN_FORGE, EDGE_TUNNEL_FQDN_CI, EDGE_TUNNEL_FQDN_CHAT
|
||||||
|
# See docs/edge-routing-fallback.md for the full pivot plan.
|
||||||
|
# Shared secret for Caddy ↔ chat forward_auth (#709)
|
||||||
|
- FORWARD_AUTH_SECRET=${FORWARD_AUTH_SECRET:-}
|
||||||
volumes:
|
volumes:
|
||||||
- ./docker/Caddyfile:/etc/caddy/Caddyfile
|
- ./docker/Caddyfile:/etc/caddy/Caddyfile
|
||||||
- caddy_data:/data
|
- caddy_data:/data
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
- ./secrets/tunnel_key:/run/secrets/tunnel_key:ro
|
- ./secrets/tunnel_key:/run/secrets/tunnel_key:ro
|
||||||
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
- ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}
|
||||||
- ${HOME}/.claude.json:/home/agent/.claude.json:ro
|
- ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-fsS", "http://localhost:2019/config/"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 15s
|
||||||
depends_on:
|
depends_on:
|
||||||
forgejo:
|
forgejo:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
|
|
@ -454,6 +607,12 @@ services:
|
||||||
command: ["caddy", "file-server", "--root", "/srv/site"]
|
command: ["caddy", "file-server", "--root", "/srv/site"]
|
||||||
security_opt:
|
security_opt:
|
||||||
- apparmor=unconfined
|
- apparmor=unconfined
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "wget", "-q", "--spider", "http://localhost:2019/config/"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 10s
|
||||||
volumes:
|
volumes:
|
||||||
- ./docker:/srv/site:ro
|
- ./docker:/srv/site:ro
|
||||||
networks:
|
networks:
|
||||||
|
|
@ -494,9 +653,11 @@ services:
|
||||||
memswap_limit: 512m
|
memswap_limit: 512m
|
||||||
volumes:
|
volumes:
|
||||||
# Mount claude binary from host (same as agents)
|
# Mount claude binary from host (same as agents)
|
||||||
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
|
- ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro
|
||||||
# Throwaway named volume for chat config (isolated from host ~/.claude)
|
# Throwaway named volume for chat config (isolated from host ~/.claude)
|
||||||
- chat-config:/var/chat/config
|
- chat-config:/var/chat/config
|
||||||
|
# Chat history persistence: per-user NDJSON files on bind-mounted host volume
|
||||||
|
- ${CHAT_HISTORY_DIR:-./state/chat-history}:/var/lib/chat/history
|
||||||
environment:
|
environment:
|
||||||
CHAT_HOST: "0.0.0.0"
|
CHAT_HOST: "0.0.0.0"
|
||||||
CHAT_PORT: "8080"
|
CHAT_PORT: "8080"
|
||||||
|
|
@ -505,6 +666,18 @@ services:
|
||||||
CHAT_OAUTH_CLIENT_SECRET: ${CHAT_OAUTH_CLIENT_SECRET:-}
|
CHAT_OAUTH_CLIENT_SECRET: ${CHAT_OAUTH_CLIENT_SECRET:-}
|
||||||
EDGE_TUNNEL_FQDN: ${EDGE_TUNNEL_FQDN:-}
|
EDGE_TUNNEL_FQDN: ${EDGE_TUNNEL_FQDN:-}
|
||||||
DISINTO_CHAT_ALLOWED_USERS: ${DISINTO_CHAT_ALLOWED_USERS:-}
|
DISINTO_CHAT_ALLOWED_USERS: ${DISINTO_CHAT_ALLOWED_USERS:-}
|
||||||
|
# Shared secret for Caddy forward_auth verify endpoint (#709)
|
||||||
|
FORWARD_AUTH_SECRET: ${FORWARD_AUTH_SECRET:-}
|
||||||
|
# Cost caps / rate limiting (#711)
|
||||||
|
CHAT_MAX_REQUESTS_PER_HOUR: ${CHAT_MAX_REQUESTS_PER_HOUR:-60}
|
||||||
|
CHAT_MAX_REQUESTS_PER_DAY: ${CHAT_MAX_REQUESTS_PER_DAY:-500}
|
||||||
|
CHAT_MAX_TOKENS_PER_DAY: ${CHAT_MAX_TOKENS_PER_DAY:-1000000}
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 10s
|
||||||
networks:
|
networks:
|
||||||
- disinto-net
|
- disinto-net
|
||||||
|
|
||||||
|
|
@ -543,20 +716,35 @@ COMPOSEEOF
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Append local-model agent services if any are configured
|
# Append local-model agent services if any are configured
|
||||||
# (must run before CLAUDE_BIN_PLACEHOLDER substitution so the placeholder
|
|
||||||
# in local-model services is also resolved)
|
|
||||||
_generate_local_model_services "$compose_file"
|
_generate_local_model_services "$compose_file"
|
||||||
|
|
||||||
# Patch the Claude CLI binary path — resolve from host PATH at init time.
|
# Resolve the Claude CLI binary path and persist as CLAUDE_BIN_DIR in .env.
|
||||||
|
# docker-compose.yml references ${CLAUDE_BIN_DIR} so the value must be set.
|
||||||
local claude_bin
|
local claude_bin
|
||||||
claude_bin="$(command -v claude 2>/dev/null || true)"
|
claude_bin="$(command -v claude 2>/dev/null || true)"
|
||||||
if [ -n "$claude_bin" ]; then
|
if [ -n "$claude_bin" ]; then
|
||||||
# Resolve symlinks to get the real binary path
|
|
||||||
claude_bin="$(readlink -f "$claude_bin")"
|
claude_bin="$(readlink -f "$claude_bin")"
|
||||||
sed -i "s|CLAUDE_BIN_PLACEHOLDER|${claude_bin}|g" "$compose_file"
|
|
||||||
else
|
else
|
||||||
echo "Warning: claude CLI not found in PATH — update docker-compose.yml volumes manually" >&2
|
echo "Warning: claude CLI not found in PATH — set CLAUDE_BIN_DIR in .env manually" >&2
|
||||||
sed -i "s|CLAUDE_BIN_PLACEHOLDER|/usr/local/bin/claude|g" "$compose_file"
|
claude_bin="/usr/local/bin/claude"
|
||||||
|
fi
|
||||||
|
# Persist CLAUDE_BIN_DIR into .env so docker-compose can resolve it.
|
||||||
|
local env_file="${FACTORY_ROOT}/.env"
|
||||||
|
if [ -f "$env_file" ]; then
|
||||||
|
if grep -q "^CLAUDE_BIN_DIR=" "$env_file" 2>/dev/null; then
|
||||||
|
sed -i "s|^CLAUDE_BIN_DIR=.*|CLAUDE_BIN_DIR=${claude_bin}|" "$env_file"
|
||||||
|
else
|
||||||
|
printf 'CLAUDE_BIN_DIR=%s\n' "$claude_bin" >> "$env_file"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
printf 'CLAUDE_BIN_DIR=%s\n' "$claude_bin" > "$env_file"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# In build mode, replace image: with build: for locally-built images
|
||||||
|
if [ "$use_build" = true ]; then
|
||||||
|
sed -i 's|^\( agents:\)|\1|' "$compose_file"
|
||||||
|
sed -i '/^ image: ghcr\.io\/disinto\/agents:/{s|image: ghcr\.io/disinto/agents:.*|build:\n context: .\n dockerfile: docker/agents/Dockerfile|}' "$compose_file"
|
||||||
|
sed -i '/^ image: ghcr\.io\/disinto\/edge:/{s|image: ghcr\.io/disinto/edge:.*|build: ./docker/edge|}' "$compose_file"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Created: ${compose_file}"
|
echo "Created: ${compose_file}"
|
||||||
|
|
@ -575,7 +763,11 @@ _generate_agent_docker_impl() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# Generate docker/Caddyfile template for edge proxy.
|
# Generate docker/Caddyfile for the edge proxy.
|
||||||
|
# **CANONICAL SOURCE**: This generator is the single source of truth for the Caddyfile.
|
||||||
|
# Output path: ${FACTORY_ROOT}/docker/Caddyfile (gitignored — generated artifact).
|
||||||
|
# The edge compose service mounts this path as /etc/caddy/Caddyfile.
|
||||||
|
# On a fresh clone, `disinto init` calls generate_caddyfile before first `disinto up`.
|
||||||
_generate_caddyfile_impl() {
|
_generate_caddyfile_impl() {
|
||||||
local docker_dir="${FACTORY_ROOT}/docker"
|
local docker_dir="${FACTORY_ROOT}/docker"
|
||||||
local caddyfile="${docker_dir}/Caddyfile"
|
local caddyfile="${docker_dir}/Caddyfile"
|
||||||
|
|
@ -611,7 +803,20 @@ _generate_caddyfile_impl() {
|
||||||
}
|
}
|
||||||
|
|
||||||
# Chat service — reverse proxy to disinto-chat backend (#705)
|
# Chat service — reverse proxy to disinto-chat backend (#705)
|
||||||
|
# OAuth routes bypass forward_auth — unauthenticated users need these (#709)
|
||||||
|
handle /chat/login {
|
||||||
|
reverse_proxy chat:8080
|
||||||
|
}
|
||||||
|
handle /chat/oauth/callback {
|
||||||
|
reverse_proxy chat:8080
|
||||||
|
}
|
||||||
|
# Defense-in-depth: forward_auth stamps X-Forwarded-User from session (#709)
|
||||||
handle /chat/* {
|
handle /chat/* {
|
||||||
|
forward_auth chat:8080 {
|
||||||
|
uri /chat/auth/verify
|
||||||
|
copy_headers X-Forwarded-User
|
||||||
|
header_up X-Forward-Auth-Secret {$FORWARD_AUTH_SECRET}
|
||||||
|
}
|
||||||
reverse_proxy chat:8080
|
reverse_proxy chat:8080
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -35,13 +35,35 @@ configure_git_creds() {
|
||||||
forge_host=$(printf '%s' "$FORGE_URL" | sed 's|https\?://||; s|/.*||')
|
forge_host=$(printf '%s' "$FORGE_URL" | sed 's|https\?://||; s|/.*||')
|
||||||
forge_proto=$(printf '%s' "$FORGE_URL" | sed 's|://.*||')
|
forge_proto=$(printf '%s' "$FORGE_URL" | sed 's|://.*||')
|
||||||
|
|
||||||
# Determine the bot username from FORGE_TOKEN identity (or default to dev-bot)
|
local log_fn="${_GIT_CREDS_LOG_FN:-echo}"
|
||||||
|
|
||||||
|
# Determine the bot username from FORGE_TOKEN identity with retry/backoff.
|
||||||
|
# Never fall back to a hardcoded default — a wrong username paired with the
|
||||||
|
# real password produces a cryptic 401 that's much harder to diagnose than
|
||||||
|
# a missing credential helper (#741).
|
||||||
local bot_user=""
|
local bot_user=""
|
||||||
if [ -n "${FORGE_TOKEN:-}" ]; then
|
if [ -n "${FORGE_TOKEN:-}" ]; then
|
||||||
bot_user=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
|
local attempt
|
||||||
"${FORGE_URL}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || bot_user=""
|
for attempt in 1 2 3 4 5; do
|
||||||
|
bot_user=$(curl -sf --max-time 5 -H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
"${FORGE_URL}/api/v1/user" 2>/dev/null | jq -r '.login // empty') || bot_user=""
|
||||||
|
if [ -n "$bot_user" ]; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
$log_fn "WARNING: Forgejo not reachable (attempt ${attempt}/5) — retrying in ${attempt}s"
|
||||||
|
sleep "$attempt"
|
||||||
|
done
|
||||||
fi
|
fi
|
||||||
bot_user="${bot_user:-dev-bot}"
|
|
||||||
|
if [ -z "$bot_user" ]; then
|
||||||
|
$log_fn "ERROR: Could not determine bot username from FORGE_TOKEN after 5 attempts — credential helper NOT configured"
|
||||||
|
$log_fn "ERROR: git push will fail until this is resolved. Restart the container after Forgejo is healthy."
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Export BOT_USER so downstream functions (e.g. configure_git_identity) can
|
||||||
|
# reuse the resolved value without a redundant API call.
|
||||||
|
export BOT_USER="$bot_user"
|
||||||
|
|
||||||
local helper_path="${home_dir}/.git-credentials-helper"
|
local helper_path="${home_dir}/.git-credentials-helper"
|
||||||
|
|
||||||
|
|
@ -77,6 +99,17 @@ CREDEOF
|
||||||
else
|
else
|
||||||
git config --global --add safe.directory '*'
|
git config --global --add safe.directory '*'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Verify the credential helper actually authenticates (#741).
|
||||||
|
# A helper that was written with a valid username but a mismatched password
|
||||||
|
# would silently 401 on every push — catch it now.
|
||||||
|
if ! curl -sf --max-time 5 -u "${bot_user}:${FORGE_PASS}" \
|
||||||
|
"${FORGE_URL}/api/v1/user" >/dev/null 2>&1; then
|
||||||
|
$log_fn "ERROR: credential helper verification failed — ${bot_user}:FORGE_PASS rejected by Forgejo"
|
||||||
|
rm -f "$helper_path"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
$log_fn "Git credential helper verified: ${bot_user}@${forge_host}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# repair_baked_cred_urls [--as RUN_AS_CMD] DIR [DIR ...]
|
# repair_baked_cred_urls [--as RUN_AS_CMD] DIR [DIR ...]
|
||||||
|
|
|
||||||
279
lib/hvault.sh
Normal file
279
lib/hvault.sh
Normal file
|
|
@ -0,0 +1,279 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# hvault.sh — HashiCorp Vault helper module
|
||||||
|
#
|
||||||
|
# Typed, audited helpers for Vault KV v2 access so no script re-implements
|
||||||
|
# `curl -H "X-Vault-Token: ..."` ad-hoc.
|
||||||
|
#
|
||||||
|
# Usage: source this file, then call any hvault_* function.
|
||||||
|
#
|
||||||
|
# Environment:
|
||||||
|
# VAULT_ADDR — Vault server address (required, no default)
|
||||||
|
# VAULT_TOKEN — auth token (precedence: env > /etc/vault.d/root.token)
|
||||||
|
#
|
||||||
|
# All functions emit structured JSON errors to stderr on failure.
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# ── Internal helpers ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
# _hvault_err — emit structured JSON error to stderr
|
||||||
|
# Args: func_name, message, [detail]
|
||||||
|
_hvault_err() {
|
||||||
|
local func="$1" msg="$2" detail="${3:-}"
|
||||||
|
jq -n --arg func "$func" --arg msg "$msg" --arg detail "$detail" \
|
||||||
|
'{error:true,function:$func,message:$msg,detail:$detail}' >&2
|
||||||
|
}
|
||||||
|
|
||||||
|
# _hvault_resolve_token — resolve VAULT_TOKEN from env or token file
|
||||||
|
_hvault_resolve_token() {
|
||||||
|
if [ -n "${VAULT_TOKEN:-}" ]; then
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
local token_file="/etc/vault.d/root.token"
|
||||||
|
if [ -f "$token_file" ]; then
|
||||||
|
VAULT_TOKEN="$(cat "$token_file")"
|
||||||
|
export VAULT_TOKEN
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# _hvault_check_prereqs — validate VAULT_ADDR and VAULT_TOKEN are set
|
||||||
|
# Args: caller function name
|
||||||
|
_hvault_check_prereqs() {
|
||||||
|
local caller="$1"
|
||||||
|
if [ -z "${VAULT_ADDR:-}" ]; then
|
||||||
|
_hvault_err "$caller" "VAULT_ADDR is not set" "export VAULT_ADDR before calling $caller"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
if ! _hvault_resolve_token; then
|
||||||
|
_hvault_err "$caller" "VAULT_TOKEN is not set and /etc/vault.d/root.token not found" \
|
||||||
|
"export VAULT_TOKEN or write token to /etc/vault.d/root.token"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# _hvault_request — execute a Vault API request
|
||||||
|
# Args: method, path, [data]
|
||||||
|
# Outputs: response body to stdout
|
||||||
|
# Returns: 0 on 2xx, 1 otherwise (error JSON to stderr)
|
||||||
|
_hvault_request() {
|
||||||
|
local method="$1" path="$2" data="${3:-}"
|
||||||
|
local url="${VAULT_ADDR}/v1/${path}"
|
||||||
|
local http_code body
|
||||||
|
local tmpfile
|
||||||
|
tmpfile="$(mktemp)"
|
||||||
|
|
||||||
|
local curl_args=(
|
||||||
|
-s
|
||||||
|
-w '%{http_code}'
|
||||||
|
-H "X-Vault-Token: ${VAULT_TOKEN}"
|
||||||
|
-H "Content-Type: application/json"
|
||||||
|
-X "$method"
|
||||||
|
-o "$tmpfile"
|
||||||
|
)
|
||||||
|
if [ -n "$data" ]; then
|
||||||
|
curl_args+=(-d "$data")
|
||||||
|
fi
|
||||||
|
|
||||||
|
http_code="$(curl "${curl_args[@]}" "$url")" || {
|
||||||
|
_hvault_err "_hvault_request" "curl failed" "url=$url"
|
||||||
|
rm -f "$tmpfile"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
body="$(cat "$tmpfile")"
|
||||||
|
rm -f "$tmpfile"
|
||||||
|
|
||||||
|
# Check HTTP status — 2xx is success
|
||||||
|
case "$http_code" in
|
||||||
|
2[0-9][0-9])
|
||||||
|
printf '%s' "$body"
|
||||||
|
return 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_hvault_err "_hvault_request" "HTTP $http_code" "$body"
|
||||||
|
return 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Public API ───────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
# hvault_kv_get PATH [KEY]
|
||||||
|
# Read a KV v2 secret at PATH, optionally extract a single KEY.
|
||||||
|
# Outputs: JSON value (full data object, or single key value)
|
||||||
|
hvault_kv_get() {
|
||||||
|
local path="${1:-}"
|
||||||
|
local key="${2:-}"
|
||||||
|
|
||||||
|
if [ -z "$path" ]; then
|
||||||
|
_hvault_err "hvault_kv_get" "PATH is required" "usage: hvault_kv_get PATH [KEY]"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
_hvault_check_prereqs "hvault_kv_get" || return 1
|
||||||
|
|
||||||
|
local response
|
||||||
|
response="$(_hvault_request GET "secret/data/${path}")" || return 1
|
||||||
|
|
||||||
|
if [ -n "$key" ]; then
|
||||||
|
printf '%s' "$response" | jq -e -r --arg key "$key" '.data.data[$key]' 2>/dev/null || {
|
||||||
|
_hvault_err "hvault_kv_get" "key not found" "key=$key path=$path"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
else
|
||||||
|
printf '%s' "$response" | jq -e '.data.data' 2>/dev/null || {
|
||||||
|
_hvault_err "hvault_kv_get" "failed to parse response" "path=$path"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# hvault_kv_put PATH KEY=VAL [KEY=VAL ...]
|
||||||
|
# Write a KV v2 secret at PATH. Accepts one or more KEY=VAL pairs.
|
||||||
|
hvault_kv_put() {
|
||||||
|
local path="${1:-}"
|
||||||
|
shift || true
|
||||||
|
|
||||||
|
if [ -z "$path" ] || [ $# -eq 0 ]; then
|
||||||
|
_hvault_err "hvault_kv_put" "PATH and at least one KEY=VAL required" \
|
||||||
|
"usage: hvault_kv_put PATH KEY=VAL [KEY=VAL ...]"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
_hvault_check_prereqs "hvault_kv_put" || return 1
|
||||||
|
|
||||||
|
# Build JSON payload from KEY=VAL pairs entirely via jq
|
||||||
|
local payload='{"data":{}}'
|
||||||
|
for kv in "$@"; do
|
||||||
|
local k="${kv%%=*}"
|
||||||
|
local v="${kv#*=}"
|
||||||
|
if [ "$k" = "$kv" ]; then
|
||||||
|
_hvault_err "hvault_kv_put" "invalid KEY=VAL pair" "got: $kv"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
payload="$(printf '%s' "$payload" | jq --arg k "$k" --arg v "$v" '.data[$k] = $v')"
|
||||||
|
done
|
||||||
|
|
||||||
|
_hvault_request POST "secret/data/${path}" "$payload" >/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
# hvault_kv_list PATH
|
||||||
|
# List keys at a KV v2 path.
|
||||||
|
# Outputs: JSON array of key names
|
||||||
|
hvault_kv_list() {
|
||||||
|
local path="${1:-}"
|
||||||
|
|
||||||
|
if [ -z "$path" ]; then
|
||||||
|
_hvault_err "hvault_kv_list" "PATH is required" "usage: hvault_kv_list PATH"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
_hvault_check_prereqs "hvault_kv_list" || return 1
|
||||||
|
|
||||||
|
local response
|
||||||
|
response="$(_hvault_request LIST "secret/metadata/${path}")" || return 1
|
||||||
|
|
||||||
|
printf '%s' "$response" | jq -e '.data.keys' 2>/dev/null || {
|
||||||
|
_hvault_err "hvault_kv_list" "failed to parse response" "path=$path"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# hvault_policy_apply NAME FILE
|
||||||
|
# Idempotent policy upsert — create or update a Vault policy.
|
||||||
|
hvault_policy_apply() {
|
||||||
|
local name="${1:-}"
|
||||||
|
local file="${2:-}"
|
||||||
|
|
||||||
|
if [ -z "$name" ] || [ -z "$file" ]; then
|
||||||
|
_hvault_err "hvault_policy_apply" "NAME and FILE are required" \
|
||||||
|
"usage: hvault_policy_apply NAME FILE"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
if [ ! -f "$file" ]; then
|
||||||
|
_hvault_err "hvault_policy_apply" "policy file not found" "file=$file"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
_hvault_check_prereqs "hvault_policy_apply" || return 1
|
||||||
|
|
||||||
|
local policy_content
|
||||||
|
policy_content="$(cat "$file")"
|
||||||
|
local payload
|
||||||
|
payload="$(jq -n --arg policy "$policy_content" '{"policy": $policy}')"
|
||||||
|
|
||||||
|
_hvault_request PUT "sys/policies/acl/${name}" "$payload" >/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
# hvault_jwt_login ROLE JWT
|
||||||
|
# Exchange a JWT for a short-lived Vault token.
|
||||||
|
# Outputs: client token string
|
||||||
|
hvault_jwt_login() {
|
||||||
|
local role="${1:-}"
|
||||||
|
local jwt="${2:-}"
|
||||||
|
|
||||||
|
if [ -z "$role" ] || [ -z "$jwt" ]; then
|
||||||
|
_hvault_err "hvault_jwt_login" "ROLE and JWT are required" \
|
||||||
|
"usage: hvault_jwt_login ROLE JWT"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
# Only need VAULT_ADDR, not VAULT_TOKEN (we're obtaining a token)
|
||||||
|
if [ -z "${VAULT_ADDR:-}" ]; then
|
||||||
|
_hvault_err "hvault_jwt_login" "VAULT_ADDR is not set"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
local payload
|
||||||
|
payload="$(jq -n --arg role "$role" --arg jwt "$jwt" \
|
||||||
|
'{"role": $role, "jwt": $jwt}')"
|
||||||
|
|
||||||
|
local response
|
||||||
|
# JWT login does not require an existing token — use curl directly
|
||||||
|
local tmpfile http_code
|
||||||
|
tmpfile="$(mktemp)"
|
||||||
|
http_code="$(curl -s -w '%{http_code}' \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-X POST \
|
||||||
|
-d "$payload" \
|
||||||
|
-o "$tmpfile" \
|
||||||
|
"${VAULT_ADDR}/v1/auth/jwt/login")" || {
|
||||||
|
_hvault_err "hvault_jwt_login" "curl failed"
|
||||||
|
rm -f "$tmpfile"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
local body
|
||||||
|
body="$(cat "$tmpfile")"
|
||||||
|
rm -f "$tmpfile"
|
||||||
|
|
||||||
|
case "$http_code" in
|
||||||
|
2[0-9][0-9])
|
||||||
|
printf '%s' "$body" | jq -e -r '.auth.client_token' 2>/dev/null || {
|
||||||
|
_hvault_err "hvault_jwt_login" "failed to extract client_token" "$body"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_hvault_err "hvault_jwt_login" "HTTP $http_code" "$body"
|
||||||
|
return 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
# hvault_token_lookup
|
||||||
|
# Returns TTL, policies, and accessor for the current token.
|
||||||
|
# Outputs: JSON object with ttl, policies, accessor fields
|
||||||
|
hvault_token_lookup() {
|
||||||
|
_hvault_check_prereqs "hvault_token_lookup" || return 1
|
||||||
|
|
||||||
|
local response
|
||||||
|
response="$(_hvault_request GET "auth/token/lookup-self")" || return 1
|
||||||
|
|
||||||
|
printf '%s' "$response" | jq -e '{
|
||||||
|
ttl: .data.ttl,
|
||||||
|
policies: .data.policies,
|
||||||
|
accessor: .data.accessor,
|
||||||
|
display_name: .data.display_name
|
||||||
|
}' 2>/dev/null || {
|
||||||
|
_hvault_err "hvault_token_lookup" "failed to parse token info"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
118
lib/init/nomad/install.sh
Executable file
118
lib/init/nomad/install.sh
Executable file
|
|
@ -0,0 +1,118 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# =============================================================================
|
||||||
|
# lib/init/nomad/install.sh — Idempotent apt install of HashiCorp Nomad
|
||||||
|
#
|
||||||
|
# Part of the Nomad+Vault migration (S0.2, issue #822). Installs the `nomad`
|
||||||
|
# binary from the HashiCorp apt repository. Does NOT install Vault — S0.3
|
||||||
|
# owns that. Does NOT configure, start, or enable a systemd unit —
|
||||||
|
# lib/init/nomad/systemd-nomad.sh owns that. Does NOT wire this script into
|
||||||
|
# `disinto init` — S0.4 owns that.
|
||||||
|
#
|
||||||
|
# Idempotency contract:
|
||||||
|
# - Running twice back-to-back is a no-op once the target version is
|
||||||
|
# installed and the apt source is in place.
|
||||||
|
# - Adds the HashiCorp apt keyring only if it is absent.
|
||||||
|
# - Adds the HashiCorp apt sources list only if it is absent.
|
||||||
|
# - Skips `apt-get install` entirely when the installed version already
|
||||||
|
# matches ${NOMAD_VERSION}.
|
||||||
|
#
|
||||||
|
# Configuration:
|
||||||
|
# NOMAD_VERSION — pinned Nomad version (default: see below). The apt
|
||||||
|
# package name is versioned as "nomad=<version>-1".
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# sudo NOMAD_VERSION=1.9.5 lib/init/nomad/install.sh
|
||||||
|
#
|
||||||
|
# Exit codes:
|
||||||
|
# 0 success (installed or already present)
|
||||||
|
# 1 precondition failure (not Debian/Ubuntu, missing tools, not root)
|
||||||
|
# =============================================================================
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Pin to a specific Nomad 1.x release. Bump here, not at call sites.
|
||||||
|
NOMAD_VERSION="${NOMAD_VERSION:-1.9.5}"
|
||||||
|
|
||||||
|
HASHICORP_KEYRING="/usr/share/keyrings/hashicorp-archive-keyring.gpg"
|
||||||
|
HASHICORP_SOURCES="/etc/apt/sources.list.d/hashicorp.list"
|
||||||
|
HASHICORP_GPG_URL="https://apt.releases.hashicorp.com/gpg"
|
||||||
|
HASHICORP_REPO_URL="https://apt.releases.hashicorp.com"
|
||||||
|
|
||||||
|
log() { printf '[install-nomad] %s\n' "$*"; }
|
||||||
|
die() { printf '[install-nomad] ERROR: %s\n' "$*" >&2; exit 1; }
|
||||||
|
|
||||||
|
# ── Preconditions ────────────────────────────────────────────────────────────
|
||||||
|
if [ "$(id -u)" -ne 0 ]; then
|
||||||
|
die "must run as root (needs apt-get + /usr/share/keyrings write access)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
for bin in apt-get gpg curl lsb_release; do
|
||||||
|
command -v "$bin" >/dev/null 2>&1 \
|
||||||
|
|| die "required binary not found: ${bin}"
|
||||||
|
done
|
||||||
|
|
||||||
|
CODENAME="$(lsb_release -cs)"
|
||||||
|
[ -n "$CODENAME" ] || die "lsb_release returned empty codename"
|
||||||
|
|
||||||
|
# ── Fast-path: already at desired version? ───────────────────────────────────
|
||||||
|
installed_version=""
|
||||||
|
if command -v nomad >/dev/null 2>&1; then
|
||||||
|
# `nomad version` prints e.g. "Nomad v1.9.5" on the first line.
|
||||||
|
installed_version="$(nomad version 2>/dev/null \
|
||||||
|
| awk 'NR==1 {sub(/^v/, "", $2); print $2; exit}')"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$installed_version" = "$NOMAD_VERSION" ]; then
|
||||||
|
log "nomad ${NOMAD_VERSION} already installed — nothing to do"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Ensure HashiCorp apt keyring ─────────────────────────────────────────────
|
||||||
|
if [ ! -f "$HASHICORP_KEYRING" ]; then
|
||||||
|
log "adding HashiCorp apt keyring → ${HASHICORP_KEYRING}"
|
||||||
|
tmpkey="$(mktemp)"
|
||||||
|
trap 'rm -f "$tmpkey"' EXIT
|
||||||
|
curl -fsSL "$HASHICORP_GPG_URL" -o "$tmpkey" \
|
||||||
|
|| die "failed to fetch HashiCorp GPG key from ${HASHICORP_GPG_URL}"
|
||||||
|
gpg --dearmor -o "$HASHICORP_KEYRING" < "$tmpkey" \
|
||||||
|
|| die "failed to dearmor HashiCorp GPG key"
|
||||||
|
chmod 0644 "$HASHICORP_KEYRING"
|
||||||
|
rm -f "$tmpkey"
|
||||||
|
trap - EXIT
|
||||||
|
else
|
||||||
|
log "HashiCorp apt keyring already present"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Ensure HashiCorp apt sources list ────────────────────────────────────────
|
||||||
|
desired_source="deb [signed-by=${HASHICORP_KEYRING}] ${HASHICORP_REPO_URL} ${CODENAME} main"
|
||||||
|
if [ ! -f "$HASHICORP_SOURCES" ] \
|
||||||
|
|| ! grep -qxF "$desired_source" "$HASHICORP_SOURCES"; then
|
||||||
|
log "writing HashiCorp apt sources list → ${HASHICORP_SOURCES}"
|
||||||
|
printf '%s\n' "$desired_source" > "$HASHICORP_SOURCES"
|
||||||
|
apt_update_needed=1
|
||||||
|
else
|
||||||
|
log "HashiCorp apt sources list already present"
|
||||||
|
apt_update_needed=0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Install the pinned version ───────────────────────────────────────────────
|
||||||
|
if [ "$apt_update_needed" -eq 1 ]; then
|
||||||
|
log "running apt-get update"
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get update -qq \
|
||||||
|
|| die "apt-get update failed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# HashiCorp apt packages use the "<version>-1" package-revision suffix.
|
||||||
|
pkg_spec="nomad=${NOMAD_VERSION}-1"
|
||||||
|
log "installing ${pkg_spec}"
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||||
|
"$pkg_spec" \
|
||||||
|
|| die "apt-get install ${pkg_spec} failed"
|
||||||
|
|
||||||
|
# ── Verify ───────────────────────────────────────────────────────────────────
|
||||||
|
final_version="$(nomad version 2>/dev/null \
|
||||||
|
| awk 'NR==1 {sub(/^v/, "", $2); print $2; exit}')"
|
||||||
|
if [ "$final_version" != "$NOMAD_VERSION" ]; then
|
||||||
|
die "post-install check: expected ${NOMAD_VERSION}, got '${final_version}'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "nomad ${NOMAD_VERSION} installed successfully"
|
||||||
130
lib/init/nomad/systemd-nomad.sh
Executable file
130
lib/init/nomad/systemd-nomad.sh
Executable file
|
|
@ -0,0 +1,130 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# =============================================================================
|
||||||
|
# lib/init/nomad/systemd-nomad.sh — Idempotent systemd unit installer for Nomad
|
||||||
|
#
|
||||||
|
# Part of the Nomad+Vault migration (S0.2, issue #822). Writes
|
||||||
|
# /etc/systemd/system/nomad.service pointing at /etc/nomad.d/ and runs
|
||||||
|
# `systemctl enable nomad` WITHOUT starting the service — we don't launch
|
||||||
|
# the cluster until S0.4 wires everything together.
|
||||||
|
#
|
||||||
|
# Idempotency contract:
|
||||||
|
# - Existing unit file is NOT rewritten when on-disk content already
|
||||||
|
# matches the desired content (avoids spurious `daemon-reload`).
|
||||||
|
# - `systemctl enable` on an already-enabled unit is a no-op.
|
||||||
|
# - This script is safe to run unconditionally before every factory boot.
|
||||||
|
#
|
||||||
|
# Preconditions:
|
||||||
|
# - nomad binary installed (see lib/init/nomad/install.sh)
|
||||||
|
# - /etc/nomad.d/ will hold server.hcl / client.hcl (placed by S0.4)
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# sudo lib/init/nomad/systemd-nomad.sh
|
||||||
|
#
|
||||||
|
# Exit codes:
|
||||||
|
# 0 success (unit installed + enabled, or already so)
|
||||||
|
# 1 precondition failure (not root, no systemctl, no nomad binary)
|
||||||
|
# =============================================================================
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
UNIT_PATH="/etc/systemd/system/nomad.service"
|
||||||
|
NOMAD_CONFIG_DIR="/etc/nomad.d"
|
||||||
|
NOMAD_DATA_DIR="/var/lib/nomad"
|
||||||
|
|
||||||
|
log() { printf '[systemd-nomad] %s\n' "$*"; }
|
||||||
|
die() { printf '[systemd-nomad] ERROR: %s\n' "$*" >&2; exit 1; }
|
||||||
|
|
||||||
|
# ── Preconditions ────────────────────────────────────────────────────────────
|
||||||
|
if [ "$(id -u)" -ne 0 ]; then
|
||||||
|
die "must run as root (needs write access to ${UNIT_PATH})"
|
||||||
|
fi
|
||||||
|
|
||||||
|
command -v systemctl >/dev/null 2>&1 \
|
||||||
|
|| die "systemctl not found (systemd is required)"
|
||||||
|
|
||||||
|
NOMAD_BIN="$(command -v nomad 2>/dev/null || true)"
|
||||||
|
[ -n "$NOMAD_BIN" ] \
|
||||||
|
|| die "nomad binary not found — run lib/init/nomad/install.sh first"
|
||||||
|
|
||||||
|
# ── Desired unit content ─────────────────────────────────────────────────────
|
||||||
|
# Upstream-recommended baseline (https://developer.hashicorp.com/nomad/docs/install/production/deployment-guide)
|
||||||
|
# trimmed for a single-node combined server+client dev box.
|
||||||
|
# - Wants=/After= network-online: nomad must have networking up.
|
||||||
|
# - User/Group=root: the Docker driver needs root to talk to dockerd.
|
||||||
|
# - LimitNOFILE/LimitNPROC=infinity: avoid Nomad's startup warning.
|
||||||
|
# - KillSignal=SIGINT: triggers Nomad's graceful shutdown path.
|
||||||
|
# - Restart=on-failure with a bounded burst to avoid crash-loops eating the
|
||||||
|
# journal when /etc/nomad.d/ is mis-configured.
|
||||||
|
read -r -d '' DESIRED_UNIT <<EOF || true
|
||||||
|
[Unit]
|
||||||
|
Description=Nomad
|
||||||
|
Documentation=https://developer.hashicorp.com/nomad/docs
|
||||||
|
Wants=network-online.target
|
||||||
|
After=network-online.target
|
||||||
|
|
||||||
|
# When Docker is present, ensure dockerd is up before nomad starts — the
|
||||||
|
# Docker task driver needs the daemon socket available at startup.
|
||||||
|
Wants=docker.service
|
||||||
|
After=docker.service
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=notify
|
||||||
|
User=root
|
||||||
|
Group=root
|
||||||
|
ExecReload=/bin/kill -HUP \$MAINPID
|
||||||
|
ExecStart=${NOMAD_BIN} agent -config=${NOMAD_CONFIG_DIR}
|
||||||
|
KillMode=process
|
||||||
|
KillSignal=SIGINT
|
||||||
|
LimitNOFILE=infinity
|
||||||
|
LimitNPROC=infinity
|
||||||
|
Restart=on-failure
|
||||||
|
RestartSec=2
|
||||||
|
StartLimitBurst=3
|
||||||
|
StartLimitIntervalSec=10
|
||||||
|
TasksMax=infinity
|
||||||
|
OOMScoreAdjust=-1000
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# ── Ensure config + data dirs exist ──────────────────────────────────────────
|
||||||
|
# We do not populate /etc/nomad.d/ here (that's S0.4). We do create the
|
||||||
|
# directory so `nomad agent -config=/etc/nomad.d` doesn't error if the unit
|
||||||
|
# is started before hcl files are dropped in.
|
||||||
|
for d in "$NOMAD_CONFIG_DIR" "$NOMAD_DATA_DIR"; do
|
||||||
|
if [ ! -d "$d" ]; then
|
||||||
|
log "creating ${d}"
|
||||||
|
install -d -m 0755 "$d"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# ── Install unit file only if content differs ────────────────────────────────
|
||||||
|
needs_reload=0
|
||||||
|
if [ ! -f "$UNIT_PATH" ] \
|
||||||
|
|| ! printf '%s\n' "$DESIRED_UNIT" | cmp -s - "$UNIT_PATH"; then
|
||||||
|
log "writing unit → ${UNIT_PATH}"
|
||||||
|
tmp="$(mktemp)"
|
||||||
|
trap 'rm -f "$tmp"' EXIT
|
||||||
|
printf '%s\n' "$DESIRED_UNIT" > "$tmp"
|
||||||
|
install -m 0644 -o root -g root "$tmp" "$UNIT_PATH"
|
||||||
|
rm -f "$tmp"
|
||||||
|
trap - EXIT
|
||||||
|
needs_reload=1
|
||||||
|
else
|
||||||
|
log "unit file already up to date"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Reload + enable ──────────────────────────────────────────────────────────
|
||||||
|
if [ "$needs_reload" -eq 1 ]; then
|
||||||
|
log "systemctl daemon-reload"
|
||||||
|
systemctl daemon-reload
|
||||||
|
fi
|
||||||
|
|
||||||
|
if systemctl is-enabled --quiet nomad.service 2>/dev/null; then
|
||||||
|
log "nomad.service already enabled"
|
||||||
|
else
|
||||||
|
log "systemctl enable nomad"
|
||||||
|
systemctl enable nomad.service >/dev/null
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "done — unit installed and enabled (NOT started; S0.4 brings the cluster up)"
|
||||||
|
|
@ -1,8 +1,10 @@
|
||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# mirrors.sh — Push primary branch + tags to configured mirror remotes.
|
# mirrors.sh — Mirror helpers: push to remotes + register pull mirrors via API.
|
||||||
#
|
#
|
||||||
# Usage: source lib/mirrors.sh; mirror_push
|
# Usage: source lib/mirrors.sh; mirror_push
|
||||||
|
# source lib/mirrors.sh; mirror_pull_register <clone_url> <owner> <repo_name> [interval]
|
||||||
# Requires: PROJECT_REPO_ROOT, PRIMARY_BRANCH, MIRROR_* vars from load-project.sh
|
# Requires: PROJECT_REPO_ROOT, PRIMARY_BRANCH, MIRROR_* vars from load-project.sh
|
||||||
|
# FORGE_API_BASE, FORGE_TOKEN for pull-mirror registration
|
||||||
|
|
||||||
# shellcheck disable=SC2154 # globals set by load-project.sh / calling script
|
# shellcheck disable=SC2154 # globals set by load-project.sh / calling script
|
||||||
|
|
||||||
|
|
@ -37,3 +39,73 @@ mirror_push() {
|
||||||
log "mirror: pushed to ${name} (pid $!)"
|
log "mirror: pushed to ${name} (pid $!)"
|
||||||
done
|
done
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# mirror_pull_register — register a Forgejo pull mirror via the /repos/migrate API.
|
||||||
|
#
|
||||||
|
# Creates a new repo as a pull mirror of an external source. Works against
|
||||||
|
# empty target repos (the repo is created by the API call itself).
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# mirror_pull_register <clone_url> <owner> <repo_name> [interval]
|
||||||
|
#
|
||||||
|
# Args:
|
||||||
|
# clone_url — HTTPS URL of the source repo (e.g. https://codeberg.org/johba/disinto.git)
|
||||||
|
# owner — Forgejo org or user that will own the mirror repo
|
||||||
|
# repo_name — name of the new mirror repo on Forgejo
|
||||||
|
# interval — sync interval (default: "8h0m0s"; Forgejo duration format)
|
||||||
|
#
|
||||||
|
# Requires:
|
||||||
|
# FORGE_API_BASE, FORGE_TOKEN (from env.sh)
|
||||||
|
#
|
||||||
|
# Returns 0 on success, 1 on failure. Prints the new repo JSON to stdout.
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
mirror_pull_register() {
|
||||||
|
local clone_url="$1"
|
||||||
|
local owner="$2"
|
||||||
|
local repo_name="$3"
|
||||||
|
local interval="${4:-8h0m0s}"
|
||||||
|
|
||||||
|
if [ -z "${FORGE_API_BASE:-}" ] || [ -z "${FORGE_TOKEN:-}" ]; then
|
||||||
|
echo "ERROR: FORGE_API_BASE and FORGE_TOKEN must be set" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$clone_url" ] || [ -z "$owner" ] || [ -z "$repo_name" ]; then
|
||||||
|
echo "Usage: mirror_pull_register <clone_url> <owner> <repo_name> [interval]" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
local payload
|
||||||
|
payload=$(jq -n \
|
||||||
|
--arg clone_addr "$clone_url" \
|
||||||
|
--arg repo_name "$repo_name" \
|
||||||
|
--arg repo_owner "$owner" \
|
||||||
|
--arg interval "$interval" \
|
||||||
|
'{
|
||||||
|
clone_addr: $clone_addr,
|
||||||
|
repo_name: $repo_name,
|
||||||
|
repo_owner: $repo_owner,
|
||||||
|
mirror: true,
|
||||||
|
mirror_interval: $interval,
|
||||||
|
service: "git"
|
||||||
|
}')
|
||||||
|
|
||||||
|
local http_code body
|
||||||
|
body=$(curl -s -w "\n%{http_code}" -X POST \
|
||||||
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"${FORGE_API_BASE}/repos/migrate" \
|
||||||
|
-d "$payload")
|
||||||
|
|
||||||
|
http_code=$(printf '%s' "$body" | tail -n1)
|
||||||
|
body=$(printf '%s' "$body" | sed '$d')
|
||||||
|
|
||||||
|
if [ "$http_code" -ge 200 ] && [ "$http_code" -lt 300 ]; then
|
||||||
|
printf '%s\n' "$body"
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
echo "ERROR: mirror_pull_register failed (HTTP ${http_code}): ${body}" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -18,8 +18,8 @@
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
# Source vault.sh for _vault_log helper
|
# Source action-vault.sh for _vault_log helper
|
||||||
source "${FACTORY_ROOT}/lib/vault.sh"
|
source "${FACTORY_ROOT}/lib/action-vault.sh"
|
||||||
|
|
||||||
# Assert required globals are set before using this module.
|
# Assert required globals are set before using this module.
|
||||||
_assert_release_globals() {
|
_assert_release_globals() {
|
||||||
|
|
|
||||||
585
lib/sprint-filer.sh
Executable file
585
lib/sprint-filer.sh
Executable file
|
|
@ -0,0 +1,585 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# =============================================================================
|
||||||
|
# sprint-filer.sh — Parse merged sprint PRs and file sub-issues via filer-bot
|
||||||
|
#
|
||||||
|
# Invoked by the ops-filer Woodpecker pipeline after a sprint PR merges on the
|
||||||
|
# ops repo main branch. Parses each sprints/*.md file for a structured
|
||||||
|
# ## Sub-issues block (filer:begin/end markers), then creates idempotent
|
||||||
|
# Forgejo issues on the project repo using FORGE_FILER_TOKEN.
|
||||||
|
#
|
||||||
|
# Permission model (#764):
|
||||||
|
# filer-bot has issues:write on the project repo.
|
||||||
|
# architect-bot is read-only on the project repo.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# sprint-filer.sh <sprint-file.md> — file sub-issues from one sprint
|
||||||
|
# sprint-filer.sh --all <sprints-dir> — scan all sprint files in dir
|
||||||
|
#
|
||||||
|
# Environment:
|
||||||
|
# FORGE_FILER_TOKEN — filer-bot API token (issues:write on project repo)
|
||||||
|
# FORGE_API — project repo API base (e.g. http://forgejo:3000/api/v1/repos/org/repo)
|
||||||
|
# FORGE_API_BASE — API base URL (e.g. http://forgejo:3000/api/v1)
|
||||||
|
# =============================================================================
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
|
||||||
|
# Source env.sh only if not already loaded (allows standalone + sourced use)
|
||||||
|
if [ -z "${FACTORY_ROOT:-}" ]; then
|
||||||
|
FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
# shellcheck source=env.sh
|
||||||
|
source "$SCRIPT_DIR/env.sh"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Logging ──────────────────────────────────────────────────────────────
|
||||||
|
LOG_AGENT="${LOG_AGENT:-filer}"
|
||||||
|
|
||||||
|
# Emit one timestamped log line to stderr, tagged with the agent name.
# Timestamp is UTC ISO-8601; stderr keeps stdout clean for data output.
filer_log() {
    local stamp
    stamp=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
    printf '[%s] %s: %s\n' "$stamp" "$LOG_AGENT" "$*" >&2
}
|
||||||
|
|
||||||
|
# ── Validate required environment ────────────────────────────────────────
|
||||||
|
: "${FORGE_FILER_TOKEN:?sprint-filer.sh requires FORGE_FILER_TOKEN}"
|
||||||
|
: "${FORGE_API:?sprint-filer.sh requires FORGE_API}"
|
||||||
|
|
||||||
|
# ── Paginated Forgejo API fetch ──────────────────────────────────────────
|
||||||
|
# Reuses forge_api_all from lib/env.sh with FORGE_FILER_TOKEN.
|
||||||
|
# Args: api_path (e.g. /issues?state=all&type=issues)
|
||||||
|
# Output: merged JSON array to stdout
|
||||||
|
# Thin wrapper: paginated Forgejo API fetch authenticated as filer-bot.
# $1: API path (e.g. /issues?state=all&type=issues); delegates to
# forge_api_all (lib/env.sh), merged JSON array on stdout.
filer_api_all() { forge_api_all "$1" "$FORGE_FILER_TOKEN"; }
|
||||||
|
|
||||||
|
# ── Parse sub-issues block from a sprint markdown file ───────────────────
|
||||||
|
# Extracts the YAML-in-markdown between <!-- filer:begin --> and <!-- filer:end -->
|
||||||
|
# Args: sprint_file_path
|
||||||
|
# Output: the raw sub-issues block (YAML lines) to stdout
|
||||||
|
# Returns: 0 if block found, 1 if not found or malformed
|
||||||
|
# Extract the raw sub-issues payload between the <!-- filer:begin --> and
# <!-- filer:end --> markers of a sprint markdown file.
# $1: sprint file path
# stdout: the block content (marker lines excluded)
# returns 0 when a non-empty, properly terminated block exists; 1 otherwise
parse_subissues_block() {
    local file="$1"

    if [ ! -f "$file" ]; then
        filer_log "ERROR: sprint file not found: ${file}"
        return 1
    fi

    local capturing=false seen_begin=false content="" line

    # Single pass: toggle capture state on the marker lines, accumulate
    # everything in between.
    while IFS= read -r line; do
        case "$line" in
            *"<!-- filer:begin -->"*)
                capturing=true
                seen_begin=true
                ;;
            *"<!-- filer:end -->"*)
                capturing=false
                ;;
            *)
                if [ "$capturing" = true ]; then
                    content+="${line}"$'\n'
                fi
                ;;
        esac
    done < "$file"

    if [ "$seen_begin" = false ]; then
        filer_log "No filer:begin/end block found in ${file}"
        return 1
    fi

    # Still capturing at EOF means the end marker never appeared.
    if [ "$capturing" = true ]; then
        filer_log "ERROR: malformed sub-issues block in ${file} — filer:begin without filer:end"
        return 1
    fi

    if [ -z "$content" ]; then
        filer_log "WARNING: empty sub-issues block in ${file}"
        return 1
    fi

    printf '%s' "$content"
}
|
||||||
|
|
||||||
|
# ── Extract vision issue number from sprint file ─────────────────────────
|
||||||
|
# Looks for "#N" references specifically in the "## Vision issues" section
|
||||||
|
# to avoid picking up cross-links or related-issue mentions earlier in the file.
|
||||||
|
# Falls back to first #N in the file if no "## Vision issues" section found.
|
||||||
|
# Args: sprint_file_path
|
||||||
|
# Output: first vision issue number found
|
||||||
|
# Extract the vision issue number a sprint file was decomposed from.
# Looks for "#N" references specifically in the "## Vision issues" section
# to avoid picking up cross-links earlier in the file; falls back to the
# first "#N" anywhere in the file.
# $1: sprint file path
# stdout: issue number (digits only), or empty if none found
extract_vision_issue() {
    local sprint_file="$1"

    local in_section=false
    local result=""
    local line
    while IFS= read -r line; do
        if [[ "$line" =~ ^##[[:space:]]+Vision[[:space:]]+issues ]]; then
            in_section=true
            continue
        fi
        # Stop at the next heading after the Vision issues section.
        if [ "$in_section" = true ] && [[ "$line" =~ ^## ]]; then
            break
        fi
        if [ "$in_section" = true ]; then
            # BUGFIX: `|| true` guards the grep. Under set -euo pipefail a
            # section line with no "#N" reference makes grep exit 1, which
            # fails the assignment and aborts the whole function/script.
            result=$(printf '%s' "$line" | grep -oE '#[0-9]+' | head -1 | tr -d '#') || true
            if [ -n "$result" ]; then
                printf '%s' "$result"
                return 0
            fi
        fi
    done < "$sprint_file"

    # Fallback: first #N in the entire file. Guarded the same way so a
    # file with no references yields empty output and rc 0, letting the
    # caller's [ -z ... ] check handle the "not found" case.
    grep -oE '#[0-9]+' "$sprint_file" | head -1 | tr -d '#' || true
}
|
||||||
|
|
||||||
|
# ── Extract sprint slug from file path ───────────────────────────────────
|
||||||
|
# Args: sprint_file_path
|
||||||
|
# Output: slug (filename without .md)
|
||||||
|
# Derive the sprint slug from its file path: basename with the .md
# suffix stripped.
# $1: sprint file path
extract_sprint_slug() {
    basename "$1" .md
}
|
||||||
|
|
||||||
|
# ── Parse individual sub-issue entries from the block ────────────────────
|
||||||
|
# The block is a simple YAML-like format:
|
||||||
|
# - id: foo
|
||||||
|
# title: "..."
|
||||||
|
# labels: [backlog, priority]
|
||||||
|
# depends_on: [bar]
|
||||||
|
# body: |
|
||||||
|
# multi-line body
|
||||||
|
#
|
||||||
|
# Args: raw_block (via stdin)
|
||||||
|
# Output: JSON array of sub-issue objects
|
||||||
|
# Parse the YAML-like sub-issues block (stdin) into a JSON array.
# Entry format:
#   - id: foo
#     title: "..."
#     labels: [backlog, priority]
#     depends_on: [bar]
#     body: |
#       multi-line body
# Output: JSON array of {id, title, labels, depends_on, body} objects.
#
# NOTE(review): field indent is assumed to be 2 spaces and body
# continuation 4 spaces — confirm against real sprint files.
parse_subissue_entries() {
    local block
    block=$(cat)

    printf '%s' "$block" | awk '
    BEGIN {
        printf "["
        first = 1
        inbody = 0
        id = ""; title = ""; labels = "[]"; depends = "[]"; body = ""
    }

    # Convert a YAML inline list ("[a, b]") into a JSON array of strings.
    # Empty fields are dropped. BUGFIX: the separator comma is driven by
    # the count of emitted items, not the field index, so an empty first
    # field (e.g. "[, a]") can no longer produce a leading comma and
    # invalid JSON.
    function to_json_list(s,    arr, n, i, out, cnt) {
        gsub(/\[/, "", s)
        gsub(/\]/, "", s)
        n = split(s, arr, /, */)
        out = "["
        cnt = 0
        for (i = 1; i <= n; i++) {
            gsub(/^ */, "", arr[i])
            gsub(/ *$/, "", arr[i])
            if (arr[i] != "") {
                if (cnt > 0) out = out ","
                out = out "\"" arr[i] "\""
                cnt++
            }
        }
        return out "]"
    }

    # Minimal JSON string escaping: backslash, double quote, tab.
    function json_escape(s) {
        gsub(/\\/, "\\\\", s)
        gsub(/"/, "\\\"", s)
        gsub(/\t/, "\\t", s)
        return s
    }

    # Emit the accumulated entry as one JSON object and reset state.
    function flush_entry() {
        if (id == "") return
        if (!first) printf ","
        first = 0

        body = json_escape(body)
        # Encode real newlines for JSON, then drop the trailing one.
        gsub(/\n/, "\\n", body)
        sub(/\\n$/, "", body)

        # Strip surrounding quotes, then BUGFIX: escape any remaining
        # quotes/backslashes so titles containing them stay valid JSON.
        gsub(/^"/, "", title)
        gsub(/"$/, "", title)
        title = json_escape(title)

        printf "{\"id\":\"%s\",\"title\":\"%s\",\"labels\":%s,\"depends_on\":%s,\"body\":\"%s\"}", id, title, labels, depends, body

        id = ""; title = ""; labels = "[]"; depends = "[]"; body = ""
        inbody = 0
    }

    /^- id:/ {
        flush_entry()
        sub(/^- id: */, "")
        id = $0
        labels = "[]"
        depends = "[]"
        next
    }

    /^  title:/ {
        sub(/^  title: */, "")
        title = $0
        gsub(/^"/, "", title)
        gsub(/"$/, "", title)
        next
    }

    /^  labels:/ {
        sub(/^  labels: */, "")
        labels = to_json_list($0)
        next
    }

    /^  depends_on:/ {
        sub(/^  depends_on: */, "")
        depends = to_json_list($0)
        next
    }

    /^  body: *\|/ {
        inbody = 1
        body = ""
        next
    }

    # Body continuation lines: strip the fixed indent, keep the rest.
    inbody && /^    / {
        sub(/^    /, "")
        body = body $0 "\n"
        next
    }

    # A non-indented, non-blank line ends the body. awk cannot re-scan a
    # record, so the one pattern that matters here (a new entry) is
    # handled inline.
    inbody && !/^    / && !/^$/ {
        inbody = 0
        if ($0 ~ /^- id:/) {
            flush_entry()
            sub(/^- id: */, "")
            id = $0
            labels = "[]"
            depends = "[]"
        }
    }

    END {
        flush_entry()
        printf "]"
    }
    '
}
|
||||||
|
|
||||||
|
# ── Check if sub-issue already exists (idempotency) ─────────────────────
|
||||||
|
# Searches for the decomposed-from marker in existing issues.
|
||||||
|
# Args: vision_issue_number sprint_slug subissue_id
|
||||||
|
# Returns: 0 if already exists, 1 if not
|
||||||
|
# Idempotency check: has a sub-issue with this exact decomposed-from
# marker already been filed?
# $1: vision issue number  $2: sprint slug  $3: sub-issue id
# returns 0 if a matching issue exists, 1 if not
subissue_exists() {
    local marker="<!-- decomposed-from: #${1}, sprint: ${2}, id: ${3} -->"

    # Scan every issue (open and closed, paginated) for the marker.
    local issues_json
    issues_json=$(filer_api_all "/issues?state=all&type=issues")

    if printf '%s' "$issues_json" | jq -e --arg marker "$marker" \
        '[.[] | select(.body // "" | contains($marker))] | length > 0' >/dev/null 2>&1
    then
        return 0 # Already exists
    fi

    return 1 # Does not exist
}
|
||||||
|
|
||||||
|
# ── Resolve label names to IDs ───────────────────────────────────────────
|
||||||
|
# Args: label_names_json (JSON array of strings)
|
||||||
|
# Output: JSON array of label IDs
|
||||||
|
# Map label names to Forgejo label IDs on the project repo.
# $1: JSON array of label-name strings
# stdout: JSON array of numeric label IDs; names with no matching repo
# label are silently dropped.
resolve_label_ids() {
    local label_names_json="$1"

    # Fetch all labels from project repo. Fall back to [] so a transient
    # API failure yields an empty ID list rather than aborting (set -e).
    local all_labels
    all_labels=$(curl -sf -H "Authorization: token ${FORGE_FILER_TOKEN}" \
        "${FORGE_API}/labels" 2>/dev/null) || all_labels="[]"

    # Map names to IDs. The trailing jq -Rs folds the one-ID-per-line
    # stream back into a JSON number array, dropping blank lines.
    printf '%s' "$label_names_json" | jq -r '.[]' | while IFS= read -r label_name; do
        [ -z "$label_name" ] && continue
        printf '%s' "$all_labels" | jq -r --arg name "$label_name" \
            '.[] | select(.name == $name) | .id' 2>/dev/null
    done | jq -Rs 'split("\n") | map(select(. != "") | tonumber)'
}
|
||||||
|
|
||||||
|
# ── Add in-progress label to vision issue ────────────────────────────────
|
||||||
|
# Args: vision_issue_number
|
||||||
|
# Attach the "in-progress" label to a vision issue on the project repo.
# $1: vision issue number
# returns 0 on success; 1 when the label does not exist on the repo or an
# API call fails. Callers treat this as best-effort (invoked with || true).
add_inprogress_label() {
    local issue_num="$1"

    # Look up the repo label list to resolve the "in-progress" label ID.
    local labels_json
    labels_json=$(curl -sf -H "Authorization: token ${FORGE_FILER_TOKEN}" \
        "${FORGE_API}/labels" 2>/dev/null) || return 1

    # || true: jq may exit nonzero; the empty-check below handles it.
    local label_id
    label_id=$(printf '%s' "$labels_json" | jq -r '.[] | select(.name == "in-progress") | .id' 2>/dev/null) || true

    if [ -z "$label_id" ]; then
        filer_log "WARNING: in-progress label not found"
        return 1
    fi

    # POST the label ID onto the issue (Forgejo adds, not replaces).
    if curl -sf -X POST \
        -H "Authorization: token ${FORGE_FILER_TOKEN}" \
        -H "Content-Type: application/json" \
        "${FORGE_API}/issues/${issue_num}/labels" \
        -d "{\"labels\": [${label_id}]}" >/dev/null 2>&1; then
        filer_log "Added in-progress label to vision issue #${issue_num}"
        return 0
    else
        filer_log "WARNING: failed to add in-progress label to vision issue #${issue_num}"
        return 1
    fi
}
|
||||||
|
|
||||||
|
# ── File sub-issues from a sprint file ───────────────────────────────────
|
||||||
|
# This is the main entry point. Parses the sprint file, extracts sub-issues,
|
||||||
|
# and creates them idempotently via the Forgejo API.
|
||||||
|
# Args: sprint_file_path
|
||||||
|
# Returns: 0 on success, 1 on any error (fail-fast)
|
||||||
|
# Main entry point: parse one sprint file, extract its sub-issues, and
# create them idempotently on the project repo via the Forgejo API.
# $1: sprint file path
# returns 0 on success; 1 on any error (fail-fast — the first failed
# issue creation aborts the remainder of the batch).
file_subissues() {
    local sprint_file="$1"

    filer_log "Processing sprint file: ${sprint_file}"

    # Extract metadata: originating vision issue + sprint slug.
    local vision_issue sprint_slug
    vision_issue=$(extract_vision_issue "$sprint_file")
    sprint_slug=$(extract_sprint_slug "$sprint_file")

    if [ -z "$vision_issue" ]; then
        filer_log "ERROR: could not extract vision issue number from ${sprint_file}"
        return 1
    fi

    filer_log "Vision issue: #${vision_issue}, sprint slug: ${sprint_slug}"

    # Parse the sub-issues block (filer:begin/end markers).
    local raw_block
    raw_block=$(parse_subissues_block "$sprint_file") || return 1

    # Parse individual entries into a JSON array.
    local entries_json
    entries_json=$(printf '%s' "$raw_block" | parse_subissue_entries)

    # Validate parsing produced valid JSON before touching the API.
    if ! printf '%s' "$entries_json" | jq empty 2>/dev/null; then
        filer_log "ERROR: failed to parse sub-issues block as valid JSON in ${sprint_file}"
        return 1
    fi

    local entry_count
    entry_count=$(printf '%s' "$entries_json" | jq 'length')

    if [ "$entry_count" -eq 0 ]; then
        filer_log "WARNING: no sub-issue entries found in ${sprint_file}"
        return 1
    fi

    filer_log "Found ${entry_count} sub-issue(s) to file"

    # File each sub-issue (fail-fast on first error).
    local filed_count=0
    local i=0
    while [ "$i" -lt "$entry_count" ]; do
        local entry
        entry=$(printf '%s' "$entries_json" | jq ".[$i]")

        local subissue_id subissue_title subissue_body labels_json
        subissue_id=$(printf '%s' "$entry" | jq -r '.id')
        subissue_title=$(printf '%s' "$entry" | jq -r '.title')
        subissue_body=$(printf '%s' "$entry" | jq -r '.body')
        labels_json=$(printf '%s' "$entry" | jq -c '.labels')

        # jq -r renders JSON null as the string "null" — check both forms.
        if [ -z "$subissue_id" ] || [ "$subissue_id" = "null" ]; then
            filer_log "ERROR: sub-issue entry at index ${i} has no id — aborting"
            return 1
        fi

        if [ -z "$subissue_title" ] || [ "$subissue_title" = "null" ]; then
            filer_log "ERROR: sub-issue '${subissue_id}' has no title — aborting"
            return 1
        fi

        # Idempotency check: skip entries already filed in a prior run.
        if subissue_exists "$vision_issue" "$sprint_slug" "$subissue_id"; then
            filer_log "Sub-issue '${subissue_id}' already exists — skipping"
            i=$((i + 1))
            continue
        fi

        # Append the decomposed-from marker to the body; this marker is
        # what subissue_exists and the vision-lifecycle check search for.
        local marker="<!-- decomposed-from: #${vision_issue}, sprint: ${sprint_slug}, id: ${subissue_id} -->"
        local full_body="${subissue_body}

${marker}"

        # Resolve label names to numeric repo label IDs.
        local label_ids
        label_ids=$(resolve_label_ids "$labels_json")

        # Build issue payload with jq for safe JSON construction.
        local payload
        payload=$(jq -n \
            --arg title "$subissue_title" \
            --arg body "$full_body" \
            --argjson labels "$label_ids" \
            '{title: $title, body: $body, labels: $labels}')

        # Create the issue; any HTTP failure aborts the whole batch.
        local response
        response=$(curl -sf -X POST \
            -H "Authorization: token ${FORGE_FILER_TOKEN}" \
            -H "Content-Type: application/json" \
            "${FORGE_API}/issues" \
            -d "$payload" 2>/dev/null) || {
            filer_log "ERROR: failed to create sub-issue '${subissue_id}' — aborting (${filed_count}/${entry_count} filed so far)"
            return 1
        }

        local new_issue_num
        new_issue_num=$(printf '%s' "$response" | jq -r '.number // empty')
        filer_log "Filed sub-issue '${subissue_id}' as #${new_issue_num}: ${subissue_title}"

        filed_count=$((filed_count + 1))
        i=$((i + 1))
    done

    # Mark the vision issue as in-progress (best-effort).
    add_inprogress_label "$vision_issue" || true

    filer_log "Successfully filed ${filed_count}/${entry_count} sub-issue(s) for sprint ${sprint_slug}"
    return 0
}
|
||||||
|
|
||||||
|
# ── Vision lifecycle: close completed vision issues ──────────────────────
|
||||||
|
# Checks open vision issues and closes any whose sub-issues are all closed.
|
||||||
|
# Uses the decomposed-from marker to find sub-issues.
|
||||||
|
# Vision lifecycle: close any open vision issue whose sub-issues (found
# via the decomposed-from marker) are all closed. Posts a closure comment
# first, then PATCHes the issue state; both calls are best-effort.
check_and_close_completed_visions() {
    filer_log "Checking for vision issues with all sub-issues complete..."

    local vision_issues_json
    vision_issues_json=$(filer_api_all "/issues?labels=vision&state=open")

    if [ "$vision_issues_json" = "[]" ] || [ "$vision_issues_json" = "null" ]; then
        filer_log "No open vision issues found"
        return 0
    fi

    # Fetch the full issue corpus once; the marker search below runs
    # against this snapshot instead of one API call per vision.
    local all_issues
    all_issues=$(filer_api_all "/issues?state=all&type=issues")

    local vision_nums
    vision_nums=$(printf '%s' "$vision_issues_json" | jq -r '.[].number' 2>/dev/null) || return 0

    local closed_count=0
    while IFS= read -r vid; do
        [ -z "$vid" ] && continue

        # Sub-issues for this vision = issues whose body carries a
        # decomposed-from marker referencing #<vid>.
        local sub_issues
        sub_issues=$(printf '%s' "$all_issues" | jq --arg vid "$vid" \
            '[.[] | select(.body // "" | contains("<!-- decomposed-from: #" + $vid))]')

        local sub_count
        sub_count=$(printf '%s' "$sub_issues" | jq 'length')

        # No sub-issues filed yet means the vision is not ready to close.
        [ "$sub_count" -eq 0 ] && continue

        # Any still-open sub-issue keeps the vision open.
        local open_count
        open_count=$(printf '%s' "$sub_issues" | jq '[.[] | select(.state != "closed")] | length')

        if [ "$open_count" -gt 0 ]; then
            continue
        fi

        # All sub-issues closed — close the vision issue.
        filer_log "All ${sub_count} sub-issues for vision #${vid} are closed — closing vision"

        local comment_body
        comment_body="## Vision Issue Completed

All sub-issues have been implemented and merged. This vision issue is now closed.

---
*Automated closure by filer-bot · $(date -u '+%Y-%m-%d %H:%M UTC')*"

        local comment_payload
        comment_payload=$(jq -n --arg body "$comment_body" '{body: $body}')

        # Best-effort: a failed comment or close is logged nowhere and
        # retried on the next run (the vision stays open).
        curl -sf -X POST \
            -H "Authorization: token ${FORGE_FILER_TOKEN}" \
            -H "Content-Type: application/json" \
            "${FORGE_API}/issues/${vid}/comments" \
            -d "$comment_payload" >/dev/null 2>&1 || true

        curl -sf -X PATCH \
            -H "Authorization: token ${FORGE_FILER_TOKEN}" \
            -H "Content-Type: application/json" \
            "${FORGE_API}/issues/${vid}" \
            -d '{"state":"closed"}' >/dev/null 2>&1 || true

        closed_count=$((closed_count + 1))
    done <<< "$vision_nums"

    if [ "$closed_count" -gt 0 ]; then
        filer_log "Closed ${closed_count} vision issue(s)"
    fi
}
|
||||||
|
|
||||||
|
# ── Main ─────────────────────────────────────────────────────────────────
|
||||||
|
# CLI entry point.
#   sprint-filer.sh <sprint-file.md>      — file sub-issues from one sprint
#   sprint-filer.sh --all <sprints-dir>   — scan every *.md in a directory
# In --all mode failures are accumulated (each file is attempted); in
# single-file mode the exit status is file_subissues' own.
main() {
    if [ "${1:-}" = "--all" ]; then
        local sprints_dir="${2:?Usage: sprint-filer.sh --all <sprints-dir>}"
        local exit_code=0

        for sprint_file in "${sprints_dir}"/*.md; do
            # Guard against the literal glob when the dir has no .md files.
            [ -f "$sprint_file" ] || continue

            # Only process files with filer:begin markers.
            if ! grep -q '<!-- filer:begin -->' "$sprint_file"; then
                continue
            fi

            if ! file_subissues "$sprint_file"; then
                filer_log "ERROR: failed to process ${sprint_file}"
                exit_code=1
            fi
        done

        # Run vision lifecycle check after filing.
        check_and_close_completed_visions || true

        return "$exit_code"
    elif [ -n "${1:-}" ]; then
        # NOTE(review): under set -e a failing file_subissues here exits
        # the script before the lifecycle check runs — confirm intended.
        file_subissues "$1"
        # Run vision lifecycle check after filing.
        check_and_close_completed_visions || true
    else
        echo "Usage: sprint-filer.sh <sprint-file.md>" >&2
        echo "       sprint-filer.sh --all <sprints-dir>" >&2
        return 1
    fi
}
|
||||||
|
|
||||||
|
# Run main only when executed directly (not when sourced for testing)
|
||||||
|
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
|
||||||
|
main "$@"
|
||||||
|
fi
|
||||||
88
nomad/client.hcl
Normal file
88
nomad/client.hcl
Normal file
|
|
@ -0,0 +1,88 @@
|
||||||
|
# =============================================================================
# nomad/client.hcl — Docker driver + host_volume declarations
#
# Part of the Nomad+Vault migration (S0.2, issue #822). Deployed to
# /etc/nomad.d/client.hcl on the factory dev box alongside server.hcl.
#
# This file owns: Docker driver plugin config + host_volume pre-wiring.
# server.hcl owns: agent role, bind, ports, data_dir.
#
# NOTE: Nomad merges every *.hcl under -config=/etc/nomad.d, so declaring
# a second `client { ... }` block here augments (not replaces) the one in
# server.hcl. On a single-node setup this file could be inlined into
# server.hcl — the split is for readability, not semantics.
#
# host_volume declarations let Nomad jobspecs mount factory state by name
# (volume = "forgejo-data", etc.) without coupling host paths into jobspec
# HCL. Host paths under /srv/disinto/* are created out-of-band by the
# orchestrator (S0.4) before any job references them.
# =============================================================================

client {
  # forgejo git server data (repos, avatars, attachments).
  host_volume "forgejo-data" {
    path      = "/srv/disinto/forgejo-data"
    read_only = false
  }

  # woodpecker CI data (pipeline artifacts, sqlite db).
  host_volume "woodpecker-data" {
    path      = "/srv/disinto/woodpecker-data"
    read_only = false
  }

  # agent runtime data (claude config, logs, phase files).
  host_volume "agent-data" {
    path      = "/srv/disinto/agent-data"
    read_only = false
  }

  # per-project git clones and worktrees.
  host_volume "project-repos" {
    path      = "/srv/disinto/project-repos"
    read_only = false
  }

  # caddy config + ACME state.
  host_volume "caddy-data" {
    path      = "/srv/disinto/caddy-data"
    read_only = false
  }

  # disinto chat transcripts + attachments.
  host_volume "chat-history" {
    path      = "/srv/disinto/chat-history"
    read_only = false
  }

  # ops repo clone (vault actions, sprint artifacts, knowledge).
  host_volume "ops-repo" {
    path      = "/srv/disinto/ops-repo"
    read_only = false
  }
}

# Docker task driver. `volumes.enabled = true` is required so jobspecs
# can mount host_volume declarations defined above. `allow_privileged`
# stays false — no factory workload needs privileged containers today,
# and flipping it is an audit-worthy change.
plugin "docker" {
  config {
    allow_privileged = false

    volumes {
      enabled = true
    }

    # Leave images behind when jobs stop, so short job churn doesn't thrash
    # the image cache. Factory disk is not constrained; `docker system prune`
    # is the escape hatch.
    # Dangling containers ARE garbage-collected — only images are kept.
    gc {
      image     = false
      container = true
      dangling_containers {
        enabled = true
      }
    }
  }
}
|
||||||
53
nomad/server.hcl
Normal file
53
nomad/server.hcl
Normal file
|
|
@ -0,0 +1,53 @@
|
||||||
|
# =============================================================================
# nomad/server.hcl — Single-node combined server+client configuration
#
# Part of the Nomad+Vault migration (S0.2, issue #822). Deployed to
# /etc/nomad.d/server.hcl on the factory dev box alongside client.hcl.
#
# This file owns: agent role, ports, bind, data directory.
# client.hcl owns: Docker driver plugin config + host_volume declarations.
#
# NOTE: On single-node setups these two files could be merged into one
# (Nomad auto-merges every *.hcl under -config=/etc/nomad.d). The split is
# purely for readability — role/bind/port vs. plugin/volume wiring.
#
# This is a factory dev-box baseline — TLS, ACLs, gossip encryption, and
# consul/vault integration are deliberately absent and land in later steps.
# =============================================================================

data_dir  = "/var/lib/nomad"
bind_addr = "127.0.0.1"
log_level = "INFO"

# All Nomad agent traffic stays on localhost — the factory box does not
# federate with peers. Ports are the Nomad defaults, pinned here so that
# future changes to these numbers are a visible diff.
ports {
  http = 4646
  rpc  = 4647
  serf = 4648
}

# Single-node combined mode: this agent is both the only server and the
# only client. bootstrap_expect=1 makes the server quorum-of-one.
server {
  enabled          = true
  bootstrap_expect = 1
}

client {
  enabled = true
}

# Advertise localhost to self to avoid surprises if the default IP
# autodetection picks a transient interface (e.g. docker0, wg0).
advertise {
  http = "127.0.0.1"
  rpc  = "127.0.0.1"
  serf = "127.0.0.1"
}

# UI on by default — same bind as http, no TLS (localhost only).
ui {
  enabled = true
}
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Planner Agent
|
# Planner Agent
|
||||||
|
|
||||||
**Role**: Strategic planning using a Prerequisite Tree (Theory of Constraints),
|
**Role**: Strategic planning using a Prerequisite Tree (Theory of Constraints),
|
||||||
|
|
@ -34,7 +34,9 @@ will then sections) and marks the prerequisite as blocked-on-vault in the tree.
|
||||||
Deduplication: checks pending/ + approved/ + fired/ before creating.
|
Deduplication: checks pending/ + approved/ + fired/ before creating.
|
||||||
Phase 4 (journal-and-memory): write updated prerequisite tree + daily journal
|
Phase 4 (journal-and-memory): write updated prerequisite tree + daily journal
|
||||||
entry (committed to ops repo) and update `$OPS_REPO_ROOT/knowledge/planner-memory.md`.
|
entry (committed to ops repo) and update `$OPS_REPO_ROOT/knowledge/planner-memory.md`.
|
||||||
Phase 5 (commit-ops): commit all ops repo changes, push directly.
|
Phase 5 (commit-ops): commit all ops repo changes to a `planner/run-YYYY-MM-DD`
|
||||||
|
branch, then create a PR and walk it to merge via review-bot (`pr_create` →
|
||||||
|
`pr_walk_to_merge`), mirroring the architect's ops flow. No direct push to main.
|
||||||
AGENTS.md maintenance is handled by the Gardener.
|
AGENTS.md maintenance is handled by the Gardener.
|
||||||
|
|
||||||
**Artifacts use `$OPS_REPO_ROOT`**: All planner artifacts (journal,
|
**Artifacts use `$OPS_REPO_ROOT`**: All planner artifacts (journal,
|
||||||
|
|
@ -55,7 +57,7 @@ nervous system component, not work.
|
||||||
creates tmux session, injects formula prompt, monitors phase file, handles crash recovery, cleans up
|
creates tmux session, injects formula prompt, monitors phase file, handles crash recovery, cleans up
|
||||||
- `formulas/run-planner.toml` — Execution spec: six steps (preflight,
|
- `formulas/run-planner.toml` — Execution spec: six steps (preflight,
|
||||||
prediction-triage, update-prerequisite-tree, file-at-constraints,
|
prediction-triage, update-prerequisite-tree, file-at-constraints,
|
||||||
journal-and-memory, commit-and-pr) with `needs` dependencies. Claude
|
journal-and-memory, commit-ops-changes) with `needs` dependencies. Claude
|
||||||
executes all steps in a single interactive session with tool access
|
executes all steps in a single interactive session with tool access
|
||||||
- `formulas/groom-backlog.toml` — Grooming formula for backlog triage and
|
- `formulas/groom-backlog.toml` — Grooming formula for backlog triage and
|
||||||
grooming. (Note: the planner no longer dispatches breakdown mode — complex
|
grooming. (Note: the planner no longer dispatches breakdown mode — complex
|
||||||
|
|
|
||||||
|
|
@ -10,7 +10,9 @@
|
||||||
# 2. Load formula (formulas/run-planner.toml)
|
# 2. Load formula (formulas/run-planner.toml)
|
||||||
# 3. Context: VISION.md, AGENTS.md, ops:RESOURCES.md, structural graph,
|
# 3. Context: VISION.md, AGENTS.md, ops:RESOURCES.md, structural graph,
|
||||||
# planner memory, journal entries
|
# planner memory, journal entries
|
||||||
# 4. agent_run(worktree, prompt) → Claude plans, may push knowledge updates
|
# 4. Create ops branch planner/run-YYYY-MM-DD for changes
|
||||||
|
# 5. agent_run(worktree, prompt) → Claude plans, commits to ops branch
|
||||||
|
# 6. If ops branch has commits: pr_create → pr_walk_to_merge (review-bot)
|
||||||
#
|
#
|
||||||
# Usage:
|
# Usage:
|
||||||
# planner-run.sh [projects/disinto.toml] # project config (default: disinto)
|
# planner-run.sh [projects/disinto.toml] # project config (default: disinto)
|
||||||
|
|
@ -22,10 +24,11 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto (planner is disinto infrastructure)
|
# Accept project config from argument; default to disinto (planner is disinto infrastructure)
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
|
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
||||||
|
# env.sh from nested shells / claude -p tools (#762, #747)
|
||||||
|
export FORGE_TOKEN_OVERRIDE="${FORGE_PLANNER_TOKEN:-}"
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
# Use planner-bot's own Forgejo identity (#747)
|
|
||||||
FORGE_TOKEN="${FORGE_PLANNER_TOKEN:-${FORGE_TOKEN}}"
|
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
@ -34,6 +37,10 @@ source "$FACTORY_ROOT/lib/worktree.sh"
|
||||||
source "$FACTORY_ROOT/lib/guard.sh"
|
source "$FACTORY_ROOT/lib/guard.sh"
|
||||||
# shellcheck source=../lib/agent-sdk.sh
|
# shellcheck source=../lib/agent-sdk.sh
|
||||||
source "$FACTORY_ROOT/lib/agent-sdk.sh"
|
source "$FACTORY_ROOT/lib/agent-sdk.sh"
|
||||||
|
# shellcheck source=../lib/ci-helpers.sh
|
||||||
|
source "$FACTORY_ROOT/lib/ci-helpers.sh"
|
||||||
|
# shellcheck source=../lib/pr-lifecycle.sh
|
||||||
|
source "$FACTORY_ROOT/lib/pr-lifecycle.sh"
|
||||||
|
|
||||||
LOG_FILE="${DISINTO_LOG_DIR}/planner/planner.log"
|
LOG_FILE="${DISINTO_LOG_DIR}/planner/planner.log"
|
||||||
# shellcheck disable=SC2034 # consumed by agent-sdk.sh
|
# shellcheck disable=SC2034 # consumed by agent-sdk.sh
|
||||||
|
|
@ -145,12 +152,69 @@ ${PROMPT_FOOTER}"
|
||||||
# ── Create worktree ──────────────────────────────────────────────────────
|
# ── Create worktree ──────────────────────────────────────────────────────
|
||||||
formula_worktree_setup "$WORKTREE"
|
formula_worktree_setup "$WORKTREE"
|
||||||
|
|
||||||
|
# ── Prepare ops branch for PR-based merge (#765) ────────────────────────
|
||||||
|
PLANNER_OPS_BRANCH="planner/run-$(date -u +%Y-%m-%d)"
|
||||||
|
(
|
||||||
|
cd "$OPS_REPO_ROOT"
|
||||||
|
git fetch origin "${PRIMARY_BRANCH}" --quiet 2>/dev/null || true
|
||||||
|
git checkout "${PRIMARY_BRANCH}" --quiet 2>/dev/null || true
|
||||||
|
git pull --ff-only origin "${PRIMARY_BRANCH}" --quiet 2>/dev/null || true
|
||||||
|
# Create (or reset to) a fresh branch from PRIMARY_BRANCH
|
||||||
|
git checkout -B "$PLANNER_OPS_BRANCH" "origin/${PRIMARY_BRANCH}" --quiet 2>/dev/null || \
|
||||||
|
git checkout -b "$PLANNER_OPS_BRANCH" --quiet 2>/dev/null || true
|
||||||
|
)
|
||||||
|
log "ops branch: ${PLANNER_OPS_BRANCH}"
|
||||||
|
|
||||||
# ── Run agent ─────────────────────────────────────────────────────────────
|
# ── Run agent ─────────────────────────────────────────────────────────────
|
||||||
export CLAUDE_MODEL="opus"
|
export CLAUDE_MODEL="opus"
|
||||||
|
|
||||||
agent_run --worktree "$WORKTREE" "$PROMPT"
|
agent_run --worktree "$WORKTREE" "$PROMPT"
|
||||||
log "agent_run complete"
|
log "agent_run complete"
|
||||||
|
|
||||||
|
# ── PR lifecycle: create PR on ops repo and walk to merge (#765) ─────────
|
||||||
|
OPS_FORGE_API="${FORGE_API_BASE}/repos/${FORGE_OPS_REPO}"
|
||||||
|
ops_has_commits=false
|
||||||
|
if ! git -C "$OPS_REPO_ROOT" diff --quiet "origin/${PRIMARY_BRANCH}..${PLANNER_OPS_BRANCH}" 2>/dev/null; then
|
||||||
|
ops_has_commits=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$ops_has_commits" = "true" ]; then
|
||||||
|
log "ops branch has commits — creating PR"
|
||||||
|
# Push the branch to the ops remote
|
||||||
|
git -C "$OPS_REPO_ROOT" push origin "$PLANNER_OPS_BRANCH" --quiet 2>/dev/null || \
|
||||||
|
git -C "$OPS_REPO_ROOT" push --force-with-lease origin "$PLANNER_OPS_BRANCH" 2>/dev/null
|
||||||
|
|
||||||
|
# Temporarily point FORGE_API at the ops repo for pr-lifecycle functions
|
||||||
|
ORIG_FORGE_API="$FORGE_API"
|
||||||
|
export FORGE_API="$OPS_FORGE_API"
|
||||||
|
# Ops repo typically has no Woodpecker CI — skip CI polling
|
||||||
|
ORIG_WOODPECKER_REPO_ID="${WOODPECKER_REPO_ID:-2}"
|
||||||
|
export WOODPECKER_REPO_ID="0"
|
||||||
|
|
||||||
|
PR_NUM=$(pr_create "$PLANNER_OPS_BRANCH" \
|
||||||
|
"chore: planner run $(date -u +%Y-%m-%d)" \
|
||||||
|
"Automated planner run — updates prerequisite tree, memory, and vault items." \
|
||||||
|
"${PRIMARY_BRANCH}" \
|
||||||
|
"$OPS_FORGE_API") || true
|
||||||
|
|
||||||
|
if [ -n "$PR_NUM" ]; then
|
||||||
|
log "ops PR #${PR_NUM} created — walking to merge"
|
||||||
|
SESSION_ID=$(cat "$SID_FILE" 2>/dev/null || echo "planner-$$")
|
||||||
|
pr_walk_to_merge "$PR_NUM" "$SESSION_ID" "$OPS_REPO_ROOT" 1 2 || {
|
||||||
|
log "ops PR #${PR_NUM} walk finished: ${_PR_WALK_EXIT_REASON:-unknown}"
|
||||||
|
}
|
||||||
|
log "ops PR #${PR_NUM} result: ${_PR_WALK_EXIT_REASON:-unknown}"
|
||||||
|
else
|
||||||
|
log "WARNING: failed to create ops PR for branch ${PLANNER_OPS_BRANCH}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Restore original FORGE_API
|
||||||
|
export FORGE_API="$ORIG_FORGE_API"
|
||||||
|
export WOODPECKER_REPO_ID="$ORIG_WOODPECKER_REPO_ID"
|
||||||
|
else
|
||||||
|
log "no ops changes — skipping PR creation"
|
||||||
|
fi
|
||||||
|
|
||||||
# Persist watermarks so next run can skip if nothing changed
|
# Persist watermarks so next run can skip if nothing changed
|
||||||
mkdir -p "$FACTORY_ROOT/state"
|
mkdir -p "$FACTORY_ROOT/state"
|
||||||
echo "$CURRENT_SHA" > "$LAST_SHA_FILE"
|
echo "$CURRENT_SHA" > "$LAST_SHA_FILE"
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Predictor Agent
|
# Predictor Agent
|
||||||
|
|
||||||
**Role**: Abstract adversary (the "goblin"). Runs a 2-step formula
|
**Role**: Abstract adversary (the "goblin"). Runs a 2-step formula
|
||||||
|
|
|
||||||
|
|
@ -23,10 +23,11 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
|
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
||||||
|
# env.sh from nested shells / claude -p tools (#762, #747)
|
||||||
|
export FORGE_TOKEN_OVERRIDE="${FORGE_PREDICTOR_TOKEN:-}"
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
# Use predictor-bot's own Forgejo identity (#747)
|
|
||||||
FORGE_TOKEN="${FORGE_PREDICTOR_TOKEN:-${FORGE_TOKEN}}"
|
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Review Agent
|
# Review Agent
|
||||||
|
|
||||||
**Role**: AI-powered PR review — post structured findings and formal
|
**Role**: AI-powered PR review — post structured findings and formal
|
||||||
|
|
|
||||||
|
|
@ -59,6 +59,21 @@ fi
|
||||||
|
|
||||||
mkdir -p "$EVIDENCE_DIR"
|
mkdir -p "$EVIDENCE_DIR"
|
||||||
|
|
||||||
|
# Verify input is Caddy JSON format (not Combined Log Format or other)
|
||||||
|
first_line=$(grep -m1 '.' "$CADDY_LOG" || true)
|
||||||
|
if [ -z "$first_line" ]; then
|
||||||
|
log "WARN: Caddy access log is empty at ${CADDY_LOG}"
|
||||||
|
echo "WARN: Caddy access log is empty — nothing to parse." >&2
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
if ! printf '%s\n' "$first_line" | jq empty 2>/dev/null; then
|
||||||
|
preview="${first_line:0:200}"
|
||||||
|
log "ERROR: Input file is not Caddy JSON format (expected structured JSON access log). Got: ${preview}"
|
||||||
|
echo "ERROR: Input file is not Caddy JSON format (expected structured JSON access log)." >&2
|
||||||
|
echo "Got: ${preview}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# ── Parse access log ────────────────────────────────────────────────────────
|
# ── Parse access log ────────────────────────────────────────────────────────
|
||||||
|
|
||||||
log "Parsing ${CADDY_LOG} for entries since $(date -u -d "@${CUTOFF_TS}" +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || echo "${CUTOFF_TS}")"
|
log "Parsing ${CADDY_LOG} for entries since $(date -u -d "@${CUTOFF_TS}" +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || echo "${CUTOFF_TS}")"
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
<!-- last-reviewed: 3e65878093bbbcea6dfe4db341f82dc89d4e0ac0 -->
|
<!-- last-reviewed: c363ee0aea2ae447daab28c2c850d6abefc8c6b5 -->
|
||||||
# Supervisor Agent
|
# Supervisor Agent
|
||||||
|
|
||||||
**Role**: Health monitoring and auto-remediation, executed as a formula-driven
|
**Role**: Health monitoring and auto-remediation, executed as a formula-driven
|
||||||
|
|
@ -7,13 +7,11 @@ then runs an interactive Claude session (sonnet) that assesses health, auto-fixe
|
||||||
issues, and writes a daily journal. When blocked on external
|
issues, and writes a daily journal. When blocked on external
|
||||||
resources or human decisions, files vault items instead of escalating directly.
|
resources or human decisions, files vault items instead of escalating directly.
|
||||||
|
|
||||||
**Trigger**: `supervisor-run.sh` is invoked by the polling loop in `docker/edge/entrypoint-edge.sh`
|
**Trigger**: `supervisor-run.sh` is invoked by two polling loops:
|
||||||
every 20 minutes (line 50-53). Sources `lib/guard.sh` and calls `check_active supervisor` first
|
- **Agents container** (`docker/agents/entrypoint.sh`): every `SUPERVISOR_INTERVAL` seconds (default 1200 = 20 min). Controlled by the `supervisor` role in `AGENT_ROLES` (included in the default seven-role set since P1/#801). Logs to `supervisor.log` in the agents container.
|
||||||
— skips if `$FACTORY_ROOT/state/.supervisor-active` is absent. Then runs `claude -p` via
|
- **Edge container** (`docker/edge/entrypoint-edge.sh`): separate loop in the edge container (line 169-172). Runs independently of the agents container's polling schedule.
|
||||||
`agent-sdk.sh`, injects `formulas/run-supervisor.toml` with pre-collected metrics as context,
|
|
||||||
and cleans up on completion or timeout (20 min max session). Note: the supervisor runs in the
|
Both invoke the same `supervisor-run.sh`. Sources `lib/guard.sh` and calls `check_active supervisor` first — skips if `$FACTORY_ROOT/state/.supervisor-active` is absent. Then runs `claude -p` via `agent-sdk.sh`, injects `formulas/run-supervisor.toml` with pre-collected metrics as context, and cleans up on completion or timeout.
|
||||||
**edge container** (`entrypoint-edge.sh`), not the agent container — this distinction matters
|
|
||||||
for operators debugging the factory.
|
|
||||||
|
|
||||||
**Key files**:
|
**Key files**:
|
||||||
- `supervisor/supervisor-run.sh` — Polling loop participant + orchestrator: lock, memory guard,
|
- `supervisor/supervisor-run.sh` — Polling loop participant + orchestrator: lock, memory guard,
|
||||||
|
|
@ -39,6 +37,7 @@ P3 (degraded PRs, circular deps, stale deps), P4 (housekeeping).
|
||||||
**Environment variables consumed**:
|
**Environment variables consumed**:
|
||||||
- `FORGE_TOKEN`, `FORGE_SUPERVISOR_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`, `OPS_REPO_ROOT`
|
- `FORGE_TOKEN`, `FORGE_SUPERVISOR_TOKEN` (falls back to FORGE_TOKEN), `FORGE_REPO`, `FORGE_API`, `PROJECT_NAME`, `PROJECT_REPO_ROOT`, `OPS_REPO_ROOT`
|
||||||
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by supervisor-run.sh)
|
- `PRIMARY_BRANCH`, `CLAUDE_MODEL` (set to sonnet by supervisor-run.sh)
|
||||||
|
- `SUPERVISOR_INTERVAL` — polling interval in seconds for agents container (default 1200 = 20 min)
|
||||||
- `WOODPECKER_TOKEN`, `WOODPECKER_SERVER`, `WOODPECKER_DB_PASSWORD`, `WOODPECKER_DB_USER`, `WOODPECKER_DB_HOST`, `WOODPECKER_DB_NAME` — CI database queries
|
- `WOODPECKER_TOKEN`, `WOODPECKER_SERVER`, `WOODPECKER_DB_PASSWORD`, `WOODPECKER_DB_USER`, `WOODPECKER_DB_HOST`, `WOODPECKER_DB_NAME` — CI database queries
|
||||||
|
|
||||||
**Degraded mode (Issue #544)**: When `OPS_REPO_ROOT` is not set or the directory doesn't exist, the supervisor runs in degraded mode:
|
**Degraded mode (Issue #544)**: When `OPS_REPO_ROOT` is not set or the directory doesn't exist, the supervisor runs in degraded mode:
|
||||||
|
|
|
||||||
|
|
@ -25,10 +25,11 @@ FACTORY_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
|
||||||
# Accept project config from argument; default to disinto
|
# Accept project config from argument; default to disinto
|
||||||
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
export PROJECT_TOML="${1:-$FACTORY_ROOT/projects/disinto.toml}"
|
||||||
|
# Set override BEFORE sourcing env.sh so it survives any later re-source of
|
||||||
|
# env.sh from nested shells / claude -p tools (#762, #747)
|
||||||
|
export FORGE_TOKEN_OVERRIDE="${FORGE_SUPERVISOR_TOKEN:-}"
|
||||||
# shellcheck source=../lib/env.sh
|
# shellcheck source=../lib/env.sh
|
||||||
source "$FACTORY_ROOT/lib/env.sh"
|
source "$FACTORY_ROOT/lib/env.sh"
|
||||||
# Use supervisor-bot's own Forgejo identity (#747)
|
|
||||||
FORGE_TOKEN="${FORGE_SUPERVISOR_TOKEN:-${FORGE_TOKEN}}"
|
|
||||||
# shellcheck source=../lib/formula-session.sh
|
# shellcheck source=../lib/formula-session.sh
|
||||||
source "$FACTORY_ROOT/lib/formula-session.sh"
|
source "$FACTORY_ROOT/lib/formula-session.sh"
|
||||||
# shellcheck source=../lib/worktree.sh
|
# shellcheck source=../lib/worktree.sh
|
||||||
|
|
|
||||||
215
tests/lib-hvault.bats
Normal file
215
tests/lib-hvault.bats
Normal file
|
|
@ -0,0 +1,215 @@
|
||||||
|
#!/usr/bin/env bats
|
||||||
|
# tests/lib-hvault.bats — Unit tests for lib/hvault.sh
|
||||||
|
#
|
||||||
|
# Runs against a dev-mode Vault server (single binary, no LXC needed).
|
||||||
|
# CI launches vault server -dev inline before running these tests.
|
||||||
|
|
||||||
|
VAULT_BIN="${VAULT_BIN:-vault}"
|
||||||
|
|
||||||
|
setup_file() {
|
||||||
|
export TEST_DIR
|
||||||
|
TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." && pwd)"
|
||||||
|
|
||||||
|
# Start dev-mode vault on a random port
|
||||||
|
export VAULT_DEV_PORT
|
||||||
|
VAULT_DEV_PORT="$(shuf -i 18200-18299 -n 1)"
|
||||||
|
export VAULT_ADDR="http://127.0.0.1:${VAULT_DEV_PORT}"
|
||||||
|
|
||||||
|
"$VAULT_BIN" server -dev \
|
||||||
|
-dev-listen-address="127.0.0.1:${VAULT_DEV_PORT}" \
|
||||||
|
-dev-root-token-id="test-root-token" \
|
||||||
|
-dev-no-store-token \
|
||||||
|
&>"${BATS_FILE_TMPDIR}/vault.log" &
|
||||||
|
export VAULT_PID=$!
|
||||||
|
|
||||||
|
export VAULT_TOKEN="test-root-token"
|
||||||
|
|
||||||
|
# Wait for vault to be ready (up to 10s)
|
||||||
|
local i=0
|
||||||
|
while ! curl -sf "${VAULT_ADDR}/v1/sys/health" >/dev/null 2>&1; do
|
||||||
|
sleep 0.5
|
||||||
|
i=$((i + 1))
|
||||||
|
if [ "$i" -ge 20 ]; then
|
||||||
|
echo "Vault failed to start. Log:" >&2
|
||||||
|
cat "${BATS_FILE_TMPDIR}/vault.log" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
teardown_file() {
|
||||||
|
if [ -n "${VAULT_PID:-}" ]; then
|
||||||
|
kill "$VAULT_PID" 2>/dev/null || true
|
||||||
|
wait "$VAULT_PID" 2>/dev/null || true
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
setup() {
|
||||||
|
# Source the module under test
|
||||||
|
source "${TEST_DIR}/lib/hvault.sh"
|
||||||
|
export VAULT_ADDR VAULT_TOKEN
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── hvault_kv_put + hvault_kv_get ────────────────────────────────────────────
|
||||||
|
|
||||||
|
@test "hvault_kv_put writes and hvault_kv_get reads a secret" {
|
||||||
|
run hvault_kv_put "test/myapp" "username=admin" "password=s3cret"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
|
run hvault_kv_get "test/myapp"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
echo "$output" | jq -e '.username == "admin"'
|
||||||
|
echo "$output" | jq -e '.password == "s3cret"'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_get extracts a single key" {
|
||||||
|
hvault_kv_put "test/single" "foo=bar" "baz=qux"
|
||||||
|
|
||||||
|
run hvault_kv_get "test/single" "foo"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
[ "$output" = "bar" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_get fails for missing key" {
|
||||||
|
hvault_kv_put "test/keymiss" "exists=yes"
|
||||||
|
|
||||||
|
run hvault_kv_get "test/keymiss" "nope"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_get fails for missing path" {
|
||||||
|
run hvault_kv_get "test/does-not-exist-$(date +%s)"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_put fails without KEY=VAL" {
|
||||||
|
run hvault_kv_put "test/bad"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
echo "$output" | grep -q '"error":true' || echo "$stderr" | grep -q '"error":true'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_put rejects malformed pair (no =)" {
|
||||||
|
run hvault_kv_put "test/bad2" "noequals"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_get fails without PATH" {
|
||||||
|
run hvault_kv_get
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── hvault_kv_list ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@test "hvault_kv_list lists keys at a path" {
|
||||||
|
hvault_kv_put "test/listdir/a" "k=1"
|
||||||
|
hvault_kv_put "test/listdir/b" "k=2"
|
||||||
|
|
||||||
|
run hvault_kv_list "test/listdir"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
echo "$output" | jq -e '. | length >= 2'
|
||||||
|
echo "$output" | jq -e 'index("a")'
|
||||||
|
echo "$output" | jq -e 'index("b")'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_list fails on nonexistent path" {
|
||||||
|
run hvault_kv_list "test/no-such-path-$(date +%s)"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_kv_list fails without PATH" {
|
||||||
|
run hvault_kv_list
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── hvault_policy_apply ──────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@test "hvault_policy_apply creates a policy" {
|
||||||
|
local pfile="${BATS_TEST_TMPDIR}/test-policy.hcl"
|
||||||
|
cat > "$pfile" <<'HCL'
|
||||||
|
path "secret/data/test/*" {
|
||||||
|
capabilities = ["read"]
|
||||||
|
}
|
||||||
|
HCL
|
||||||
|
|
||||||
|
run hvault_policy_apply "test-reader" "$pfile"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
|
# Verify the policy exists via Vault API
|
||||||
|
run curl -sf -H "X-Vault-Token: ${VAULT_TOKEN}" \
|
||||||
|
"${VAULT_ADDR}/v1/sys/policies/acl/test-reader"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
echo "$output" | jq -e '.data.policy' | grep -q "secret/data/test"
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_policy_apply is idempotent" {
|
||||||
|
local pfile="${BATS_TEST_TMPDIR}/idem-policy.hcl"
|
||||||
|
printf 'path "secret/*" { capabilities = ["list"] }\n' > "$pfile"
|
||||||
|
|
||||||
|
run hvault_policy_apply "idem-policy" "$pfile"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
|
# Apply again — should succeed
|
||||||
|
run hvault_policy_apply "idem-policy" "$pfile"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_policy_apply fails with missing file" {
|
||||||
|
run hvault_policy_apply "bad-policy" "/nonexistent/policy.hcl"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_policy_apply fails without args" {
|
||||||
|
run hvault_policy_apply
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── hvault_token_lookup ──────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@test "hvault_token_lookup returns token info" {
|
||||||
|
run hvault_token_lookup
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
echo "$output" | jq -e '.policies'
|
||||||
|
echo "$output" | jq -e '.accessor'
|
||||||
|
echo "$output" | jq -e 'has("ttl")'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_token_lookup fails without VAULT_TOKEN" {
|
||||||
|
unset VAULT_TOKEN
|
||||||
|
run hvault_token_lookup
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_token_lookup fails without VAULT_ADDR" {
|
||||||
|
unset VAULT_ADDR
|
||||||
|
run hvault_token_lookup
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── hvault_jwt_login ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@test "hvault_jwt_login fails without VAULT_ADDR" {
|
||||||
|
unset VAULT_ADDR
|
||||||
|
run hvault_jwt_login "myrole" "fakejwt"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_jwt_login fails without args" {
|
||||||
|
run hvault_jwt_login
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hvault_jwt_login returns error for unconfigured jwt auth" {
|
||||||
|
# JWT auth backend is not enabled in dev mode by default — expect failure
|
||||||
|
run hvault_jwt_login "myrole" "eyJhbGciOiJSUzI1NiJ9.fake.sig"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Env / prereq errors ─────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@test "all functions fail with structured JSON error when VAULT_ADDR unset" {
|
||||||
|
unset VAULT_ADDR
|
||||||
|
for fn in hvault_kv_get hvault_kv_put hvault_kv_list hvault_policy_apply hvault_token_lookup; do
|
||||||
|
run $fn "dummy" "dummy"
|
||||||
|
[ "$status" -ne 0 ]
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
@ -29,7 +29,8 @@ cleanup() {
|
||||||
pkill -f "mock-forgejo.py" 2>/dev/null || true
|
pkill -f "mock-forgejo.py" 2>/dev/null || true
|
||||||
rm -rf "$MOCK_BIN" /tmp/smoke-test-repo \
|
rm -rf "$MOCK_BIN" /tmp/smoke-test-repo \
|
||||||
"${FACTORY_ROOT}/projects/smoke-repo.toml" \
|
"${FACTORY_ROOT}/projects/smoke-repo.toml" \
|
||||||
/tmp/smoke-claude-shared /tmp/smoke-home-claude
|
/tmp/smoke-claude-shared /tmp/smoke-home-claude \
|
||||||
|
/tmp/smoke-env-before-rerun /tmp/smoke-env-before-dryrun
|
||||||
# Restore .env only if we created the backup
|
# Restore .env only if we created the backup
|
||||||
if [ -f "${FACTORY_ROOT}/.env.smoke-backup" ]; then
|
if [ -f "${FACTORY_ROOT}/.env.smoke-backup" ]; then
|
||||||
mv "${FACTORY_ROOT}/.env.smoke-backup" "${FACTORY_ROOT}/.env"
|
mv "${FACTORY_ROOT}/.env.smoke-backup" "${FACTORY_ROOT}/.env"
|
||||||
|
|
@ -178,8 +179,30 @@ else
|
||||||
fail "disinto init exited non-zero"
|
fail "disinto init exited non-zero"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ── Idempotency test: run init again ───────────────────────────────────────
|
# ── Dry-run test: must not modify state ────────────────────────────────────
|
||||||
|
echo "=== Dry-run test ==="
|
||||||
|
cp "${FACTORY_ROOT}/.env" /tmp/smoke-env-before-dryrun
|
||||||
|
if bash "${FACTORY_ROOT}/bin/disinto" init \
|
||||||
|
"${TEST_SLUG}" \
|
||||||
|
--bare --yes --dry-run \
|
||||||
|
--forge-url "$FORGE_URL" \
|
||||||
|
--repo-root "/tmp/smoke-test-repo" 2>&1 | grep -q "Dry run complete"; then
|
||||||
|
pass "disinto init --dry-run exited successfully"
|
||||||
|
else
|
||||||
|
fail "disinto init --dry-run did not complete"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Verify --dry-run did not modify .env
|
||||||
|
if diff -q /tmp/smoke-env-before-dryrun "${FACTORY_ROOT}/.env" >/dev/null 2>&1; then
|
||||||
|
pass "dry-run: .env unchanged"
|
||||||
|
else
|
||||||
|
fail "dry-run: .env was modified (should be read-only)"
|
||||||
|
fi
|
||||||
|
rm -f /tmp/smoke-env-before-dryrun
|
||||||
|
|
||||||
|
# ── Idempotency test: run init again, verify .env is stable ────────────────
|
||||||
echo "=== Idempotency test: running disinto init again ==="
|
echo "=== Idempotency test: running disinto init again ==="
|
||||||
|
cp "${FACTORY_ROOT}/.env" /tmp/smoke-env-before-rerun
|
||||||
if bash "${FACTORY_ROOT}/bin/disinto" init \
|
if bash "${FACTORY_ROOT}/bin/disinto" init \
|
||||||
"${TEST_SLUG}" \
|
"${TEST_SLUG}" \
|
||||||
--bare --yes \
|
--bare --yes \
|
||||||
|
|
@ -190,6 +213,29 @@ else
|
||||||
fail "disinto init (re-run) exited non-zero"
|
fail "disinto init (re-run) exited non-zero"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Verify .env is stable across re-runs (no token churn)
|
||||||
|
if diff -q /tmp/smoke-env-before-rerun "${FACTORY_ROOT}/.env" >/dev/null 2>&1; then
|
||||||
|
pass "idempotency: .env unchanged on re-run"
|
||||||
|
else
|
||||||
|
fail "idempotency: .env changed on re-run (token churn detected)"
|
||||||
|
diff /tmp/smoke-env-before-rerun "${FACTORY_ROOT}/.env" >&2 || true
|
||||||
|
fi
|
||||||
|
rm -f /tmp/smoke-env-before-rerun
|
||||||
|
|
||||||
|
# Verify FORGE_ADMIN_TOKEN is stored in .env
|
||||||
|
if grep -q '^FORGE_ADMIN_TOKEN=' "${FACTORY_ROOT}/.env"; then
|
||||||
|
pass ".env contains FORGE_ADMIN_TOKEN"
|
||||||
|
else
|
||||||
|
fail ".env missing FORGE_ADMIN_TOKEN"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Verify HUMAN_TOKEN is stored in .env
|
||||||
|
if grep -q '^HUMAN_TOKEN=' "${FACTORY_ROOT}/.env"; then
|
||||||
|
pass ".env contains HUMAN_TOKEN"
|
||||||
|
else
|
||||||
|
fail ".env missing HUMAN_TOKEN"
|
||||||
|
fi
|
||||||
|
|
||||||
# ── 4. Verify Forgejo state ─────────────────────────────────────────────────
|
# ── 4. Verify Forgejo state ─────────────────────────────────────────────────
|
||||||
echo "=== 4/6 Verifying Forgejo state ==="
|
echo "=== 4/6 Verifying Forgejo state ==="
|
||||||
|
|
||||||
|
|
|
||||||
162
tests/smoke-load-secret.sh
Normal file
162
tests/smoke-load-secret.sh
Normal file
|
|
@ -0,0 +1,162 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# tests/smoke-load-secret.sh — Unit tests for load_secret() precedence chain
|
||||||
|
#
|
||||||
|
# Covers the 4 precedence cases:
|
||||||
|
# 1. /secrets/<NAME>.env (Nomad template)
|
||||||
|
# 2. Current environment
|
||||||
|
# 3. secrets/<NAME>.enc (age-encrypted per-key file)
|
||||||
|
# 4. Default / empty fallback
|
||||||
|
#
|
||||||
|
# Required tools: bash, age (for case 3)
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
|
||||||
|
|
||||||
|
fail() { printf 'FAIL: %s\n' "$*" >&2; FAILED=1; }
|
||||||
|
pass() { printf 'PASS: %s\n' "$*"; }
|
||||||
|
FAILED=0
|
||||||
|
|
||||||
|
# Set up a temp workspace and fake HOME so age key paths work
|
||||||
|
test_dir=$(mktemp -d)
|
||||||
|
fake_home=$(mktemp -d)
|
||||||
|
trap 'rm -rf "$test_dir" "$fake_home"' EXIT
|
||||||
|
|
||||||
|
# Minimal env for sourcing env.sh's load_secret function without the full boot
|
||||||
|
# We source the function definition directly to isolate the unit under test.
|
||||||
|
# shellcheck disable=SC2034
|
||||||
|
export USER="${USER:-test}"
|
||||||
|
export HOME="$fake_home"
|
||||||
|
|
||||||
|
# Source env.sh to get load_secret (and FACTORY_ROOT)
|
||||||
|
source "${FACTORY_ROOT}/lib/env.sh"
|
||||||
|
|
||||||
|
# ── Case 4: Default / empty fallback ────────────────────────────────────────
|
||||||
|
echo "=== 1/5 Case 4: default fallback ==="
|
||||||
|
|
||||||
|
unset TEST_SECRET_FALLBACK 2>/dev/null || true
|
||||||
|
val=$(load_secret TEST_SECRET_FALLBACK "my-default")
|
||||||
|
if [ "$val" = "my-default" ]; then
|
||||||
|
pass "load_secret returns default when nothing is set"
|
||||||
|
else
|
||||||
|
fail "Expected 'my-default', got '${val}'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
val=$(load_secret TEST_SECRET_FALLBACK)
|
||||||
|
if [ -z "$val" ]; then
|
||||||
|
pass "load_secret returns empty when no default and nothing set"
|
||||||
|
else
|
||||||
|
fail "Expected empty, got '${val}'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Case 2: Environment variable already set ────────────────────────────────
|
||||||
|
echo "=== 2/5 Case 2: environment variable ==="
|
||||||
|
|
||||||
|
export TEST_SECRET_ENV="from-environment"
|
||||||
|
val=$(load_secret TEST_SECRET_ENV "ignored-default")
|
||||||
|
if [ "$val" = "from-environment" ]; then
|
||||||
|
pass "load_secret returns env value over default"
|
||||||
|
else
|
||||||
|
fail "Expected 'from-environment', got '${val}'"
|
||||||
|
fi
|
||||||
|
unset TEST_SECRET_ENV
|
||||||
|
|
||||||
|
# ── Case 3: Age-encrypted per-key file ──────────────────────────────────────
|
||||||
|
echo "=== 3/5 Case 3: age-encrypted secret ==="
|
||||||
|
|
||||||
|
if command -v age &>/dev/null && command -v age-keygen &>/dev/null; then
|
||||||
|
# Generate a test age key
|
||||||
|
age_key_dir="${fake_home}/.config/sops/age"
|
||||||
|
mkdir -p "$age_key_dir"
|
||||||
|
age-keygen -o "${age_key_dir}/keys.txt" 2>/dev/null
|
||||||
|
pub_key=$(age-keygen -y "${age_key_dir}/keys.txt")
|
||||||
|
|
||||||
|
# Create encrypted secret
|
||||||
|
secrets_dir="${FACTORY_ROOT}/secrets"
|
||||||
|
mkdir -p "$secrets_dir"
|
||||||
|
printf 'age-test-value' | age -r "$pub_key" -o "${secrets_dir}/TEST_SECRET_AGE.enc"
|
||||||
|
|
||||||
|
unset TEST_SECRET_AGE 2>/dev/null || true
|
||||||
|
val=$(load_secret TEST_SECRET_AGE "fallback")
|
||||||
|
if [ "$val" = "age-test-value" ]; then
|
||||||
|
pass "load_secret decrypts age-encrypted secret"
|
||||||
|
else
|
||||||
|
fail "Expected 'age-test-value', got '${val}'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Verify caching: call load_secret directly (not in subshell) so export propagates
|
||||||
|
unset TEST_SECRET_AGE 2>/dev/null || true
|
||||||
|
load_secret TEST_SECRET_AGE >/dev/null
|
||||||
|
if [ "${TEST_SECRET_AGE:-}" = "age-test-value" ]; then
|
||||||
|
pass "load_secret caches decrypted value in environment (direct call)"
|
||||||
|
else
|
||||||
|
fail "Decrypted value not cached in environment"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up test secret
|
||||||
|
rm -f "${secrets_dir}/TEST_SECRET_AGE.enc"
|
||||||
|
rmdir "$secrets_dir" 2>/dev/null || true
|
||||||
|
unset TEST_SECRET_AGE
|
||||||
|
else
|
||||||
|
echo "SKIP: age/age-keygen not found — skipping age decryption test"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Case 1: Nomad template path ────────────────────────────────────────────
|
||||||
|
echo "=== 4/5 Case 1: Nomad template (/secrets/<NAME>.env) ==="
|
||||||
|
|
||||||
|
nomad_dir="/secrets"
|
||||||
|
if [ -w "$(dirname "$nomad_dir")" ] 2>/dev/null || [ -w "$nomad_dir" ] 2>/dev/null; then
|
||||||
|
mkdir -p "$nomad_dir"
|
||||||
|
printf 'TEST_SECRET_NOMAD=from-nomad-template\n' > "${nomad_dir}/TEST_SECRET_NOMAD.env"
|
||||||
|
|
||||||
|
# Even with env set, Nomad path takes precedence
|
||||||
|
export TEST_SECRET_NOMAD="from-env-should-lose"
|
||||||
|
val=$(load_secret TEST_SECRET_NOMAD "default")
|
||||||
|
if [ "$val" = "from-nomad-template" ]; then
|
||||||
|
pass "load_secret prefers Nomad template over env"
|
||||||
|
else
|
||||||
|
fail "Expected 'from-nomad-template', got '${val}'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -f "${nomad_dir}/TEST_SECRET_NOMAD.env"
|
||||||
|
rmdir "$nomad_dir" 2>/dev/null || true
|
||||||
|
unset TEST_SECRET_NOMAD
|
||||||
|
else
|
||||||
|
echo "SKIP: /secrets not writable — skipping Nomad template test (needs root or container)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Precedence: env beats age ────────────────────────────────────────────
|
||||||
|
echo "=== 5/5 Precedence: env beats age-encrypted ==="
|
||||||
|
|
||||||
|
if command -v age &>/dev/null && command -v age-keygen &>/dev/null; then
|
||||||
|
age_key_dir="${fake_home}/.config/sops/age"
|
||||||
|
mkdir -p "$age_key_dir"
|
||||||
|
[ -f "${age_key_dir}/keys.txt" ] || age-keygen -o "${age_key_dir}/keys.txt" 2>/dev/null
|
||||||
|
pub_key=$(age-keygen -y "${age_key_dir}/keys.txt")
|
||||||
|
|
||||||
|
secrets_dir="${FACTORY_ROOT}/secrets"
|
||||||
|
mkdir -p "$secrets_dir"
|
||||||
|
printf 'age-value-should-lose' | age -r "$pub_key" -o "${secrets_dir}/TEST_SECRET_PREC.enc"
|
||||||
|
|
||||||
|
export TEST_SECRET_PREC="env-value-wins"
|
||||||
|
val=$(load_secret TEST_SECRET_PREC "default")
|
||||||
|
if [ "$val" = "env-value-wins" ]; then
|
||||||
|
pass "load_secret prefers env over age-encrypted file"
|
||||||
|
else
|
||||||
|
fail "Expected 'env-value-wins', got '${val}'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -f "${secrets_dir}/TEST_SECRET_PREC.enc"
|
||||||
|
rmdir "$secrets_dir" 2>/dev/null || true
|
||||||
|
unset TEST_SECRET_PREC
|
||||||
|
else
|
||||||
|
echo "SKIP: age not found — skipping precedence test"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Summary ───────────────────────────────────────────────────────────────
|
||||||
|
echo ""
|
||||||
|
if [ "$FAILED" -ne 0 ]; then
|
||||||
|
echo "=== SMOKE-LOAD-SECRET TEST FAILED ==="
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "=== SMOKE-LOAD-SECRET TEST PASSED ==="
|
||||||
|
|
@ -83,9 +83,12 @@ curl -sL https://raw.githubusercontent.com/disinto-admin/disinto/fix/issue-621/t
|
||||||
- Permissions: `root:disinto-register 0750`
|
- Permissions: `root:disinto-register 0750`
|
||||||
|
|
||||||
3. **Installs Caddy**:
|
3. **Installs Caddy**:
|
||||||
|
- Backs up any pre-existing `/etc/caddy/Caddyfile` to `/etc/caddy/Caddyfile.pre-disinto`
|
||||||
- Download Caddy with Gandi DNS plugin
|
- Download Caddy with Gandi DNS plugin
|
||||||
- Enable admin API on `127.0.0.1:2019`
|
- Enable admin API on `127.0.0.1:2019`
|
||||||
- Configure wildcard cert for `*.disinto.ai` via DNS-01
|
- Configure wildcard cert for `*.disinto.ai` via DNS-01
|
||||||
|
- Creates `/etc/caddy/extra.d/` for operator-owned site blocks
|
||||||
|
- Emitted Caddyfile ends with `import /etc/caddy/extra.d/*.caddy`
|
||||||
|
|
||||||
4. **Sets up SSH**:
|
4. **Sets up SSH**:
|
||||||
- Creates `disinto-register` authorized_keys with forced command
|
- Creates `disinto-register` authorized_keys with forced command
|
||||||
|
|
@ -95,6 +98,27 @@ curl -sL https://raw.githubusercontent.com/disinto-admin/disinto/fix/issue-621/t
|
||||||
- `/opt/disinto-edge/register.sh` — forced command handler
|
- `/opt/disinto-edge/register.sh` — forced command handler
|
||||||
- `/opt/disinto-edge/lib/*.sh` — helper libraries
|
- `/opt/disinto-edge/lib/*.sh` — helper libraries
|
||||||
|
|
||||||
|
## Operator-Owned Site Blocks
|
||||||
|
|
||||||
|
Edge-control owns the top-level `/etc/caddy/Caddyfile` and dynamic `<project>.<DOMAIN_SUFFIX>` routes injected via the Caddy admin API. Operators own everything under `/etc/caddy/extra.d/`.
|
||||||
|
|
||||||
|
To serve non-tunnel content (apex domain, www redirect, static sites), drop `.caddy` files into `/etc/caddy/extra.d/`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Example: /etc/caddy/extra.d/landing.caddy
|
||||||
|
disinto.ai {
|
||||||
|
root * /home/debian/disinto-site
|
||||||
|
file_server
|
||||||
|
}
|
||||||
|
|
||||||
|
# Example: /etc/caddy/extra.d/www-redirect.caddy
|
||||||
|
www.disinto.ai {
|
||||||
|
redir https://disinto.ai{uri} permanent
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
These files survive across `install.sh` re-runs. The `--extra-caddyfile <path>` flag overrides the default import glob (`/etc/caddy/extra.d/*.caddy`) if needed.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
### Register a Tunnel (from dev box)
|
### Register a Tunnel (from dev box)
|
||||||
|
|
|
||||||
|
|
@ -43,18 +43,21 @@ INSTALL_DIR="/opt/disinto-edge"
|
||||||
REGISTRY_DIR="/var/lib/disinto"
|
REGISTRY_DIR="/var/lib/disinto"
|
||||||
CADDY_VERSION="2.8.4"
|
CADDY_VERSION="2.8.4"
|
||||||
DOMAIN_SUFFIX="disinto.ai"
|
DOMAIN_SUFFIX="disinto.ai"
|
||||||
|
EXTRA_CADDYFILE="/etc/caddy/extra.d/*.caddy"
|
||||||
|
|
||||||
usage() {
|
usage() {
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
Usage: $0 [options]
|
Usage: $0 [options]
|
||||||
|
|
||||||
Options:
|
Options:
|
||||||
--gandi-token <token> Gandi API token for wildcard cert (required)
|
--gandi-token <token> Gandi API token for wildcard cert (required)
|
||||||
--install-dir <dir> Install directory (default: /opt/disinto-edge)
|
--install-dir <dir> Install directory (default: /opt/disinto-edge)
|
||||||
--registry-dir <dir> Registry directory (default: /var/lib/disinto)
|
--registry-dir <dir> Registry directory (default: /var/lib/disinto)
|
||||||
--caddy-version <ver> Caddy version to install (default: ${CADDY_VERSION})
|
--caddy-version <ver> Caddy version to install (default: ${CADDY_VERSION})
|
||||||
--domain-suffix <suffix> Domain suffix for tunnels (default: disinto.ai)
|
--domain-suffix <suffix> Domain suffix for tunnels (default: disinto.ai)
|
||||||
-h, --help Show this help
|
--extra-caddyfile <path> Import path for operator-owned Caddy config
|
||||||
|
(default: /etc/caddy/extra.d/*.caddy)
|
||||||
|
-h, --help Show this help
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
$0 --gandi-token YOUR_GANDI_API_TOKEN
|
$0 --gandi-token YOUR_GANDI_API_TOKEN
|
||||||
|
|
@ -84,6 +87,10 @@ while [[ $# -gt 0 ]]; do
|
||||||
DOMAIN_SUFFIX="$2"
|
DOMAIN_SUFFIX="$2"
|
||||||
shift 2
|
shift 2
|
||||||
;;
|
;;
|
||||||
|
--extra-caddyfile)
|
||||||
|
EXTRA_CADDYFILE="$2"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
-h|--help)
|
-h|--help)
|
||||||
usage
|
usage
|
||||||
;;
|
;;
|
||||||
|
|
@ -225,8 +232,29 @@ EOF
|
||||||
chmod 600 "$GANDI_ENV"
|
chmod 600 "$GANDI_ENV"
|
||||||
|
|
||||||
# Create Caddyfile with admin API and wildcard cert
|
# Create Caddyfile with admin API and wildcard cert
|
||||||
|
# Note: Caddy auto-generates server names (srv0, srv1, …). lib/caddy.sh
|
||||||
|
# discovers the server name dynamically via _discover_server_name() so we
|
||||||
|
# don't need to name the server here.
|
||||||
CADDYFILE="/etc/caddy/Caddyfile"
|
CADDYFILE="/etc/caddy/Caddyfile"
|
||||||
cat > "$CADDYFILE" <<EOF
|
|
||||||
|
# Back up existing Caddyfile before overwriting
|
||||||
|
if [ -f "$CADDYFILE" ] && [ ! -f "${CADDYFILE}.pre-disinto" ]; then
|
||||||
|
cp "$CADDYFILE" "${CADDYFILE}.pre-disinto"
|
||||||
|
log_info "Backed up existing Caddyfile to ${CADDYFILE}.pre-disinto"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create extra.d directory for operator-owned site blocks
|
||||||
|
EXTRA_DIR="/etc/caddy/extra.d"
|
||||||
|
mkdir -p "$EXTRA_DIR"
|
||||||
|
chmod 0755 "$EXTRA_DIR"
|
||||||
|
if getent group caddy >/dev/null 2>&1; then
|
||||||
|
chown root:caddy "$EXTRA_DIR"
|
||||||
|
else
|
||||||
|
log_warn "Group 'caddy' does not exist; extra.d owned by root:root"
|
||||||
|
fi
|
||||||
|
log_info "Created ${EXTRA_DIR} for operator-owned Caddy config"
|
||||||
|
|
||||||
|
cat > "$CADDYFILE" <<CADDYEOF
|
||||||
# Caddy configuration for edge control plane
|
# Caddy configuration for edge control plane
|
||||||
# Admin API enabled on 127.0.0.1:2019
|
# Admin API enabled on 127.0.0.1:2019
|
||||||
|
|
||||||
|
|
@ -240,7 +268,10 @@ cat > "$CADDYFILE" <<EOF
|
||||||
dns gandi {env.GANDI_API_KEY}
|
dns gandi {env.GANDI_API_KEY}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
EOF
|
|
||||||
|
# Operator-owned site blocks (apex, www, static content, etc.)
|
||||||
|
import ${EXTRA_CADDYFILE}
|
||||||
|
CADDYEOF
|
||||||
|
|
||||||
# Start Caddy
|
# Start Caddy
|
||||||
systemctl restart caddy 2>/dev/null || {
|
systemctl restart caddy 2>/dev/null || {
|
||||||
|
|
@ -359,6 +390,7 @@ echo "Configuration:"
|
||||||
echo " Install directory: ${INSTALL_DIR}"
|
echo " Install directory: ${INSTALL_DIR}"
|
||||||
echo " Registry: ${REGISTRY_FILE}"
|
echo " Registry: ${REGISTRY_FILE}"
|
||||||
echo " Caddy admin API: http://127.0.0.1:2019"
|
echo " Caddy admin API: http://127.0.0.1:2019"
|
||||||
|
echo " Operator site blocks: ${EXTRA_DIR}/ (import ${EXTRA_CADDYFILE})"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Users:"
|
echo "Users:"
|
||||||
echo " disinto-register - SSH forced command (runs ${INSTALL_DIR}/register.sh)"
|
echo " disinto-register - SSH forced command (runs ${INSTALL_DIR}/register.sh)"
|
||||||
|
|
|
||||||
|
|
@ -19,6 +19,24 @@ CADDY_ADMIN_URL="${CADDY_ADMIN_URL:-http://127.0.0.1:2019}"
|
||||||
# Domain suffix for projects
|
# Domain suffix for projects
|
||||||
DOMAIN_SUFFIX="${DOMAIN_SUFFIX:-disinto.ai}"
|
DOMAIN_SUFFIX="${DOMAIN_SUFFIX:-disinto.ai}"
|
||||||
|
|
||||||
|
# Discover the Caddy server name that listens on :80/:443
|
||||||
|
# Usage: _discover_server_name
|
||||||
|
_discover_server_name() {
|
||||||
|
local server_name
|
||||||
|
server_name=$(curl -sS "${CADDY_ADMIN_URL}/config/apps/http/servers" \
|
||||||
|
| jq -r 'to_entries | map(select(.value.listen[]? | test(":(80|443)$"))) | .[0].key // empty') || {
|
||||||
|
echo "Error: could not query Caddy admin API for servers" >&2
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [ -z "$server_name" ]; then
|
||||||
|
echo "Error: could not find a Caddy server listening on :80/:443" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "$server_name"
|
||||||
|
}
|
||||||
|
|
||||||
# Add a route for a project
|
# Add a route for a project
|
||||||
# Usage: add_route <project> <port>
|
# Usage: add_route <project> <port>
|
||||||
add_route() {
|
add_route() {
|
||||||
|
|
@ -26,6 +44,9 @@ add_route() {
|
||||||
local port="$2"
|
local port="$2"
|
||||||
local fqdn="${project}.${DOMAIN_SUFFIX}"
|
local fqdn="${project}.${DOMAIN_SUFFIX}"
|
||||||
|
|
||||||
|
local server_name
|
||||||
|
server_name=$(_discover_server_name) || return 1
|
||||||
|
|
||||||
# Build the route configuration (partial config)
|
# Build the route configuration (partial config)
|
||||||
local route_config
|
local route_config
|
||||||
route_config=$(cat <<EOF
|
route_config=$(cat <<EOF
|
||||||
|
|
@ -58,16 +79,21 @@ add_route() {
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
# Append route using POST /config/apps/http/servers/edge/routes
|
# Append route via admin API, checking HTTP status
|
||||||
local response
|
local response status body
|
||||||
response=$(curl -s -X POST \
|
response=$(curl -sS -w '\n%{http_code}' -X POST \
|
||||||
"${CADDY_ADMIN_URL}/config/apps/http/servers/edge/routes" \
|
"${CADDY_ADMIN_URL}/config/apps/http/servers/${server_name}/routes" \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
-d "$route_config" 2>&1) || {
|
-d "$route_config") || {
|
||||||
echo "Error: failed to add route for ${fqdn}" >&2
|
echo "Error: failed to add route for ${fqdn}" >&2
|
||||||
echo "Response: ${response}" >&2
|
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
status=$(echo "$response" | tail -n1)
|
||||||
|
body=$(echo "$response" | sed '$d')
|
||||||
|
if [ "$status" -ge 400 ]; then
|
||||||
|
echo "Error: Caddy admin API returned ${status}: ${body}" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Added route: ${fqdn} → 127.0.0.1:${port}" >&2
|
echo "Added route: ${fqdn} → 127.0.0.1:${port}" >&2
|
||||||
}
|
}
|
||||||
|
|
@ -78,31 +104,45 @@ remove_route() {
|
||||||
local project="$1"
|
local project="$1"
|
||||||
local fqdn="${project}.${DOMAIN_SUFFIX}"
|
local fqdn="${project}.${DOMAIN_SUFFIX}"
|
||||||
|
|
||||||
# First, get current routes
|
local server_name
|
||||||
local routes_json
|
server_name=$(_discover_server_name) || return 1
|
||||||
routes_json=$(curl -s "${CADDY_ADMIN_URL}/config/apps/http/servers/edge/routes" 2>&1) || {
|
|
||||||
|
# First, get current routes, checking HTTP status
|
||||||
|
local response status body
|
||||||
|
response=$(curl -sS -w '\n%{http_code}' \
|
||||||
|
"${CADDY_ADMIN_URL}/config/apps/http/servers/${server_name}/routes") || {
|
||||||
echo "Error: failed to get current routes" >&2
|
echo "Error: failed to get current routes" >&2
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
status=$(echo "$response" | tail -n1)
|
||||||
|
body=$(echo "$response" | sed '$d')
|
||||||
|
if [ "$status" -ge 400 ]; then
|
||||||
|
echo "Error: Caddy admin API returned ${status}: ${body}" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Find the route index that matches our fqdn using jq
|
# Find the route index that matches our fqdn using jq
|
||||||
local route_index
|
local route_index
|
||||||
route_index=$(echo "$routes_json" | jq -r "to_entries[] | select(.value.match[]?.host[]? == \"${fqdn}\") | .key" 2>/dev/null | head -1)
|
route_index=$(echo "$body" | jq -r "to_entries[] | select(.value.match[]?.host[]? == \"${fqdn}\") | .key" 2>/dev/null | head -1)
|
||||||
|
|
||||||
if [ -z "$route_index" ] || [ "$route_index" = "null" ]; then
|
if [ -z "$route_index" ] || [ "$route_index" = "null" ]; then
|
||||||
echo "Warning: route for ${fqdn} not found" >&2
|
echo "Warning: route for ${fqdn} not found" >&2
|
||||||
return 0
|
return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Delete the route at the found index
|
# Delete the route at the found index, checking HTTP status
|
||||||
local response
|
response=$(curl -sS -w '\n%{http_code}' -X DELETE \
|
||||||
response=$(curl -s -X DELETE \
|
"${CADDY_ADMIN_URL}/config/apps/http/servers/${server_name}/routes/${route_index}" \
|
||||||
"${CADDY_ADMIN_URL}/config/apps/http/servers/edge/routes/${route_index}" \
|
-H "Content-Type: application/json") || {
|
||||||
-H "Content-Type: application/json" 2>&1) || {
|
|
||||||
echo "Error: failed to remove route for ${fqdn}" >&2
|
echo "Error: failed to remove route for ${fqdn}" >&2
|
||||||
echo "Response: ${response}" >&2
|
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
status=$(echo "$response" | tail -n1)
|
||||||
|
body=$(echo "$response" | sed '$d')
|
||||||
|
if [ "$status" -ge 400 ]; then
|
||||||
|
echo "Error: Caddy admin API returned ${status}: ${body}" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Removed route: ${fqdn}" >&2
|
echo "Removed route: ${fqdn}" >&2
|
||||||
}
|
}
|
||||||
|
|
@ -110,13 +150,18 @@ remove_route() {
|
||||||
# Reload Caddy to apply configuration changes
|
# Reload Caddy to apply configuration changes
|
||||||
# Usage: reload_caddy
|
# Usage: reload_caddy
|
||||||
reload_caddy() {
|
reload_caddy() {
|
||||||
local response
|
local response status body
|
||||||
response=$(curl -s -X POST \
|
response=$(curl -sS -w '\n%{http_code}' -X POST \
|
||||||
"${CADDY_ADMIN_URL}/reload" 2>&1) || {
|
"${CADDY_ADMIN_URL}/reload") || {
|
||||||
echo "Error: failed to reload Caddy" >&2
|
echo "Error: failed to reload Caddy" >&2
|
||||||
echo "Response: ${response}" >&2
|
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
status=$(echo "$response" | tail -n1)
|
||||||
|
body=$(echo "$response" | sed '$d')
|
||||||
|
if [ "$status" -ge 400 ]; then
|
||||||
|
echo "Error: Caddy reload returned ${status}: ${body}" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Caddy reloaded" >&2
|
echo "Caddy reloaded" >&2
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -39,6 +39,11 @@ EOF
|
||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# TODO(#713): Subdomain fallback — if subpath routing (#704/#708) fails, this
|
||||||
|
# function would need to register additional routes for forge.<project>,
|
||||||
|
# ci.<project>, chat.<project> subdomains (or accept a --subdomain parameter).
|
||||||
|
# See docs/edge-routing-fallback.md for the full pivot plan.
|
||||||
|
|
||||||
# Register a new tunnel
|
# Register a new tunnel
|
||||||
# Usage: do_register <project> <pubkey>
|
# Usage: do_register <project> <pubkey>
|
||||||
do_register() {
|
do_register() {
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue