diff --git a/.env.example b/.env.example index a1f24d5..c1c0b98 100644 --- a/.env.example +++ b/.env.example @@ -32,10 +32,13 @@ FORGE_URL=http://localhost:3000 # [CONFIG] local Forgejo instance # - FORGE_PASS_DEV_QWEN2 # Name conversion: tr 'a-z-' 'A-Z_' (lowercase→UPPER, hyphens→underscores). # The compose generator looks these up via the agent's `forge_user` field in -# the project TOML. Configure local-model agents via [agents.X] sections in -# projects/*.toml — this is the canonical activation path. +# the project TOML. The pre-existing `dev-qwen` llama agent uses +# FORGE_TOKEN_LLAMA / FORGE_PASS_LLAMA (kept for backwards-compat with the +# legacy `ENABLE_LLAMA_AGENT=1` single-agent path). FORGE_TOKEN= # [SECRET] dev-bot API token (default for all agents) FORGE_PASS= # [SECRET] dev-bot password for git HTTP push (#361) +FORGE_TOKEN_LLAMA= # [SECRET] dev-qwen API token (for agents-llama) +FORGE_PASS_LLAMA= # [SECRET] dev-qwen password for git HTTP push FORGE_REVIEW_TOKEN= # [SECRET] review-bot API token FORGE_REVIEW_PASS= # [SECRET] review-bot password for git HTTP push FORGE_PLANNER_TOKEN= # [SECRET] planner-bot API token @@ -104,6 +107,13 @@ FORWARD_AUTH_SECRET= # [SECRET] Shared secret for Caddy ↔ # Store all project secrets here so formulas reference env vars, never hardcode. BASE_RPC_URL= # [SECRET] on-chain RPC endpoint +# ── Local Qwen dev agent (optional) ────────────────────────────────────── +# Set ENABLE_LLAMA_AGENT=1 to emit agents-llama in docker-compose.yml. +# Requires a running llama-server reachable at ANTHROPIC_BASE_URL. +# See docs/agents-llama.md for details. +ENABLE_LLAMA_AGENT=0 # [CONFIG] 1 = enable agents-llama service +ANTHROPIC_BASE_URL= # [CONFIG] e.g. 
http://host.docker.internal:8081 + # ── Tuning ──────────────────────────────────────────────────────────────── CLAUDE_TIMEOUT=7200 # [CONFIG] max seconds per Claude invocation diff --git a/AGENTS.md b/AGENTS.md index ad3867b..ef5f00d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -122,7 +122,8 @@ bash dev/phase-test.sh | Reproduce | `docker/reproduce/` | Bug reproduction using Playwright MCP | `formulas/reproduce.toml` | | Triage | `docker/reproduce/` | Deep root cause analysis | `formulas/triage.toml` | | Edge dispatcher | `docker/edge/` | Polls ops repo for vault actions, executes via Claude sessions | `docker/edge/dispatcher.sh` | -| Local-model agents | `docker/agents/` (same image) | Local llama-server agents configured via `[agents.X]` sections in project TOML | [docs/agents-llama.md](docs/agents-llama.md) | +| agents-llama | `docker/agents/` (same image) | Local-Qwen dev agent (`AGENT_ROLES=dev`), gated on `ENABLE_LLAMA_AGENT=1` | [docs/agents-llama.md](docs/agents-llama.md) | +| agents-llama-all | `docker/agents/` (same image) | Local-Qwen all-roles agent (all 7 roles), profile `agents-llama-all` | [docs/agents-llama.md](docs/agents-llama.md) | > **Vault:** Being redesigned as a PR-based approval workflow (issues #73-#77). > See [docs/VAULT.md](docs/VAULT.md) for the vault PR workflow details. diff --git a/bin/disinto b/bin/disinto index 2b676a3..a752bac 100755 --- a/bin/disinto +++ b/bin/disinto @@ -684,21 +684,13 @@ _disinto_init_nomad() { exit 1 fi - # --empty short-circuits after cluster-up: no policies, no auth, no - # import, no deploy. It's the "cluster-only escape hatch" for debugging - # (docs/nomad-migration.md). Caller-side validation already rejects - # --empty combined with --with or any --import-* flag, so reaching - # this branch with those set is a bug in the caller. 
- # - # On the default (non-empty) path, vault-apply-policies.sh and - # vault-nomad-auth.sh are invoked unconditionally — they are idempotent - # and cheap to re-run, and subsequent --with deployments depend on - # them. vault-import.sh is invoked only when an --import-* flag is set. + # When any --import-* flag is set, preflight all Step 2/3/4 scripts up + # front: policies+auth always run, and the import step needs its script. local import_any=false if [ -n "$import_env" ] || [ -n "$import_sops" ]; then import_any=true fi - if [ "$empty" != "true" ]; then + if [ "$import_any" = true ]; then if [ ! -x "$vault_policies_sh" ]; then echo "Error: ${vault_policies_sh} not found or not executable" >&2 exit 1 @@ -707,7 +699,7 @@ _disinto_init_nomad() { echo "Error: ${vault_auth_sh} not found or not executable" >&2 exit 1 fi - if [ "$import_any" = true ] && [ ! -x "$vault_import_sh" ]; then + if [ ! -x "$vault_import_sh" ]; then echo "Error: ${vault_import_sh} not found or not executable" >&2 exit 1 fi @@ -730,13 +722,6 @@ _disinto_init_nomad() { "${cmd[@]}" || true echo "" - # --empty skips policies/auth/import/deploy — cluster-up only, no - # workloads. The operator-visible dry-run plan must match the real - # run, so short-circuit here too. - if [ "$empty" = "true" ]; then - exit 0 - fi - # Vault policies + auth are invoked on every nomad real-run path # regardless of --import-* flags (they're idempotent; S2.1 + S2.3). # Mirror that ordering in the dry-run plan so the operator sees the @@ -808,12 +793,6 @@ _disinto_init_nomad() { sudo -n -- "${cluster_cmd[@]}" || exit $? fi - # --empty short-circuits here: cluster-only, no policies/auth/import - # and no deploy. Matches the dry-run plan above and the docs/runbook. - if [ "$empty" = "true" ]; then - exit 0 - fi - # Apply Vault policies (S2.1) — idempotent, safe to re-run. 
echo "" echo "── Applying Vault policies ────────────────────────────" @@ -1026,15 +1005,6 @@ disinto_init() { exit 1 fi - # --empty is the cluster-only escape hatch — it skips policies, auth, - # import, and deploy. Pairing it with --import-* silently does nothing, - # which is a worse failure mode than a clear error. Reject explicitly. - if [ "$empty" = true ] \ - && { [ -n "$import_env" ] || [ -n "$import_sops" ] || [ -n "$age_key" ]; }; then - echo "Error: --empty and --import-env/--import-sops/--age-key are mutually exclusive" >&2 - exit 1 - fi - # Dispatch on backend — the nomad path runs lib/init/nomad/cluster-up.sh # (S0.4). The default and --empty variants are identical today; Step 1 # will branch on $empty to add job deployment to the default path. @@ -1154,6 +1124,7 @@ p.write_text(text) echo "" echo "[ensure] Forgejo admin user 'disinto-admin'" echo "[ensure] 8 bot users: dev-bot, review-bot, planner-bot, gardener-bot, vault-bot, supervisor-bot, predictor-bot, architect-bot" + echo "[ensure] 2 llama bot users: dev-qwen, dev-qwen-nightly" echo "[ensure] .profile repos for all bots" echo "[ensure] repo ${forge_repo} on Forgejo with collaborators" echo "[run] preflight checks" @@ -1349,6 +1320,19 @@ p.write_text(text) echo "Config: CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 saved to .env" fi + # Write local-Qwen dev agent env keys with safe defaults (#769) + if ! 
grep -q '^ENABLE_LLAMA_AGENT=' "$env_file" 2>/dev/null; then + cat >> "$env_file" <<'LLAMAENVEOF' + +# Local Qwen dev agent (optional) — set to 1 to enable +ENABLE_LLAMA_AGENT=0 +FORGE_TOKEN_LLAMA= +FORGE_PASS_LLAMA= +ANTHROPIC_BASE_URL= +LLAMAENVEOF + echo "Config: ENABLE_LLAMA_AGENT keys written to .env (disabled by default)" + fi + # Create labels on remote create_labels "$forge_repo" "$forge_url" diff --git a/docker/agents/entrypoint.sh b/docker/agents/entrypoint.sh index 7c58674..f838c15 100644 --- a/docker/agents/entrypoint.sh +++ b/docker/agents/entrypoint.sh @@ -17,38 +17,6 @@ set -euo pipefail # - predictor: every 24 hours (288 iterations * 5 min) # - supervisor: every SUPERVISOR_INTERVAL seconds (default: 1200 = 20 min) -# ── Migration check: reject ENABLE_LLAMA_AGENT ─────────────────────────────── -# #846: The legacy ENABLE_LLAMA_AGENT env flag is no longer supported. -# Activation is now done exclusively via [agents.X] sections in project TOML. -# If this legacy flag is detected, fail immediately with a migration message. -if [ "${ENABLE_LLAMA_AGENT:-}" = "1" ]; then - cat <<'MIGRATION_ERR' -FATAL: ENABLE_LLAMA_AGENT is no longer supported. - -The legacy ENABLE_LLAMA_AGENT=1 flag has been removed (#846). -Activation is now done exclusively via [agents.X] sections in projects/*.toml. - -To migrate: - 1. Remove ENABLE_LLAMA_AGENT from your .env or .env.enc file - 2. Add an [agents.] section to your project TOML: - - [agents.dev-qwen] - base_url = "http://your-llama-server:8081" - model = "unsloth/Qwen3.5-35B-A3B" - api_key = "sk-no-key-required" - roles = ["dev"] - forge_user = "dev-qwen" - compact_pct = 60 - poll_interval = 60 - - 3. Run: disinto init - 4. Start the agent: docker compose up -d agents-dev-qwen - -See docs/agents-llama.md for full details. 
-MIGRATION_ERR - exit 1 -fi - DISINTO_BAKED="/home/agent/disinto" DISINTO_LIVE="/home/agent/repos/_factory" DISINTO_DIR="$DISINTO_BAKED" # start with baked copy; switched to live checkout after bootstrap diff --git a/docs/agents-llama.md b/docs/agents-llama.md index b3a1334..bc973b7 100644 --- a/docs/agents-llama.md +++ b/docs/agents-llama.md @@ -2,12 +2,9 @@ Local-model agents run the same agent code as the Claude-backed agents, but connect to a local llama-server (or compatible OpenAI-API endpoint) instead of -the Anthropic API. This document describes the canonical activation flow using +the Anthropic API. This document describes the current activation flow using `disinto hire-an-agent` and `[agents.X]` TOML configuration. -> **Note:** The legacy `ENABLE_LLAMA_AGENT=1` env flag has been removed (#846). -> Activation is now done exclusively via `[agents.X]` sections in project TOML. - ## Overview Local-model agents are configured via `[agents.]` sections in diff --git a/docs/nomad-migration.md b/docs/nomad-migration.md index 02ff023..8984b10 100644 --- a/docs/nomad-migration.md +++ b/docs/nomad-migration.md @@ -60,9 +60,6 @@ This runs, in order: - `--age-key` without `--import-sops` → error. - `--import-env` alone (no sops) → OK (imports just the plaintext `.env`). - `--backend=docker` with any `--import-*` flag → error. -- `--empty` with any `--import-*` flag → error (mutually exclusive: `--empty` - skips the import step, so pairing them silently discards the import - intent). 
## Idempotency diff --git a/formulas/release.sh b/formulas/release.sh index 6526d1a..b8c4eb6 100644 --- a/formulas/release.sh +++ b/formulas/release.sh @@ -178,8 +178,8 @@ log "Tagged disinto/agents:${RELEASE_VERSION}" log "Step 6/6: Restarting agent containers" -docker compose stop agents 2>/dev/null || true -docker compose up -d agents +docker compose stop agents agents-llama 2>/dev/null || true +docker compose up -d agents agents-llama log "Agent containers restarted" # ── Done ───────────────────────────────────────────────────────────────── diff --git a/formulas/release.toml b/formulas/release.toml index ccd7f95..f702f42 100644 --- a/formulas/release.toml +++ b/formulas/release.toml @@ -189,10 +189,10 @@ Restart agent containers to use the new image. - docker compose pull agents 2. Stop and remove existing agent containers: - - docker compose down agents + - docker compose down agents agents-llama 2>/dev/null || true 3. Start agents with new image: - - docker compose up -d agents + - docker compose up -d agents agents-llama 4. Wait for containers to be healthy: - for i in {1..30}; do @@ -203,7 +203,7 @@ Restart agent containers to use the new image. - done 5. Verify containers are running: - - docker compose ps agents + - docker compose ps agents agents-llama 6. Log restart: - echo "Restarted agents containers" diff --git a/lib/action-vault.sh b/lib/action-vault.sh index 7602a39..6348cc6 100644 --- a/lib/action-vault.sh +++ b/lib/action-vault.sh @@ -128,6 +128,7 @@ vault_request() { # Validate TOML content local tmp_toml tmp_toml=$(mktemp /tmp/vault-XXXXXX.toml) + trap 'rm -f "$tmp_toml"' RETURN printf '%s' "$toml_content" > "$tmp_toml" @@ -135,7 +136,6 @@ vault_request() { local vault_env="${FACTORY_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}/action-vault/vault-env.sh" if [ ! -f "$vault_env" ]; then echo "ERROR: vault-env.sh not found at $vault_env" >&2 - rm -f "$tmp_toml" return 1 fi @@ -145,15 +145,11 @@ vault_request() { if ! 
source "$vault_env"; then FORGE_TOKEN="${_saved_forge_token:-}" echo "ERROR: failed to source vault-env.sh" >&2 - rm -f "$tmp_toml" return 1 fi # Restore caller's FORGE_TOKEN after validation FORGE_TOKEN="${_saved_forge_token:-}" - # Set trap AFTER sourcing vault-env.sh to avoid RETURN trap firing during source - trap 'rm -f "$tmp_toml"' RETURN - # Run validation if ! validate_vault_action "$tmp_toml"; then echo "ERROR: TOML validation failed" >&2 diff --git a/lib/forge-setup.sh b/lib/forge-setup.sh index 2f8b117..2b7b697 100644 --- a/lib/forge-setup.sh +++ b/lib/forge-setup.sh @@ -356,6 +356,16 @@ setup_forge() { [predictor-bot]="FORGE_PREDICTOR_PASS" [architect-bot]="FORGE_ARCHITECT_PASS" ) + # Llama bot users (local-model agents) — separate from main agents + # Each llama agent gets its own Forgejo user, token, and password + local -A llama_token_vars=( + [dev-qwen]="FORGE_TOKEN_LLAMA" + [dev-qwen-nightly]="FORGE_TOKEN_LLAMA_NIGHTLY" + ) + local -A llama_pass_vars=( + [dev-qwen]="FORGE_PASS_LLAMA" + [dev-qwen-nightly]="FORGE_PASS_LLAMA_NIGHTLY" + ) local bot_user bot_pass token token_var pass_var @@ -505,12 +515,159 @@ setup_forge() { fi done + # Create llama bot users and tokens (local-model agents) + # These are separate from the main agents and get their own credentials + echo "" + echo "── Setting up llama bot users ────────────────────────────" + + local llama_user llama_pass llama_token llama_token_var llama_pass_var + for llama_user in "${!llama_token_vars[@]}"; do + llama_token_var="${llama_token_vars[$llama_user]}" + llama_pass_var="${llama_pass_vars[$llama_user]}" + + # Check if token already exists in .env + local token_exists=false + if _token_exists_in_env "$llama_token_var" "$env_file"; then + token_exists=true + fi + + # Check if password already exists in .env + local pass_exists=false + if _pass_exists_in_env "$llama_pass_var" "$env_file"; then + pass_exists=true + fi + + # Check if llama bot user exists on Forgejo + local 
llama_user_exists=false + if curl -sf --max-time 5 \ + -H "Authorization: token ${admin_token}" \ + "${forge_url}/api/v1/users/${llama_user}" >/dev/null 2>&1; then + llama_user_exists=true + fi + + # Skip token/password regeneration if both exist in .env and not forcing rotation + if [ "$token_exists" = true ] && [ "$pass_exists" = true ] && [ "$rotate_tokens" = false ]; then + echo " ${llama_user} token and password preserved (use --rotate-tokens to force)" + # Still export the existing token for use within this run + local existing_token existing_pass + existing_token=$(grep "^${llama_token_var}=" "$env_file" | head -1 | cut -d= -f2-) + existing_pass=$(grep "^${llama_pass_var}=" "$env_file" | head -1 | cut -d= -f2-) + export "${llama_token_var}=${existing_token}" + export "${llama_pass_var}=${existing_pass}" + continue + fi + + # Generate new credentials if: + # - Token doesn't exist (first run) + # - Password doesn't exist (first run) + # - --rotate-tokens flag is set (explicit rotation) + if [ "$llama_user_exists" = false ]; then + # User doesn't exist - create it + llama_pass="llama-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)" + echo "Creating llama bot user: ${llama_user}" + local create_output + if ! create_output=$(_forgejo_exec forgejo admin user create \ + --username "${llama_user}" \ + --password "${llama_pass}" \ + --email "${llama_user}@disinto.local" \ + --must-change-password=false 2>&1); then + echo "Error: failed to create llama bot user '${llama_user}':" >&2 + echo " ${create_output}" >&2 + exit 1 + fi + # Forgejo 11.x ignores --must-change-password=false on create; + # explicitly clear the flag so basic-auth token creation works. + _forgejo_exec forgejo admin user change-password \ + --username "${llama_user}" \ + --password "${llama_pass}" \ + --must-change-password=false + + # Verify llama bot user was actually created + if ! 
curl -sf --max-time 5 \ + -H "Authorization: token ${admin_token}" \ + "${forge_url}/api/v1/users/${llama_user}" >/dev/null 2>&1; then + echo "Error: llama bot user '${llama_user}' not found after creation" >&2 + exit 1 + fi + echo " ${llama_user} user created" + else + # User exists - reset password if needed + echo " ${llama_user} user exists" + if [ "$rotate_tokens" = true ] || [ "$pass_exists" = false ]; then + llama_pass="llama-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)" + _forgejo_exec forgejo admin user change-password \ + --username "${llama_user}" \ + --password "${llama_pass}" \ + --must-change-password=false || { + echo "Error: failed to reset password for existing llama bot user '${llama_user}'" >&2 + exit 1 + } + echo " ${llama_user} password reset for token generation" + else + # Password exists, get it from .env + llama_pass=$(grep "^${llama_pass_var}=" "$env_file" | head -1 | cut -d= -f2-) + fi + fi + + # Generate token via API (basic auth as the llama user) + # First, delete any existing tokens to avoid name collision + local existing_llama_token_ids + existing_llama_token_ids=$(curl -sf \ + -u "${llama_user}:${llama_pass}" \ + "${forge_url}/api/v1/users/${llama_user}/tokens" 2>/dev/null \ + | jq -r '.[].id // empty' 2>/dev/null) || existing_llama_token_ids="" + + # Delete any existing tokens for this user + if [ -n "$existing_llama_token_ids" ]; then + while IFS= read -r tid; do + [ -n "$tid" ] && curl -sf -X DELETE \ + -u "${llama_user}:${llama_pass}" \ + "${forge_url}/api/v1/users/${llama_user}/tokens/${tid}" >/dev/null 2>&1 || true + done <<< "$existing_llama_token_ids" + fi + + llama_token=$(curl -sf -X POST \ + -u "${llama_user}:${llama_pass}" \ + -H "Content-Type: application/json" \ + "${forge_url}/api/v1/users/${llama_user}/tokens" \ + -d "{\"name\":\"disinto-${llama_user}-token\",\"scopes\":[\"all\"]}" 2>/dev/null \ + | jq -r '.sha1 // empty') || llama_token="" + + if [ -z "$llama_token" ]; then + echo "Error: 
failed to create API token for '${llama_user}'" >&2 + exit 1 + fi + + # Store token in .env under the llama-specific variable name + if grep -q "^${llama_token_var}=" "$env_file" 2>/dev/null; then + sed -i "s|^${llama_token_var}=.*|${llama_token_var}=${llama_token}|" "$env_file" + else + printf '%s=%s\n' "$llama_token_var" "$llama_token" >> "$env_file" + fi + export "${llama_token_var}=${llama_token}" + echo " ${llama_user} token generated and saved (${llama_token_var})" + + # Store password in .env for git HTTP push (#361) + # Forgejo 11.x API tokens don't work for git push; password auth does. + if grep -q "^${llama_pass_var}=" "$env_file" 2>/dev/null; then + sed -i "s|^${llama_pass_var}=.*|${llama_pass_var}=${llama_pass}|" "$env_file" + else + printf '%s=%s\n' "$llama_pass_var" "$llama_pass" >> "$env_file" + fi + export "${llama_pass_var}=${llama_pass}" + echo " ${llama_user} password saved (${llama_pass_var})" + done + # Create .profile repos for all bot users (if they don't already exist) # This runs the same logic as hire-an-agent Step 2-3 for idempotent setup echo "" echo "── Setting up .profile repos ────────────────────────────" local -a bot_users=(dev-bot review-bot planner-bot gardener-bot vault-bot supervisor-bot predictor-bot architect-bot) + # Add llama bot users to .profile repo creation + for llama_user in "${!llama_token_vars[@]}"; do + bot_users+=("$llama_user") + done local bot_user for bot_user in "${bot_users[@]}"; do @@ -618,6 +775,15 @@ setup_forge() { -d "{\"permission\":\"${bot_perm}\"}" >/dev/null 2>&1 || true done + # Add llama bot users as write collaborators for local-model agents + for llama_user in "${!llama_token_vars[@]}"; do + curl -sf -X PUT \ + -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \ + -H "Content-Type: application/json" \ + "${forge_url}/api/v1/repos/${repo_slug}/collaborators/${llama_user}" \ + -d '{"permission":"write"}' >/dev/null 2>&1 || true + done + # Add disinto-admin as admin collaborator curl -sf -X 
PUT \ -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \ diff --git a/lib/generators.sh b/lib/generators.sh index 0df5725..3f88e39 100644 --- a/lib/generators.sh +++ b/lib/generators.sh @@ -438,6 +438,136 @@ services: COMPOSEEOF + # ── Conditional agents-llama block (ENABLE_LLAMA_AGENT=1) ────────────── + # Local-Qwen dev agent — gated on ENABLE_LLAMA_AGENT so factories without + # a local llama endpoint don't try to start it. See docs/agents-llama.md. + if [ "${ENABLE_LLAMA_AGENT:-0}" = "1" ]; then + cat >> "$compose_file" <<'LLAMAEOF' + + agents-llama: + build: + context: . + dockerfile: docker/agents/Dockerfile + # Rebuild on every up (#887): makes docker/agents/ source changes reach this + # container without a manual `docker compose build`. Cache-fast when clean. + pull_policy: build + container_name: disinto-agents-llama + restart: unless-stopped + security_opt: + - apparmor=unconfined + volumes: + - agent-data:/home/agent/data + - project-repos:/home/agent/repos + - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} + - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro + - ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro + - ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro + - ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro + - woodpecker-data:/woodpecker-data:ro + environment: + FORGE_URL: http://forgejo:3000 + FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto} + FORGE_TOKEN: ${FORGE_TOKEN_LLAMA:-} + FORGE_PASS: ${FORGE_PASS_LLAMA:-} + FORGE_BOT_USERNAMES: ${FORGE_BOT_USERNAMES:-} + WOODPECKER_TOKEN: ${WOODPECKER_TOKEN:-} + CLAUDE_TIMEOUT: ${CLAUDE_TIMEOUT:-7200} + CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: ${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1} + CLAUDE_AUTOCOMPACT_PCT_OVERRIDE: "60" + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-} + ANTHROPIC_BASE_URL: ${ANTHROPIC_BASE_URL:-} + FORGE_ADMIN_PASS: ${FORGE_ADMIN_PASS:-} + DISINTO_CONTAINER: "1" + 
PROJECT_NAME: ${PROJECT_NAME:-project} + PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project} + WOODPECKER_DATA_DIR: /woodpecker-data + WOODPECKER_REPO_ID: "PLACEHOLDER_WP_REPO_ID" + CLAUDE_CONFIG_DIR: ${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config} + POLL_INTERVAL: ${POLL_INTERVAL:-300} + AGENT_ROLES: dev + healthcheck: + test: ["CMD", "pgrep", "-f", "entrypoint.sh"] + interval: 60s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + forgejo: + condition: service_healthy + networks: + - disinto-net + + agents-llama-all: + build: + context: . + dockerfile: docker/agents/Dockerfile + # Rebuild on every up (#887): makes docker/agents/ source changes reach this + # container without a manual `docker compose build`. Cache-fast when clean. + pull_policy: build + container_name: disinto-agents-llama-all + restart: unless-stopped + profiles: ["agents-llama-all"] + security_opt: + - apparmor=unconfined + volumes: + - agent-data:/home/agent/data + - project-repos:/home/agent/repos + - ${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared}:${CLAUDE_SHARED_DIR:-/var/lib/disinto/claude-shared} + - ${CLAUDE_CONFIG_FILE:-${HOME}/.claude.json}:/home/agent/.claude.json:ro + - ${CLAUDE_BIN_DIR}:/usr/local/bin/claude:ro + - ${AGENT_SSH_DIR:-${HOME}/.ssh}:/home/agent/.ssh:ro + - ${SOPS_AGE_DIR:-${HOME}/.config/sops/age}:/home/agent/.config/sops/age:ro + - woodpecker-data:/woodpecker-data:ro + environment: + FORGE_URL: http://forgejo:3000 + FORGE_REPO: ${FORGE_REPO:-disinto-admin/disinto} + FORGE_TOKEN: ${FORGE_TOKEN_LLAMA:-} + FORGE_PASS: ${FORGE_PASS_LLAMA:-} + FORGE_REVIEW_TOKEN: ${FORGE_REVIEW_TOKEN:-} + FORGE_PLANNER_TOKEN: ${FORGE_PLANNER_TOKEN:-} + FORGE_GARDENER_TOKEN: ${FORGE_GARDENER_TOKEN:-} + FORGE_VAULT_TOKEN: ${FORGE_VAULT_TOKEN:-} + FORGE_SUPERVISOR_TOKEN: ${FORGE_SUPERVISOR_TOKEN:-} + FORGE_PREDICTOR_TOKEN: ${FORGE_PREDICTOR_TOKEN:-} + FORGE_ARCHITECT_TOKEN: ${FORGE_ARCHITECT_TOKEN:-} + FORGE_FILER_TOKEN: ${FORGE_FILER_TOKEN:-} + 
FORGE_BOT_USERNAMES: ${FORGE_BOT_USERNAMES:-} + WOODPECKER_TOKEN: ${WOODPECKER_TOKEN:-} + CLAUDE_TIMEOUT: ${CLAUDE_TIMEOUT:-7200} + CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: ${CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC:-1} + CLAUDE_AUTOCOMPACT_PCT_OVERRIDE: "60" + CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS: "1" + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-} + ANTHROPIC_BASE_URL: ${ANTHROPIC_BASE_URL:-} + FORGE_ADMIN_PASS: ${FORGE_ADMIN_PASS:-} + DISINTO_CONTAINER: "1" + PROJECT_NAME: ${PROJECT_NAME:-project} + PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project} + WOODPECKER_DATA_DIR: /woodpecker-data + WOODPECKER_REPO_ID: "PLACEHOLDER_WP_REPO_ID" + CLAUDE_CONFIG_DIR: ${CLAUDE_CONFIG_DIR:-/var/lib/disinto/claude-shared/config} + POLL_INTERVAL: ${POLL_INTERVAL:-300} + GARDENER_INTERVAL: ${GARDENER_INTERVAL:-21600} + ARCHITECT_INTERVAL: ${ARCHITECT_INTERVAL:-21600} + PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200} + SUPERVISOR_INTERVAL: ${SUPERVISOR_INTERVAL:-1200} + AGENT_ROLES: review,dev,gardener,architect,planner,predictor,supervisor + healthcheck: + test: ["CMD", "pgrep", "-f", "entrypoint.sh"] + interval: 60s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + forgejo: + condition: service_healthy + woodpecker: + condition: service_started + networks: + - disinto-net +LLAMAEOF + fi + # Resume the rest of the compose file (runner onward) cat >> "$compose_file" <<'COMPOSEEOF' diff --git a/tests/disinto-init-nomad.bats b/tests/disinto-init-nomad.bats index f38805e..30c7f7c 100644 --- a/tests/disinto-init-nomad.bats +++ b/tests/disinto-init-nomad.bats @@ -280,33 +280,3 @@ setup_file() { [ "$status" -eq 0 ] [[ "$output" == *"env file: /tmp/.env"* ]] } - -# --empty short-circuits after cluster-up: no policies, no auth, no -# import, no deploy. The dry-run plan must match that — cluster-up plan -# appears, but none of the S2.x section banners do. 
-@test "disinto init --backend=nomad --empty --dry-run skips policies/auth/import sections" { - run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --dry-run - [ "$status" -eq 0 ] - # Cluster-up still runs (it's what --empty brings up). - [[ "$output" == *"Cluster-up dry-run"* ]] - # Policies + auth + import must NOT appear under --empty. - [[ "$output" != *"Vault policies dry-run"* ]] - [[ "$output" != *"Vault auth dry-run"* ]] - [[ "$output" != *"Vault import dry-run"* ]] - [[ "$output" != *"no --import-env/--import-sops"* ]] -} - -# --empty + any --import-* flag silently does nothing (import is skipped), -# so the CLI rejects the combination up front rather than letting it -# look like the import "succeeded". -@test "disinto init --backend=nomad --empty --import-env errors" { - run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --import-env /tmp/.env --dry-run - [ "$status" -ne 0 ] - [[ "$output" == *"--empty and --import-env/--import-sops/--age-key are mutually exclusive"* ]] -} - -@test "disinto init --backend=nomad --empty --import-sops --age-key errors" { - run "$DISINTO_BIN" init placeholder/repo --backend=nomad --empty --import-sops /tmp/.env.vault.enc --age-key /tmp/keys.txt --dry-run - [ "$status" -ne 0 ] - [[ "$output" == *"--empty and --import-env/--import-sops/--age-key are mutually exclusive"* ]] -}