#!/usr/bin/env bash
# =============================================================================
# disinto — CLI entry point for the disinto code factory
#
# Commands:
#   disinto init <repo-url> [options]   Bootstrap a new project
#   disinto up                          Start the full stack (docker compose)
#   disinto down                        Stop the full stack
#   disinto logs [service]              Tail service logs
#   disinto shell                       Shell into the agent container
#   disinto status                      Show factory status
#   disinto secrets                     Manage encrypted secrets
#   disinto run <action-id>             Run action in ephemeral runner container
#   disinto ci-logs [--step <step>]     Read CI logs from Woodpecker SQLite
#
# Usage:
#   disinto init https://github.com/user/repo
#   disinto init user/repo --branch main --ci-id 3
#   disinto init user/repo --bare       (bare-metal, no compose)
#   disinto up
#   disinto down
#   disinto status
# =============================================================================
set -euo pipefail

FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)"

source "${FACTORY_ROOT}/lib/env.sh"
source "${FACTORY_ROOT}/lib/ops-setup.sh" # setup_ops_repo, migrate_ops_repo
source "${FACTORY_ROOT}/lib/hire-agent.sh"
source "${FACTORY_ROOT}/lib/forge-setup.sh"
source "${FACTORY_ROOT}/lib/generators.sh"
source "${FACTORY_ROOT}/lib/forge-push.sh"
source "${FACTORY_ROOT}/lib/ci-setup.sh"
source "${FACTORY_ROOT}/lib/release.sh"

# ── Helpers ──────────────────────────────────────────────────────────────────

# Print CLI usage to stdout and exit 1.
# NOTE(review): the heredoc delimiter and the <angle-bracket> argument
# placeholders were garbled in the source under review; the text below is a
# faithful reconstruction — verify wording against the original.
usage() {
  cat <<'EOF'
disinto — CLI entry point for the disinto code factory

Commands:
  disinto init <repo-url> [options]      Bootstrap a new project
  disinto up                             Start the full stack (docker compose)
  disinto down                           Stop the full stack
  disinto logs [service]                 Tail service logs
  disinto shell                          Shell into the agent container
  disinto status                         Show factory status
  disinto secrets                        Manage encrypted secrets
  disinto run <action-id>                Run action in ephemeral runner container
  disinto ci-logs [--step <step>]        Read CI logs from Woodpecker SQLite
  disinto release <version>              Create vault PR for release (e.g., v1.2.0)
  disinto hire-an-agent <name> [--formula <path>]
                                         Hire a new agent (create user + .profile repo)

Init options:
  --branch <branch>     Primary branch (default: auto-detect)
  --repo-root <path>    Local clone path (default: ~/<name>)
  --ci-id <id>          Woodpecker CI repo ID (default: 0 = no CI)
  --forge-url <url>     Forge base URL (default: http://localhost:3000)
  --bare                Skip compose generation (bare-metal setup)
  --yes                 Skip confirmation prompts

Hire an agent options:
  --formula <path>      Path to role formula TOML (default: formulas/<name>.toml)

CI logs options:
  --step <step>         Filter logs to a specific step (e.g., smoke-init)
EOF
  exit 1
}

# Extract an org/repo slug from various URL formats.
# Accepts: https://github.com/user/repo, https://codeberg.org/user/repo,
#          http://localhost:3000/user/repo, user/repo, *.git
# Prints the slug on stdout; exits 1 on anything that does not normalize to
# exactly two path segments.
parse_repo_slug() {
  local url="$1"
  url="${url#https://}"
  url="${url#http://}"
  # Strip a leading hostname segment when three path segments remain.
  # NOTE(review): the regex accepts ANY first segment, not only ones that
  # contain a dot or colon — so "a/b/c" also loses "a". Confirm that is the
  # intended behavior before tightening.
  if [[ "$url" =~ ^[a-zA-Z0-9._:-]+/[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+ ]]; then
    url="${url#*/}" # strip host part
  fi
  # FIX: strip a trailing slash BEFORE the ".git" suffix so inputs like
  # "host/org/repo.git/" also normalize (the original stripped ".git" first,
  # which never matched when a slash followed it).
  url="${url%/}"
  url="${url%.git}"
  if [[ ! "$url" =~ ^[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+$ ]]; then
    echo "Error: invalid repo URL — expected https://host/org/repo or org/repo" >&2
    exit 1
  fi
  printf '%s' "$url"
}

# Build a clone-able URL from a slug and forge URL.
clone_url_from_slug() {
  local slug="$1" forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  printf '%s/%s.git' "$forge_url" "$slug"
}

# Ensure an age key exists; generate one if missing.
# Exports AGE_PUBLIC_KEY on success; returns 1 when age-keygen is unavailable
# or the public key cannot be derived.
ensure_age_key() {
  local key_dir="${HOME}/.config/sops/age"
  local key_file="${key_dir}/keys.txt"
  if [ -f "$key_file" ]; then
    AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
    [ -n "$AGE_PUBLIC_KEY" ] || return 1
    export AGE_PUBLIC_KEY
    return 0
  fi
  if ! command -v age-keygen &>/dev/null; then
    return 1
  fi
  mkdir -p "$key_dir"
  age-keygen -o "$key_file" 2>/dev/null
  chmod 600 "$key_file"
  AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
  [ -n "$AGE_PUBLIC_KEY" ] || return 1
  export AGE_PUBLIC_KEY
  echo "Generated age key: ${key_file}"
}

# Write .sops.yaml pinning the age recipient for .env.enc files.
# Write .sops.yaml in the factory root so sops encrypts .env*.enc files to the
# given age recipient.
# NOTE(review): the heredoc body was lost in the source under review; the
# creation_rules below are reconstructed from how this script uses sops
# (.env.enc and .env.vault.enc, age recipient) — verify against the original.
write_sops_yaml() {
  local pub_key="$1"
  cat > "${FACTORY_ROOT}/.sops.yaml" <<EOF
creation_rules:
  - path_regex: \.env(\.vault)?\.enc\$
    age: ${pub_key}
EOF
}

# Encrypt a dotenv file with sops (recipient comes from .sops.yaml).
encrypt_env_file() {
  local input="$1" output="$2"
  sops -e --input-type dotenv --output-type dotenv "$input" > "$output"
}

# Store secrets into .env.enc (encrypted) if SOPS + age available, else leave
# plaintext .env in place. Always returns 0 — init treats this as best-effort.
write_secrets_encrypted() {
  local env_file="${FACTORY_ROOT}/.env"
  local enc_file="${FACTORY_ROOT}/.env.enc"
  if command -v sops &>/dev/null && command -v age-keygen &>/dev/null; then
    if ensure_age_key; then
      # Write .sops.yaml if missing
      if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
        write_sops_yaml "$AGE_PUBLIC_KEY"
      fi
      if [ -f "$env_file" ]; then
        encrypt_env_file "$env_file" "$enc_file"
        rm -f "$env_file"
        echo "Secrets encrypted to .env.enc (plaintext .env removed)"
      else
        # FIX: the original fell through to the "sops/age not available"
        # warning here, which was false — the tools ARE available, there is
        # just no plaintext .env to encrypt.
        echo "No plaintext .env found — nothing to encrypt"
      fi
      return 0
    fi
  fi
  # Fallback: keep plaintext .env
  echo "Warning: sops/age not available — secrets stored in plaintext .env" >&2
  return 0
}

export FORGEJO_DATA_DIR="${HOME}/.disinto/forgejo"

# Generate docker-compose.yml in the factory root.
# (Implementation in lib/generators.sh)
generate_compose() {
  _generate_compose_impl "$@"
}

# Generate docker/agents/ files if they don't already exist.
# (Implementation in lib/generators.sh)
generate_agent_docker() {
  _generate_agent_docker_impl "$@"
}

# Generate docker/Caddyfile template for edge proxy.
# (Implementation in lib/generators.sh)
generate_caddyfile() {
  _generate_caddyfile_impl "$@"
}

# Generate docker/index.html default page.
# (Implementation in lib/generators.sh)
generate_staging_index() {
  _generate_staging_index_impl "$@"
}

# Generate template .woodpecker/ deployment pipeline configs in a project repo.
# Creates staging.yml and production.yml alongside the project's existing CI config.
# These pipelines trigger on Woodpecker's deployment event with environment filters.
# (Implementation in lib/generators.sh)
generate_deploy_pipelines() {
  _generate_deploy_pipelines_impl "$@"
}

# Check whether compose mode is active (docker-compose.yml exists).
is_compose_mode() {
  [ -f "${FACTORY_ROOT}/docker-compose.yml" ]
}

# Create and seed the {project}-ops repo on Forgejo with initial directory structure.
# The ops repo holds operational data: vault items, journals, evidence, prerequisites.
# ops repo setup is now in lib/ops-setup.sh

# push_to_forge() is sourced from lib/forge-push.sh

# Preflight check — verify all factory requirements before proceeding.
# Args: $1 repo slug (org/repo), $2 forge base URL. Exits 1 if any hard
# requirement is missing; optional tools only produce warnings.
preflight_check() {
  local repo_slug="${1:-}"
  local forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local errors=0

  # ── Required commands ──
  local -A hints=(
    [claude]="Install: https://docs.anthropic.com/en/docs/claude-code/overview"
    [tmux]="Install: apt install tmux / brew install tmux"
    [git]="Install: apt install git / brew install git"
    [jq]="Install: apt install jq / brew install jq"
    [python3]="Install: apt install python3 / brew install python3"
    [curl]="Install: apt install curl / brew install curl"
  )
  local cmd
  for cmd in claude tmux git jq python3 curl; do
    if ! command -v "$cmd" &>/dev/null; then
      echo "Error: ${cmd} not found" >&2
      echo "  ${hints[$cmd]}" >&2
      errors=$((errors + 1))
    fi
  done

  # ── Claude Code authentication ──
  if command -v claude &>/dev/null && command -v jq &>/dev/null; then
    local auth_json auth_stderr auth_rc=0
    # Two invocations: first captures stderr only (to detect an old claude
    # binary that lacks the subcommand), second captures the JSON payload.
    auth_stderr=$(claude auth status 2>&1 >/dev/null) || auth_rc=$?
    auth_json=$(claude auth status 2>/dev/null) || auth_json=""
    # Only skip check if subcommand is unrecognized (old claude version)
    if printf '%s' "$auth_stderr" | grep -qi "unknown command"; then
      : # claude version doesn't support auth status — skip
    elif [ -z "$auth_json" ] || [ "$auth_rc" -ne 0 ]; then
      echo "Error: Claude Code is not authenticated (auth check failed)" >&2
      echo "  Run: claude auth login" >&2
      errors=$((errors + 1))
    else
      local logged_in
      logged_in=$(printf '%s' "$auth_json" | jq -r '.loggedIn // false' 2>/dev/null) || logged_in="false"
      if [ "$logged_in" != "true" ]; then
        echo "Error: Claude Code is not authenticated" >&2
        echo "  Run: claude auth login" >&2
        errors=$((errors + 1))
      fi
    fi
  fi

  # ── Forge API check (verify the forge is reachable and token works) ──
  if [ -n "${FORGE_TOKEN:-}" ] && command -v curl &>/dev/null; then
    if ! curl -sf --max-time 10 \
      -H "Authorization: token ${FORGE_TOKEN}" \
      "${forge_url}/api/v1/repos/${repo_slug}" >/dev/null 2>&1; then
      echo "Error: Forge API auth failed at ${forge_url}" >&2
      echo "  Verify your FORGE_TOKEN and that Forgejo is running" >&2
      errors=$((errors + 1))
    fi
  fi

  # ── Git identity check (warn only — init sets a repo-local identity) ──
  if command -v git &>/dev/null; then
    local git_name git_email
    git_name=$(git config user.name 2>/dev/null) || git_name=""
    git_email=$(git config user.email 2>/dev/null) || git_email=""
    if [ -z "$git_name" ] || [ -z "$git_email" ]; then
      echo "Warning: git user.name/user.email not configured" >&2
      echo "  Init will set a repo-local identity for ops commits" >&2
    fi
  fi

  # ── Optional tools (warn only) ──
  if ! command -v docker &>/dev/null; then
    echo "Warning: docker not found (needed for Forgejo provisioning)" >&2
  fi
  if ! command -v sops &>/dev/null; then
    echo "Warning: sops not found (secrets will be stored in plaintext .env)" >&2
    echo "  Install: https://github.com/getsops/sops/releases" >&2
  fi
  if ! command -v age-keygen &>/dev/null; then
    echo "Warning: age not found (needed for secret encryption with SOPS)" >&2
    echo "  Install: apt install age / brew install age" >&2
  fi

  if [ "$errors" -gt 0 ]; then
    echo "" >&2
    echo "${errors} preflight error(s) — fix the above before running disinto init" >&2
    exit 1
  fi
}

# Clone the repo if the target directory doesn't exist; validate if it does.
clone_or_validate() {
  local slug="$1" target="$2" forge_url="${3:-${FORGE_URL:-http://localhost:3000}}"
  if [ -d "${target}/.git" ]; then
    echo "Repo: ${target} (existing clone)"
    return
  fi
  local url
  url=$(clone_url_from_slug "$slug" "$forge_url")
  echo "Cloning: ${url} -> ${target}"
  git clone "$url" "$target"
}

# Detect the primary branch from the remote HEAD or fallback to main/master.
detect_branch() {
  local repo_root="$1"
  local branch
  branch=$(git -C "$repo_root" symbolic-ref refs/remotes/origin/HEAD 2>/dev/null \
    | sed 's|refs/remotes/origin/||') || true
  if [ -z "$branch" ]; then
    if git -C "$repo_root" show-ref --verify --quiet refs/remotes/origin/main 2>/dev/null; then
      branch="main"
    else
      branch="master"
    fi
  fi
  printf '%s' "$branch"
}

# Generate projects/<name>.toml config file.
# NOTE(review): the heredoc body was lost in the source under review; the keys
# below are reconstructed from every key this script reads back elsewhere
# (name, repo, repo_root, primary_branch, woodpecker_repo_id, forge_url) —
# verify against the original template.
generate_toml() {
  local path="$1" name="$2" repo="$3" root="$4" branch="$5" ci_id="$6" forge_url="$7"
  cat > "$path" <<EOF
name = "${name}"
repo = "${repo}"
repo_root = "${root}"
primary_branch = "${branch}"
woodpecker_repo_id = ${ci_id}
forge_url = "${forge_url}"
EOF
}

# Create the standard disinto label set on a repo via the Forgejo API.
# Idempotent: existing labels are skipped; prints a created/skipped/failed tally.
# NOTE(review): the function head and the color table were lost in the source
# under review — the curl that fetches existing labels and the hex colors below
# are reconstructed placeholders. Verify both against the original.
create_labels() {
  local repo_slug="$1" forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local api="${forge_url}/api/v1/repos/${repo_slug}"
  local -A labels=(
    [backlog]="1d76db"
    [in-progress]="0e8a16"
    [blocked]="d93f0b"
    [tech-debt]="c2e0c6"
    [underspecified]="fbca04"
    [vision]="5319e7"
    [action]="006b75"
    [bug-report]="ee0701"
    [prediction/unreviewed]="ededed"
    [prediction/dismissed]="cccccc"
    [prediction/actioned]="84b6eb"
    [needs-triage]="f9d0c4"
    [reproduced]="b60205"
    [cannot-reproduce]="bfdadc"
    [in-triage]="fef2c0"
    [rejected]="e4e669"
  )
  local existing
  existing=$(curl -sf \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${api}/labels" 2>/dev/null \
    | grep -o '"name":"[^"]*"' | cut -d'"' -f4) || existing=""
  local name color
  local created=0 skipped=0 failed=0
  for name in backlog in-progress blocked tech-debt underspecified vision action \
    bug-report prediction/unreviewed prediction/dismissed prediction/actioned \
    needs-triage reproduced cannot-reproduce in-triage rejected; do
    if echo "$existing" | grep -qx "$name"; then
      echo "  . ${name} (already exists)"
      skipped=$((skipped + 1))
      continue
    fi
    color="${labels[$name]}"
    if curl -sf -X POST \
      -H "Authorization: token ${FORGE_TOKEN}" \
      -H "Content-Type: application/json" \
      "${api}/labels" \
      -d "{\"name\":\"${name}\",\"color\":\"${color}\"}" >/dev/null 2>&1; then
      echo "  + ${name} (created)"
      created=$((created + 1))
    else
      echo "  ! ${name} (failed to create)"
      failed=$((failed + 1))
    fi
  done
  echo "Labels: ${created} created, ${skipped} skipped, ${failed} failed"
}

# Generate a minimal VISION.md template in the target project.
# NOTE(review): the heredoc body was garbled in the source under review; the
# section headings below match the fragments that survived ("Who it's for",
# "Design principles", "Milestones", "Current", "Next") — verify formatting.
generate_vision() {
  local repo_root="$1" name="$2"
  local vision_path="${repo_root}/VISION.md"
  if [ -f "$vision_path" ]; then
    echo "VISION: ${vision_path} (already exists, skipping)"
    return
  fi
  cat > "$vision_path" <<EOF
# ${name}

## Who it's for

## Design principles
-
-
-

## Milestones

### Current
-

### Next
-
EOF
  echo "Created: ${vision_path}"
  echo "  Commit this to your repo when ready"
}

# Copy issue templates from templates/ to target project repo.
copy_issue_templates() {
  local repo_root="$1"
  local template_dir="${FACTORY_ROOT}/templates"
  local target_dir="${repo_root}/.forgejo/ISSUE_TEMPLATE"

  # Skip if templates directory doesn't exist
  if [ ! -d "$template_dir" ]; then
    return
  fi

  # Create target directory
  mkdir -p "$target_dir"

  # Copy each template file if it doesn't already exist
  for template in "$template_dir"/issue/*; do
    [ -f "$template" ] || continue
    local filename
    filename=$(basename "$template")
    # FIX: the source contained a garbled "$(unknown)" here; the surrounding
    # code (basename captured into $filename, per-file copy) makes the intent
    # unambiguous: copy under the template's own basename.
    local target_path="${target_dir}/${filename}"
    if [ ! -f "$target_path" ]; then
      cp "$template" "$target_path"
      echo "Copied: ${target_path}"
    else
      echo "Skipped: ${target_path} (already exists)"
    fi
  done
}

# Install cron entries for project agents (implementation in lib/ci-setup.sh)
install_cron() {
  _load_ci_context
  _install_cron_impl "$@"
}

# Create Woodpecker OAuth2 app on Forgejo (implementation in lib/ci-setup.sh)
create_woodpecker_oauth() {
  _load_ci_context
  _create_woodpecker_oauth_impl "$@"
}

# Generate WOODPECKER_TOKEN via Forgejo OAuth2 flow (implementation in lib/ci-setup.sh)
generate_woodpecker_token() {
  _load_ci_context
  _generate_woodpecker_token_impl "$@"
}

# Activate repo in Woodpecker CI (implementation in lib/ci-setup.sh)
activate_woodpecker_repo() {
  _load_ci_context
  _activate_woodpecker_repo_impl "$@"
}

# ── init command ─────────────────────────────────────────────────────────────
# Bootstrap a project end-to-end: provision Forgejo, clone/push the repo,
# create the ops repo, labels, branch protection, CI, cron, mirrors, and
# (unless --bare) bring up the compose stack.
disinto_init() {
  local repo_url="${1:-}"
  if [ -z "$repo_url" ]; then
    echo "Error: repo URL required" >&2
    echo "Usage: disinto init <repo-url>" >&2
    exit 1
  fi
  shift

  # Parse flags
  local branch="" repo_root="" ci_id="0" auto_yes=false forge_url_flag="" bare=false
  while [ $# -gt 0 ]; do
    case "$1" in
      --branch) branch="$2"; shift 2 ;;
      --repo-root) repo_root="$2"; shift 2 ;;
      --ci-id) ci_id="$2"; shift 2 ;;
      --forge-url) forge_url_flag="$2"; shift 2 ;;
      --bare) bare=true; shift ;;
      --yes) auto_yes=true; shift ;;
      *) echo "Unknown option: $1" >&2; exit 1 ;;
    esac
  done

  # Export bare-metal flag for setup_forge
  export DISINTO_BARE="$bare"

  # Extract org/repo slug
  local forge_repo
  forge_repo=$(parse_repo_slug "$repo_url")
  local project_name="${forge_repo##*/}"
  local toml_path="${FACTORY_ROOT}/projects/${project_name}.toml"

  # Determine forge URL (flag > env > default)
  local forge_url="${forge_url_flag:-${FORGE_URL:-http://localhost:3000}}"

  echo "=== disinto init ==="
  echo "Project: ${forge_repo}"
  echo "Name: ${project_name}"
  echo "Forge: ${forge_url}"

  # Check for existing config
  local toml_exists=false
  if [ -f "$toml_path" ]; then
    toml_exists=true
    echo "Config: ${toml_path} (already exists, reusing)"
    # Read repo_root and branch from existing TOML
    local existing_root existing_branch
    existing_root=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    cfg = tomllib.load(f)
print(cfg.get('repo_root', ''))
" "$toml_path" 2>/dev/null) || existing_root=""
    existing_branch=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    cfg = tomllib.load(f)
print(cfg.get('primary_branch', ''))
" "$toml_path" 2>/dev/null) || existing_branch=""

    # Use existing values as defaults
    if [ -n "$existing_branch" ] && [ -z "$branch" ]; then
      branch="$existing_branch"
    fi

    # Handle repo_root: flag overrides TOML, prompt if they differ
    if [ -z "$repo_root" ]; then
      repo_root="${existing_root:-/home/${USER}/${project_name}}"
    elif [ -n "$existing_root" ] && [ "$repo_root" != "$existing_root" ]; then
      echo "Note: --repo-root (${repo_root}) differs from TOML (${existing_root})"
      local update_toml=false
      if [ "$auto_yes" = true ]; then
        update_toml=true
      elif [ -t 0 ]; then
        read -rp "Update repo_root in TOML to ${repo_root}? [y/N] " confirm
        if [[ "$confirm" =~ ^[Yy] ]]; then
          update_toml=true
        else
          repo_root="$existing_root"
        fi
      fi
      if [ "$update_toml" = true ]; then
        python3 -c "
import sys, re, pathlib
p = pathlib.Path(sys.argv[1])
text = p.read_text()
text = re.sub(r'^repo_root\s*=\s*.*$', 'repo_root = \"' + sys.argv[2] + '\"', text, flags=re.MULTILINE)
p.write_text(text)
" "$toml_path" "$repo_root"
        echo "Updated: repo_root in ${toml_path}"
      fi
    fi
  fi

  # Generate compose files (unless --bare)
  if [ "$bare" = false ]; then
    local forge_port
    # FIX: use sed -n…p so a URL without an explicit port yields an empty
    # string. The original 's|…|\1|' passed the whole URL through unchanged
    # on no-match, so the ${forge_port:-3000} default never applied.
    forge_port=$(printf '%s' "$forge_url" | sed -nE 's|.*:([0-9]+)/?$|\1|p')
    forge_port="${forge_port:-3000}"
    generate_compose "$forge_port"
    generate_agent_docker
    generate_caddyfile
    generate_staging_index
    # Create empty .env so docker compose can parse the agents service
    # env_file reference before setup_forge generates the real tokens (#769)
    touch "${FACTORY_ROOT}/.env"
  fi

  # Set up local Forgejo instance (provision if needed, create users/tokens/repo)
  setup_forge "$forge_url" "$forge_repo"

  # Preflight: verify factory requirements
  preflight_check "$forge_repo" "$forge_url"

  # Determine repo root (for new projects)
  repo_root="${repo_root:-/home/${USER}/${project_name}}"

  # Clone or validate (try origin first for initial clone from upstream)
  if [ ! -d "${repo_root}/.git" ]; then
    # For initial setup, clone from the provided URL directly
    echo "Cloning: ${repo_url} -> ${repo_root}"
    git clone "$repo_url" "$repo_root" 2>/dev/null || \
      clone_or_validate "$forge_repo" "$repo_root" "$forge_url"
  else
    echo "Repo: ${repo_root} (existing clone)"
  fi

  # Push to local Forgejo (skip if SKIP_PUSH is set)
  if [ "${SKIP_PUSH:-false}" = "false" ]; then
    push_to_forge "$repo_root" "$forge_url" "$forge_repo"
  fi

  # Detect primary branch
  if [ -z "$branch" ]; then
    branch=$(detect_branch "$repo_root")
  fi
  echo "Branch: ${branch}"

  # Set up {project}-ops repo (#757)
  # Always use disinto-admin as the ops repo owner — forge_repo owner may be
  # the calling user (e.g. johba) but the ops repo belongs to disinto-admin.
  local ops_slug="disinto-admin/${project_name}-ops"
  local ops_root="/home/${USER}/${project_name}-ops"
  setup_ops_repo "$forge_url" "$ops_slug" "$ops_root" "$branch"

  # Migrate ops repo to canonical structure (seed missing directories/files)
  # This brings pre-#407 deployments up to date with the canonical structure
  migrate_ops_repo "$ops_root" "$branch"

  # Set up vault branch protection on ops repo (#77)
  # This ensures admin-only merge to main, blocking bots from merging vault PRs
  # Use HUMAN_TOKEN (disinto-admin) or FORGE_TOKEN (dev-bot) for admin operations
  export FORGE_OPS_REPO="$ops_slug"
  # Source env.sh to ensure FORGE_TOKEN is available
  source "${FACTORY_ROOT}/lib/env.sh"
  source "${FACTORY_ROOT}/lib/branch-protection.sh"
  if setup_vault_branch_protection "$branch"; then
    echo "Branch protection: vault protection configured on ${ops_slug}"
  else
    echo "Warning: failed to set up vault branch protection" >&2
  fi
  unset FORGE_OPS_REPO

  # Generate project TOML (skip if already exists)
  if [ "$toml_exists" = false ]; then
    # Prompt for CI ID if interactive and not already set via flag
    if [ "$ci_id" = "0" ] && [ "$auto_yes" = false ] && [ -t 0 ]; then
      read -rp "Woodpecker CI repo ID (0 to skip CI): " user_ci_id
      ci_id="${user_ci_id:-0}"
    fi
    generate_toml "$toml_path" "$project_name" "$forge_repo" "$repo_root" "$branch" "$ci_id" "$forge_url"
    echo "Created: ${toml_path}"
  fi

  # Update ops_repo in TOML with the resolved actual ops slug.
  # Uses in-place substitution to prevent duplicate keys on repeated init runs.
  # If the key is missing (manually created TOML), it is inserted after the repo line.
  if [ -n "${_ACTUAL_OPS_SLUG:-}" ] && [ -f "$toml_path" ]; then
    python3 -c "
import sys, re, pathlib
p = pathlib.Path(sys.argv[1])
text = p.read_text()
new_val = 'ops_repo = \"' + sys.argv[2] + '\"'
if re.search(r'^ops_repo\s*=', text, re.MULTILINE):
    text = re.sub(r'^ops_repo\s*=\s*.*\$', new_val, text, flags=re.MULTILINE)
else:
    text = re.sub(r'^(repo\s*=\s*\"[^\"]*\")', r'\1\n' + new_val, text, flags=re.MULTILINE)
p.write_text(text)
" "$toml_path" "${_ACTUAL_OPS_SLUG}"
    echo "Updated: ops_repo in ${toml_path}"
  fi

  # Create OAuth2 app on Forgejo for Woodpecker (before compose up)
  _WP_REPO_ID=""
  create_woodpecker_oauth "$forge_url" "$forge_repo"

  # Generate WOODPECKER_AGENT_SECRET for server↔agent auth
  local env_file="${FACTORY_ROOT}/.env"
  if ! grep -q '^WOODPECKER_AGENT_SECRET=' "$env_file" 2>/dev/null; then
    local agent_secret
    agent_secret="$(head -c 32 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 40)"
    printf 'WOODPECKER_AGENT_SECRET=%s\n' "$agent_secret" >> "$env_file"
    echo "Config: WOODPECKER_AGENT_SECRET generated and saved to .env"
  fi

  # Ensure Claude Code never auto-updates, phones home, or sends telemetry (#725)
  if ! grep -q '^CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=' "$env_file" 2>/dev/null; then
    printf 'CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1\n' >> "$env_file"
    echo "Config: CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 saved to .env"
  fi

  # Create labels on remote
  create_labels "$forge_repo" "$forge_url"

  # Set up branch protection on project repo (#10)
  # This enforces PR flow: no direct pushes, 1 approval required, dev-bot can merge after CI
  if setup_project_branch_protection "$forge_repo" "$branch"; then
    echo "Branch protection: project protection configured on ${forge_repo}"
  else
    echo "Warning: failed to set up project branch protection" >&2
  fi

  # Generate VISION.md template
  generate_vision "$repo_root" "$project_name"

  # Generate template deployment pipeline configs in project repo
  generate_deploy_pipelines "$repo_root" "$project_name"

  # Copy issue templates to target project
  copy_issue_templates "$repo_root"

  # Install cron jobs
  install_cron "$project_name" "$toml_path" "$auto_yes" "$bare"

  # Set up mirror remotes if [mirrors] configured in TOML
  source "${FACTORY_ROOT}/lib/load-project.sh" "$toml_path"
  if [ -n "${MIRROR_NAMES:-}" ]; then
    echo "Mirrors: setting up remotes"
    local mname murl
    local mirrors_ok=true
    for mname in $MIRROR_NAMES; do
      # Indirect lookup of MIRROR_<NAME> set by load-project.sh
      murl=$(eval "echo \"\$MIRROR_$(echo "$mname" | tr '[:lower:]' '[:upper:]')\"") || true
      [ -z "$murl" ] && continue
      if git -C "$repo_root" remote get-url "$mname" >/dev/null 2>&1; then
        if git -C "$repo_root" remote set-url "$mname" "$murl"; then
          echo "  + ${mname} -> ${murl} (updated)"
        else
          echo "  ! ${mname} -> ${murl} (failed to update URL)"
          mirrors_ok=false
        fi
      else
        if git -C "$repo_root" remote add "$mname" "$murl"; then
          echo "  + ${mname} -> ${murl} (added)"
        else
          echo "  ! ${mname} -> ${murl} (failed to add remote)"
          mirrors_ok=false
        fi
      fi
    done
    # Initial sync: push current primary branch to mirrors
    if [ "$mirrors_ok" = true ]; then
      source "${FACTORY_ROOT}/lib/mirrors.sh"
      export PROJECT_REPO_ROOT="$repo_root"
      if mirror_push; then
        echo "Mirrors: initial sync complete"
      else
        echo "Warning: mirror push failed" >&2
      fi
    fi
  fi

  # Encrypt secrets if SOPS + age are available
  write_secrets_encrypted

  # Bring up the full stack (compose mode only)
  if [ "$bare" = false ] && [ -f "${FACTORY_ROOT}/docker-compose.yml" ]; then
    echo ""
    echo "── Starting full stack ────────────────────────────────"
    docker compose -f "${FACTORY_ROOT}/docker-compose.yml" up -d
    echo "Stack: running (forgejo + woodpecker + agents)"
    # Generate WOODPECKER_TOKEN via Forgejo OAuth2 flow (#779)
    generate_woodpecker_token "$forge_url" || true
    # Activate repo in Woodpecker now that stack is running
    activate_woodpecker_repo "$forge_repo"
    # Use detected Woodpecker repo ID if ci_id was not explicitly set
    if [ "$ci_id" = "0" ] && [ -n "${_WP_REPO_ID:-}" ]; then
      ci_id="$_WP_REPO_ID"
      echo "CI ID: ${ci_id} (from Woodpecker)"
      # Update TOML with Woodpecker repo ID
      if [ -f "$toml_path" ]; then
        python3 -c "
import sys, re, pathlib
p = pathlib.Path(sys.argv[1])
text = p.read_text()
text = re.sub(r'^woodpecker_repo_id\s*=\s*.*$', 'woodpecker_repo_id = ' + sys.argv[2], text, flags=re.MULTILINE)
p.write_text(text)
" "$toml_path" "$ci_id"
      fi
    fi
  fi

  # Activate default agents (zero-cost when idle — they only invoke Claude
  # when there is actual work, so an empty project burns no LLM tokens)
  mkdir -p "${FACTORY_ROOT}/state"
  # State files are idempotent — create if missing, skip if present
  local state_file
  for state_file in ".dev-active" ".reviewer-active" ".gardener-active"; do
    if [ -f "${FACTORY_ROOT}/state/${state_file}" ]; then
      echo "State: ${state_file} (already active)"
    else
      touch "${FACTORY_ROOT}/state/${state_file}"
      echo "State: ${state_file} (created)"
    fi
  done

  echo ""
  echo "Done. Project ${project_name} is ready."
  echo "  Config: ${toml_path}"
  echo "  Clone: ${repo_root}"
  echo "  Forge: ${forge_url}/${forge_repo}"
  if [ "$bare" = false ]; then
    echo "  Stack: docker compose (use 'disinto up/down/logs/shell')"
  else
    echo "  Mode: bare-metal"
  fi
  echo ""
  echo "── Claude authentication ──────────────────────────────"
  echo "  OAuth (shared across containers):"
  echo "    Run 'claude auth login' on the host once."
  echo "    Credentials in ~/.claude are mounted into containers."
  echo "  API key (alternative — metered billing, no rotation issues):"
  echo "    Set ANTHROPIC_API_KEY in .env to skip OAuth entirely."
  echo ""
  echo "  Run 'disinto status' to verify."
}

# ── status command ───────────────────────────────────────────────────────────
# Print per-project status: active dev sessions (from /tmp phase files),
# backlog depth, and open PR count via the Forgejo API.
disinto_status() {
  local toml_dir="${FACTORY_ROOT}/projects"
  local found=false
  for toml in "${toml_dir}"/*.toml; do
    [ -f "$toml" ] || continue
    found=true
    # Parse name, repo, forge_url from TOML
    local pname prepo pforge_url
    pname=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f)['name'])
" "$toml" 2>/dev/null) || continue
    prepo=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f)['repo'])
" "$toml" 2>/dev/null) || continue
    pforge_url=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f).get('forge_url', ''))
" "$toml" 2>/dev/null) || pforge_url=""
    pforge_url="${pforge_url:-${FORGE_URL:-http://localhost:3000}}"

    echo "== ${pname} (${prepo}) =="

    # Active dev sessions
    local has_sessions=false
    for pf in /tmp/dev-session-"${pname}"-*.phase; do
      [ -f "$pf" ] || continue
      has_sessions=true
      local issue phase_line
      issue=$(basename "$pf" | sed "s/dev-session-${pname}-//;s/\.phase//")
      phase_line=$(head -1 "$pf" 2>/dev/null || echo "unknown")
      echo "  Session #${issue}: ${phase_line}"
    done
    if [ "$has_sessions" = false ]; then
      echo "  Sessions: none"
    fi

    # Backlog depth via API (count comes from the x-total-count header)
    if [ -n "${FORGE_TOKEN:-}" ]; then
      local api="${pforge_url}/api/v1/repos/${prepo}"
      local backlog_count pr_count
      backlog_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/issues?state=open&labels=backlog&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || backlog_count="?"
      echo "  Backlog: ${backlog_count:-0} issues"
      pr_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/pulls?state=open&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || pr_count="?"
      echo "  Open PRs: ${pr_count:-0}"
    else
      echo "  Backlog: (no FORGE_TOKEN)"
      echo "  Open PRs: (no FORGE_TOKEN)"
    fi
    echo ""
  done
  if [ "$found" = false ]; then
    echo "No projects configured."
    echo "Run 'disinto init <repo-url>' to get started."
  fi
}

# ── secrets command ──────────────────────────────────────────────────────────
# Manage three secret stores: individual secrets/<name>.enc (raw age),
# agent secrets .env.enc (sops dotenv), vault secrets .env.vault.enc (sops).
disinto_secrets() {
  local subcmd="${1:-}"
  local enc_file="${FACTORY_ROOT}/.env.enc"
  local env_file="${FACTORY_ROOT}/.env"
  local vault_enc_file="${FACTORY_ROOT}/.env.vault.enc"
  local vault_env_file="${FACTORY_ROOT}/.env.vault"

  # Shared helper: ensure sops+age and .sops.yaml exist
  _secrets_ensure_sops() {
    if ! command -v sops &>/dev/null || ! command -v age-keygen &>/dev/null; then
      echo "Error: sops and age are required." >&2
      echo "  Install sops: https://github.com/getsops/sops/releases" >&2
      echo "  Install age: apt install age / brew install age" >&2
      exit 1
    fi
    if ! ensure_age_key; then
      echo "Error: failed to generate age key" >&2
      exit 1
    fi
    if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
      write_sops_yaml "$AGE_PUBLIC_KEY"
      echo "Created: .sops.yaml"
    fi
  }

  local secrets_dir="${FACTORY_ROOT}/secrets"
  local age_key_file="${HOME}/.config/sops/age/keys.txt"

  # Shared helper: ensure age key exists and export AGE_PUBLIC_KEY
  _secrets_ensure_age_key() {
    if ! command -v age &>/dev/null; then
      echo "Error: age is required." >&2
      echo "  Install age: apt install age / brew install age" >&2
      exit 1
    fi
    if [ ! -f "$age_key_file" ]; then
      echo "Error: age key not found at ${age_key_file}" >&2
      echo "  Run 'disinto init' to generate one, or create manually with:" >&2
      echo "  mkdir -p ~/.config/sops/age && age-keygen -o ${age_key_file}" >&2
      exit 1
    fi
    AGE_PUBLIC_KEY="$(age-keygen -y "$age_key_file" 2>/dev/null)"
    if [ -z "$AGE_PUBLIC_KEY" ]; then
      echo "Error: failed to read public key from ${age_key_file}" >&2
      exit 1
    fi
    export AGE_PUBLIC_KEY
  }

  case "$subcmd" in
    add)
      local name="${2:-}"
      if [ -z "$name" ]; then
        echo "Usage: disinto secrets add <name>" >&2
        exit 1
      fi
      _secrets_ensure_age_key
      mkdir -p "$secrets_dir"
      printf 'Enter value for %s: ' "$name" >&2
      local value
      IFS= read -rs value
      echo >&2
      if [ -z "$value" ]; then
        echo "Error: empty value" >&2
        exit 1
      fi
      local enc_path="${secrets_dir}/${name}.enc"
      if [ -f "$enc_path" ]; then
        printf 'Secret %s already exists. Overwrite? [y/N] ' "$name" >&2
        local confirm
        read -r confirm
        if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then
          echo "Aborted." >&2
          exit 1
        fi
      fi
      if ! printf '%s' "$value" | age -r "$AGE_PUBLIC_KEY" -o "$enc_path"; then
        echo "Error: encryption failed" >&2
        exit 1
      fi
      echo "Stored: ${enc_path}"
      ;;
    show)
      local name="${2:-}"
      if [ -n "$name" ]; then
        # Show individual secret: disinto secrets show <name>
        local enc_path="${secrets_dir}/${name}.enc"
        if [ ! -f "$enc_path" ]; then
          echo "Error: ${enc_path} not found" >&2
          exit 1
        fi
        if [ ! -f "$age_key_file" ]; then
          echo "Error: age key not found at ${age_key_file}" >&2
          exit 1
        fi
        age -d -i "$age_key_file" "$enc_path"
      else
        # Show all agent secrets: disinto secrets show
        if [ ! -f "$enc_file" ]; then
          echo "Error: ${enc_file} not found." >&2
          exit 1
        fi
        sops -d "$enc_file"
      fi
      ;;
    edit)
      if [ ! -f "$enc_file" ]; then
        echo "Error: ${enc_file} not found. Run 'disinto secrets migrate' first." >&2
        exit 1
      fi
      sops "$enc_file"
      ;;
    migrate)
      if [ ! -f "$env_file" ]; then
        echo "Error: ${env_file} not found — nothing to migrate." >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$env_file" "$enc_file"
      # Verify decryption works before removing the plaintext
      if ! sops -d "$enc_file" >/dev/null 2>&1; then
        echo "Error: failed to verify .env.enc decryption" >&2
        rm -f "$enc_file"
        exit 1
      fi
      rm -f "$env_file"
      echo "Migrated: .env -> .env.enc (plaintext removed)"
      ;;
    edit-vault)
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found. Run 'disinto secrets migrate-vault' first." >&2
        exit 1
      fi
      sops "$vault_enc_file"
      ;;
    show-vault)
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found." >&2
        exit 1
      fi
      sops -d "$vault_enc_file"
      ;;
    migrate-vault)
      if [ ! -f "$vault_env_file" ]; then
        echo "Error: ${vault_env_file} not found — nothing to migrate." >&2
        echo "  Create .env.vault with vault secrets (GITHUB_TOKEN, deploy keys, etc.)" >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$vault_env_file" "$vault_enc_file"
      # Verify decryption works before removing plaintext
      if ! sops -d "$vault_enc_file" >/dev/null 2>&1; then
        echo "Error: failed to verify .env.vault.enc decryption" >&2
        rm -f "$vault_enc_file"
        exit 1
      fi
      rm -f "$vault_env_file"
      echo "Migrated: .env.vault -> .env.vault.enc (plaintext removed)"
      ;;
    *)
      # NOTE(review): the heredoc delimiter and <placeholders> were garbled in
      # the source under review; text reconstructed — verify wording.
      cat >&2 <<'EOF'
Usage: disinto secrets <subcommand>

Individual secrets (secrets/<name>.enc):
  add <name>      Prompt for value, encrypt, store in secrets/<name>.enc
  show <name>     Decrypt and print an individual secret

Agent secrets (.env.enc):
  edit            Edit agent secrets (FORGE_TOKEN, CLAUDE_API_KEY, etc.)
  show            Show decrypted agent secrets (no argument)
  migrate         Encrypt .env -> .env.enc

Vault secrets (.env.vault.enc):
  edit-vault      Edit vault secrets (GITHUB_TOKEN, deploy keys, etc.)
  show-vault      Show decrypted vault secrets
  migrate-vault   Encrypt .env.vault -> .env.vault.enc
EOF
      exit 1
      ;;
  esac
}

# ── run command ──────────────────────────────────────────────────────────────
# Decrypt vault secrets to a temp file, run one action in an ephemeral runner
# container with those secrets injected, then remove the temp file.
disinto_run() {
  local action_id="${1:?Usage: disinto run <action-id>}"
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  local vault_enc="${FACTORY_ROOT}/.env.vault.enc"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    echo "  Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  fi
  if [ ! -f "$vault_enc" ]; then
    echo "Error: .env.vault.enc not found — create vault secrets first" >&2
    echo "  Run 'disinto secrets migrate-vault' after creating .env.vault" >&2
    exit 1
  fi
  if ! command -v sops &>/dev/null; then
    echo "Error: sops not found — required to decrypt vault secrets" >&2
    exit 1
  fi

  # Decrypt vault secrets to temp file
  local tmp_env
  tmp_env=$(mktemp /tmp/disinto-vault-XXXXXX)
  # NOTE: this EXIT trap stays installed for the rest of the process; it is a
  # harmless safety net (the file is also removed explicitly below).
  trap 'rm -f "$tmp_env"' EXIT
  if ! sops -d --output-type dotenv "$vault_enc" > "$tmp_env" 2>/dev/null; then
    rm -f "$tmp_env"
    echo "Error: failed to decrypt .env.vault.enc" >&2
    exit 1
  fi
  echo "Vault secrets decrypted to tmpfile"

  # Run action in ephemeral runner container
  local rc=0
  docker compose -f "$compose_file" \
    run --rm --env-file "$tmp_env" \
    runner "$action_id" || rc=$?

  # Clean up — secrets gone
  rm -f "$tmp_env"
  echo "Run tmpfile removed"
  if [ "$rc" -eq 0 ]; then
    echo "Run action ${action_id} completed successfully"
  else
    echo "Run action ${action_id} failed (exit ${rc})" >&2
  fi
  return "$rc"
}

# ── Pre-build: download binaries to docker/agents/bin/ ───────────────────────
# This avoids network calls during docker build (needed for Docker-in-LXD builds)
# Returns 0 on success, 1 on failure
download_agent_binaries() {
  local bin_dir="${FACTORY_ROOT}/docker/agents/bin"
  mkdir -p "$bin_dir"
  echo "Downloading agent binaries to ${bin_dir}..."

  # Download SOPS (pinned version, checksum-verified)
  local sops_file="${bin_dir}/sops"
  if [ ! -f "$sops_file" ]; then
    echo "  Downloading SOPS v3.9.4..."
    curl -sL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.linux.amd64 -o "$sops_file"
    if [ ! -f "$sops_file" ]; then
      echo "Error: failed to download SOPS" >&2
      return 1
    fi
  fi
  # Verify checksum
  echo "  Verifying SOPS checksum..."
  if ! echo "5488e32bc471de7982ad895dd054bbab3ab91c417a118426134551e9626e4e85  ${sops_file}" | sha256sum -c - >/dev/null 2>&1; then
    echo "Error: SOPS checksum verification failed" >&2
    return 1
  fi
  chmod +x "$sops_file"

  # Download tea CLI (pinned version, checksum-verified)
  local tea_file="${bin_dir}/tea"
  if [ ! -f "$tea_file" ]; then
    echo "  Downloading tea CLI v0.9.2..."
    curl -sL https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64 -o "$tea_file"
    if [ ! -f "$tea_file" ]; then
      echo "Error: failed to download tea CLI" >&2
      return 1
    fi
  fi
  # Verify checksum
  echo "  Verifying tea CLI checksum..."
  if ! echo "be10cdf9a619e3c0f121df874960ed19b53e62d1c7036cf60313a28b5227d54d  ${tea_file}" | sha256sum -c - >/dev/null 2>&1; then
    echo "Error: tea CLI checksum verification failed" >&2
    return 1
  fi
  chmod +x "$tea_file"

  echo "Binaries downloaded and verified successfully"
  return 0
}

# ── up command ───────────────────────────────────────────────────────────────
# Pre-download agent binaries, transiently decrypt .env.enc to .env for
# compose, bring the stack up, then remove the plaintext .env again.
disinto_up() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    echo "  Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  fi

  # Pre-build: download binaries to docker/agents/bin/ to avoid network calls during docker build
  echo "── Pre-build: downloading agent binaries ────────────────────────"
  if ! download_agent_binaries; then
    echo "Error: failed to download agent binaries" >&2
    exit 1
  fi
  echo ""

  # Decrypt secrets to temp .env if SOPS available and .env.enc exists.
  # NOTE: "tmp_env" is the real .env path — compose needs it at that exact
  # location; it is removed again after the stack is up.
  local tmp_env=""
  local enc_file="${FACTORY_ROOT}/.env.enc"
  local env_file="${FACTORY_ROOT}/.env"
  if [ -f "$enc_file" ] && command -v sops &>/dev/null && [ ! -f "$env_file" ]; then
    tmp_env="${env_file}"
    sops -d --output-type dotenv "$enc_file" > "$tmp_env"
    trap '[ -n "${tmp_env:-}" ] && rm -f "$tmp_env"' EXIT
    echo "Decrypted secrets for compose"
  fi

  docker compose -f "$compose_file" up -d "$@"
  echo "Stack is up"

  # Clean up temp .env (also handled by EXIT trap if compose fails)
  if [ -n "$tmp_env" ] && [ -f "$tmp_env" ]; then
    rm -f "$tmp_env"
    echo "Removed temporary .env"
  fi
}

# ── down command ─────────────────────────────────────────────────────────────
disinto_down() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$compose_file" down "$@"
  echo "Stack is down"
}

# ── logs command ─────────────────────────────────────────────────────────────
disinto_logs() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$compose_file" logs -f "$@"
}

# ── shell command ────────────────────────────────────────────────────────────
disinto_shell() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$compose_file" exec agents bash
}

# ── hire-an-agent command ────────────────────────────────────────────────────
# Creates a Forgejo user and .profile repo for an agent.
# Usage: disinto hire-an-agent <name> [--formula <path>]
# disinto_hire_an_agent() is sourced from lib/hire-agent.sh

# ── release command ──────────────────────────────────────────────────────────
# disinto_release() is sourced from lib/release.sh

# ── ci-logs command ──────────────────────────────────────────────────────────
# Reads CI logs from the Woodpecker SQLite database.
# Usage: disinto ci-logs <pipeline-number> [--step <name>]
# Prints CI logs for one pipeline via lib/ci-log-reader.py, optionally
# filtered to a single step.
disinto_ci_logs() {
  local pipeline_number="" step_name=""

  if [ $# -lt 1 ]; then
    echo "Error: pipeline number required" >&2
    echo "Usage: disinto ci-logs <pipeline-number> [--step <name>]" >&2
    exit 1
  fi

  # Parse arguments
  while [ $# -gt 0 ]; do
    case "$1" in
      --step|-s)
        # Require a value: "${2:-}" avoids an unbound-variable abort under
        # set -u when --step is the last argument.
        if [ -z "${2:-}" ]; then
          echo "Error: --step requires a value" >&2
          exit 1
        fi
        step_name="$2"
        shift 2
        ;;
      -*)
        echo "Unknown option: $1" >&2
        exit 1
        ;;
      *)
        if [ -z "$pipeline_number" ]; then
          pipeline_number="$1"
        else
          echo "Unexpected argument: $1" >&2
          exit 1
        fi
        shift
        ;;
    esac
  done

  if [ -z "$pipeline_number" ] || ! [[ "$pipeline_number" =~ ^[0-9]+$ ]]; then
    echo "Error: pipeline number must be a positive integer" >&2
    exit 1
  fi

  local log_reader="${FACTORY_ROOT}/lib/ci-log-reader.py"
  if [ ! -f "$log_reader" ]; then
    echo "Error: ci-log-reader.py not found at $log_reader" >&2
    exit 1
  fi

  if [ -n "$step_name" ]; then
    python3 "$log_reader" "$pipeline_number" --step "$step_name"
  else
    python3 "$log_reader" "$pipeline_number"
  fi
}

# ── Main dispatch ────────────────────────────────────────────────────────────
case "${1:-}" in
  init)          shift; disinto_init "$@" ;;
  up)            shift; disinto_up "$@" ;;
  down)          shift; disinto_down "$@" ;;
  logs)          shift; disinto_logs "$@" ;;
  shell)         shift; disinto_shell ;;
  status)        shift; disinto_status "$@" ;;
  secrets)       shift; disinto_secrets "$@" ;;
  run)           shift; disinto_run "$@" ;;
  ci-logs)       shift; disinto_ci_logs "$@" ;;
  release)       shift; disinto_release "$@" ;;
  hire-an-agent) shift; disinto_hire_an_agent "$@" ;;
  -h|--help)     usage ;;
  *)             usage ;;
esac