#!/usr/bin/env bash
# =============================================================================
# disinto — CLI entry point for the disinto code factory
#
# Commands:
#   disinto init <repo-url> [options]  Bootstrap a new project
#   disinto up                         Start the full stack (docker compose)
#   disinto down                       Stop the full stack
#   disinto logs [service]             Tail service logs
#   disinto shell                      Shell into the agent container
#   disinto status                     Show factory status
#   disinto secrets                    Manage encrypted secrets
#   disinto run                        Run action in ephemeral runner container
#   disinto ci-logs [--step <name>]    Read CI logs from Woodpecker SQLite
#
# Usage:
#   disinto init https://github.com/user/repo
#   disinto init user/repo --branch main --ci-id 3
#   disinto init user/repo --bare      (bare-metal, no compose)
#   disinto up
#   disinto down
#   disinto status
# =============================================================================

set -euo pipefail

# Absolute path to the factory checkout (parent of the bin/ dir holding $0).
FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)"

# Ensure USER and HOME are set — preconditions for lib/env.sh (#674).
# On the host these are normally provided by the shell; defensive defaults
# handle edge cases (cron, minimal containers).
export USER="${USER:-$(id -un)}"
export HOME="${HOME:-$(eval echo "~${USER}")}"

source "${FACTORY_ROOT}/lib/env.sh"
source "${FACTORY_ROOT}/lib/ops-setup.sh"   # setup_ops_repo, migrate_ops_repo
source "${FACTORY_ROOT}/lib/hire-agent.sh"
source "${FACTORY_ROOT}/lib/forge-setup.sh"
source "${FACTORY_ROOT}/lib/generators.sh"
source "${FACTORY_ROOT}/lib/forge-push.sh"
source "${FACTORY_ROOT}/lib/ci-setup.sh"
source "${FACTORY_ROOT}/lib/release.sh"
source "${FACTORY_ROOT}/lib/claude-config.sh"

# ── Helpers ──────────────────────────────────────────────────────────────────

# Print CLI help and exit non-zero.
# NOTE(review): the original here-doc lost its <...> placeholder tokens during
# extraction; the placeholders below are reconstructed — confirm wording
# against the upstream file.
usage() {
  cat <<EOF
Usage: disinto <command> [options]

Commands:
  disinto init <repo-url> [options]  Bootstrap a new project
  disinto up                         Start the full stack (docker compose)
  disinto down                       Stop the full stack
  disinto logs [service]             Tail service logs
  disinto shell                      Shell into the agent container
  disinto status                     Show factory status
  disinto secrets                    Manage encrypted secrets
  disinto run                        Run action in ephemeral runner container
  disinto ci-logs [--step <name>]    Read CI logs from Woodpecker SQLite
  disinto release <version>          Create vault PR for release (e.g., v1.2.0)
  disinto hire-an-agent [--formula <path>] [--local-model <url>] [--model <name>]
                                     Hire a new agent (create user + .profile repo)
  disinto agent <subcommand>         Manage agent state (enable/disable)
  disinto edge <subcommand> [options] Manage edge tunnel registrations

Edge subcommands:
  register <fqdn> [project]          Register a new tunnel (generates keypair if needed)
  deregister <fqdn>                  Remove a tunnel registration
  status                             Show registered tunnels

Agent subcommands:
  disable <agent>                    Remove state file to disable agent
  enable <agent>                     Create state file to enable agent
  disable --all                      Disable all agents
  enable --all                       Enable all agents
  status                             Show which agents are enabled/disabled

Init options:
  --branch <name>                    Primary branch (default: auto-detect)
  --repo-root <path>                 Local clone path (default: ~/name)
  --ci-id <id>                       Woodpecker CI repo ID (default: 0 = no CI)
  --forge-url <url>                  Forge base URL (default: http://localhost:3000)
  --bare                             Skip compose generation (bare-metal setup)
  --yes                              Skip confirmation prompts
  --rotate-tokens                    Force regeneration of all bot tokens/passwords
                                     (idempotent by default)

Hire an agent options:
  --formula <path>                   Path to role formula TOML (default: formulas/<role>.toml)
  --local-model <url>                Base URL for local model server (e.g., http://10.10.10.1:8081)
  --model <name>                     Model name for local model (e.g., unsloth/Qwen3.5-35B-A3B)
  --poll-interval <seconds>          Poll interval in seconds (default: 60)

CI logs options:
  --step <name>                      Filter logs to a specific step (e.g., smoke-init)
EOF
  exit 1
}

# Extract org/repo slug from various URL formats.
# Accepts: https://github.com/user/repo, https://codeberg.org/user/repo,
#          http://localhost:3000/user/repo, user/repo, *.git
# Prints "org/repo" on stdout; exits 1 on anything unparsable.
parse_repo_slug() {
  local url="$1"
  url="${url#https://}"
  url="${url#http://}"
  # Strip any hostname (three path segments => first one is a host).
  if [[ "$url" =~ ^[a-zA-Z0-9._:-]+/[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+ ]]; then
    url="${url#*/}"   # strip host part
  fi
  url="${url%.git}"
  url="${url%/}"
  if [[ ! "$url" =~ ^[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+$ ]]; then
    echo "Error: invalid repo URL — expected https://host/org/repo or org/repo" >&2
    exit 1
  fi
  printf '%s' "$url"
}

# Build a clone-able URL from a slug and forge URL.
clone_url_from_slug() {
  local slug="$1" forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  printf '%s/%s.git' "$forge_url" "$slug"
}

# Ensure an age key exists; generate one if missing.
# Exports AGE_PUBLIC_KEY on success; returns 1 if age-keygen is unavailable
# or the key cannot be read/created.
ensure_age_key() {
  local key_dir="${HOME}/.config/sops/age"
  local key_file="${key_dir}/keys.txt"
  if [ -f "$key_file" ]; then
    AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
    [ -n "$AGE_PUBLIC_KEY" ] || return 1
    export AGE_PUBLIC_KEY
    return 0
  fi
  if ! command -v age-keygen &>/dev/null; then
    return 1
  fi
  mkdir -p "$key_dir"
  age-keygen -o "$key_file" 2>/dev/null
  chmod 600 "$key_file"   # private key — owner-only
  AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
  [ -n "$AGE_PUBLIC_KEY" ] || return 1
  export AGE_PUBLIC_KEY
  echo "Generated age key: ${key_file}"
}

# Write .sops.yaml pinning the age recipient for .env.enc files.
# Write .sops.yaml pinning the age recipient for .env*.enc files.
# NOTE(review): the original here-doc body was lost in extraction; this
# creation_rules block is a reconstruction — TODO confirm against upstream.
write_sops_yaml() {
  local pub_key="$1"
  cat > "${FACTORY_ROOT}/.sops.yaml" <<EOF
creation_rules:
  - path_regex: \.env.*\.enc\$
    age: ${pub_key}
EOF
}

# Encrypt a dotenv file with sops (dotenv in, dotenv out).
encrypt_env_file() {
  local input="$1" output="$2"
  sops -e --input-type dotenv --output-type dotenv "$input" > "$output"
}

# Store secrets into .env.enc (encrypted) if SOPS + age available,
# else leave them in .env (plaintext). Always returns 0 — encryption is
# best-effort; the fallback is a loud warning, not a failure.
write_secrets_encrypted() {
  local env_file="${FACTORY_ROOT}/.env"
  local enc_file="${FACTORY_ROOT}/.env.enc"
  if command -v sops &>/dev/null && command -v age-keygen &>/dev/null; then
    if ensure_age_key; then
      # Write .sops.yaml if missing
      if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
        write_sops_yaml "$AGE_PUBLIC_KEY"
      fi
      # Encrypt the plaintext .env to .env.enc
      if [ -f "$env_file" ]; then
        encrypt_env_file "$env_file" "$enc_file"
        rm -f "$env_file"
        echo "Secrets encrypted to .env.enc (plaintext .env removed)"
        return 0
      fi
    fi
  fi
  # Fallback: keep plaintext .env
  echo "Warning: sops/age not available — secrets stored in plaintext .env" >&2
  return 0
}

export FORGEJO_DATA_DIR="${HOME}/.disinto/forgejo"

# Generate docker-compose.yml in the factory root.
# (Implementation in lib/generators.sh)
generate_compose() {
  _generate_compose_impl "$@"
}

# Generate docker/agents/ files if they don't already exist.
# (Implementation in lib/generators.sh)
generate_agent_docker() {
  _generate_agent_docker_impl "$@"
}

# Generate docker/Caddyfile template for edge proxy.
# (Implementation in lib/generators.sh)
generate_caddyfile() {
  _generate_caddyfile_impl "$@"
}

# Generate docker/index.html default page.
# (Implementation in lib/generators.sh)
generate_staging_index() {
  _generate_staging_index_impl "$@"
}

# Generate template .woodpecker/ deployment pipeline configs in a project repo.
# Creates staging.yml and production.yml alongside the project's existing CI config.
# These pipelines trigger on Woodpecker's deployment event with environment filters.
# (Implementation in lib/generators.sh)
generate_deploy_pipelines() {
  _generate_deploy_pipelines_impl "$@"
}

# Check whether compose mode is active (docker-compose.yml exists).
is_compose_mode() {
  [ -f "${FACTORY_ROOT}/docker-compose.yml" ]
}

# Create and seed the {project}-ops repo on Forgejo with initial directory structure.
# The ops repo holds operational data: vault items, journals, evidence, prerequisites.
# ops repo setup is now in lib/ops-setup.sh
# push_to_forge() is sourced from lib/forge-push.sh

# Preflight check — verify all factory requirements before proceeding.
# Args: $1 org/repo slug (optional), $2 forge base URL (optional).
# Exits 1 if any hard requirement is missing; soft requirements only warn.
preflight_check() {
  local repo_slug="${1:-}"
  local forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local errors=0

  # ── Required commands ──
  local -A hints=(
    [claude]="Install: https://docs.anthropic.com/en/docs/claude-code/overview"
    [tmux]="Install: apt install tmux / brew install tmux"
    [git]="Install: apt install git / brew install git"
    [jq]="Install: apt install jq / brew install jq"
    [python3]="Install: apt install python3 / brew install python3"
    [curl]="Install: apt install curl / brew install curl"
  )
  local cmd
  for cmd in claude tmux git jq python3 curl; do
    if ! command -v "$cmd" &>/dev/null; then
      echo "Error: ${cmd} not found" >&2
      echo " ${hints[$cmd]}" >&2
      errors=$((errors + 1))
    fi
  done

  # ── Claude Code authentication ──
  if command -v claude &>/dev/null && command -v jq &>/dev/null; then
    local auth_json auth_stderr auth_rc=0
    # Two invocations on purpose: one captures stderr (to detect an old CLI
    # without the subcommand), one captures the JSON on stdout.
    auth_stderr=$(claude auth status 2>&1 >/dev/null) || auth_rc=$?
    auth_json=$(claude auth status 2>/dev/null) || auth_json=""
    # Only skip check if subcommand is unrecognized (old claude version)
    if printf '%s' "$auth_stderr" | grep -qi "unknown command"; then
      : # claude version doesn't support auth status — skip
    elif [ -z "$auth_json" ] || [ "$auth_rc" -ne 0 ]; then
      echo "Error: Claude Code is not authenticated (auth check failed)" >&2
      echo " Run: claude auth login" >&2
      errors=$((errors + 1))
    else
      local logged_in
      logged_in=$(printf '%s' "$auth_json" | jq -r '.loggedIn // false' 2>/dev/null) || logged_in="false"
      if [ "$logged_in" != "true" ]; then
        echo "Error: Claude Code is not authenticated" >&2
        echo " Run: claude auth login" >&2
        errors=$((errors + 1))
      fi
    fi
  fi

  # ── Forge API check (verify the forge is reachable and token works) ──
  if [ -n "${FORGE_TOKEN:-}" ] && command -v curl &>/dev/null; then
    if ! curl -sf --max-time 10 \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${forge_url}/api/v1/repos/${repo_slug}" >/dev/null 2>&1; then
      echo "Error: Forge API auth failed at ${forge_url}" >&2
      echo " Verify your FORGE_TOKEN and that Forgejo is running" >&2
      errors=$((errors + 1))
    fi
  fi

  # ── Git identity check (warn only — init sets a repo-local fallback) ──
  if command -v git &>/dev/null; then
    local git_name git_email
    git_name=$(git config user.name 2>/dev/null) || git_name=""
    git_email=$(git config user.email 2>/dev/null) || git_email=""
    if [ -z "$git_name" ] || [ -z "$git_email" ]; then
      echo "Warning: git user.name/user.email not configured" >&2
      echo " Init will set a repo-local identity for ops commits" >&2
    fi
  fi

  # ── Optional tools (warn only) ──
  if ! command -v docker &>/dev/null; then
    echo "Warning: docker not found (needed for Forgejo provisioning)" >&2
  fi
  if ! command -v sops &>/dev/null; then
    echo "Warning: sops not found (secrets will be stored in plaintext .env)" >&2
    echo " Install: https://github.com/getsops/sops/releases" >&2
  fi
  if ! command -v age-keygen &>/dev/null; then
    echo "Warning: age not found (needed for secret encryption with SOPS)" >&2
    echo " Install: apt install age / brew install age" >&2
  fi

  if [ "$errors" -gt 0 ]; then
    echo "" >&2
    echo "${errors} preflight error(s) — fix the above before running disinto init" >&2
    exit 1
  fi
}

# Clone the repo if the target directory doesn't exist; validate if it does.
clone_or_validate() {
  local slug="$1" target="$2" forge_url="${3:-${FORGE_URL:-http://localhost:3000}}"
  if [ -d "${target}/.git" ]; then
    echo "Repo: ${target} (existing clone)"
    return
  fi
  local url
  url=$(clone_url_from_slug "$slug" "$forge_url")
  echo "Cloning: ${url} -> ${target}"
  git clone "$url" "$target"
}

# Detect the primary branch from the remote HEAD or fallback to main/master.
detect_branch() {
  local repo_root="$1"
  local branch
  branch=$(git -C "$repo_root" symbolic-ref refs/remotes/origin/HEAD 2>/dev/null \
    | sed 's|refs/remotes/origin/||') || true
  if [ -z "$branch" ]; then
    if git -C "$repo_root" show-ref --verify --quiet refs/remotes/origin/main 2>/dev/null; then
      branch="main"
    else
      branch="master"
    fi
  fi
  printf '%s' "$branch"
}

# Generate projects/<name>.toml config file.
# NOTE(review): the original here-doc body was lost in extraction; the keys
# below are reconstructed from the readers elsewhere in this file (name, repo,
# forge_url, repo_root, primary_branch, woodpecker_repo_id, ops_repo insertion
# point after the repo line) — TODO confirm against upstream.
generate_toml() {
  local path="$1" name="$2" repo="$3" root="$4" branch="$5" ci_id="$6" forge_url="$7"
  cat > "$path" <<EOF
# Project configuration (generated by disinto init)
name = "${name}"
repo = "${repo}"
repo_root = "${root}"
primary_branch = "${branch}"
woodpecker_repo_id = ${ci_id}
forge_url = "${forge_url}"
EOF
}

# Create the standard workflow labels on the project repo (idempotent).
# NOTE(review): the function opening and label→color map were lost in
# extraction; the colors below are placeholders — TODO confirm against
# the upstream palette. The label NAMES are from the visible loop.
create_labels() {
  local repo_slug="$1" forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local api="${forge_url}/api/v1/repos/${repo_slug}"
  local -A labels=(
    [backlog]="#1f77b4"
    [in-progress]="#2ca02c"
    [blocked]="#d62728"
    [tech-debt]="#8c564b"
    [underspecified]="#e377c2"
    [vision]="#9467bd"
    [action]="#17becf"
    [bug-report]="#ff7f0e"
    [prediction/unreviewed]="#bcbd22"
    [prediction/dismissed]="#7f7f7f"
    [prediction/actioned]="#98df8a"
    [needs-triage]="#ffbb78"
    [reproduced]="#c49c94"
    [cannot-reproduce]="#c5b0d5"
    [in-triage]="#aec7e8"
    [rejected]="#ff9896"
  )
  # Fetch names of labels that already exist so re-runs are no-ops.
  local existing
  existing=$(curl -sf \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${api}/labels" 2>/dev/null \
    | grep -o '"name":"[^"]*"' | cut -d'"' -f4) || existing=""
  local name color
  local created=0 skipped=0 failed=0
  for name in backlog in-progress blocked tech-debt underspecified vision action bug-report prediction/unreviewed prediction/dismissed prediction/actioned needs-triage reproduced cannot-reproduce in-triage rejected; do
    if echo "$existing" | grep -qx "$name"; then
      echo " . ${name} (already exists)"
      skipped=$((skipped + 1))
      continue
    fi
    color="${labels[$name]}"
    if curl -sf -X POST \
        -H "Authorization: token ${FORGE_TOKEN}" \
        -H "Content-Type: application/json" \
        "${api}/labels" \
        -d "{\"name\":\"${name}\",\"color\":\"${color}\"}" >/dev/null 2>&1; then
      echo " + ${name} (created)"
      created=$((created + 1))
    else
      echo " ! ${name} (failed to create)"
      failed=$((failed + 1))
    fi
  done
  echo "Labels: ${created} created, ${skipped} skipped, ${failed} failed"
}

# Generate a minimal VISION.md template in the target project.
# NOTE(review): part of the here-doc (title/intro before "## Who it's for")
# was lost in extraction; reconstructed minimally — TODO confirm upstream.
generate_vision() {
  local repo_root="$1" name="$2"
  local vision_path="${repo_root}/VISION.md"
  if [ -f "$vision_path" ]; then
    echo "VISION: ${vision_path} (already exists, skipping)"
    return
  fi
  cat > "$vision_path" <<EOF
# ${name}

## Who it's for

## Design principles
-
-
-

## Milestones

### Current
-

### Next
-
EOF
  echo "Created: ${vision_path}"
  echo " Commit this to your repo when ready"
}

# Copy issue templates from templates/ to target project repo.
# Fix: the target filename must reuse the computed \${filename}; the source
# contained a corrupted "$(unknown)" command substitution here.
copy_issue_templates() {
  local repo_root="$1"
  local template_dir="${FACTORY_ROOT}/templates"
  local target_dir="${repo_root}/.forgejo/ISSUE_TEMPLATE"
  # Skip if templates directory doesn't exist
  if [ ! -d "$template_dir" ]; then
    return
  fi
  # Create target directory
  mkdir -p "$target_dir"
  # Copy each template file if it doesn't already exist
  for template in "$template_dir"/issue/*; do
    [ -f "$template" ] || continue
    local filename
    filename=$(basename "$template")
    local target_path="${target_dir}/${filename}"
    if [ ! -f "$target_path" ]; then
      cp "$template" "$target_path"
      echo "Copied: ${target_path}"
    else
      echo "Skipped: ${target_path} (already exists)"
    fi
  done
}

# Install scheduling entries for project agents (implementation in lib/ci-setup.sh)
# In compose mode this is a no-op (the agents container uses a polling loop).
install_cron() { _load_ci_context _install_cron_impl "$@" } # Create Woodpecker OAuth2 app on Forgejo (implementation in lib/ci-setup.sh) create_woodpecker_oauth() { _load_ci_context _create_woodpecker_oauth_impl "$@" } # Create Chat OAuth2 app on Forgejo (implementation in lib/ci-setup.sh) create_chat_oauth() { _load_ci_context _create_chat_oauth_impl "$@" } # Generate WOODPECKER_TOKEN via Forgejo OAuth2 flow (implementation in lib/ci-setup.sh) generate_woodpecker_token() { _load_ci_context _generate_woodpecker_token_impl "$@" } # Activate repo in Woodpecker CI (implementation in lib/ci-setup.sh) activate_woodpecker_repo() { _load_ci_context _activate_woodpecker_repo_impl "$@" } # ── Password prompt helper ──────────────────────────────────────────────────── # Prompts for FORGE_ADMIN_PASS with confirmation. # Returns 0 on success, 1 on failure. # Usage: prompt_admin_password [] prompt_admin_password() { local env_file="${1:-${FACTORY_ROOT}/.env}" # Check if password already exists in .env (resumable init) if grep -q '^FORGE_ADMIN_PASS=' "$env_file" 2>/dev/null; then echo "Forge: FORGE_ADMIN_PASS already set (resuming from .env)" return 0 fi # Non-interactive mode without pre-exported password if [ "$auto_yes" = true ]; then if [ -z "${FORGE_ADMIN_PASS:-}" ]; then echo "Error: FORGE_ADMIN_PASS environment variable is required in non-interactive mode" >&2 echo " Export the password before running: export FORGE_ADMIN_PASS=''" >&2 exit 1 fi # Write the pre-exported password to .env if grep -q '^FORGE_ADMIN_PASS=' "$env_file" 2>/dev/null; then sed -i "s|^FORGE_ADMIN_PASS=.*|FORGE_ADMIN_PASS=${FORGE_ADMIN_PASS}|" "$env_file" else printf 'FORGE_ADMIN_PASS=%s\n' "$FORGE_ADMIN_PASS" >> "$env_file" fi echo "Forge: FORGE_ADMIN_PASS set from environment" return 0 fi # Interactive mode: prompt for password with confirmation if [ -t 0 ]; then local pass1 pass2 min_length=8 attempts=0 max_attempts=3 echo "Forge: Setting disinto-admin password" echo " Password must be at least 
${min_length} characters" echo "" while [ "$attempts" -lt "$max_attempts" ]; do attempts=$((attempts + 1)) # First attempt (or retry): read password printf "Enter password [%d/%d]: " "$attempts" "$max_attempts" IFS= read -rs -p '' pass1 echo "" # Read confirmation printf "Confirm password: " IFS= read -rs -p '' pass2 echo "" # Validate length if [ "${#pass1}" -lt "$min_length" ]; then echo "Error: password must be at least ${min_length} characters (got ${#pass1})" >&2 continue fi # Validate match if [ "$pass1" != "$pass2" ]; then echo "Error: passwords do not match" >&2 continue fi # Success: write to .env printf 'FORGE_ADMIN_PASS=%s\n' "$pass1" >> "$env_file" echo "Forge: FORGE_ADMIN_PASS set (saved to .env)" return 0 done echo "Error: exceeded ${max_attempts} attempts — password not set" >&2 return 1 fi # Non-interactive, no TTY, no pre-exported password echo "Error: FORGE_ADMIN_PASS is not set and cannot prompt (no TTY)" >&2 echo " Either:" >&2 echo " 1) Export the password before running: export FORGE_ADMIN_PASS=''" >&2 echo " 2) Run interactively (attach a TTY) to be prompted" >&2 exit 1 } # ── init command ───────────────────────────────────────────────────────────── disinto_init() { local repo_url="${1:-}" if [ -z "$repo_url" ]; then echo "Error: repo URL required" >&2 echo "Usage: disinto init " >&2 exit 1 fi shift # Parse flags local branch="" repo_root="" ci_id="0" auto_yes=false forge_url_flag="" bare=false rotate_tokens=false while [ $# -gt 0 ]; do case "$1" in --branch) branch="$2"; shift 2 ;; --repo-root) repo_root="$2"; shift 2 ;; --ci-id) ci_id="$2"; shift 2 ;; --forge-url) forge_url_flag="$2"; shift 2 ;; --bare) bare=true; shift ;; --yes) auto_yes=true; shift ;; --rotate-tokens) rotate_tokens=true; shift ;; *) echo "Unknown option: $1" >&2; exit 1 ;; esac done # Export bare-metal flag for setup_forge export DISINTO_BARE="$bare" # Extract org/repo slug local forge_repo forge_repo=$(parse_repo_slug "$repo_url") local project_name="${forge_repo##*/}" 
local toml_path="${FACTORY_ROOT}/projects/${project_name}.toml" # Determine forge URL (flag > env > default) local forge_url="${forge_url_flag:-${FORGE_URL:-http://localhost:3000}}" echo "=== disinto init ===" echo "Project: ${forge_repo}" echo "Name: ${project_name}" echo "Forge: ${forge_url}" # Check for existing config local toml_exists=false if [ -f "$toml_path" ]; then toml_exists=true echo "Config: ${toml_path} (already exists, reusing)" # Read repo_root and branch from existing TOML local existing_root existing_branch existing_root=$(python3 -c " import sys, tomllib with open(sys.argv[1], 'rb') as f: cfg = tomllib.load(f) print(cfg.get('repo_root', '')) " "$toml_path" 2>/dev/null) || existing_root="" existing_branch=$(python3 -c " import sys, tomllib with open(sys.argv[1], 'rb') as f: cfg = tomllib.load(f) print(cfg.get('primary_branch', '')) " "$toml_path" 2>/dev/null) || existing_branch="" # Use existing values as defaults if [ -n "$existing_branch" ] && [ -z "$branch" ]; then branch="$existing_branch" fi # Handle repo_root: flag overrides TOML, prompt if they differ if [ -z "$repo_root" ]; then repo_root="${existing_root:-/home/${USER}/${project_name}}" elif [ -n "$existing_root" ] && [ "$repo_root" != "$existing_root" ]; then echo "Note: --repo-root (${repo_root}) differs from TOML (${existing_root})" local update_toml=false if [ "$auto_yes" = true ]; then update_toml=true elif [ -t 0 ]; then read -rp "Update repo_root in TOML to ${repo_root}? 
[y/N] " confirm if [[ "$confirm" =~ ^[Yy] ]]; then update_toml=true else repo_root="$existing_root" fi fi if [ "$update_toml" = true ]; then python3 -c " import sys, re, pathlib p = pathlib.Path(sys.argv[1]) text = p.read_text() text = re.sub(r'^repo_root\s*=\s*.*$', 'repo_root = \"' + sys.argv[2] + '\"', text, flags=re.MULTILINE) p.write_text(text) " "$toml_path" "$repo_root" echo "Updated: repo_root in ${toml_path}" fi fi fi # Generate compose files (unless --bare) if [ "$bare" = false ]; then local forge_port forge_port=$(printf '%s' "$forge_url" | sed -E 's|.*:([0-9]+)/?$|\1|') forge_port="${forge_port:-3000}" generate_compose "$forge_port" generate_agent_docker generate_caddyfile generate_staging_index # Create empty .env so docker compose can parse the agents service # env_file reference before setup_forge generates the real tokens (#769) touch "${FACTORY_ROOT}/.env" fi # Configure Forgejo and Woodpecker subpath URLs when EDGE_TUNNEL_FQDN is set if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then # Forgejo ROOT_URL with /forge/ subpath (note trailing slash - Forgejo needs it) if ! grep -q '^FORGEJO_ROOT_URL=' "${FACTORY_ROOT}/.env" 2>/dev/null; then echo "FORGEJO_ROOT_URL=https://${EDGE_TUNNEL_FQDN}/forge/" >> "${FACTORY_ROOT}/.env" fi # Woodpecker WOODPECKER_HOST with /ci subpath (no trailing slash for v3) if ! 
grep -q '^WOODPECKER_HOST=' "${FACTORY_ROOT}/.env" 2>/dev/null; then echo "WOODPECKER_HOST=https://${EDGE_TUNNEL_FQDN}/ci" >> "${FACTORY_ROOT}/.env" fi fi # Prompt for FORGE_ADMIN_PASS before setup_forge # This ensures the password is set before Forgejo user creation prompt_admin_password "${FACTORY_ROOT}/.env" # Set up local Forgejo instance (provision if needed, create users/tokens/repo) if [ "$rotate_tokens" = true ]; then echo "Note: Forcing token rotation (tokens/passwords will be regenerated)" setup_forge --rotate-tokens "$forge_url" "$forge_repo" else setup_forge "$forge_url" "$forge_repo" fi # Preflight: verify factory requirements preflight_check "$forge_repo" "$forge_url" # Determine repo root (for new projects) # This host-side clone is operator-only convenience (#589): it enables # `cd ~/project && $EDITOR .` but is NOT read by agents at runtime. # Agents clone independently from FORGE_URL/FORGE_REPO into the # project-repos named volume at /home/agent/repos/${project_name}. repo_root="${repo_root:-/home/${USER}/${project_name}}" # Clone or validate (try origin first for initial clone from upstream) if [ ! -d "${repo_root}/.git" ]; then # For initial setup, clone from the provided URL directly echo "Cloning: ${repo_url} -> ${repo_root}" git clone "$repo_url" "$repo_root" 2>/dev/null || \ clone_or_validate "$forge_repo" "$repo_root" "$forge_url" else echo "Repo: ${repo_root} (existing clone)" fi # Push to local Forgejo (skip if SKIP_PUSH is set) if [ "${SKIP_PUSH:-false}" = "false" ]; then push_to_forge "$repo_root" "$forge_url" "$forge_repo" fi # Detect primary branch if [ -z "$branch" ]; then branch=$(detect_branch "$repo_root") fi echo "Branch: ${branch}" # Set up {project}-ops repo (#757) # Always use disinto-admin as the ops repo owner — forge_repo owner may be # the calling user (e.g. johba) but the ops repo belongs to disinto-admin. 
local ops_slug="disinto-admin/${project_name}-ops" local ops_root="/home/${USER}/${project_name}-ops" setup_ops_repo "$forge_url" "$ops_slug" "$ops_root" "$branch" "${HUMAN_TOKEN:-}" # Migrate ops repo to canonical structure (seed missing directories/files) # This brings pre-#407 deployments up to date with the canonical structure migrate_ops_repo "$ops_root" "$branch" # Set up vault branch protection on ops repo (#77) # This ensures admin-only merge to main, blocking bots from merging vault PRs # Use HUMAN_TOKEN (disinto-admin) or FORGE_TOKEN (dev-bot) for admin operations export FORGE_OPS_REPO="$ops_slug" # Source env.sh to ensure FORGE_TOKEN is available source "${FACTORY_ROOT}/lib/env.sh" source "${FACTORY_ROOT}/lib/branch-protection.sh" if setup_vault_branch_protection "$branch"; then echo "Branch protection: vault protection configured on ${ops_slug}" else echo "ERROR: failed to set up vault branch protection — security rules not applied" >&2 fi unset FORGE_OPS_REPO # Generate project TOML (skip if already exists) if [ "$toml_exists" = false ]; then # Prompt for CI ID if interactive and not already set via flag if [ "$ci_id" = "0" ] && [ "$auto_yes" = false ] && [ -t 0 ]; then read -rp "Woodpecker CI repo ID (0 to skip CI): " user_ci_id ci_id="${user_ci_id:-0}" fi generate_toml "$toml_path" "$project_name" "$forge_repo" "$repo_root" "$branch" "$ci_id" "$forge_url" echo "Created: ${toml_path}" fi # Update ops_repo in TOML with the resolved actual ops slug. # Uses in-place substitution to prevent duplicate keys on repeated init runs. # If the key is missing (manually created TOML), it is inserted after the repo line. 
if [ -n "${_ACTUAL_OPS_SLUG:-}" ] && [ -f "$toml_path" ]; then python3 -c " import sys, re, pathlib p = pathlib.Path(sys.argv[1]) text = p.read_text() new_val = 'ops_repo = \"' + sys.argv[2] + '\"' if re.search(r'^ops_repo\s*=', text, re.MULTILINE): text = re.sub(r'^ops_repo\s*=\s*.*\$', new_val, text, flags=re.MULTILINE) else: text = re.sub(r'^(repo\s*=\s*\"[^\"]*\")', r'\1\n' + new_val, text, flags=re.MULTILINE) p.write_text(text) " "$toml_path" "${_ACTUAL_OPS_SLUG}" echo "Updated: ops_repo in ${toml_path}" fi # Create OAuth2 app on Forgejo for Woodpecker (before compose up) _WP_REPO_ID="" create_woodpecker_oauth "$forge_url" "$forge_repo" # Create OAuth2 app on Forgejo for disinto-chat (#708) local chat_redirect_uri if [ -n "${EDGE_TUNNEL_FQDN:-}" ]; then chat_redirect_uri="https://${EDGE_TUNNEL_FQDN}/chat/oauth/callback" else chat_redirect_uri="http://localhost/chat/oauth/callback" fi create_chat_oauth "$chat_redirect_uri" # Generate WOODPECKER_AGENT_SECRET for server↔agent auth local env_file="${FACTORY_ROOT}/.env" if ! grep -q '^WOODPECKER_AGENT_SECRET=' "$env_file" 2>/dev/null; then local agent_secret agent_secret="$(head -c 32 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 40)" printf 'WOODPECKER_AGENT_SECRET=%s\n' "$agent_secret" >> "$env_file" echo "Config: WOODPECKER_AGENT_SECRET generated and saved to .env" fi # Ensure Claude Code never auto-updates, phones home, or sends telemetry (#725) if ! 
grep -q '^CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=' "$env_file" 2>/dev/null; then printf 'CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1\n' >> "$env_file" echo "Config: CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 saved to .env" fi # Create labels on remote create_labels "$forge_repo" "$forge_url" # Set up branch protection on project repo (#10) # This enforces PR flow: no direct pushes, 1 approval required, dev-bot can merge after CI if setup_project_branch_protection "$forge_repo" "$branch"; then echo "Branch protection: project protection configured on ${forge_repo}" else echo "ERROR: failed to set up project branch protection — security rules not applied" >&2 fi # Generate VISION.md template generate_vision "$repo_root" "$project_name" # Generate template deployment pipeline configs in project repo generate_deploy_pipelines "$repo_root" "$project_name" # Copy issue templates to target project copy_issue_templates "$repo_root" # Install scheduling (bare-metal: cron; compose: polling loop in entrypoint.sh) install_cron "$project_name" "$toml_path" "$auto_yes" "$bare" # Set up mirror remotes if [mirrors] configured in TOML source "${FACTORY_ROOT}/lib/load-project.sh" "$toml_path" if [ -n "${MIRROR_NAMES:-}" ]; then echo "Mirrors: setting up remotes" local mname murl local mirrors_ok=true for mname in $MIRROR_NAMES; do murl=$(eval "echo \"\$MIRROR_$(echo "$mname" | tr '[:lower:]' '[:upper:]')\"") || true [ -z "$murl" ] && continue if git -C "$repo_root" remote get-url "$mname" >/dev/null 2>&1; then if git -C "$repo_root" remote set-url "$mname" "$murl"; then echo " + ${mname} -> ${murl} (updated)" else echo " ! ${mname} -> ${murl} (failed to update URL)" mirrors_ok=false fi else if git -C "$repo_root" remote add "$mname" "$murl"; then echo " + ${mname} -> ${murl} (added)" else echo " ! 
${mname} -> ${murl} (failed to add remote)" mirrors_ok=false fi fi done # Initial sync: push current primary branch to mirrors if [ "$mirrors_ok" = true ]; then source "${FACTORY_ROOT}/lib/mirrors.sh" export PROJECT_REPO_ROOT="$repo_root" if mirror_push; then echo "Mirrors: initial sync complete" else echo "Warning: mirror push failed" >&2 fi fi fi # Encrypt secrets if SOPS + age are available write_secrets_encrypted # Bring up the full stack (compose mode only) if [ "$bare" = false ] && [ -f "${FACTORY_ROOT}/docker-compose.yml" ]; then echo "" echo "── Starting full stack ────────────────────────────────" docker compose -f "${FACTORY_ROOT}/docker-compose.yml" up -d echo "Stack: running (forgejo + woodpecker + agents)" # Generate WOODPECKER_TOKEN via Forgejo OAuth2 flow (#779) generate_woodpecker_token "$forge_url" || true # Activate repo in Woodpecker now that stack is running activate_woodpecker_repo "$forge_repo" # Use detected Woodpecker repo ID if ci_id was not explicitly set if [ "$ci_id" = "0" ] && [ -n "${_WP_REPO_ID:-}" ]; then ci_id="$_WP_REPO_ID" echo "CI ID: ${ci_id} (from Woodpecker)" # Update TOML with Woodpecker repo ID if [ -f "$toml_path" ]; then python3 -c " import sys, re, pathlib p = pathlib.Path(sys.argv[1]) text = p.read_text() text = re.sub(r'^woodpecker_repo_id\s*=\s*.*$', 'woodpecker_repo_id = ' + sys.argv[2], text, flags=re.MULTILINE) p.write_text(text) " "$toml_path" "$ci_id" fi fi fi # ── Claude shared config directory (#641) ─────────────────────────── # Create CLAUDE_CONFIG_DIR for cross-container OAuth lock coherence. # proper-lockfile uses atomic mkdir(${CLAUDE_CONFIG_DIR}.lock), so all # containers sharing this path get native cross-container locking. if ! 
setup_claude_config_dir "$auto_yes"; then exit 1 fi # Write CLAUDE_SHARED_DIR and CLAUDE_CONFIG_DIR to .env (idempotent) _env_set_idempotent "CLAUDE_SHARED_DIR" "$CLAUDE_SHARED_DIR" "$env_file" _env_set_idempotent "CLAUDE_CONFIG_DIR" "$CLAUDE_CONFIG_DIR" "$env_file" # Activate default agents (zero-cost when idle — they only invoke Claude # when there is actual work, so an empty project burns no LLM tokens) mkdir -p "${FACTORY_ROOT}/state" # State files are idempotent — create if missing, skip if present for state_file in ".dev-active" ".reviewer-active" ".gardener-active"; do if [ -f "${FACTORY_ROOT}/state/${state_file}" ]; then echo "State: ${state_file} (already active)" else touch "${FACTORY_ROOT}/state/${state_file}" echo "State: ${state_file} (created)" fi done echo "" echo "Done. Project ${project_name} is ready." echo " Config: ${toml_path}" echo " Clone: ${repo_root}" echo " Forge: ${forge_url}/${forge_repo}" if [ "$bare" = false ]; then echo " Stack: docker compose (use 'disinto up/down/logs/shell')" else echo " Mode: bare-metal" fi echo "" echo "── Claude authentication ──────────────────────────────" echo " OAuth (shared across containers):" echo " Run 'claude auth login' on the host once." echo " Credentials in ${CLAUDE_CONFIG_DIR} are shared across containers." echo " API key (alternative — metered billing, no rotation issues):" echo " Set ANTHROPIC_API_KEY in .env to skip OAuth entirely." echo "" echo "── Claude config directory ────────────────────────────" echo " CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR}" echo " Add this to your shell rc (~/.bashrc or ~/.zshrc):" echo " export CLAUDE_CONFIG_DIR=${CLAUDE_CONFIG_DIR}" echo " This ensures interactive Claude Code sessions on this host" echo " share the same OAuth lock and token store as the factory." echo "" echo " Run 'disinto status' to verify." 
}

# ── status command ───────────────────────────────────────────────────────────
# Print a per-project summary: active dev sessions (phase files in /tmp) and,
# when FORGE_TOKEN is available, open backlog-issue and PR counts from the
# forge API (read from the x-total-count header of a HEAD request).
disinto_status() {
  local toml_dir="${FACTORY_ROOT}/projects"
  local found=false
  for toml in "${toml_dir}"/*.toml; do
    [ -f "$toml" ] || continue
    found=true

    # Parse name, repo, forge_url from the project TOML (tomllib: Python 3.11+).
    # Any parse failure skips the project rather than aborting the whole status.
    local pname prepo pforge_url
    pname=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f)['name'])
" "$toml" 2>/dev/null) || continue
    prepo=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f)['repo'])
" "$toml" 2>/dev/null) || continue
    pforge_url=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f).get('forge_url', ''))
" "$toml" 2>/dev/null) || pforge_url=""
    pforge_url="${pforge_url:-${FORGE_URL:-http://localhost:3000}}"

    echo "== ${pname} (${prepo}) =="

    # Active dev sessions — one /tmp/dev-session-<project>-<issue>.phase file
    # per running session; first line of the file is the current phase.
    local has_sessions=false
    for pf in /tmp/dev-session-"${pname}"-*.phase; do
      [ -f "$pf" ] || continue
      has_sessions=true
      local issue phase_line
      issue=$(basename "$pf" | sed "s/dev-session-${pname}-//;s/\.phase//")
      phase_line=$(head -1 "$pf" 2>/dev/null || echo "unknown")
      echo "  Session #${issue}: ${phase_line}"
    done
    if [ "$has_sessions" = false ]; then
      echo "  Sessions: none"
    fi

    # Backlog depth via API — limit=1 keeps the body tiny; the count comes
    # from the x-total-count response header, not the body.
    if [ -n "${FORGE_TOKEN:-}" ]; then
      local api="${pforge_url}/api/v1/repos/${prepo}"
      local backlog_count pr_count
      backlog_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/issues?state=open&labels=backlog&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || backlog_count="?"
      echo "  Backlog: ${backlog_count:-0} issues"
      pr_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/pulls?state=open&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || pr_count="?"
      echo "  Open PRs: ${pr_count:-0}"
    else
      echo "  Backlog: (no FORGE_TOKEN)"
      echo "  Open PRs: (no FORGE_TOKEN)"
    fi
    echo ""
  done

  if [ "$found" = false ]; then
    echo "No projects configured."
    echo "Run 'disinto init <repo-url>' to get started."
  fi
}

# ── secrets command ──────────────────────────────────────────────────────────
# Manage three secret stores:
#   secrets/<name>.enc  — individual age-encrypted values (add/show <name>)
#   .env.enc            — sops-encrypted agent secrets    (edit/show/migrate)
#   .env.vault.enc      — sops-encrypted vault secrets    (*-vault variants)
disinto_secrets() {
  local subcmd="${1:-}"
  local enc_file="${FACTORY_ROOT}/.env.enc"
  local env_file="${FACTORY_ROOT}/.env"
  local vault_enc_file="${FACTORY_ROOT}/.env.vault.enc"
  local vault_env_file="${FACTORY_ROOT}/.env.vault"

  # Shared helper: ensure sops+age are installed and .sops.yaml exists.
  # ensure_age_key / write_sops_yaml are provided by the sourced libs.
  _secrets_ensure_sops() {
    if ! command -v sops &>/dev/null || ! command -v age-keygen &>/dev/null; then
      echo "Error: sops and age are required." >&2
      echo "  Install sops: https://github.com/getsops/sops/releases" >&2
      echo "  Install age: apt install age / brew install age" >&2
      exit 1
    fi
    if ! ensure_age_key; then
      echo "Error: failed to generate age key" >&2
      exit 1
    fi
    if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
      write_sops_yaml "$AGE_PUBLIC_KEY"
      echo "Created: .sops.yaml"
    fi
  }

  local secrets_dir="${FACTORY_ROOT}/secrets"
  local age_key_file="${HOME}/.config/sops/age/keys.txt"

  # Shared helper: ensure the age key exists and export AGE_PUBLIC_KEY
  # (derived from the private key file with age-keygen -y).
  _secrets_ensure_age_key() {
    if ! command -v age &>/dev/null; then
      echo "Error: age is required." >&2
      echo "  Install age: apt install age / brew install age" >&2
      exit 1
    fi
    if [ ! -f "$age_key_file" ]; then
      echo "Error: age key not found at ${age_key_file}" >&2
      echo "  Run 'disinto init' to generate one, or create manually with:" >&2
      echo "  mkdir -p ~/.config/sops/age && age-keygen -o ${age_key_file}" >&2
      exit 1
    fi
    AGE_PUBLIC_KEY="$(age-keygen -y "$age_key_file" 2>/dev/null)"
    if [ -z "$AGE_PUBLIC_KEY" ]; then
      echo "Error: failed to read public key from ${age_key_file}" >&2
      exit 1
    fi
    export AGE_PUBLIC_KEY
  }

  case "$subcmd" in
    add)
      local name="${2:-}"
      if [ -z "$name" ]; then
        echo "Usage: disinto secrets add <name>" >&2
        exit 1
      fi
      _secrets_ensure_age_key
      mkdir -p "$secrets_dir"
      # Prompt on stderr; read silently (-s) so the value never echoes.
      printf 'Enter value for %s: ' "$name" >&2
      local value
      IFS= read -rs value
      echo >&2
      if [ -z "$value" ]; then
        echo "Error: empty value" >&2
        exit 1
      fi
      local enc_path="${secrets_dir}/${name}.enc"
      if [ -f "$enc_path" ]; then
        printf 'Secret %s already exists. Overwrite? [y/N] ' "$name" >&2
        local confirm
        read -r confirm
        if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then
          echo "Aborted." >&2
          exit 1
        fi
      fi
      if ! printf '%s' "$value" | age -r "$AGE_PUBLIC_KEY" -o "$enc_path"; then
        echo "Error: encryption failed" >&2
        exit 1
      fi
      echo "Stored: ${enc_path}"
      ;;
    show)
      local name="${2:-}"
      if [ -n "$name" ]; then
        # Show individual secret: disinto secrets show <name>
        local enc_path="${secrets_dir}/${name}.enc"
        if [ ! -f "$enc_path" ]; then
          echo "Error: ${enc_path} not found" >&2
          exit 1
        fi
        if [ ! -f "$age_key_file" ]; then
          echo "Error: age key not found at ${age_key_file}" >&2
          exit 1
        fi
        age -d -i "$age_key_file" "$enc_path"
      else
        # Show all agent secrets: disinto secrets show
        if [ ! -f "$enc_file" ]; then
          echo "Error: ${enc_file} not found." >&2
          exit 1
        fi
        sops -d "$enc_file"
      fi
      ;;
    edit)
      if [ ! -f "$enc_file" ]; then
        echo "Error: ${enc_file} not found. Run 'disinto secrets migrate' first." >&2
        exit 1
      fi
      sops "$enc_file"
      ;;
    migrate)
      if [ ! -f "$env_file" ]; then
        echo "Error: ${env_file} not found — nothing to migrate." >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$env_file" "$enc_file"
      # Verify decryption works before deleting the plaintext original.
      if ! sops -d "$enc_file" >/dev/null 2>&1; then
        echo "Error: failed to verify .env.enc decryption" >&2
        rm -f "$enc_file"
        exit 1
      fi
      rm -f "$env_file"
      echo "Migrated: .env -> .env.enc (plaintext removed)"
      ;;
    edit-vault)
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found. Run 'disinto secrets migrate-vault' first." >&2
        exit 1
      fi
      sops "$vault_enc_file"
      ;;
    show-vault)
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found." >&2
        exit 1
      fi
      sops -d "$vault_enc_file"
      ;;
    migrate-vault)
      if [ ! -f "$vault_env_file" ]; then
        echo "Error: ${vault_env_file} not found — nothing to migrate." >&2
        echo "  Create .env.vault with vault secrets (GITHUB_TOKEN, deploy keys, etc.)" >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$vault_env_file" "$vault_enc_file"
      # Verify decryption works before removing plaintext
      if ! sops -d "$vault_enc_file" >/dev/null 2>&1; then
        echo "Error: failed to verify .env.vault.enc decryption" >&2
        rm -f "$vault_enc_file"
        exit 1
      fi
      rm -f "$vault_env_file"
      echo "Migrated: .env.vault -> .env.vault.enc (plaintext removed)"
      ;;
    *)
      cat <<EOF >&2
Usage: disinto secrets <command>

Individual secrets (secrets/<name>.enc):
  add <name>       Prompt for value, encrypt, store in secrets/<name>.enc
  show <name>      Decrypt and print an individual secret

Agent secrets (.env.enc):
  edit             Edit agent secrets (FORGE_TOKEN, CLAUDE_API_KEY, etc.)
  show             Show decrypted agent secrets (no argument)
  migrate          Encrypt .env -> .env.enc

Vault secrets (.env.vault.enc):
  edit-vault       Edit vault secrets (GITHUB_TOKEN, deploy keys, etc.)
  show-vault       Show decrypted vault secrets
  migrate-vault    Encrypt .env.vault -> .env.vault.enc
EOF
      exit 1
      ;;
  esac
}

# ── run command ──────────────────────────────────────────────────────────────
# Run a single action in an ephemeral runner container, with vault secrets
# decrypted into a temp env-file that exists only for the duration of the run.
disinto_run() {
  local action_id="${1:?Usage: disinto run <action-id>}"
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  local vault_enc="${FACTORY_ROOT}/.env.vault.enc"

  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    echo "  Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  fi
  if [ ! -f "$vault_enc" ]; then
    echo "Error: .env.vault.enc not found — create vault secrets first" >&2
    echo "  Run 'disinto secrets migrate-vault' after creating .env.vault" >&2
    exit 1
  fi
  if ! command -v sops &>/dev/null; then
    echo "Error: sops not found — required to decrypt vault secrets" >&2
    exit 1
  fi

  # Decrypt vault secrets to a temp file; the EXIT trap guarantees the
  # plaintext is removed on every exit path, including set -e aborts.
  local tmp_env
  tmp_env=$(mktemp /tmp/disinto-vault-XXXXXX)
  trap 'rm -f "$tmp_env"' EXIT
  if ! sops -d --output-type dotenv "$vault_enc" > "$tmp_env" 2>/dev/null; then
    rm -f "$tmp_env"
    echo "Error: failed to decrypt .env.vault.enc" >&2
    exit 1
  fi
  echo "Vault secrets decrypted to tmpfile"

  # Run the action; capture the exit code instead of letting set -e abort,
  # so cleanup and the status message below always happen.
  local rc=0
  docker compose -f "$compose_file" \
    run --rm --env-file "$tmp_env" \
    runner "$action_id" || rc=$?

  # Clean up — secrets gone (the trap is a backstop for earlier failures)
  rm -f "$tmp_env"
  echo "Run tmpfile removed"

  if [ "$rc" -eq 0 ]; then
    echo "Run action ${action_id} completed successfully"
  else
    echo "Run action ${action_id} failed (exit ${rc})" >&2
  fi
  return "$rc"
}

# ── Pre-build: download binaries to docker/agents/bin/ ───────────────────────
# This avoids network calls during docker build (needed for Docker-in-LXD
# builds). Downloads are pinned by version AND sha256 checksum.
# Returns 0 on success, 1 on failure.
download_agent_binaries() {
  local bin_dir="${FACTORY_ROOT}/docker/agents/bin"
  mkdir -p "$bin_dir"
  echo "Downloading agent binaries to ${bin_dir}..."

  # Download SOPS
  local sops_file="${bin_dir}/sops"
  if [ ! -f "$sops_file" ]; then
    echo "  Downloading SOPS v3.9.4..."
    # -f: fail on HTTP errors so a 404/500 body is never saved as the binary.
    if ! curl -fsL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.linux.amd64 -o "$sops_file"; then
      rm -f "$sops_file"
      echo "Error: failed to download SOPS" >&2
      return 1
    fi
  fi
  # Verify checksum; on mismatch remove the corrupt file so the next run
  # re-downloads instead of failing forever on a cached bad artifact.
  echo "  Verifying SOPS checksum..."
  if ! echo "5488e32bc471de7982ad895dd054bbab3ab91c417a118426134551e9626e4e85 ${sops_file}" | sha256sum -c - >/dev/null 2>&1; then
    rm -f "$sops_file"
    echo "Error: SOPS checksum verification failed" >&2
    return 1
  fi
  chmod +x "$sops_file"

  # Download tea CLI
  local tea_file="${bin_dir}/tea"
  if [ ! -f "$tea_file" ]; then
    echo "  Downloading tea CLI v0.9.2..."
    if ! curl -fsL https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64 -o "$tea_file"; then
      rm -f "$tea_file"
      echo "Error: failed to download tea CLI" >&2
      return 1
    fi
  fi
  echo "  Verifying tea CLI checksum..."
  if ! echo "be10cdf9a619e3c0f121df874960ed19b53e62d1c7036cf60313a28b5227d54d ${tea_file}" | sha256sum -c - >/dev/null 2>&1; then
    rm -f "$tea_file"
    echo "Error: tea CLI checksum verification failed" >&2
    return 1
  fi
  chmod +x "$tea_file"

  echo "Binaries downloaded and verified successfully"
  return 0
}

# ── up command ───────────────────────────────────────────────────────────────
# Start the full stack. Pre-downloads agent binaries, then (if .env.enc exists
# and no plaintext .env is present) decrypts secrets to a temporary .env for
# docker compose, removing it again afterwards.
disinto_up() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    echo "  Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  fi

  # Pre-build: download binaries to docker/agents/bin/ to avoid network calls
  # during docker build
  echo "── Pre-build: downloading agent binaries ────────────────────────"
  if ! download_agent_binaries; then
    echo "Error: failed to download agent binaries" >&2
    exit 1
  fi
  echo ""

  # Decrypt secrets to temp .env if SOPS available and .env.enc exists
  local tmp_env=""
  local enc_file="${FACTORY_ROOT}/.env.enc"
  local env_file="${FACTORY_ROOT}/.env"
  if [ -f "$enc_file" ] && command -v sops &>/dev/null && [ ! -f "$env_file" ]; then
    tmp_env="${env_file}"
    # Install the cleanup trap BEFORE decrypting: if sops fails mid-write,
    # set -e exits and the trap removes the partial plaintext .env instead
    # of leaving it behind.
    trap '[ -z "${tmp_env:-}" ] || rm -f "$tmp_env"' EXIT
    if ! sops -d --output-type dotenv "$enc_file" > "$tmp_env"; then
      echo "Error: failed to decrypt ${enc_file}" >&2
      exit 1
    fi
    echo "Decrypted secrets for compose"
  fi

  docker compose -f "$compose_file" up -d "$@"
  echo "Stack is up"

  # Clean up temp .env (also handled by the EXIT trap if compose fails)
  if [ -n "$tmp_env" ] && [ -f "$tmp_env" ]; then
    rm -f "$tmp_env"
    echo "Removed temporary .env"
  fi
}

# ── down command ─────────────────────────────────────────────────────────────
disinto_down() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$compose_file" down "$@"
  echo "Stack is down"
}

# ── logs command ─────────────────────────────────────────────────────────────
disinto_logs() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$compose_file" logs -f "$@"
}

# ── shell command ────────────────────────────────────────────────────────────
disinto_shell() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$compose_file" exec agents bash
}

# ── hire-an-agent command ────────────────────────────────────────────────────
# Creates a Forgejo user and .profile repo for an agent.
# Usage: disinto hire-an-agent <name> [--formula <formula>]
# disinto_hire_an_agent() is sourced from lib/hire-agent.sh

# ── release command ──────────────────────────────────────────────────────────
# disinto_release() is sourced from lib/release.sh

# ── ci-logs command ──────────────────────────────────────────────────────────
# Reads CI logs from the Woodpecker SQLite database.
# Usage: disinto ci-logs <pipeline-number> [--step <name>]
# Delegates to lib/ci-log-reader.py after validating the pipeline number.
disinto_ci_logs() {
  local pipeline_number="" step_name=""
  if [ $# -lt 1 ]; then
    echo "Error: pipeline number required" >&2
    echo "Usage: disinto ci-logs <pipeline-number> [--step <name>]" >&2
    exit 1
  fi

  # Parse arguments
  while [ $# -gt 0 ]; do
    case "$1" in
      --step|-s)
        # Guard: referencing "$2" with no value would trip set -u with an
        # opaque unbound-variable error instead of a usage message.
        if [ $# -lt 2 ]; then
          echo "Error: $1 requires an argument" >&2
          exit 1
        fi
        step_name="$2"
        shift 2
        ;;
      -*)
        echo "Unknown option: $1" >&2
        exit 1
        ;;
      *)
        if [ -z "$pipeline_number" ]; then
          pipeline_number="$1"
        else
          echo "Unexpected argument: $1" >&2
          exit 1
        fi
        shift
        ;;
    esac
  done

  if [ -z "$pipeline_number" ] || ! [[ "$pipeline_number" =~ ^[0-9]+$ ]]; then
    echo "Error: pipeline number must be a positive integer" >&2
    exit 1
  fi

  local log_reader="${FACTORY_ROOT}/lib/ci-log-reader.py"
  if [ ! -f "$log_reader" ]; then
    echo "Error: ci-log-reader.py not found at $log_reader" >&2
    exit 1
  fi

  if [ -n "$step_name" ]; then
    python3 "$log_reader" "$pipeline_number" --step "$step_name"
  else
    python3 "$log_reader" "$pipeline_number"
  fi
}

# ── agent command ────────────────────────────────────────────────────────────
# Manage agent state files (enable/disable agents). An agent is "enabled"
# iff state/.<agent>-active exists.
# Usage: disinto agent <subcommand> [agent-name]
#   disable <agent>    Remove state file to disable agent
#   enable <agent>     Create state file to enable agent
#   disable --all      Disable all agents
#   enable --all       Enable all agents
#   status             Show enabled/disabled agents
disinto_agent() {
  local subcmd="${1:-}"
  local state_dir="${FACTORY_ROOT}/state"
  local all_agents=("dev" "reviewer" "gardener" "architect" "planner" "predictor")

  # Ensure state directory exists
  mkdir -p "$state_dir"

  # Helper: exit 1 with a message unless $1 is a known agent name.
  # (Shared by enable/disable to avoid duplicating the validation loop.)
  _agent_validate() {
    local a
    for a in "${all_agents[@]}"; do
      [ "$a" = "$1" ] && return 0
    done
    echo "Error: unknown agent '$1'" >&2
    echo "Valid agents: ${all_agents[*]}" >&2
    exit 1
  }

  case "$subcmd" in
    disable)
      local agent="${2:-}"
      if [ -z "$agent" ]; then
        echo "Error: agent name required" >&2
        echo "Usage: disinto agent disable <agent>" >&2
        echo "       disinto agent disable --all" >&2
        exit 1
      fi
      if [ "$agent" = "--all" ]; then
        echo "Disabling all agents..."
        local a
        for a in "${all_agents[@]}"; do
          local state_file="${state_dir}/.${a}-active"
          if [ -f "$state_file" ]; then
            rm -f "$state_file"
            echo "  Disabled: ${a}"
          else
            echo "  Already disabled: ${a}"
          fi
        done
      else
        _agent_validate "$agent"
        local state_file="${state_dir}/.${agent}-active"
        if [ -f "$state_file" ]; then
          rm -f "$state_file"
          echo "Disabled: ${agent}"
        else
          echo "Already disabled: ${agent}"
        fi
      fi
      ;;
    enable)
      local agent="${2:-}"
      if [ -z "$agent" ]; then
        echo "Error: agent name required" >&2
        echo "Usage: disinto agent enable <agent>" >&2
        echo "       disinto agent enable --all" >&2
        exit 1
      fi
      if [ "$agent" = "--all" ]; then
        echo "Enabling all agents..."
        local a
        for a in "${all_agents[@]}"; do
          local state_file="${state_dir}/.${a}-active"
          if [ -f "$state_file" ]; then
            echo "  Already enabled: ${a}"
          else
            touch "$state_file"
            echo "  Enabled: ${a}"
          fi
        done
      else
        _agent_validate "$agent"
        local state_file="${state_dir}/.${agent}-active"
        if [ -f "$state_file" ]; then
          echo "Already enabled: ${agent}"
        else
          touch "$state_file"
          echo "Enabled: ${agent}"
        fi
      fi
      ;;
    status)
      echo "Agent Status"
      echo "============"
      printf "%-12s %s\n" "AGENT" "STATUS"
      printf "%-12s %s\n" "------" "------"
      local a
      for a in "${all_agents[@]}"; do
        local state_file="${state_dir}/.${a}-active"
        local status
        if [ -f "$state_file" ]; then
          status="enabled"
        else
          status="disabled"
        fi
        printf "%-12s %s\n" "$a" "$status"
      done
      ;;
    *)
      cat <<EOF >&2
Usage: disinto agent <subcommand>

Manage agent state files (enable/disable agents):
  disable <agent>   Remove state file to disable agent
  enable <agent>    Create state file to enable agent
  disable --all     Disable all agents
  enable --all      Enable all agents
  status            Show which agents are enabled/disabled

Valid agents: dev, reviewer, gardener, architect, planner, predictor
EOF
      exit 1
      ;;
  esac
}

# ── edge command ─────────────────────────────────────────────────────────────
# Manage edge tunnel registrations (reverse SSH tunnels to edge hosts).
# Usage: disinto edge <subcommand> [options]
#   register [project]     Register a new tunnel (generates keypair if needed)
#   deregister <project>   Remove a tunnel registration
#   status                 Show registered tunnels
disinto_edge() {
  local subcmd="${1:-}"
  local env_file="${FACTORY_ROOT}/.env"
  # Edge host precedence: --edge-host flag (parsed below, overwrites this)
  # > EDGE_HOST env var > built-in default.
  local edge_host="${EDGE_HOST:-edge.disinto.ai}"
  shift || true

  # Helper: parse --edge-host from the remaining args; any first non-flag
  # argument is returned as the project name on stdout. Fixes the bug where
  # a leading --edge-host flag was mistaken for the project name.
  _edge_parse_args() {
    local project=""
    while [ $# -gt 0 ]; do
      case "$1" in
        --edge-host)
          if [ $# -lt 2 ]; then
            echo "Error: --edge-host requires an argument" >&2
            exit 1
          fi
          edge_host="$2"
          shift 2
          ;;
        *)
          if [ -z "$project" ]; then
            project="$1"
          fi
          shift
          ;;
      esac
    done
    printf '%s' "$project"
  }

  case "$subcmd" in
    register)
      local project
      project=$(_edge_parse_args "$@")
      if [ -z "$project" ]; then
        echo "Error: project name required" >&2
        echo "Usage: disinto edge register <project> [--edge-host <host>]" >&2
        exit 1
      fi
      # Validate project name (it is interpolated into the ssh command below)
      if ! [[ "$project" =~ ^[a-zA-Z0-9_-]+$ ]]; then
        echo "Error: invalid project name (use alphanumeric, hyphens, underscores)" >&2
        exit 1
      fi

      # Check for tunnel keypair; generate once and reuse.
      local secrets_dir="${FACTORY_ROOT}/secrets"
      local tunnel_key="${secrets_dir}/tunnel_key"
      local tunnel_pubkey="${tunnel_key}.pub"
      if [ ! -f "$tunnel_pubkey" ]; then
        echo "Generating tunnel keypair..."
        mkdir -p "$secrets_dir"
        chmod 700 "$secrets_dir"
        ssh-keygen -t ed25519 -f "$tunnel_key" -N "" -C "edge-tunnel@${project}" 2>/dev/null
        chmod 600 "$tunnel_key" "$tunnel_pubkey"
        echo "Generated: ${tunnel_pubkey}"
      fi

      # Read pubkey (single line, remove trailing newline)
      local pubkey
      pubkey=$(tr -d '\n' < "$tunnel_pubkey")

      # SSH to edge host and register; the remote side replies with JSON.
      echo "Registering tunnel for ${project} on ${edge_host}..."
      local response
      response=$(ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes \
        "disinto-register@${edge_host}" \
        "register ${project} ${pubkey}" 2>&1) || {
        echo "Error: failed to register tunnel" >&2
        echo "Response: ${response}" >&2
        exit 1
      }

      # Parse response and write to .env
      local port fqdn
      port=$(echo "$response" | jq -r '.port // empty' 2>/dev/null) || port=""
      fqdn=$(echo "$response" | jq -r '.fqdn // empty' 2>/dev/null) || fqdn=""
      if [ -z "$port" ] || [ -z "$fqdn" ]; then
        echo "Error: invalid response from edge host" >&2
        echo "Response: ${response}" >&2
        exit 1
      fi

      # Write to .env (strip existing entries first to avoid duplicates)
      local tmp_env
      tmp_env=$(mktemp)
      grep -Ev "^EDGE_TUNNEL_(HOST|PORT|FQDN)=" "$env_file" > "$tmp_env" 2>/dev/null || true
      mv "$tmp_env" "$env_file"
      echo "EDGE_TUNNEL_HOST=${edge_host}" >> "$env_file"
      echo "EDGE_TUNNEL_PORT=${port}" >> "$env_file"
      echo "EDGE_TUNNEL_FQDN=${fqdn}" >> "$env_file"

      echo "Registered: ${project}"
      echo "  Port: ${port}"
      echo "  FQDN: ${fqdn}"
      echo "  Saved to: ${env_file}"
      ;;
    deregister)
      local project
      project=$(_edge_parse_args "$@")
      if [ -z "$project" ]; then
        echo "Error: project name required" >&2
        echo "Usage: disinto edge deregister <project> [--edge-host <host>]" >&2
        exit 1
      fi

      # SSH to edge host and deregister
      echo "Deregistering tunnel for ${project} on ${edge_host}..."
      local response
      response=$(ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes \
        "disinto-register@${edge_host}" \
        "deregister ${project}" 2>&1) || {
        echo "Error: failed to deregister tunnel" >&2
        echo "Response: ${response}" >&2
        exit 1
      }

      # Remove from .env if present
      if [ -f "$env_file" ]; then
        local tmp_env
        tmp_env=$(mktemp)
        grep -Ev "^EDGE_TUNNEL_(HOST|PORT|FQDN)=" "$env_file" > "$tmp_env" 2>/dev/null || true
        mv "$tmp_env" "$env_file"
      fi
      echo "Deregistered: ${project}"
      ;;
    status)
      # Only --edge-host is meaningful here; any stray positional is ignored.
      _edge_parse_args "$@" >/dev/null

      # SSH to edge host and get status (JSON list of tunnels)
      echo "Checking tunnel status on ${edge_host}..."
      local response
      response=$(ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes \
        "disinto-register@${edge_host}" \
        "list" 2>&1) || {
        echo "Error: failed to get status" >&2
        echo "Response: ${response}" >&2
        exit 1
      }

      # Parse and display
      local tunnels
      tunnels=$(echo "$response" | jq -r '.tunnels // [] | length' 2>/dev/null) || tunnels="0"
      if [ "$tunnels" = "0" ]; then
        echo "No tunnels registered"
      else
        echo "Registered tunnels:"
        echo "$response" | jq -r '.tunnels[] | "  \(.name): port=\(.port) fqdn=\(.fqdn)"'
      fi
      ;;
    *)
      cat <<EOF >&2
Usage: disinto edge <subcommand> [options]

Manage edge tunnel registrations:
  register [project]     Register a new tunnel (generates keypair if needed)
  deregister <project>   Remove a tunnel registration
  status                 Show registered tunnels

Options:
  --edge-host <host>     Edge host FQDN (default: edge.disinto.ai or EDGE_HOST env)

Examples:
  disinto edge register myproject
  disinto edge register myproject --edge-host custom.example.com
  disinto edge deregister myproject
  disinto edge status
EOF
      exit 1
      ;;
  esac
}

# ── Main dispatch ────────────────────────────────────────────────────────────
case "${1:-}" in
  init)          shift; disinto_init "$@" ;;
  up)            shift; disinto_up "$@" ;;
  down)          shift; disinto_down "$@" ;;
  logs)          shift; disinto_logs "$@" ;;
  shell)         shift; disinto_shell ;;
  status)        shift; disinto_status "$@" ;;
  secrets)       shift; disinto_secrets "$@" ;;
  run)           shift; disinto_run "$@" ;;
  ci-logs)       shift; disinto_ci_logs "$@" ;;
  release)       shift; disinto_release "$@" ;;
  hire-an-agent) shift; disinto_hire_an_agent "$@" ;;
  agent)         shift; disinto_agent "$@" ;;
  edge)          shift; disinto_edge "$@" ;;
  -h|--help)     usage ;;
  *)             usage ;;
esac