1716 lines
57 KiB
Bash
Executable file
1716 lines
57 KiB
Bash
Executable file
#!/usr/bin/env bash
|
|
# =============================================================================
|
|
# disinto — CLI entry point for the disinto code factory
|
|
#
|
|
# Commands:
|
|
# disinto init <repo-url> [options] Bootstrap a new project
|
|
# disinto up Start the full stack (docker compose)
|
|
# disinto down Stop the full stack
|
|
# disinto logs [service] Tail service logs
|
|
# disinto shell Shell into the agent container
|
|
# disinto status Show factory status
|
|
# disinto secrets <subcommand> Manage encrypted secrets
|
|
# disinto vault-run <action-id> Run action in ephemeral vault container
|
|
#
|
|
# Usage:
|
|
# disinto init https://github.com/user/repo
|
|
# disinto init user/repo --branch main --ci-id 3
|
|
# disinto init user/repo --bare (bare-metal, no compose)
|
|
# disinto up
|
|
# disinto down
|
|
# disinto status
|
|
# =============================================================================
|
|
set -euo pipefail

# FACTORY_ROOT = parent of this script's directory (repo root).
# lib/env.sh supplies shared environment/config helpers used throughout.
FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
source "${FACTORY_ROOT}/lib/env.sh"
|
|
|
|
# ── Helpers ──────────────────────────────────────────────────────────────────
|
|
|
|
usage() {
  # Print the CLI help text to stdout and exit non-zero.
  # Called for unknown commands / missing arguments, so it always exits 1.
  cat <<EOF
disinto — autonomous code factory CLI

Usage:
  disinto init <repo-url> [options]   Bootstrap a new project
  disinto up                          Start the full stack (docker compose)
  disinto down                        Stop the full stack
  disinto logs [service]              Tail service logs
  disinto shell                       Shell into the agent container
  disinto status                      Show factory status
  disinto secrets <subcommand>        Manage encrypted secrets
  disinto vault-run <action-id>       Run action in ephemeral vault container

Init options:
  --branch <name>       Primary branch (default: auto-detect)
  --repo-root <path>    Local clone path (default: ~/name)
  --ci-id <n>           Woodpecker CI repo ID (default: 0 = no CI)
  --forge-url <url>     Forge base URL (default: http://localhost:3000)
  --bare                Skip compose generation (bare-metal setup)
  --yes                 Skip confirmation prompts
EOF
  exit 1
}
|
|
|
|
# Extract org/repo slug from various URL formats.
|
|
# Accepts: https://github.com/user/repo, https://codeberg.org/user/repo,
|
|
# http://localhost:3000/user/repo, user/repo, *.git
|
|
parse_repo_slug() {
  # Normalize a repo reference to "org/repo" and print it on stdout.
  # Accepts https://host/org/repo[.git][/], http://host:port/org/repo,
  # and bare org/repo[.git][/]. Exits 1 with a message on stderr when the
  # input cannot be reduced to org/repo.
  local url="$1"
  url="${url#https://}"
  url="${url#http://}"
  # Fix: strip a trailing slash BEFORE the .git suffix. Previously the order
  # was reversed, so ".../repo.git/" kept its ".git" (the slash hid the
  # suffix) and passed validation as the wrong slug "org/repo.git".
  url="${url%/}"
  url="${url%.git}"
  # Strip any hostname (anything before the first / when three path
  # components remain, i.e. host[:port]/org/repo).
  if [[ "$url" =~ ^[a-zA-Z0-9._:-]+/[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+ ]]; then
    url="${url#*/}" # strip host part
  fi
  if [[ ! "$url" =~ ^[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+$ ]]; then
    echo "Error: invalid repo URL — expected https://host/org/repo or org/repo" >&2
    exit 1
  fi
  printf '%s' "$url"
}
|
|
|
|
# Build a clone-able URL from a slug and forge URL.
|
|
clone_url_from_slug() {
  # Compose the git clone URL <forge>/<slug>.git for a given org/repo slug.
  # $2 overrides the forge base URL; otherwise FORGE_URL or the local default.
  local slug="$1"
  local base="${2:-${FORGE_URL:-http://localhost:3000}}"
  printf '%s/%s.git' "$base" "$slug"
}
|
|
|
|
# Ensure an age key exists; generate one if missing.
|
|
# Exports AGE_PUBLIC_KEY on success.
|
|
ensure_age_key() {
  # Ensure an age private key exists at ~/.config/sops/age/keys.txt,
  # generating one with age-keygen if missing.
  # On success: exports AGE_PUBLIC_KEY (derived via `age-keygen -y`), returns 0.
  # Returns 1 when age-keygen is unavailable or the public key can't be derived.
  local key_dir="${HOME}/.config/sops/age"
  local key_file="${key_dir}/keys.txt"

  # Reuse an existing key file: derive and export its public recipient.
  if [ -f "$key_file" ]; then
    AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
    [ -n "$AGE_PUBLIC_KEY" ] || return 1
    export AGE_PUBLIC_KEY
    return 0
  fi

  if ! command -v age-keygen &>/dev/null; then
    return 1
  fi

  mkdir -p "$key_dir"
  age-keygen -o "$key_file" 2>/dev/null
  # Private key material — restrict to owner.
  chmod 600 "$key_file"
  AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
  [ -n "$AGE_PUBLIC_KEY" ] || return 1
  export AGE_PUBLIC_KEY
  echo "Generated age key: ${key_file}"
}
|
|
|
|
# Write .sops.yaml pinning the age recipient for .env.enc files.
|
|
write_sops_yaml() {
  # Write ${FACTORY_ROOT}/.sops.yaml pinning the given age recipient ($1)
  # as the encryption key for .env.enc / .env.vault.enc files.
  local recipient="$1"
  {
    printf 'creation_rules:\n'
    printf '  - path_regex: \\.env(\\.vault)?\\.enc$\n'
    printf '    age: "%s"\n' "$recipient"
  } > "${FACTORY_ROOT}/.sops.yaml"
}
|
|
|
|
# Encrypt a dotenv file to .env.enc using SOPS + age.
|
|
# Usage: encrypt_env_file <input> <output>
|
|
encrypt_env_file() {
  # Encrypt dotenv file $1 to $2 using SOPS (recipients come from .sops.yaml).
  # NOTE(review): the redirection creates/truncates "$output" even when sops
  # fails — callers must rely on the exit status, not the file's existence.
  local input="$1" output="$2"
  sops -e --input-type dotenv --output-type dotenv "$input" > "$output"
}
|
|
|
|
# Store secrets into .env.enc (encrypted) if SOPS + age available, else .env (plaintext).
|
|
write_secrets_encrypted() {
  # Encrypt ${FACTORY_ROOT}/.env to .env.enc when sops + age are usable,
  # then remove the plaintext. Otherwise (or when .env is absent) fall
  # through to a warning and keep plaintext. Always returns 0 — callers
  # treat the plaintext fallback as non-fatal.
  local env_file="${FACTORY_ROOT}/.env"
  local enc_file="${FACTORY_ROOT}/.env.enc"

  if command -v sops &>/dev/null && command -v age-keygen &>/dev/null; then
    if ensure_age_key; then
      # Write .sops.yaml if missing (pins AGE_PUBLIC_KEY exported above)
      if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
        write_sops_yaml "$AGE_PUBLIC_KEY"
      fi

      # Encrypt the plaintext .env to .env.enc
      if [ -f "$env_file" ]; then
        encrypt_env_file "$env_file" "$enc_file"
        rm -f "$env_file"
        echo "Secrets encrypted to .env.enc (plaintext .env removed)"
        return 0
      fi
    fi
  fi

  # Fallback: keep plaintext .env
  # NOTE(review): this warning also fires when sops/age ARE available but
  # .env does not exist — slightly misleading message in that case.
  echo "Warning: sops/age not available — secrets stored in plaintext .env" >&2
  return 0
}
|
|
|
|
# Host directory for persistent Forgejo data when running in bare-metal
# (standalone `docker run`) mode; compose mode uses a named volume instead.
FORGEJO_DATA_DIR="${HOME}/.disinto/forgejo"
|
|
|
|
# Generate docker-compose.yml in the factory root.
|
|
generate_compose() {
  # Generate docker-compose.yml in FACTORY_ROOT, then patch in the host
  # Claude CLI binary path and the Forgejo host port mapping.
  # $1 = host port for Forgejo (default 3000).
  local forge_port="${1:-3000}"
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"

  # Quoted heredoc delimiter: ${...} below are NOT expanded here — they are
  # written literally for docker compose to interpolate from .env at up time.
  cat > "$compose_file" <<'COMPOSEEOF'
# docker-compose.yml — generated by disinto init
# Brings up Forgejo, Woodpecker, and the agent runtime.

services:
  forgejo:
    image: codeberg.org/forgejo/forgejo:11.0
    restart: unless-stopped
    security_opt:
      - apparmor=unconfined
    volumes:
      - forgejo-data:/data
    environment:
      FORGEJO__database__DB_TYPE: sqlite3
      FORGEJO__server__ROOT_URL: http://forgejo:3000/
      FORGEJO__server__HTTP_PORT: "3000"
      FORGEJO__security__INSTALL_LOCK: "true"
      FORGEJO__service__DISABLE_REGISTRATION: "true"
    networks:
      - disinto-net

  woodpecker:
    image: woodpeckerci/woodpecker-server:v3
    restart: unless-stopped
    security_opt:
      - apparmor=unconfined
    ports:
      - "8000:8000"
    volumes:
      - woodpecker-data:/var/lib/woodpecker
    environment:
      WOODPECKER_FORGEJO: "true"
      WOODPECKER_FORGEJO_URL: http://forgejo:3000
      WOODPECKER_FORGEJO_CLIENT: ${WP_FORGEJO_CLIENT:-}
      WOODPECKER_FORGEJO_SECRET: ${WP_FORGEJO_SECRET:-}
      WOODPECKER_HOST: http://woodpecker:8000
      WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
      WOODPECKER_DATABASE_DRIVER: sqlite3
      WOODPECKER_DATABASE_DATASOURCE: /var/lib/woodpecker/woodpecker.sqlite
    depends_on:
      - forgejo
    networks:
      - disinto-net

  woodpecker-agent:
    image: woodpeckerci/woodpecker-agent:v3
    restart: unless-stopped
    security_opt:
      - apparmor=unconfined
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      WOODPECKER_SERVER: woodpecker:9000
      WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
      WOODPECKER_MAX_WORKFLOWS: 1
    depends_on:
      - woodpecker
    networks:
      - disinto-net

  agents:
    build: ./docker/agents
    restart: unless-stopped
    security_opt:
      - apparmor=unconfined
    volumes:
      - agent-data:/home/agent/data
      - project-repos:/home/agent/repos
      - ./:/home/agent/disinto:ro
      - ${HOME}/.claude:/home/agent/.claude
      - ${HOME}/.claude.json:/home/agent/.claude.json:ro
      - CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
    environment:
      FORGE_URL: http://forgejo:3000
      WOODPECKER_SERVER: http://woodpecker:8000
      DISINTO_CONTAINER: "1"
    env_file:
      - .env
    # IMPORTANT: agents get .env only (forge tokens, CI tokens, config).
    # Vault-only secrets (GITHUB_TOKEN, CLAWHUB_TOKEN, deploy keys) live in
    # .env.vault.enc and are NEVER injected here — only the vault-runner
    # container receives them at fire time (AD-006, #745).
    depends_on:
      - forgejo
      - woodpecker
    networks:
      - disinto-net

  vault-runner:
    build: ./docker/agents
    profiles: ["vault"]
    security_opt:
      - apparmor=unconfined
    volumes:
      - ./vault:/home/agent/disinto/vault
      - ./lib:/home/agent/disinto/lib:ro
      - ./formulas:/home/agent/disinto/formulas:ro
    environment:
      FORGE_URL: http://forgejo:3000
      DISINTO_CONTAINER: "1"
    # env_file set at runtime by: disinto vault-run --env-file <tmpfile>
    entrypoint: ["bash", "/home/agent/disinto/vault/vault-run-action.sh"]
    networks:
      - disinto-net

  # Staging deployment slot — activated by Woodpecker staging pipeline (#755).
  # Profile-gated: only starts when explicitly targeted by deploy commands.
  # Customize image/ports/volumes for your project after init.
  staging:
    image: alpine:3
    profiles: ["staging"]
    security_opt:
      - apparmor=unconfined
    environment:
      DEPLOY_ENV: staging
    networks:
      - disinto-net
    command: ["echo", "staging slot — replace with project image"]

volumes:
  forgejo-data:
  woodpecker-data:
  agent-data:
  project-repos:

networks:
  disinto-net:
    driver: bridge
COMPOSEEOF

  # Patch the Claude CLI binary path — resolve from host PATH at init time.
  # NOTE(review): `sed -i` is GNU-style; BSD/macOS sed needs `sed -i ''`.
  # A claude path containing '|' or '&' would also break the replacement —
  # unlikely for an installed binary, but unguarded.
  local claude_bin
  claude_bin="$(command -v claude 2>/dev/null || true)"
  if [ -n "$claude_bin" ]; then
    # Resolve symlinks to get the real binary path
    claude_bin="$(readlink -f "$claude_bin")"
    sed -i "s|CLAUDE_BIN_PLACEHOLDER|${claude_bin}|" "$compose_file"
  else
    echo "Warning: claude CLI not found in PATH — update docker-compose.yml volumes manually" >&2
    sed -i "s|CLAUDE_BIN_PLACEHOLDER|/usr/local/bin/claude|" "$compose_file"
  fi

  # Patch the forgejo port mapping into the file so the host can reach the
  # forge during init (appended right after the forgejo image line).
  if [ "$forge_port" != "3000" ]; then
    # Add port mapping to forgejo service so it's reachable from host during init
    sed -i "/image: codeberg\.org\/forgejo\/forgejo:11\.0/a\\    ports:\\n      - \"${forge_port}:3000\"" "$compose_file"
  else
    sed -i "/image: codeberg\.org\/forgejo\/forgejo:11\.0/a\\    ports:\\n      - \"3000:3000\"" "$compose_file"
  fi

  echo "Created: ${compose_file}"
}
|
|
|
|
# Generate docker/agents/ files if they don't already exist.
|
|
generate_agent_docker() {
  # Make sure docker/agents/ exists and warn about any missing build inputs
  # (Dockerfile, entrypoint.sh) that are expected to ship with the repo.
  local agents_dir="${FACTORY_ROOT}/docker/agents"
  local expected
  mkdir -p "$agents_dir"

  for expected in Dockerfile entrypoint.sh; do
    if [ ! -f "${agents_dir}/${expected}" ]; then
      echo "Warning: docker/agents/${expected} not found — expected in repo" >&2
    fi
  done
}
|
|
|
|
# Generate template .woodpecker/ deployment pipeline configs in a project repo.
|
|
# Creates staging.yml and production.yml alongside the project's existing CI config.
|
|
# These pipelines trigger on Woodpecker's deployment event with environment filters.
|
|
generate_deploy_pipelines() {
  # Write template .woodpecker/{staging,production}.yml deployment pipelines
  # into project repo $1. Existing files are never overwritten.
  # NOTE(review): $2 (project_name) is currently unused — kept for call-site
  # compatibility; confirm before removing.
  # Both pipelines trigger on Woodpecker's "deployment" event with an
  # environment filter, and are fired via the promote API by vault-runner.
  local repo_root="$1" project_name="$2"
  local wp_dir="${repo_root}/.woodpecker"

  mkdir -p "$wp_dir"

  # Skip if deploy pipelines already exist
  if [ -f "${wp_dir}/staging.yml" ] && [ -f "${wp_dir}/production.yml" ]; then
    echo "Deploy: .woodpecker/{staging,production}.yml (already exist)"
    return
  fi

  if [ ! -f "${wp_dir}/staging.yml" ]; then
    # Quoted delimiter: ${CI_*} placeholders are expanded by Woodpecker,
    # not by this script.
    cat > "${wp_dir}/staging.yml" <<'STAGINGEOF'
# .woodpecker/staging.yml — Staging deployment pipeline
# Triggered by vault-runner via Woodpecker promote API.
# Human approves promotion in vault → vault-runner calls promote → this runs.

when:
  event: deployment
  environment: staging

steps:
  - name: deploy-staging
    image: docker:27
    commands:
      - echo "Deploying to staging environment..."
      - echo "Pipeline ${CI_PIPELINE_NUMBER} promoted from CI #${CI_PIPELINE_PARENT}"
      # Pull the image built by CI and deploy to staging
      # Customize these commands for your project:
      # - docker compose -f docker-compose.yml --profile staging up -d
      - echo "Staging deployment complete"

  - name: verify-staging
    image: alpine:3
    commands:
      - echo "Verifying staging deployment..."
      # Add health checks, smoke tests, or integration tests here:
      # - curl -sf http://staging:8080/health || exit 1
      - echo "Staging verification complete"
STAGINGEOF
    echo "Created: ${wp_dir}/staging.yml"
  fi

  if [ ! -f "${wp_dir}/production.yml" ]; then
    cat > "${wp_dir}/production.yml" <<'PRODUCTIONEOF'
# .woodpecker/production.yml — Production deployment pipeline
# Triggered by vault-runner via Woodpecker promote API.
# Human approves promotion in vault → vault-runner calls promote → this runs.

when:
  event: deployment
  environment: production

steps:
  - name: deploy-production
    image: docker:27
    commands:
      - echo "Deploying to production environment..."
      - echo "Pipeline ${CI_PIPELINE_NUMBER} promoted from staging"
      # Pull the verified image and deploy to production
      # Customize these commands for your project:
      # - docker compose -f docker-compose.yml up -d
      - echo "Production deployment complete"

  - name: verify-production
    image: alpine:3
    commands:
      - echo "Verifying production deployment..."
      # Add production health checks here:
      # - curl -sf http://production:8080/health || exit 1
      - echo "Production verification complete"
PRODUCTIONEOF
    echo "Created: ${wp_dir}/production.yml"
  fi
}
|
|
|
|
# Check whether compose mode is active (docker-compose.yml exists).
|
|
is_compose_mode() {
  # Compose mode is signalled by the generated docker-compose.yml in the
  # factory root; its exit status is this function's return value.
  [[ -f "${FACTORY_ROOT}/docker-compose.yml" ]]
}
|
|
|
|
# Provision or connect to a local Forgejo instance.
|
|
# Creates admin + bot users, generates API tokens, stores in .env.
|
|
# When $DISINTO_BARE is set, uses standalone docker run; otherwise uses compose.
|
|
setup_forge() {
  # Provision (or connect to) a local Forgejo instance:
  #   1. start Forgejo — `docker run` in bare mode, `docker compose` otherwise
  #   2. wait for the HTTP API, then for the database to accept writes
  #   3. create an admin user + API token
  #   4. create one bot account + token per agent role, persisting tokens
  #      to ${FACTORY_ROOT}/.env and exporting them
  #   5. create the org/repo and add every bot as a write collaborator
  # $1 = forge base URL, $2 = org/repo slug. Exits 1 on any fatal failure.
  local forge_url="$1"
  local repo_slug="$2"
  local use_bare="${DISINTO_BARE:-false}"

  echo ""
  echo "── Forge setup ────────────────────────────────────────"

  # Helper: run a command inside the Forgejo container
  _forgejo_exec() {
    if [ "$use_bare" = true ]; then
      docker exec -u git disinto-forgejo "$@"
    else
      docker compose -f "${FACTORY_ROOT}/docker-compose.yml" exec -T -u git forgejo "$@"
    fi
  }

  # Check if Forgejo is already running
  if curl -sf --max-time 5 "${forge_url}/api/v1/version" >/dev/null 2>&1; then
    echo "Forgejo: ${forge_url} (already running)"
  else
    echo "Forgejo not reachable at ${forge_url}"
    echo "Starting Forgejo via Docker..."

    if ! command -v docker &>/dev/null; then
      echo "Error: docker not found — needed to provision Forgejo" >&2
      echo " Install Docker or start Forgejo manually at ${forge_url}" >&2
      exit 1
    fi

    # Extract port from forge_url.
    # NOTE(review): when the URL carries no explicit port, sed leaves the
    # whole URL in forge_port (it only rewrites on match), so the :-3000
    # fallback never fires — confirm callers always pass a port.
    local forge_port
    forge_port=$(printf '%s' "$forge_url" | sed -E 's|.*:([0-9]+)/?$|\1|')
    forge_port="${forge_port:-3000}"

    if [ "$use_bare" = true ]; then
      # Bare-metal mode: standalone docker run (reuses a stopped container
      # named disinto-forgejo if one exists)
      mkdir -p "${FORGEJO_DATA_DIR}"

      if docker ps -a --format '{{.Names}}' | grep -q '^disinto-forgejo$'; then
        docker start disinto-forgejo >/dev/null 2>&1 || true
      else
        docker run -d \
          --name disinto-forgejo \
          --restart unless-stopped \
          -p "${forge_port}:3000" \
          -p 2222:22 \
          -v "${FORGEJO_DATA_DIR}:/data" \
          -e "FORGEJO__database__DB_TYPE=sqlite3" \
          -e "FORGEJO__server__ROOT_URL=${forge_url}/" \
          -e "FORGEJO__server__HTTP_PORT=3000" \
          -e "FORGEJO__service__DISABLE_REGISTRATION=true" \
          codeberg.org/forgejo/forgejo:11.0
      fi
    else
      # Compose mode: start Forgejo via docker compose
      docker compose -f "${FACTORY_ROOT}/docker-compose.yml" up -d forgejo
    fi

    # Wait for Forgejo to become healthy (up to 60 × 1s polls)
    echo -n "Waiting for Forgejo to start"
    local retries=0
    while ! curl -sf --max-time 3 "${forge_url}/api/v1/version" >/dev/null 2>&1; do
      retries=$((retries + 1))
      if [ "$retries" -gt 60 ]; then
        echo ""
        echo "Error: Forgejo did not become ready within 60s" >&2
        exit 1
      fi
      echo -n "."
      sleep 1
    done
    echo " ready"
  fi

  # Wait for Forgejo database to accept writes (API may be ready before DB is)
  echo -n "Waiting for Forgejo database"
  local db_ready=false
  for _i in $(seq 1 30); do
    if _forgejo_exec forgejo admin user list >/dev/null 2>&1; then
      db_ready=true
      break
    fi
    echo -n "."
    sleep 1
  done
  echo ""
  if [ "$db_ready" != true ]; then
    echo "Error: Forgejo database not ready after 30s" >&2
    exit 1
  fi

  # Create admin user if it doesn't exist.
  # NOTE(review): a fresh random password is generated on EVERY run; if the
  # admin already exists from a previous run, the basic-auth token calls
  # below will use the wrong password and fail — re-running init against an
  # existing instance likely dies at "failed to obtain admin API token".
  local admin_user="disinto-admin"
  local admin_pass
  admin_pass="admin-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"

  if ! curl -sf --max-time 5 "${forge_url}/api/v1/users/${admin_user}" >/dev/null 2>&1; then
    echo "Creating admin user: ${admin_user}"
    local create_output
    if ! create_output=$(_forgejo_exec forgejo admin user create \
        --admin \
        --username "${admin_user}" \
        --password "${admin_pass}" \
        --email "admin@disinto.local" \
        --must-change-password=false 2>&1); then
      echo "Error: failed to create admin user '${admin_user}':" >&2
      echo " ${create_output}" >&2
      exit 1
    fi
    # Forgejo 11.x ignores --must-change-password=false on create;
    # explicitly clear the flag so basic-auth token creation works.
    _forgejo_exec forgejo admin user change-password \
      --username "${admin_user}" \
      --password "${admin_pass}" \
      --must-change-password=false

    # Verify admin user was actually created
    if ! curl -sf --max-time 5 "${forge_url}/api/v1/users/${admin_user}" >/dev/null 2>&1; then
      echo "Error: admin user '${admin_user}' not found after creation" >&2
      exit 1
    fi
  fi

  # Get or create admin token (basic auth is required for token endpoints)
  local admin_token
  admin_token=$(curl -sf -X POST \
    -u "${admin_user}:${admin_pass}" \
    -H "Content-Type: application/json" \
    "${forge_url}/api/v1/users/${admin_user}/tokens" \
    -d '{"name":"disinto-admin-token","scopes":["all"]}' 2>/dev/null \
    | jq -r '.sha1 // empty') || admin_token=""

  if [ -z "$admin_token" ]; then
    # Token might already exist — try listing.
    # NOTE(review): Forgejo/Gitea normally reveal a token's sha1 only at
    # creation time; confirm the list endpoint actually returns a usable
    # sha1 here.
    admin_token=$(curl -sf \
      -u "${admin_user}:${admin_pass}" \
      "${forge_url}/api/v1/users/${admin_user}/tokens" 2>/dev/null \
      | jq -r '.[0].sha1 // empty') || admin_token=""
  fi

  if [ -z "$admin_token" ]; then
    echo "Error: failed to obtain admin API token" >&2
    exit 1
  fi

  # Create bot users and tokens
  # Each agent gets its own Forgejo account for identity and audit trail (#747).
  # Map: bot-username -> env-var-name for the token
  local -A bot_token_vars=(
    [dev-bot]="FORGE_TOKEN"
    [review-bot]="FORGE_REVIEW_TOKEN"
    [planner-bot]="FORGE_PLANNER_TOKEN"
    [gardener-bot]="FORGE_GARDENER_TOKEN"
    [vault-bot]="FORGE_VAULT_TOKEN"
    [supervisor-bot]="FORGE_SUPERVISOR_TOKEN"
    [predictor-bot]="FORGE_PREDICTOR_TOKEN"
    [action-bot]="FORGE_ACTION_TOKEN"
  )

  local env_file="${FACTORY_ROOT}/.env"
  local bot_user bot_pass token token_var

  for bot_user in dev-bot review-bot planner-bot gardener-bot vault-bot supervisor-bot predictor-bot action-bot; do
    bot_pass="bot-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
    token_var="${bot_token_vars[$bot_user]}"

    if ! curl -sf --max-time 5 \
        -H "Authorization: token ${admin_token}" \
        "${forge_url}/api/v1/users/${bot_user}" >/dev/null 2>&1; then
      echo "Creating bot user: ${bot_user}"
      local create_output
      if ! create_output=$(_forgejo_exec forgejo admin user create \
          --username "${bot_user}" \
          --password "${bot_pass}" \
          --email "${bot_user}@disinto.local" \
          --must-change-password=false 2>&1); then
        echo "Error: failed to create bot user '${bot_user}':" >&2
        echo " ${create_output}" >&2
        exit 1
      fi
      # Forgejo 11.x ignores --must-change-password=false on create;
      # explicitly clear the flag so basic-auth token creation works.
      _forgejo_exec forgejo admin user change-password \
        --username "${bot_user}" \
        --password "${bot_pass}" \
        --must-change-password=false

      # Verify bot user was actually created
      if ! curl -sf --max-time 5 \
          -H "Authorization: token ${admin_token}" \
          "${forge_url}/api/v1/users/${bot_user}" >/dev/null 2>&1; then
        echo "Error: bot user '${bot_user}' not found after creation" >&2
        exit 1
      fi
    fi

    # Generate token via API (basic auth as the bot user — Forgejo requires
    # basic auth on POST /users/{username}/tokens, token auth is rejected)
    token=$(curl -sf -X POST \
      -u "${bot_user}:${bot_pass}" \
      -H "Content-Type: application/json" \
      "${forge_url}/api/v1/users/${bot_user}/tokens" \
      -d "{\"name\":\"disinto-${bot_user}-token\",\"scopes\":[\"all\"]}" 2>/dev/null \
      | jq -r '.sha1 // empty') || token=""

    if [ -z "$token" ]; then
      # Token name collision — create with timestamp suffix
      token=$(curl -sf -X POST \
        -u "${bot_user}:${bot_pass}" \
        -H "Content-Type: application/json" \
        "${forge_url}/api/v1/users/${bot_user}/tokens" \
        -d "{\"name\":\"disinto-${bot_user}-$(date +%s)\",\"scopes\":[\"all\"]}" 2>/dev/null \
        | jq -r '.sha1 // empty') || token=""
    fi

    if [ -z "$token" ]; then
      echo "Error: failed to create API token for '${bot_user}'" >&2
      exit 1
    fi

    # Store token in .env under the per-agent variable name
    # (sed replaces an existing line; otherwise append)
    if grep -q "^${token_var}=" "$env_file" 2>/dev/null; then
      sed -i "s|^${token_var}=.*|${token_var}=${token}|" "$env_file"
    else
      printf '%s=%s\n' "$token_var" "$token" >> "$env_file"
    fi
    export "${token_var}=${token}"
    echo " ${bot_user} token saved (${token_var})"

    # Backwards-compat aliases for dev-bot and review-bot
    if [ "$bot_user" = "dev-bot" ]; then
      export CODEBERG_TOKEN="$token"
    elif [ "$bot_user" = "review-bot" ]; then
      export REVIEW_BOT_TOKEN="$token"
    fi
  done

  # Store FORGE_URL in .env if not already present
  if ! grep -q '^FORGE_URL=' "$env_file" 2>/dev/null; then
    printf 'FORGE_URL=%s\n' "$forge_url" >> "$env_file"
  fi

  # Create the repo on Forgejo if it doesn't exist
  local org_name="${repo_slug%%/*}"
  local repo_name="${repo_slug##*/}"

  # Check if repo already exists (FORGE_TOKEN was exported in the loop above)
  if ! curl -sf --max-time 5 \
      -H "Authorization: token ${FORGE_TOKEN}" \
      "${forge_url}/api/v1/repos/${repo_slug}" >/dev/null 2>&1; then

    # Try creating org first (ignore if exists)
    curl -sf -X POST \
      -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
      -H "Content-Type: application/json" \
      "${forge_url}/api/v1/orgs" \
      -d "{\"username\":\"${org_name}\",\"visibility\":\"public\"}" >/dev/null 2>&1 || true

    # Create repo under org
    if ! curl -sf -X POST \
        -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
        -H "Content-Type: application/json" \
        "${forge_url}/api/v1/orgs/${org_name}/repos" \
        -d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" >/dev/null 2>&1; then
      # Fallback: create under the dev-bot user
      curl -sf -X POST \
        -H "Authorization: token ${FORGE_TOKEN}" \
        -H "Content-Type: application/json" \
        "${forge_url}/api/v1/user/repos" \
        -d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" >/dev/null 2>&1 || true
    fi

    # Add all bot users as collaborators (best-effort — failures ignored)
    for bot_user in dev-bot review-bot planner-bot gardener-bot vault-bot supervisor-bot predictor-bot action-bot; do
      curl -sf -X PUT \
        -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
        -H "Content-Type: application/json" \
        "${forge_url}/api/v1/repos/${repo_slug}/collaborators/${bot_user}" \
        -d '{"permission":"write"}' >/dev/null 2>&1 || true
    done

    echo "Repo: ${repo_slug} created on Forgejo"
  else
    echo "Repo: ${repo_slug} (already exists on Forgejo)"
  fi

  echo "Forge: ${forge_url} (ready)"
}
|
|
|
|
# Push local clone to the Forgejo remote.
|
|
push_to_forge() {
  # Push the local clone ($1) to its Forgejo remote and verify the repo is
  # no longer empty. $2 = forge base URL, $3 = org/repo slug.
  # Requires FORGE_TOKEN (dev-bot token). Returns non-zero on any failure.
  local repo_root="$1" forge_url="$2" repo_slug="$3"

  # Build authenticated remote URL: http://dev-bot:<token>@host:port/org/repo.git
  # NOTE(review): embedding the token in the remote URL persists it in the
  # clone's .git/config — acceptable for a local forge, but worth knowing.
  if [ -z "${FORGE_TOKEN:-}" ]; then
    echo "Error: FORGE_TOKEN not set — cannot push to Forgejo" >&2
    return 1
  fi
  local auth_url
  auth_url=$(printf '%s' "$forge_url" | sed "s|://|://dev-bot:${FORGE_TOKEN}@|")
  local remote_url="${auth_url}/${repo_slug}.git"
  # Display URL without token
  local display_url="${forge_url}/${repo_slug}.git"

  # Always set the remote URL to ensure credentials are current
  if git -C "$repo_root" remote get-url forgejo >/dev/null 2>&1; then
    git -C "$repo_root" remote set-url forgejo "$remote_url"
  else
    git -C "$repo_root" remote add forgejo "$remote_url"
  fi
  echo "Remote: forgejo -> ${display_url}"

  # Skip push if local repo has no commits (e.g. cloned from empty Forgejo repo)
  if ! git -C "$repo_root" rev-parse HEAD >/dev/null 2>&1; then
    echo "Push: skipped (local repo has no commits)"
    return 0
  fi

  # Push all branches and tags
  echo "Pushing: branches to forgejo"
  if ! git -C "$repo_root" push forgejo --all 2>&1; then
    echo "Error: failed to push branches to Forgejo" >&2
    return 1
  fi
  echo "Pushing: tags to forgejo"
  if ! git -C "$repo_root" push forgejo --tags 2>&1; then
    echo "Error: failed to push tags to Forgejo" >&2
    return 1
  fi

  # Verify the repo is no longer empty (best-effort: silently skipped when
  # the API call itself fails)
  local repo_info
  repo_info=$(curl -sf --max-time 10 \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${forge_url}/api/v1/repos/${repo_slug}" 2>/dev/null) || repo_info=""
  if [ -n "$repo_info" ]; then
    local is_empty
    is_empty=$(printf '%s' "$repo_info" | jq -r '.empty // "unknown"')
    if [ "$is_empty" = "true" ]; then
      echo "Warning: Forgejo repo still reports empty after push" >&2
      return 1
    fi
    echo "Verify: repo is not empty (push confirmed)"
  fi
}
|
|
|
|
# Preflight check — verify all factory requirements before proceeding.
|
|
preflight_check() {
  # Verify all factory requirements before init proceeds:
  #   - required commands (claude, tmux, git, jq, python3, curl)
  #   - Claude Code authentication state
  #   - forge reachability with the current FORGE_TOKEN (if set)
  #   - optional tooling (docker, sops, age) — warnings only
  # Exits 1 after listing ALL hard errors; $1 = repo slug, $2 = forge URL.
  local repo_slug="${1:-}"
  local forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local errors=0

  # ── Required commands ──
  local -A hints=(
    [claude]="Install: https://docs.anthropic.com/en/docs/claude-code/overview"
    [tmux]="Install: apt install tmux / brew install tmux"
    [git]="Install: apt install git / brew install git"
    [jq]="Install: apt install jq / brew install jq"
    [python3]="Install: apt install python3 / brew install python3"
    [curl]="Install: apt install curl / brew install curl"
  )

  local cmd
  for cmd in claude tmux git jq python3 curl; do
    if ! command -v "$cmd" &>/dev/null; then
      echo "Error: ${cmd} not found" >&2
      echo " ${hints[$cmd]}" >&2
      errors=$((errors + 1))
    fi
  done

  # ── Claude Code authentication ──
  if command -v claude &>/dev/null && command -v jq &>/dev/null; then
    local auth_json auth_stderr auth_rc=0
    # NOTE(review): `claude auth status` is invoked twice (stderr capture,
    # then stdout capture); the results could disagree if state changes
    # between calls.
    auth_stderr=$(claude auth status 2>&1 >/dev/null) || auth_rc=$?
    auth_json=$(claude auth status 2>/dev/null) || auth_json=""
    # Only skip check if subcommand is unrecognized (old claude version)
    if printf '%s' "$auth_stderr" | grep -qi "unknown command"; then
      : # claude version doesn't support auth status — skip
    elif [ -z "$auth_json" ] || [ "$auth_rc" -ne 0 ]; then
      echo "Error: Claude Code is not authenticated (auth check failed)" >&2
      echo " Run: claude auth login" >&2
      errors=$((errors + 1))
    else
      local logged_in
      logged_in=$(printf '%s' "$auth_json" | jq -r '.loggedIn // false' 2>/dev/null) || logged_in="false"
      if [ "$logged_in" != "true" ]; then
        echo "Error: Claude Code is not authenticated" >&2
        echo " Run: claude auth login" >&2
        errors=$((errors + 1))
      fi
    fi
  fi

  # ── Forge API check (verify the forge is reachable and token works) ──
  # Skipped entirely when FORGE_TOKEN is unset (e.g. first-time init before
  # setup_forge has run).
  if [ -n "${FORGE_TOKEN:-}" ] && command -v curl &>/dev/null; then
    if ! curl -sf --max-time 10 \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${forge_url}/api/v1/repos/${repo_slug}" >/dev/null 2>&1; then
      echo "Error: Forge API auth failed at ${forge_url}" >&2
      echo " Verify your FORGE_TOKEN and that Forgejo is running" >&2
      errors=$((errors + 1))
    fi
  fi

  # ── Optional tools (warn only) ──
  if ! command -v docker &>/dev/null; then
    echo "Warning: docker not found (needed for Forgejo provisioning)" >&2
  fi
  if ! command -v sops &>/dev/null; then
    echo "Warning: sops not found (secrets will be stored in plaintext .env)" >&2
    echo " Install: https://github.com/getsops/sops/releases" >&2
  fi
  if ! command -v age-keygen &>/dev/null; then
    echo "Warning: age not found (needed for secret encryption with SOPS)" >&2
    echo " Install: apt install age / brew install age" >&2
  fi

  if [ "$errors" -gt 0 ]; then
    echo "" >&2
    echo "${errors} preflight error(s) — fix the above before running disinto init" >&2
    exit 1
  fi
}
|
|
|
|
# Clone the repo if the target directory doesn't exist; validate if it does.
|
|
clone_or_validate() {
  # Reuse an existing clone at $2 when one is present; otherwise clone
  # slug $1 from the forge ($3, defaulting to FORGE_URL / localhost:3000).
  local slug="$1"
  local target="$2"
  local base="${3:-${FORGE_URL:-http://localhost:3000}}"

  if [ -d "${target}/.git" ]; then
    echo "Repo: ${target} (existing clone)"
  else
    local src
    src=$(clone_url_from_slug "$slug" "$base")
    echo "Cloning: ${src} -> ${target}"
    git clone "$src" "$target"
  fi
}
|
|
|
|
# Detect the primary branch from the remote HEAD or fallback to main/master.
|
|
detect_branch() {
  # Print the primary branch of repo $1: the branch that origin/HEAD points
  # at, else "main" if origin/main exists, else "master".
  local root="$1"
  local head_ref branch

  head_ref=$(git -C "$root" symbolic-ref refs/remotes/origin/HEAD 2>/dev/null) || head_ref=""
  branch="${head_ref#refs/remotes/origin/}"

  if [ -z "$branch" ]; then
    if git -C "$root" show-ref --verify --quiet refs/remotes/origin/main 2>/dev/null; then
      branch="main"
    else
      branch="master"
    fi
  fi
  printf '%s' "$branch"
}
|
|
|
|
# Generate projects/<name>.toml config file.
|
|
generate_toml() {
  # Write the per-project TOML config consumed by the factory.
  # Args: $1 output path, $2 project name, $3 org/repo slug, $4 local repo
  # root, $5 primary branch, $6 Woodpecker repo id, $7 forge base URL.
  local out_path="$1"
  local name="$2"
  local repo="$3"
  local root="$4"
  local branch="$5"
  local ci_id="$6"
  local forge_url="$7"

  cat > "$out_path" <<EOF
# projects/${name}.toml — Project config for ${repo}
#
# Generated by disinto init

name = "${name}"
repo = "${repo}"
forge_url = "${forge_url}"
repo_root = "${root}"
primary_branch = "${branch}"

[ci]
woodpecker_repo_id = ${ci_id}
stale_minutes = 60

[services]
containers = []

[monitoring]
check_prs = true
check_dev_agent = true
check_pipeline_stall = false

# [mirrors]
# github = "git@github.com:user/repo.git"
# codeberg = "git@codeberg.org:user/repo.git"
EOF
}
|
|
|
|
# Create standard labels on the forge repo.
|
|
create_labels() {
  # Create the standard disinto workflow labels on forge repo $1
  # ($2 = forge base URL, default FORGE_URL / localhost:3000).
  # Existing labels are skipped; per-label failures are reported, non-fatal.
  # Requires FORGE_TOKEN in the environment.
  local repo="$1"
  local forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local api="${forge_url}/api/v1/repos/${repo}"

  local -A labels=(
    ["backlog"]="#0075ca"
    ["in-progress"]="#e4e669"
    ["blocked"]="#d73a4a"
    ["tech-debt"]="#cfd3d7"
    ["underspecified"]="#fbca04"
    ["vision"]="#0e8a16"
    ["action"]="#1d76db"
  )

  echo "Creating labels on ${repo}..."

  # Fetch existing labels so we can skip duplicates.
  # Fix: parse the JSON with jq (already a hard preflight requirement and
  # used everywhere else in this script) instead of grep/cut, which breaks
  # on escaped quotes and matches any "name" field anywhere in the payload.
  local existing
  existing=$(curl -sf \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${api}/labels?limit=50" 2>/dev/null \
    | jq -r '.[].name // empty' 2>/dev/null) || existing=""

  local name color
  # Fixed iteration order (not "${!labels[@]}") for stable output.
  for name in backlog in-progress blocked tech-debt underspecified vision action; do
    if echo "$existing" | grep -qx "$name"; then
      echo " . ${name} (already exists)"
      continue
    fi
    color="${labels[$name]}"
    if curl -sf -X POST \
        -H "Authorization: token ${FORGE_TOKEN}" \
        -H "Content-Type: application/json" \
        "${api}/labels" \
        -d "{\"name\":\"${name}\",\"color\":\"${color}\"}" >/dev/null 2>&1; then
      echo " + ${name}"
    else
      echo " ! ${name} (failed to create)"
    fi
  done
}
|
|
|
|
# Seed the target project with a minimal VISION.md template.
# No-op (with a notice) when the project already has one.
# Arguments:
#   $1 - project clone directory
#   $2 - project name (interpolated into the template)
generate_vision() {
  local proj_dir="$1" proj="$2"
  local target="${proj_dir}/VISION.md"
  if [ -f "$target" ]; then
    echo "VISION: ${target} (already exists, skipping)"
    return
  fi
  cat > "$target" <<EOF
# Vision

## What ${proj} does

<!-- Describe the purpose of this project in one paragraph -->

## Who it's for

<!-- Describe the target audience -->

## Design principles

- <!-- Principle 1 -->
- <!-- Principle 2 -->
- <!-- Principle 3 -->

## Milestones

### Current
- <!-- What you're working on now -->

### Next
- <!-- What comes after -->
EOF
  echo "Created: ${target}"
  echo " Commit this to your repo when ready"
}
|
|
|
|
# Generate and optionally install host crontab entries for the project agents.
# In compose mode the agents container schedules itself, so the host crontab
# is left untouched.
# Arguments:
#   $1 - project name   $2 - project TOML path
#   $3 - auto-confirm (true/false)   $4 - bare-metal mode (default: false)
# Globals:
#   FACTORY_ROOT (read) - used to build absolute script paths in cron lines
install_cron() {
  local proj="$1" toml_file="$2" assume_yes="$3" bare_mode="${4:-false}"

  # In compose mode, skip host cron — the agents container runs cron internally
  if [ "$bare_mode" = false ]; then
    echo ""
    echo "Cron: skipped (agents container handles scheduling in compose mode)"
    return
  fi

  # Bare mode: crontab is required on the host
  if ! command -v crontab &>/dev/null; then
    echo "Error: crontab not found (required for bare-metal mode)" >&2
    echo " Install: apt install cron / brew install cron" >&2
    exit 1
  fi

  # Use absolute path for the TOML in cron entries
  local toml_abs
  toml_abs="$(cd "$(dirname "$toml_file")" && pwd)/$(basename "$toml_file")"

  local entries
  entries="# disinto: ${proj}
2,7,12,17,22,27,32,37,42,47,52,57 * * * * ${FACTORY_ROOT}/review/review-poll.sh ${toml_abs} >/dev/null 2>&1
4,9,14,19,24,29,34,39,44,49,54,59 * * * * ${FACTORY_ROOT}/dev/dev-poll.sh ${toml_abs} >/dev/null 2>&1
0 0,6,12,18 * * * cd ${FACTORY_ROOT} && bash gardener/gardener-run.sh ${toml_abs} >/dev/null 2>&1"

  echo ""
  echo "Cron entries to install:"
  echo "$entries"
  echo ""

  # Ask before touching the crontab, unless --yes or non-interactive
  if [ "$assume_yes" = false ] && [ -t 0 ]; then
    read -rp "Install these cron entries? [y/N] " confirm
    if [[ ! "$confirm" =~ ^[Yy] ]]; then
      echo "Skipped cron install. Add manually with: crontab -e"
      return
    fi
  fi

  # Append to existing crontab
  { crontab -l 2>/dev/null || true; printf '%s\n' "$entries"; } | crontab -
  echo "Cron entries installed"
}
|
|
|
|
# Set up Woodpecker CI to use Forgejo as its forge backend.
# Creates (or reuses) an OAuth2 app on Forgejo for Woodpecker and persists
# the forge URL plus client credentials into the factory .env file.
# Arguments:
#   $1 - forge base URL
#   $2 - repo slug (owner/name) — context only, not used for the OAuth2 app
# Globals:
#   FORGE_TOKEN (read), FACTORY_ROOT (read)
create_woodpecker_oauth() {
  local forge_url="$1" repo_slug="$2"

  echo ""
  echo "── Woodpecker OAuth2 setup ────────────────────────────"

  # Create OAuth2 application on Forgejo for Woodpecker
  local oauth2_name="woodpecker-ci"
  local redirect_uri="http://localhost:8000/authorize"
  local existing_app client_id client_secret

  # Check if OAuth2 app already exists (reuse: keeps client_id stable,
  # but note the secret is only returned at creation time)
  existing_app=$(curl -sf \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${forge_url}/api/v1/user/applications/oauth2" 2>/dev/null \
    | jq -r --arg name "$oauth2_name" '.[] | select(.name == $name) | .client_id // empty' 2>/dev/null) || true

  if [ -n "$existing_app" ]; then
    echo "OAuth2: ${oauth2_name} (already exists, client_id=${existing_app})"
    client_id="$existing_app"
  else
    local oauth2_resp
    oauth2_resp=$(curl -sf -X POST \
      -H "Authorization: token ${FORGE_TOKEN}" \
      -H "Content-Type: application/json" \
      "${forge_url}/api/v1/user/applications/oauth2" \
      -d "{\"name\":\"${oauth2_name}\",\"redirect_uris\":[\"${redirect_uri}\"],\"confidential_client\":true}" \
      2>/dev/null) || oauth2_resp=""

    if [ -z "$oauth2_resp" ]; then
      echo "Warning: failed to create OAuth2 app on Forgejo" >&2
      return
    fi

    client_id=$(printf '%s' "$oauth2_resp" | jq -r '.client_id // empty')
    client_secret=$(printf '%s' "$oauth2_resp" | jq -r '.client_secret // empty')

    if [ -z "$client_id" ]; then
      echo "Warning: OAuth2 app creation returned no client_id" >&2
      return
    fi

    echo "OAuth2: ${oauth2_name} created (client_id=${client_id})"
  fi

  # Store Woodpecker forge config in .env
  # WP_FORGEJO_CLIENT/SECRET match the docker-compose.yml variable references
  local env_file="${FACTORY_ROOT}/.env"
  local wp_vars=(
    "WOODPECKER_FORGEJO=true"
    "WOODPECKER_FORGEJO_URL=${forge_url}"
  )
  if [ -n "${client_id:-}" ]; then
    wp_vars+=("WP_FORGEJO_CLIENT=${client_id}")
  fi
  if [ -n "${client_secret:-}" ]; then
    wp_vars+=("WP_FORGEJO_SECRET=${client_secret}")
  fi

  # Upsert each VAR=value line via delete-then-append instead of a sed
  # substitution: OAuth secrets may contain characters ('&', '\', '|')
  # that are metacharacters in a sed replacement and would corrupt .env.
  local var_line var_name tmp_file
  for var_line in "${wp_vars[@]}"; do
    var_name="${var_line%%=*}"
    if grep -q "^${var_name}=" "$env_file" 2>/dev/null; then
      tmp_file=$(mktemp)
      grep -v "^${var_name}=" "$env_file" > "$tmp_file" || true
      mv "$tmp_file" "$env_file"
    fi
    printf '%s\n' "$var_line" >> "$env_file"
  done
  echo "Config: Woodpecker forge vars written to .env"
}
|
|
|
|
# Activate a repo in Woodpecker CI after the stack has started.
# Waits (up to ~20s) for the Woodpecker API, looks the repo up, and if it is
# not yet active POSTs an activation request.
# Arguments:
#   $1 - forge repo slug (owner/name)
# Globals:
#   WOODPECKER_SERVER (read, default http://localhost:8000)
#   WOODPECKER_TOKEN  (read) - API token; activation is skipped without it
#   _WP_REPO_ID       (written) - detected/created Woodpecker repo id,
#                     consumed later by disinto_init for TOML generation
# Returns: always 0 from the soft-failure paths (best-effort activation)
activate_woodpecker_repo() {
  local forge_repo="$1"
  local wp_server="${WOODPECKER_SERVER:-http://localhost:8000}"

  # Wait for Woodpecker to become ready after stack start
  # (10 attempts x 2s sleep, 3s per probe)
  local retries=0
  while [ $retries -lt 10 ]; do
    if curl -sf --max-time 3 "${wp_server}/api/version" >/dev/null 2>&1; then
      break
    fi
    retries=$((retries + 1))
    sleep 2
  done

  # Final probe after the retry loop: give up quietly if still unreachable
  if ! curl -sf --max-time 5 "${wp_server}/api/version" >/dev/null 2>&1; then
    echo "Woodpecker: not reachable at ${wp_server} after stack start, skipping repo activation" >&2
    return
  fi

  echo ""
  echo "── Woodpecker repo activation ─────────────────────────"

  local wp_token="${WOODPECKER_TOKEN:-}"
  if [ -z "$wp_token" ]; then
    echo "Warning: WOODPECKER_TOKEN not set — cannot activate repo" >&2
    echo " Activate manually: woodpecker-cli repo add ${forge_repo}" >&2
    return
  fi

  # Lookup first: the repo may already be active from a previous init run
  local wp_repo_id
  wp_repo_id=$(curl -sf \
    -H "Authorization: Bearer ${wp_token}" \
    "${wp_server}/api/repos/lookup/${forge_repo}" 2>/dev/null \
    | jq -r '.id // empty' 2>/dev/null) || true

  if [ -n "$wp_repo_id" ] && [ "$wp_repo_id" != "0" ]; then
    echo "Repo: ${forge_repo} already active in Woodpecker (id=${wp_repo_id})"
  else
    # Not active yet — request activation and read the id from the response
    local activate_resp
    activate_resp=$(curl -sf -X POST \
      -H "Authorization: Bearer ${wp_token}" \
      -H "Content-Type: application/json" \
      "${wp_server}/api/repos" \
      -d "{\"forge_remote_id\":\"${forge_repo}\"}" 2>/dev/null) || activate_resp=""

    wp_repo_id=$(printf '%s' "$activate_resp" | jq -r '.id // empty' 2>/dev/null) || true

    if [ -n "$wp_repo_id" ] && [ "$wp_repo_id" != "0" ]; then
      echo "Repo: ${forge_repo} activated in Woodpecker (id=${wp_repo_id})"
    else
      echo "Warning: could not activate repo in Woodpecker" >&2
      echo " Activate manually: woodpecker-cli repo add ${forge_repo}" >&2
    fi
  fi

  # Store repo ID for later TOML generation (global read by disinto_init)
  if [ -n "$wp_repo_id" ] && [ "$wp_repo_id" != "0" ]; then
    _WP_REPO_ID="$wp_repo_id"
  fi
}
|
|
|
|
# ── init command ─────────────────────────────────────────────────────────────
|
|
|
|
# Bootstrap a new project end-to-end: parse flags, provision the forge,
# clone/push the repo, generate config + templates, set up CI, and (in
# compose mode) bring the stack up.
# Arguments:
#   $1   - repo URL (required), remaining args are flags:
#          --branch, --repo-root, --ci-id, --forge-url, --bare, --yes
# Globals:
#   FACTORY_ROOT, FORGE_URL, USER (read); DISINTO_BARE (exported);
#   _WP_REPO_ID (read, set by activate_woodpecker_repo)
# Side effects: writes projects/<name>.toml, .env entries, VISION.md,
#   state marker files; may install cron entries and start docker compose.
disinto_init() {
  local repo_url="${1:-}"
  if [ -z "$repo_url" ]; then
    echo "Error: repo URL required" >&2
    echo "Usage: disinto init <repo-url>" >&2
    exit 1
  fi
  shift

  # Parse flags
  local branch="" repo_root="" ci_id="0" auto_yes=false forge_url_flag="" bare=false
  while [ $# -gt 0 ]; do
    case "$1" in
      --branch) branch="$2"; shift 2 ;;
      --repo-root) repo_root="$2"; shift 2 ;;
      --ci-id) ci_id="$2"; shift 2 ;;
      --forge-url) forge_url_flag="$2"; shift 2 ;;
      --bare) bare=true; shift ;;
      --yes) auto_yes=true; shift ;;
      *) echo "Unknown option: $1" >&2; exit 1 ;;
    esac
  done

  # Export bare-metal flag for setup_forge
  export DISINTO_BARE="$bare"

  # Extract org/repo slug (e.g. "user/repo") from the URL
  local forge_repo
  forge_repo=$(parse_repo_slug "$repo_url")
  local project_name="${forge_repo##*/}"
  local toml_path="${FACTORY_ROOT}/projects/${project_name}.toml"

  # Determine forge URL (flag > env > default)
  local forge_url="${forge_url_flag:-${FORGE_URL:-http://localhost:3000}}"

  echo "=== disinto init ==="
  echo "Project: ${forge_repo}"
  echo "Name: ${project_name}"
  echo "Forge: ${forge_url}"

  # Check for existing config — re-running init on a known project reuses it
  local toml_exists=false
  if [ -f "$toml_path" ]; then
    toml_exists=true
    echo "Config: ${toml_path} (already exists, reusing)"

    # Read repo_root and branch from existing TOML (tomllib: Python 3.11+)
    local existing_root existing_branch
    existing_root=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    cfg = tomllib.load(f)
print(cfg.get('repo_root', ''))
" "$toml_path" 2>/dev/null) || existing_root=""
    existing_branch=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    cfg = tomllib.load(f)
print(cfg.get('primary_branch', ''))
" "$toml_path" 2>/dev/null) || existing_branch=""

    # Use existing values as defaults
    if [ -n "$existing_branch" ] && [ -z "$branch" ]; then
      branch="$existing_branch"
    fi

    # Handle repo_root: flag overrides TOML, prompt if they differ
    if [ -z "$repo_root" ]; then
      repo_root="${existing_root:-/home/${USER}/${project_name}}"
    elif [ -n "$existing_root" ] && [ "$repo_root" != "$existing_root" ]; then
      echo "Note: --repo-root (${repo_root}) differs from TOML (${existing_root})"
      local update_toml=false
      if [ "$auto_yes" = true ]; then
        update_toml=true
      elif [ -t 0 ]; then
        read -rp "Update repo_root in TOML to ${repo_root}? [y/N] " confirm
        if [[ "$confirm" =~ ^[Yy] ]]; then
          update_toml=true
        else
          # Declined: keep the TOML's value
          repo_root="$existing_root"
        fi
      fi
      if [ "$update_toml" = true ]; then
        # Rewrite only the repo_root line, preserving the rest of the TOML
        python3 -c "
import sys, re, pathlib
p = pathlib.Path(sys.argv[1])
text = p.read_text()
text = re.sub(r'^repo_root\s*=\s*.*$', 'repo_root = \"' + sys.argv[2] + '\"', text, flags=re.MULTILINE)
p.write_text(text)
" "$toml_path" "$repo_root"
        echo "Updated: repo_root in ${toml_path}"
      fi
    fi
  fi

  # Generate compose files (unless --bare)
  if [ "$bare" = false ]; then
    # Extract the port from the forge URL for the compose template
    local forge_port
    forge_port=$(printf '%s' "$forge_url" | sed -E 's|.*:([0-9]+)/?$|\1|')
    forge_port="${forge_port:-3000}"
    generate_compose "$forge_port"
    generate_agent_docker
  fi

  # Set up local Forgejo instance (provision if needed, create users/tokens/repo)
  setup_forge "$forge_url" "$forge_repo"

  # Preflight: verify factory requirements
  preflight_check "$forge_repo" "$forge_url"

  # Determine repo root (for new projects)
  repo_root="${repo_root:-/home/${USER}/${project_name}}"

  # Clone or validate (try origin first for initial clone from upstream)
  if [ ! -d "${repo_root}/.git" ]; then
    # For initial setup, clone from the provided URL directly;
    # fall back to the factory's clone helper on failure
    echo "Cloning: ${repo_url} -> ${repo_root}"
    git clone "$repo_url" "$repo_root" 2>/dev/null || \
      clone_or_validate "$forge_repo" "$repo_root" "$forge_url"
  else
    echo "Repo: ${repo_root} (existing clone)"
  fi

  # Push to local Forgejo
  push_to_forge "$repo_root" "$forge_url" "$forge_repo"

  # Detect primary branch (only when not given via --branch or TOML)
  if [ -z "$branch" ]; then
    branch=$(detect_branch "$repo_root")
  fi
  echo "Branch: ${branch}"

  # Generate project TOML (skip if already exists)
  if [ "$toml_exists" = false ]; then
    # Prompt for CI ID if interactive and not already set via flag
    if [ "$ci_id" = "0" ] && [ "$auto_yes" = false ] && [ -t 0 ]; then
      read -rp "Woodpecker CI repo ID (0 to skip CI): " user_ci_id
      ci_id="${user_ci_id:-0}"
    fi

    generate_toml "$toml_path" "$project_name" "$forge_repo" "$repo_root" "$branch" "$ci_id" "$forge_url"
    echo "Created: ${toml_path}"
  fi

  # Create OAuth2 app on Forgejo for Woodpecker (before compose up)
  _WP_REPO_ID=""
  create_woodpecker_oauth "$forge_url" "$forge_repo"

  # Generate WOODPECKER_AGENT_SECRET for server↔agent auth (once)
  local env_file="${FACTORY_ROOT}/.env"
  if ! grep -q '^WOODPECKER_AGENT_SECRET=' "$env_file" 2>/dev/null; then
    local agent_secret
    # 40 alphanumeric chars derived from /dev/urandom
    agent_secret="$(head -c 32 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 40)"
    printf 'WOODPECKER_AGENT_SECRET=%s\n' "$agent_secret" >> "$env_file"
    echo "Config: WOODPECKER_AGENT_SECRET generated and saved to .env"
  fi

  # Ensure Claude Code never auto-updates, phones home, or sends telemetry (#725)
  if ! grep -q '^CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=' "$env_file" 2>/dev/null; then
    printf 'CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1\n' >> "$env_file"
    echo "Config: CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 saved to .env"
  fi

  # Create labels on remote
  create_labels "$forge_repo" "$forge_url"

  # Generate VISION.md template
  generate_vision "$repo_root" "$project_name"

  # Generate template deployment pipeline configs in project repo
  generate_deploy_pipelines "$repo_root" "$project_name"

  # Install cron jobs (host crontab in bare mode only)
  install_cron "$project_name" "$toml_path" "$auto_yes" "$bare"

  # Set up mirror remotes if [mirrors] configured in TOML
  # (load-project.sh exports MIRROR_NAMES and MIRROR_<NAME> variables)
  source "${FACTORY_ROOT}/lib/load-project.sh" "$toml_path"
  if [ -n "${MIRROR_NAMES:-}" ]; then
    echo "Mirrors: setting up remotes"
    local mname murl
    for mname in $MIRROR_NAMES; do
      # Indirect lookup: MIRROR_<UPPERCASED-NAME> holds the remote URL
      murl=$(eval "echo \"\$MIRROR_$(echo "$mname" | tr '[:lower:]' '[:upper:]')\"") || true
      [ -z "$murl" ] && continue
      # add if new, otherwise update the existing remote's URL
      git -C "$repo_root" remote add "$mname" "$murl" 2>/dev/null \
        || git -C "$repo_root" remote set-url "$mname" "$murl" 2>/dev/null || true
      echo " + ${mname} -> ${murl}"
    done
    # Initial sync: push current primary branch to mirrors
    source "${FACTORY_ROOT}/lib/mirrors.sh"
    export PROJECT_REPO_ROOT="$repo_root"
    mirror_push
  fi

  # Encrypt secrets if SOPS + age are available
  write_secrets_encrypted

  # Bring up the full stack (compose mode only)
  if [ "$bare" = false ] && [ -f "${FACTORY_ROOT}/docker-compose.yml" ]; then
    echo ""
    echo "── Starting full stack ────────────────────────────────"
    docker compose -f "${FACTORY_ROOT}/docker-compose.yml" up -d
    echo "Stack: running (forgejo + woodpecker + agents)"

    # Activate repo in Woodpecker now that stack is running
    activate_woodpecker_repo "$forge_repo"

    # Use detected Woodpecker repo ID if ci_id was not explicitly set
    if [ "$ci_id" = "0" ] && [ -n "${_WP_REPO_ID:-}" ]; then
      ci_id="$_WP_REPO_ID"
      echo "CI ID: ${ci_id} (from Woodpecker)"
      # Update TOML with Woodpecker repo ID
      if [ -f "$toml_path" ]; then
        python3 -c "
import sys, re, pathlib
p = pathlib.Path(sys.argv[1])
text = p.read_text()
text = re.sub(r'^woodpecker_repo_id\s*=\s*.*$', 'woodpecker_repo_id = ' + sys.argv[2], text, flags=re.MULTILINE)
p.write_text(text)
" "$toml_path" "$ci_id"
      fi
    fi
  fi

  # Activate default agents (zero-cost when idle — they only invoke Claude
  # when there is actual work, so an empty project burns no LLM tokens)
  mkdir -p "${FACTORY_ROOT}/state"
  touch "${FACTORY_ROOT}/state/.dev-active"
  touch "${FACTORY_ROOT}/state/.reviewer-active"
  touch "${FACTORY_ROOT}/state/.gardener-active"

  echo ""
  echo "Done. Project ${project_name} is ready."
  echo " Config: ${toml_path}"
  echo " Clone: ${repo_root}"
  echo " Forge: ${forge_url}/${forge_repo}"
  if [ "$bare" = false ]; then
    echo " Stack: docker compose (use 'disinto up/down/logs/shell')"
  else
    echo " Mode: bare-metal"
  fi
  echo ""
  echo "── Claude authentication ──────────────────────────────"
  echo " OAuth (shared across containers):"
  echo " Run 'claude auth login' on the host once."
  echo " Credentials in ~/.claude are mounted into containers."
  echo " API key (alternative — metered billing, no rotation issues):"
  echo " Set ANTHROPIC_API_KEY in .env to skip OAuth entirely."
  echo ""
  echo " Run 'disinto status' to verify."
}
|
|
|
|
# ── status command ───────────────────────────────────────────────────────────
|
|
|
|
# Print a status summary for every configured project: active dev sessions
# (from /tmp phase files) plus backlog and open-PR counts from the forge API.
# Globals:
#   FACTORY_ROOT (read) - projects/ directory location
#   FORGE_TOKEN  (read) - optional; API counts are skipped without it
#   FORGE_URL    (read) - fallback forge URL when a TOML has none
disinto_status() {
  local toml_dir="${FACTORY_ROOT}/projects"
  local found=false

  for toml in "${toml_dir}"/*.toml; do
    # Glob may not match anything — skip the literal pattern
    [ -f "$toml" ] || continue
    found=true

    # Parse name, repo, forge_url from TOML (tomllib: Python 3.11+);
    # skip malformed configs rather than aborting the whole status run
    local pname prepo pforge_url
    pname=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f)['name'])
" "$toml" 2>/dev/null) || continue
    prepo=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f)['repo'])
" "$toml" 2>/dev/null) || continue
    pforge_url=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    print(tomllib.load(f).get('forge_url', ''))
" "$toml" 2>/dev/null) || pforge_url=""
    pforge_url="${pforge_url:-${FORGE_URL:-http://localhost:3000}}"

    echo "== ${pname} (${prepo}) =="

    # Active dev sessions — dev agents drop
    # /tmp/dev-session-<project>-<issue>.phase files while working
    local has_sessions=false
    for pf in /tmp/dev-session-"${pname}"-*.phase; do
      [ -f "$pf" ] || continue
      has_sessions=true
      local issue phase_line
      # Issue number is embedded in the phase filename
      issue=$(basename "$pf" | sed "s/dev-session-${pname}-//;s/\.phase//")
      phase_line=$(head -1 "$pf" 2>/dev/null || echo "unknown")
      echo " Session #${issue}: ${phase_line}"
    done
    if [ "$has_sessions" = false ]; then
      echo " Sessions: none"
    fi

    # Backlog depth via API — HEAD request only; counts come from the
    # x-total-count response header, so no body parsing is needed
    if [ -n "${FORGE_TOKEN:-}" ]; then
      local api="${pforge_url}/api/v1/repos/${prepo}"
      local backlog_count pr_count

      backlog_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/issues?state=open&labels=backlog&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || backlog_count="?"
      echo " Backlog: ${backlog_count:-0} issues"

      pr_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/pulls?state=open&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || pr_count="?"
      echo " Open PRs: ${pr_count:-0}"
    else
      echo " Backlog: (no FORGE_TOKEN)"
      echo " Open PRs: (no FORGE_TOKEN)"
    fi

    echo ""
  done

  if [ "$found" = false ]; then
    echo "No projects configured."
    echo "Run 'disinto init <repo-url>' to get started."
  fi
}
|
|
|
|
# ── secrets command ────────────────────────────────────────────────────────────
|
|
|
|
# Manage the two encrypted secret stores:
#   .env.enc       - agent secrets (FORGE_TOKEN, CLAUDE_API_KEY, ...)
#   .env.vault.enc - vault secrets (GITHUB_TOKEN, deploy keys, ...)
# Subcommands: edit|show|migrate and the -vault variants.
# Globals:
#   FACTORY_ROOT (read); AGE_PUBLIC_KEY (read, set by ensure_age_key)
# Depends on helpers from lib/env.sh: ensure_age_key, write_sops_yaml,
# encrypt_env_file.
disinto_secrets() {
  local subcmd="${1:-}"
  local enc_file="${FACTORY_ROOT}/.env.enc"
  local env_file="${FACTORY_ROOT}/.env"
  local vault_enc_file="${FACTORY_ROOT}/.env.vault.enc"
  local vault_env_file="${FACTORY_ROOT}/.env.vault"

  # Shared helper: ensure sops+age and .sops.yaml exist
  # (defined inside so it can close over FACTORY_ROOT locals; exits on failure)
  _secrets_ensure_sops() {
    if ! command -v sops &>/dev/null || ! command -v age-keygen &>/dev/null; then
      echo "Error: sops and age are required." >&2
      echo " Install sops: https://github.com/getsops/sops/releases" >&2
      echo " Install age: apt install age / brew install age" >&2
      exit 1
    fi
    if ! ensure_age_key; then
      echo "Error: failed to generate age key" >&2
      exit 1
    fi
    if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
      write_sops_yaml "$AGE_PUBLIC_KEY"
      echo "Created: .sops.yaml"
    fi
  }

  case "$subcmd" in
    edit)
      # Open the encrypted agent secrets in $EDITOR via sops
      if [ ! -f "$enc_file" ]; then
        echo "Error: ${enc_file} not found. Run 'disinto secrets migrate' first." >&2
        exit 1
      fi
      sops "$enc_file"
      ;;
    show)
      # Decrypt agent secrets to stdout
      if [ ! -f "$enc_file" ]; then
        echo "Error: ${enc_file} not found." >&2
        exit 1
      fi
      sops -d "$enc_file"
      ;;
    migrate)
      # One-way migration: encrypt plaintext .env and delete the original
      if [ ! -f "$env_file" ]; then
        echo "Error: ${env_file} not found — nothing to migrate." >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$env_file" "$enc_file"
      rm -f "$env_file"
      echo "Migrated: .env -> .env.enc (plaintext removed)"
      ;;
    edit-vault)
      # Open the encrypted vault secrets in $EDITOR via sops
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found. Run 'disinto secrets migrate-vault' first." >&2
        exit 1
      fi
      sops "$vault_enc_file"
      ;;
    show-vault)
      # Decrypt vault secrets to stdout
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found." >&2
        exit 1
      fi
      sops -d "$vault_enc_file"
      ;;
    migrate-vault)
      # One-way migration: encrypt plaintext .env.vault and delete the original
      if [ ! -f "$vault_env_file" ]; then
        echo "Error: ${vault_env_file} not found — nothing to migrate." >&2
        echo " Create .env.vault with vault secrets (GITHUB_TOKEN, deploy keys, etc.)" >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$vault_env_file" "$vault_enc_file"
      rm -f "$vault_env_file"
      echo "Migrated: .env.vault -> .env.vault.enc (plaintext removed)"
      ;;
    *)
      # Unknown/missing subcommand: print usage to stderr and fail
      cat <<EOF >&2
Usage: disinto secrets <subcommand>

Agent secrets (.env.enc):
 edit Edit agent secrets (FORGE_TOKEN, CLAUDE_API_KEY, etc.)
 show Show decrypted agent secrets
 migrate Encrypt .env -> .env.enc

Vault secrets (.env.vault.enc):
 edit-vault Edit vault secrets (GITHUB_TOKEN, deploy keys, etc.)
 show-vault Show decrypted vault secrets
 migrate-vault Encrypt .env.vault -> .env.vault.enc
EOF
      exit 1
      ;;
  esac
}
|
|
|
|
# ── vault-run command ─────────────────────────────────────────────────────────
|
|
|
|
# Run a vault action inside an ephemeral vault-runner container.
# Decrypts .env.vault.enc to a temp file, injects it via --env-file, and
# removes the plaintext immediately after the run (trap covers early exits).
# Arguments:
#   $1 - action id passed to the vault-runner service (required)
# Globals:
#   FACTORY_ROOT (read)
# Returns: exit status of the containerized action
disinto_vault_run() {
  local action_id="${1:?Usage: disinto vault-run <action-id>}"
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  local vault_enc="${FACTORY_ROOT}/.env.vault.enc"

  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    echo " Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  fi

  if [ ! -f "$vault_enc" ]; then
    echo "Error: .env.vault.enc not found — create vault secrets first" >&2
    echo " Run 'disinto secrets migrate-vault' after creating .env.vault" >&2
    exit 1
  fi

  if ! command -v sops &>/dev/null; then
    echo "Error: sops not found — required to decrypt vault secrets" >&2
    exit 1
  fi

  # Decrypt vault secrets to temp file; the EXIT trap guarantees cleanup
  # even if docker compose aborts the script under set -e
  local tmp_env
  tmp_env=$(mktemp /tmp/disinto-vault-XXXXXX)
  trap 'rm -f "$tmp_env"' EXIT

  if ! sops -d --output-type dotenv "$vault_enc" > "$tmp_env" 2>/dev/null; then
    rm -f "$tmp_env"
    trap - EXIT
    echo "Error: failed to decrypt .env.vault.enc" >&2
    exit 1
  fi

  echo "Vault secrets decrypted to tmpfile"

  # Run action in ephemeral vault-runner container; capture the exit code
  # without tripping set -e
  local rc=0
  docker compose -f "$compose_file" \
    run --rm --env-file "$tmp_env" \
    vault-runner "$action_id" || rc=$?

  # Clean up — secrets gone. Also drop the EXIT trap: leaving it installed
  # would clobber/interfere with any trap set later in this process.
  rm -f "$tmp_env"
  trap - EXIT
  echo "Vault tmpfile removed"

  if [ "$rc" -eq 0 ]; then
    echo "Vault action ${action_id} completed successfully"
  else
    echo "Vault action ${action_id} failed (exit ${rc})" >&2
  fi
  return "$rc"
}
|
|
|
|
# ── up command ────────────────────────────────────────────────────────────────
|
|
|
|
# Start the full stack via docker compose. When only an encrypted .env.enc
# exists, transparently decrypt it for compose and remove it again afterwards
# (an EXIT trap covers the failure path under set -e).
# Arguments: extra args are forwarded to `docker compose up -d`.
disinto_up() {
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$compose_file" ]; then
    echo "Error: docker-compose.yml not found" >&2
    echo " Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  fi

  # Decrypt secrets to temp .env if SOPS available and .env.enc exists
  local decrypted=""
  local encrypted="${FACTORY_ROOT}/.env.enc"
  local plaintext="${FACTORY_ROOT}/.env"
  if [ -f "$encrypted" ] && command -v sops &>/dev/null && [ ! -f "$plaintext" ]; then
    decrypted="$plaintext"
    sops -d --output-type dotenv "$encrypted" > "$decrypted"
    trap '[ -n "${decrypted:-}" ] && rm -f "$decrypted"' EXIT
    echo "Decrypted secrets for compose"
  fi

  docker compose -f "$compose_file" up -d "$@"
  echo "Stack is up"

  # Clean up temp .env (also handled by EXIT trap if compose fails)
  if [ -n "$decrypted" ] && [ -f "$decrypted" ]; then
    rm -f "$decrypted"
    echo "Removed temporary .env"
  fi
}
|
|
|
|
# ── down command ──────────────────────────────────────────────────────────────
|
|
|
|
# Stop the full stack. Extra arguments are forwarded to `docker compose down`.
disinto_down() {
  local cfg="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$cfg" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$cfg" down "$@"
  echo "Stack is down"
}
|
|
|
|
# ── logs command ──────────────────────────────────────────────────────────────
|
|
|
|
# Tail service logs. Extra arguments (e.g. a service name) are forwarded
# to `docker compose logs -f`.
disinto_logs() {
  local cfg="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$cfg" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$cfg" logs -f "$@"
}
|
|
|
|
# ── shell command ─────────────────────────────────────────────────────────────
|
|
|
|
# Open an interactive bash shell inside the running agents container.
disinto_shell() {
  local cfg="${FACTORY_ROOT}/docker-compose.yml"
  if [ ! -f "$cfg" ]; then
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  fi
  docker compose -f "$cfg" exec agents bash
}
|
|
|
|
# ── Main dispatch ────────────────────────────────────────────────────────────
|
|
|
|
# Dispatch the first CLI argument to its command handler; anything
# unrecognized (including no argument) prints usage.
cmd="${1:-}"
case "$cmd" in
  init)      shift; disinto_init "$@" ;;
  up)        shift; disinto_up "$@" ;;
  down)      shift; disinto_down "$@" ;;
  logs)      shift; disinto_logs "$@" ;;
  shell)     shift; disinto_shell ;;
  status)    shift; disinto_status "$@" ;;
  secrets)   shift; disinto_secrets "$@" ;;
  vault-run) shift; disinto_vault_run "$@" ;;
  -h|--help) usage ;;
  *)         usage ;;
esac
|