#!/usr/bin/env bash
|
|
# =============================================================================
|
|
# disinto — CLI entry point for the disinto code factory
|
|
#
|
|
# Commands:
|
|
# disinto init <repo-url> [options] Bootstrap a new project
|
|
# disinto up Start the full stack (docker compose)
|
|
# disinto down Stop the full stack
|
|
# disinto logs [service] Tail service logs
|
|
# disinto shell Shell into the agent container
|
|
# disinto status Show factory status
|
|
# disinto secrets <subcommand> Manage encrypted secrets
|
|
# disinto run <action-id> Run action in ephemeral runner container
|
|
#
|
|
# Usage:
|
|
# disinto init https://github.com/user/repo
|
|
# disinto init user/repo --branch main --ci-id 3
|
|
# disinto init user/repo --bare (bare-metal, no compose)
|
|
# disinto up
|
|
# disinto down
|
|
# disinto status
|
|
# =============================================================================
|
|
# Abort on errors (-e), unset variables (-u), and failed pipeline stages (pipefail).
set -euo pipefail

# Absolute path to the factory checkout: parent of the directory holding this script.
FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
# Shared helpers sourced from lib/env.sh (contents not shown in this file).
source "${FACTORY_ROOT}/lib/env.sh"
|
|
|
|
# ── Helpers ──────────────────────────────────────────────────────────────────
|
|
|
|
# Print the CLI help text to stdout and exit with status 1.
# The heredoc delimiter is quoted so the help text is emitted verbatim —
# no accidental $var or `cmd` expansion inside the help body.
usage() {
  cat <<'EOF'
disinto — autonomous code factory CLI

Usage:
  disinto init <repo-url> [options]   Bootstrap a new project
  disinto up                          Start the full stack (docker compose)
  disinto down                        Stop the full stack
  disinto logs [service]              Tail service logs
  disinto shell                       Shell into the agent container
  disinto status                      Show factory status
  disinto secrets <subcommand>        Manage encrypted secrets
  disinto run <action-id>             Run action in ephemeral runner container
  disinto release <version>           Create vault PR for release (e.g., v1.2.0)
  disinto hire-an-agent <agent-name> <role> [--formula <path>]
                                      Hire a new agent (create user + .profile repo)

Init options:
  --branch <name>       Primary branch (default: auto-detect)
  --repo-root <path>    Local clone path (default: ~/name)
  --ci-id <n>           Woodpecker CI repo ID (default: 0 = no CI)
  --forge-url <url>     Forge base URL (default: http://localhost:3000)
  --bare                Skip compose generation (bare-metal setup)
  --yes                 Skip confirmation prompts

Hire an agent options:
  --formula <path>      Path to role formula TOML (default: formulas/<role>.toml)
EOF
  exit 1
}
|
|
|
|
# Extract org/repo slug from various URL formats.
# Accepts: https://github.com/user/repo, https://codeberg.org/user/repo,
# http://localhost:3000/user/repo, user/repo, and .git / trailing-slash
# variants of all of the above.
# Prints the slug to stdout; exits 1 on anything it cannot parse.
parse_repo_slug() {
  local url="$1"

  # Drop the scheme, if any.
  url="${url#https://}"
  url="${url#http://}"

  # Strip the hostname: a three-segment path is taken as host/org/repo.
  # (Hosts may contain dots or a :port, hence the wider character class —
  # note any first segment of a 3-segment path is stripped, not only dotted ones.)
  if [[ "$url" =~ ^[a-zA-Z0-9._:-]+/[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+ ]]; then
    url="${url#*/}" # strip host part
  fi

  # Normalize suffixes: remove a trailing slash BEFORE the .git suffix so
  # "org/repo.git/" resolves to "org/repo" (the old order left ".git" behind).
  url="${url%/}"
  url="${url%.git}"
  url="${url%/}"

  if [[ ! "$url" =~ ^[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+$ ]]; then
    echo "Error: invalid repo URL — expected https://host/org/repo or org/repo" >&2
    exit 1
  fi
  printf '%s' "$url"
}
|
|
|
|
# Build a clone-able URL from a slug and forge URL.
# $1 - org/repo slug
# $2 - forge base URL (optional; falls back to $FORGE_URL, then
#      http://localhost:3000)
clone_url_from_slug() {
  local slug="$1" forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  # Trim a trailing slash so we never emit "host//org/repo.git".
  forge_url="${forge_url%/}"
  printf '%s/%s.git' "$forge_url" "$slug"
}
|
|
|
|
# Ensure an age key exists at ~/.config/sops/age/keys.txt; generate one if
# missing and age-keygen is available.
# Exports AGE_PUBLIC_KEY on success; returns non-zero when no key exists
# and one cannot be generated.
ensure_age_key() {
  local key_dir="${HOME}/.config/sops/age"
  local key_file="${key_dir}/keys.txt"

  # Reuse an existing key: derive its public half.
  if [ -f "$key_file" ]; then
    AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
    [ -n "$AGE_PUBLIC_KEY" ] || return 1
    export AGE_PUBLIC_KEY
    return 0
  fi

  if ! command -v age-keygen &>/dev/null; then
    return 1
  fi

  mkdir -p "$key_dir"
  # Check keygen explicitly — stderr is suppressed, so a silent failure
  # would otherwise leave us chmod-ing and reading a nonexistent file.
  age-keygen -o "$key_file" 2>/dev/null || return 1
  chmod 600 "$key_file"
  AGE_PUBLIC_KEY="$(age-keygen -y "$key_file" 2>/dev/null)"
  [ -n "$AGE_PUBLIC_KEY" ] || return 1
  export AGE_PUBLIC_KEY
  echo "Generated age key: ${key_file}"
}
|
|
|
|
# Write .sops.yaml pinning the age recipient for .env.enc files.
# $1 - age public key (recipient).
# Overwrites any existing ${FACTORY_ROOT}/.sops.yaml so that files matching
# .env.enc or .env.vault.enc are encrypted to this key. The heredoc delimiter
# is intentionally unquoted so ${pub_key} expands; the regex backslashes are
# literal inside the heredoc (backslash is only special before $, `, \, newline).
write_sops_yaml() {
  local pub_key="$1"
  cat > "${FACTORY_ROOT}/.sops.yaml" <<EOF
creation_rules:
  - path_regex: \.env(\.vault)?\.enc$
    age: "${pub_key}"
EOF
}
|
|
|
|
# Encrypt a dotenv file to .env.enc using SOPS + age.
# Usage: encrypt_env_file <input> <output>
# Writes to a temp file first so a failing sops run cannot leave a
# truncated/empty <output> behind (the old `> "$output"` truncated eagerly,
# before sops had produced any ciphertext).
encrypt_env_file() {
  local input="$1" output="$2"
  local tmp
  tmp="$(mktemp)" || return 1
  if sops -e --input-type dotenv --output-type dotenv "$input" > "$tmp"; then
    mv "$tmp" "$output"
  else
    rm -f -- "$tmp"
    return 1
  fi
}
|
|
|
|
# Store secrets into .env.enc (encrypted) if SOPS + age available, else .env (plaintext).
# Encrypts ${FACTORY_ROOT}/.env to .env.enc and deletes the plaintext when
# sops and age-keygen are both installed and ensure_age_key succeeds;
# otherwise leaves the plaintext .env in place and warns. Always returns 0.
write_secrets_encrypted() {
  local env_file="${FACTORY_ROOT}/.env"
  local enc_file="${FACTORY_ROOT}/.env.enc"

  if command -v sops &>/dev/null && command -v age-keygen &>/dev/null; then
    # ensure_age_key also exports AGE_PUBLIC_KEY used below.
    if ensure_age_key; then
      # Write .sops.yaml if missing
      if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
        write_sops_yaml "$AGE_PUBLIC_KEY"
      fi

      # Encrypt the plaintext .env to .env.enc
      if [ -f "$env_file" ]; then
        encrypt_env_file "$env_file" "$enc_file"
        rm -f "$env_file"
        echo "Secrets encrypted to .env.enc (plaintext .env removed)"
        return 0
      fi
    fi
  fi

  # Fallback: keep plaintext .env
  # NOTE(review): this message also fires when tooling exists but key setup
  # failed, or when .env simply doesn't exist — the wording is approximate
  # in those cases.
  echo "Warning: sops/age not available — secrets stored in plaintext .env" >&2
  return 0
}
|
|
|
|
# Host directory mounted as /data for the standalone (bare-metal) Forgejo container.
FORGEJO_DATA_DIR="${HOME}/.disinto/forgejo"
|
|
|
|
# Generate docker-compose.yml in the factory root.
# $1 - host port to publish Forgejo on (default 3000; container port is
#      always 3000).
# The template is written verbatim (quoted heredoc — ${...} placeholders are
# left for docker compose to interpolate), then patched: the host's `claude`
# binary path replaces CLAUDE_BIN_PLACEHOLDER, and the Forgejo port mapping
# is inserted so the forge is reachable from the host during init.
generate_compose() {
  local forge_port="${1:-3000}"
  local compose_file="${FACTORY_ROOT}/docker-compose.yml"

  cat > "$compose_file" <<'COMPOSEEOF'
# docker-compose.yml — generated by disinto init
# Brings up Forgejo, Woodpecker, and the agent runtime.

services:
  forgejo:
    image: codeberg.org/forgejo/forgejo:11.0
    restart: unless-stopped
    security_opt:
      - apparmor=unconfined
    volumes:
      - forgejo-data:/data
    environment:
      FORGEJO__database__DB_TYPE: sqlite3
      FORGEJO__server__ROOT_URL: http://forgejo:3000/
      FORGEJO__server__HTTP_PORT: "3000"
      FORGEJO__security__INSTALL_LOCK: "true"
      FORGEJO__service__DISABLE_REGISTRATION: "true"
      FORGEJO__webhook__ALLOWED_HOST_LIST: "private"
    networks:
      - disinto-net

  woodpecker:
    image: woodpeckerci/woodpecker-server:v3
    restart: unless-stopped
    security_opt:
      - apparmor=unconfined
    ports:
      - "8000:8000"
      - "9000:9000"
    volumes:
      - woodpecker-data:/var/lib/woodpecker
    environment:
      WOODPECKER_FORGEJO: "true"
      WOODPECKER_FORGEJO_URL: http://forgejo:3000
      WOODPECKER_FORGEJO_CLIENT: ${WP_FORGEJO_CLIENT:-}
      WOODPECKER_FORGEJO_SECRET: ${WP_FORGEJO_SECRET:-}
      WOODPECKER_HOST: http://woodpecker:8000
      WOODPECKER_OPEN: "true"
      WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
      WOODPECKER_DATABASE_DRIVER: sqlite3
      WOODPECKER_DATABASE_DATASOURCE: /var/lib/woodpecker/woodpecker.sqlite
    depends_on:
      - forgejo
    networks:
      - disinto-net

  woodpecker-agent:
    image: woodpeckerci/woodpecker-agent:v3
    restart: unless-stopped
    network_mode: host
    privileged: true
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      WOODPECKER_SERVER: localhost:9000
      WOODPECKER_AGENT_SECRET: ${WOODPECKER_AGENT_SECRET:-}
      WOODPECKER_GRPC_SECURE: "false"
      WOODPECKER_HEALTHCHECK_ADDR: ":3333"
      WOODPECKER_BACKEND_DOCKER_NETWORK: disinto_disinto-net
      WOODPECKER_MAX_WORKFLOWS: 1
    depends_on:
      - woodpecker

  agents:
    build: ./docker/agents
    restart: unless-stopped
    security_opt:
      - apparmor=unconfined
    volumes:
      - agent-data:/home/agent/data
      - project-repos:/home/agent/repos
      - ${HOME}/.claude:/home/agent/.claude
      - ${HOME}/.claude.json:/home/agent/.claude.json:ro
      - CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
      - ${HOME}/.ssh:/home/agent/.ssh:ro
      - ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
    environment:
      FORGE_URL: http://forgejo:3000
      WOODPECKER_SERVER: http://woodpecker:8000
      DISINTO_CONTAINER: "1"
      PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
    env_file:
      - .env
    # IMPORTANT: agents get .env only (forge tokens, CI tokens, config).
    # Vault-only secrets (GITHUB_TOKEN, CLAWHUB_TOKEN, deploy keys) live in
    # .env.vault.enc and are NEVER injected here — only the runner
    # container receives them at fire time (AD-006, #745).
    depends_on:
      - forgejo
      - woodpecker
    networks:
      - disinto-net

  runner:
    build: ./docker/agents
    profiles: ["vault"]
    security_opt:
      - apparmor=unconfined
    volumes:
      - agent-data:/home/agent/data
    environment:
      FORGE_URL: http://forgejo:3000
      DISINTO_CONTAINER: "1"
      PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
    # Vault redesign in progress (PR-based approval, see #73-#77)
    # This container is being replaced — entrypoint will be updated in follow-up
    networks:
      - disinto-net

  # Edge proxy — reverse proxy to Forgejo, Woodpecker, and staging
  # Serves on ports 80/443, routes based on path
  edge:
    build: ./docker/edge
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./docker/Caddyfile:/etc/caddy/Caddyfile
      - ./docker/edge/dispatcher.sh:/usr/local/bin/dispatcher.sh:ro
      - caddy_data:/data
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - forgejo
      - woodpecker
      - staging
    networks:
      - disinto-net

  # Staging container — static file server for staging artifacts
  # Edge proxy routes to this container for default requests
  staging:
    image: caddy:alpine
    command: ["caddy", "file-server", "--root", "/srv/site"]
    volumes:
      - ./docker:/srv/site:ro
    networks:
      - disinto-net

  # Staging deployment slot — activated by Woodpecker staging pipeline (#755).
  # Profile-gated: only starts when explicitly targeted by deploy commands.
  # Customize image/ports/volumes for your project after init.
  staging-deploy:
    image: alpine:3
    profiles: ["staging"]
    security_opt:
      - apparmor=unconfined
    environment:
      DEPLOY_ENV: staging
    networks:
      - disinto-net
    command: ["echo", "staging slot — replace with project image"]

volumes:
  forgejo-data:
  woodpecker-data:
  agent-data:
  project-repos:
  caddy_data:

networks:
  disinto-net:
    driver: bridge
COMPOSEEOF

  # Patch the Claude CLI binary path — resolve from host PATH at init time.
  local claude_bin
  claude_bin="$(command -v claude 2>/dev/null || true)"
  if [ -n "$claude_bin" ]; then
    # Resolve symlinks to get the real binary path.
    # (A path containing '|' would break this sed expression — unlikely for a binary path.)
    claude_bin="$(readlink -f "$claude_bin")"
    sed -i "s|CLAUDE_BIN_PLACEHOLDER|${claude_bin}|" "$compose_file"
  else
    echo "Warning: claude CLI not found in PATH — update docker-compose.yml volumes manually" >&2
    sed -i "s|CLAUDE_BIN_PLACEHOLDER|/usr/local/bin/claude|" "$compose_file"
  fi

  # Publish Forgejo on the requested host port so it's reachable from the
  # host during init. (The old code had an if/else whose branches were
  # identical modulo the port value — one sed with ${forge_port} covers both.)
  sed -i "/image: codeberg\.org\/forgejo\/forgejo:11\.0/a\\    ports:\\n      - \"${forge_port}:3000\"" "$compose_file"

  echo "Created: ${compose_file}"
}
|
|
|
|
# Ensure docker/agents/ exists and warn about any missing build files.
# The Dockerfile and entrypoint.sh are expected to ship with the repo;
# this only checks for their presence so a broken checkout is noticed
# at init time rather than at `docker compose build` time.
generate_agent_docker() {
  local agents_dir="${FACTORY_ROOT}/docker/agents"
  local required

  mkdir -p "$agents_dir"

  for required in Dockerfile entrypoint.sh; do
    if [ ! -f "${agents_dir}/${required}" ]; then
      echo "Warning: docker/agents/${required} not found — expected in repo" >&2
    fi
  done
}
|
|
|
|
# Generate docker/Caddyfile template for edge proxy.
# Skips (and says so) when a Caddyfile already exists, so local edits
# survive re-runs of init. Creates the docker/ directory if needed —
# previously `cat >` failed when this ran before the other generators
# had created docker/.
generate_caddyfile() {
  local docker_dir="${FACTORY_ROOT}/docker"
  local caddyfile="${docker_dir}/Caddyfile"

  if [ -f "$caddyfile" ]; then
    echo "Caddyfile: ${caddyfile} (already exists, skipping)"
    return
  fi

  mkdir -p "$docker_dir"

  cat > "$caddyfile" <<'CADDYFILEEOF'
# Caddyfile — edge proxy configuration
# IP-only binding at bootstrap; domain + TLS added later via vault resource request

:80 {
	# Reverse proxy to Forgejo
	handle /forgejo/* {
		reverse_proxy forgejo:3000
	}

	# Reverse proxy to Woodpecker CI
	handle /ci/* {
		reverse_proxy woodpecker:8000
	}

	# Default: proxy to staging container
	handle {
		reverse_proxy staging:80
	}
}
CADDYFILEEOF

  echo "Created: ${caddyfile}"
}
|
|
|
|
# Generate docker/index.html default page.
# The staging container serves docker/ as a static site; this seeds it with
# a placeholder page. Skips when the file already exists so CI-deployed
# artifacts are never clobbered. Creates the docker/ directory if needed —
# previously `cat >` failed when this ran before the other generators.
generate_staging_index() {
  local docker_dir="${FACTORY_ROOT}/docker"
  local index_file="${docker_dir}/index.html"

  if [ -f "$index_file" ]; then
    echo "Staging: ${index_file} (already exists, skipping)"
    return
  fi

  mkdir -p "$docker_dir"

  cat > "$index_file" <<'INDEXEOF'
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Nothing shipped yet</title>
  <style>
    body {
      font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, sans-serif;
      display: flex;
      align-items: center;
      justify-content: center;
      min-height: 100vh;
      margin: 0;
      background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
      color: white;
    }
    .container {
      text-align: center;
      padding: 2rem;
    }
    h1 {
      font-size: 3rem;
      margin: 0 0 1rem 0;
    }
    p {
      font-size: 1.25rem;
      opacity: 0.9;
    }
  </style>
</head>
<body>
  <div class="container">
    <h1>Nothing shipped yet</h1>
    <p>CI pipelines will update this page with your staging artifacts.</p>
  </div>
</body>
</html>
INDEXEOF

  echo "Created: ${index_file}"
}
|
|
|
|
# Generate template .woodpecker/ deployment pipeline configs in a project repo.
# Creates staging.yml and production.yml alongside the project's existing CI
# config; existing files are left untouched. The pipelines trigger on
# Woodpecker's deployment event with environment filters.
# $1 - project repo root
# $2 - project name (currently unused; kept for interface compatibility and
#      defaulted so callers under `set -u` may omit it)
generate_deploy_pipelines() {
  # shellcheck disable=SC2034 — project_name reserved for future templating
  local repo_root="$1" project_name="${2:-}"
  local wp_dir="${repo_root}/.woodpecker"

  mkdir -p "$wp_dir"

  # Skip if deploy pipelines already exist
  if [ -f "${wp_dir}/staging.yml" ] && [ -f "${wp_dir}/production.yml" ]; then
    echo "Deploy: .woodpecker/{staging,production}.yml (already exist)"
    return
  fi

  if [ ! -f "${wp_dir}/staging.yml" ]; then
    # Quoted delimiter: ${CI_*} placeholders must reach Woodpecker verbatim.
    cat > "${wp_dir}/staging.yml" <<'STAGINGEOF'
# .woodpecker/staging.yml — Staging deployment pipeline
# Triggered by runner via Woodpecker promote API.
# Human approves promotion in vault → runner calls promote → this runs.

when:
  event: deployment
  environment: staging

steps:
  - name: deploy-staging
    image: docker:27
    commands:
      - echo "Deploying to staging environment..."
      - echo "Pipeline ${CI_PIPELINE_NUMBER} promoted from CI #${CI_PIPELINE_PARENT}"
      # Pull the image built by CI and deploy to staging
      # Customize these commands for your project:
      # - docker compose -f docker-compose.yml --profile staging up -d
      - echo "Staging deployment complete"

  - name: verify-staging
    image: alpine:3
    commands:
      - echo "Verifying staging deployment..."
      # Add health checks, smoke tests, or integration tests here:
      # - curl -sf http://staging:8080/health || exit 1
      - echo "Staging verification complete"
STAGINGEOF
    echo "Created: ${wp_dir}/staging.yml"
  fi

  if [ ! -f "${wp_dir}/production.yml" ]; then
    cat > "${wp_dir}/production.yml" <<'PRODUCTIONEOF'
# .woodpecker/production.yml — Production deployment pipeline
# Triggered by runner via Woodpecker promote API.
# Human approves promotion in vault → runner calls promote → this runs.

when:
  event: deployment
  environment: production

steps:
  - name: deploy-production
    image: docker:27
    commands:
      - echo "Deploying to production environment..."
      - echo "Pipeline ${CI_PIPELINE_NUMBER} promoted from staging"
      # Pull the verified image and deploy to production
      # Customize these commands for your project:
      # - docker compose -f docker-compose.yml up -d
      - echo "Production deployment complete"

  - name: verify-production
    image: alpine:3
    commands:
      - echo "Verifying production deployment..."
      # Add production health checks here:
      # - curl -sf http://production:8080/health || exit 1
      - echo "Production verification complete"
PRODUCTIONEOF
    echo "Created: ${wp_dir}/production.yml"
  fi
}
|
|
|
|
# Check whether compose mode is active, i.e. a generated docker-compose.yml
# is present in the factory root (as opposed to a --bare install).
# Returns 0 when the file exists, non-zero otherwise.
is_compose_mode() {
  test -f "${FACTORY_ROOT}/docker-compose.yml"
}
|
|
|
|
# Provision or connect to a local Forgejo instance.
|
|
# Creates admin + bot users, generates API tokens, stores in .env.
|
|
# When $DISINTO_BARE is set, uses standalone docker run; otherwise uses compose.
|
|
setup_forge() {
|
|
local forge_url="$1"
|
|
local repo_slug="$2"
|
|
local use_bare="${DISINTO_BARE:-false}"
|
|
|
|
echo ""
|
|
echo "── Forge setup ────────────────────────────────────────"
|
|
|
|
# Helper: run a command inside the Forgejo container
|
|
_forgejo_exec() {
|
|
if [ "$use_bare" = true ]; then
|
|
docker exec -u git disinto-forgejo "$@"
|
|
else
|
|
docker compose -f "${FACTORY_ROOT}/docker-compose.yml" exec -T -u git forgejo "$@"
|
|
fi
|
|
}
|
|
|
|
# Check if Forgejo is already running
|
|
if curl -sf --max-time 5 "${forge_url}/api/v1/version" >/dev/null 2>&1; then
|
|
echo "Forgejo: ${forge_url} (already running)"
|
|
else
|
|
echo "Forgejo not reachable at ${forge_url}"
|
|
echo "Starting Forgejo via Docker..."
|
|
|
|
if ! command -v docker &>/dev/null; then
|
|
echo "Error: docker not found — needed to provision Forgejo" >&2
|
|
echo " Install Docker or start Forgejo manually at ${forge_url}" >&2
|
|
exit 1
|
|
fi
|
|
|
|
# Extract port from forge_url
|
|
local forge_port
|
|
forge_port=$(printf '%s' "$forge_url" | sed -E 's|.*:([0-9]+)/?$|\1|')
|
|
forge_port="${forge_port:-3000}"
|
|
|
|
if [ "$use_bare" = true ]; then
|
|
# Bare-metal mode: standalone docker run
|
|
mkdir -p "${FORGEJO_DATA_DIR}"
|
|
|
|
if docker ps -a --format '{{.Names}}' | grep -q '^disinto-forgejo$'; then
|
|
docker start disinto-forgejo >/dev/null 2>&1 || true
|
|
else
|
|
docker run -d \
|
|
--name disinto-forgejo \
|
|
--restart unless-stopped \
|
|
-p "${forge_port}:3000" \
|
|
-p 2222:22 \
|
|
-v "${FORGEJO_DATA_DIR}:/data" \
|
|
-e "FORGEJO__database__DB_TYPE=sqlite3" \
|
|
-e "FORGEJO__server__ROOT_URL=${forge_url}/" \
|
|
-e "FORGEJO__server__HTTP_PORT=3000" \
|
|
-e "FORGEJO__service__DISABLE_REGISTRATION=true" \
|
|
codeberg.org/forgejo/forgejo:11.0
|
|
fi
|
|
else
|
|
# Compose mode: start Forgejo via docker compose
|
|
docker compose -f "${FACTORY_ROOT}/docker-compose.yml" up -d forgejo
|
|
fi
|
|
|
|
# Wait for Forgejo to become healthy
|
|
echo -n "Waiting for Forgejo to start"
|
|
local retries=0
|
|
while ! curl -sf --max-time 3 "${forge_url}/api/v1/version" >/dev/null 2>&1; do
|
|
retries=$((retries + 1))
|
|
if [ "$retries" -gt 60 ]; then
|
|
echo ""
|
|
echo "Error: Forgejo did not become ready within 60s" >&2
|
|
exit 1
|
|
fi
|
|
echo -n "."
|
|
sleep 1
|
|
done
|
|
echo " ready"
|
|
fi
|
|
|
|
# Wait for Forgejo database to accept writes (API may be ready before DB is)
|
|
echo -n "Waiting for Forgejo database"
|
|
local db_ready=false
|
|
for _i in $(seq 1 30); do
|
|
if _forgejo_exec forgejo admin user list >/dev/null 2>&1; then
|
|
db_ready=true
|
|
break
|
|
fi
|
|
echo -n "."
|
|
sleep 1
|
|
done
|
|
echo ""
|
|
if [ "$db_ready" != true ]; then
|
|
echo "Error: Forgejo database not ready after 30s" >&2
|
|
exit 1
|
|
fi
|
|
|
|
# Create admin user if it doesn't exist
|
|
local admin_user="disinto-admin"
|
|
local admin_pass
|
|
admin_pass="admin-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
|
|
|
if ! curl -sf --max-time 5 "${forge_url}/api/v1/users/${admin_user}" >/dev/null 2>&1; then
|
|
echo "Creating admin user: ${admin_user}"
|
|
local create_output
|
|
if ! create_output=$(_forgejo_exec forgejo admin user create \
|
|
--admin \
|
|
--username "${admin_user}" \
|
|
--password "${admin_pass}" \
|
|
--email "admin@disinto.local" \
|
|
--must-change-password=false 2>&1); then
|
|
echo "Error: failed to create admin user '${admin_user}':" >&2
|
|
echo " ${create_output}" >&2
|
|
exit 1
|
|
fi
|
|
# Forgejo 11.x ignores --must-change-password=false on create;
|
|
# explicitly clear the flag so basic-auth token creation works.
|
|
_forgejo_exec forgejo admin user change-password \
|
|
--username "${admin_user}" \
|
|
--password "${admin_pass}" \
|
|
--must-change-password=false
|
|
|
|
# Verify admin user was actually created
|
|
if ! curl -sf --max-time 5 "${forge_url}/api/v1/users/${admin_user}" >/dev/null 2>&1; then
|
|
echo "Error: admin user '${admin_user}' not found after creation" >&2
|
|
exit 1
|
|
fi
|
|
# Preserve password for Woodpecker OAuth2 token generation (#779)
|
|
_FORGE_ADMIN_PASS="$admin_pass"
|
|
fi
|
|
|
|
# Create human user (johba) as site admin if it doesn't exist
|
|
local human_user="johba"
|
|
local human_pass
|
|
human_pass="human-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
|
|
|
if ! curl -sf --max-time 5 "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
|
|
echo "Creating human user: ${human_user}"
|
|
local create_output
|
|
if ! create_output=$(_forgejo_exec forgejo admin user create \
|
|
--admin \
|
|
--username "${human_user}" \
|
|
--password "${human_pass}" \
|
|
--email "johba@disinto.local" \
|
|
--must-change-password=false 2>&1); then
|
|
echo "Error: failed to create human user '${human_user}':" >&2
|
|
echo " ${create_output}" >&2
|
|
exit 1
|
|
fi
|
|
# Forgejo 11.x ignores --must-change-password=false on create;
|
|
# explicitly clear the flag so basic-auth token creation works.
|
|
_forgejo_exec forgejo admin user change-password \
|
|
--username "${human_user}" \
|
|
--password "${human_pass}" \
|
|
--must-change-password=false
|
|
|
|
# Verify human user was actually created
|
|
if ! curl -sf --max-time 5 "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
|
|
echo "Error: human user '${human_user}' not found after creation" >&2
|
|
exit 1
|
|
fi
|
|
echo " Human user '${human_user}' created as site admin"
|
|
else
|
|
echo "Human user: ${human_user} (already exists)"
|
|
fi
|
|
|
|
# Get or create admin token
|
|
local admin_token
|
|
admin_token=$(curl -sf -X POST \
|
|
-u "${admin_user}:${admin_pass}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/users/${admin_user}/tokens" \
|
|
-d '{"name":"disinto-admin-token","scopes":["all"]}' 2>/dev/null \
|
|
| jq -r '.sha1 // empty') || admin_token=""
|
|
|
|
if [ -z "$admin_token" ]; then
|
|
# Token might already exist — try listing
|
|
admin_token=$(curl -sf \
|
|
-u "${admin_user}:${admin_pass}" \
|
|
"${forge_url}/api/v1/users/${admin_user}/tokens" 2>/dev/null \
|
|
| jq -r '.[0].sha1 // empty') || admin_token=""
|
|
fi
|
|
|
|
if [ -z "$admin_token" ]; then
|
|
echo "Error: failed to obtain admin API token" >&2
|
|
exit 1
|
|
fi
|
|
|
|
# Get or create human user token
|
|
local human_token
|
|
if curl -sf --max-time 5 "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
|
|
human_token=$(curl -sf -X POST \
|
|
-u "${human_user}:${human_pass}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/users/${human_user}/tokens" \
|
|
-d '{"name":"disinto-human-token","scopes":["all"]}' 2>/dev/null \
|
|
| jq -r '.sha1 // empty') || human_token=""
|
|
|
|
if [ -z "$human_token" ]; then
|
|
# Token might already exist — try listing
|
|
human_token=$(curl -sf \
|
|
-u "${human_user}:${human_pass}" \
|
|
"${forge_url}/api/v1/users/${human_user}/tokens" 2>/dev/null \
|
|
| jq -r '.[0].sha1 // empty') || human_token=""
|
|
fi
|
|
|
|
if [ -n "$human_token" ]; then
|
|
# Store human token in .env
|
|
if grep -q '^HUMAN_TOKEN=' "$env_file" 2>/dev/null; then
|
|
sed -i "s|^HUMAN_TOKEN=.*|HUMAN_TOKEN=${human_token}|" "$env_file"
|
|
else
|
|
printf 'HUMAN_TOKEN=%s\n' "$human_token" >> "$env_file"
|
|
fi
|
|
export HUMAN_TOKEN="$human_token"
|
|
echo " Human token saved (HUMAN_TOKEN)"
|
|
fi
|
|
fi
|
|
|
|
# Create bot users and tokens
|
|
# Each agent gets its own Forgejo account for identity and audit trail (#747).
|
|
# Map: bot-username -> env-var-name for the token
|
|
local -A bot_token_vars=(
|
|
[dev-bot]="FORGE_TOKEN"
|
|
[review-bot]="FORGE_REVIEW_TOKEN"
|
|
[planner-bot]="FORGE_PLANNER_TOKEN"
|
|
[gardener-bot]="FORGE_GARDENER_TOKEN"
|
|
[vault-bot]="FORGE_VAULT_TOKEN"
|
|
[supervisor-bot]="FORGE_SUPERVISOR_TOKEN"
|
|
[predictor-bot]="FORGE_PREDICTOR_TOKEN"
|
|
)
|
|
|
|
local env_file="${FACTORY_ROOT}/.env"
|
|
local bot_user bot_pass token token_var
|
|
|
|
for bot_user in dev-bot review-bot planner-bot gardener-bot vault-bot supervisor-bot predictor-bot architect-bot; do
|
|
bot_pass="bot-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
|
token_var="${bot_token_vars[$bot_user]}"
|
|
|
|
if ! curl -sf --max-time 5 \
|
|
-H "Authorization: token ${admin_token}" \
|
|
"${forge_url}/api/v1/users/${bot_user}" >/dev/null 2>&1; then
|
|
echo "Creating bot user: ${bot_user}"
|
|
local create_output
|
|
if ! create_output=$(_forgejo_exec forgejo admin user create \
|
|
--username "${bot_user}" \
|
|
--password "${bot_pass}" \
|
|
--email "${bot_user}@disinto.local" \
|
|
--must-change-password=false 2>&1); then
|
|
echo "Error: failed to create bot user '${bot_user}':" >&2
|
|
echo " ${create_output}" >&2
|
|
exit 1
|
|
fi
|
|
# Forgejo 11.x ignores --must-change-password=false on create;
|
|
# explicitly clear the flag so basic-auth token creation works.
|
|
_forgejo_exec forgejo admin user change-password \
|
|
--username "${bot_user}" \
|
|
--password "${bot_pass}" \
|
|
--must-change-password=false
|
|
|
|
# Verify bot user was actually created
|
|
if ! curl -sf --max-time 5 \
|
|
-H "Authorization: token ${admin_token}" \
|
|
"${forge_url}/api/v1/users/${bot_user}" >/dev/null 2>&1; then
|
|
echo "Error: bot user '${bot_user}' not found after creation" >&2
|
|
exit 1
|
|
fi
|
|
fi
|
|
|
|
# Generate token via API (basic auth as the bot user — Forgejo requires
|
|
# basic auth on POST /users/{username}/tokens, token auth is rejected)
|
|
token=$(curl -sf -X POST \
|
|
-u "${bot_user}:${bot_pass}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/users/${bot_user}/tokens" \
|
|
-d "{\"name\":\"disinto-${bot_user}-token\",\"scopes\":[\"all\"]}" 2>/dev/null \
|
|
| jq -r '.sha1 // empty') || token=""
|
|
|
|
if [ -z "$token" ]; then
|
|
# Token name collision — create with timestamp suffix
|
|
token=$(curl -sf -X POST \
|
|
-u "${bot_user}:${bot_pass}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/users/${bot_user}/tokens" \
|
|
-d "{\"name\":\"disinto-${bot_user}-$(date +%s)\",\"scopes\":[\"all\"]}" 2>/dev/null \
|
|
| jq -r '.sha1 // empty') || token=""
|
|
fi
|
|
|
|
if [ -z "$token" ]; then
|
|
echo "Error: failed to create API token for '${bot_user}'" >&2
|
|
exit 1
|
|
fi
|
|
|
|
# Store token in .env under the per-agent variable name
|
|
if grep -q "^${token_var}=" "$env_file" 2>/dev/null; then
|
|
sed -i "s|^${token_var}=.*|${token_var}=${token}|" "$env_file"
|
|
else
|
|
printf '%s=%s\n' "$token_var" "$token" >> "$env_file"
|
|
fi
|
|
export "${token_var}=${token}"
|
|
echo " ${bot_user} token saved (${token_var})"
|
|
|
|
# Backwards-compat aliases for dev-bot and review-bot
|
|
if [ "$bot_user" = "dev-bot" ]; then
|
|
export CODEBERG_TOKEN="$token"
|
|
elif [ "$bot_user" = "review-bot" ]; then
|
|
export REVIEW_BOT_TOKEN="$token"
|
|
fi
|
|
done
|
|
|
|
# Store FORGE_URL in .env if not already present
|
|
if ! grep -q '^FORGE_URL=' "$env_file" 2>/dev/null; then
|
|
printf 'FORGE_URL=%s\n' "$forge_url" >> "$env_file"
|
|
fi
|
|
|
|
# Create the repo on Forgejo if it doesn't exist
|
|
local org_name="${repo_slug%%/*}"
|
|
local repo_name="${repo_slug##*/}"
|
|
|
|
# Check if repo already exists
|
|
if ! curl -sf --max-time 5 \
|
|
-H "Authorization: token ${FORGE_TOKEN}" \
|
|
"${forge_url}/api/v1/repos/${repo_slug}" >/dev/null 2>&1; then
|
|
|
|
# Try creating org first (ignore if exists)
|
|
curl -sf -X POST \
|
|
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/orgs" \
|
|
-d "{\"username\":\"${org_name}\",\"visibility\":\"public\"}" >/dev/null 2>&1 || true
|
|
|
|
# Create repo under org
|
|
if ! curl -sf -X POST \
|
|
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/orgs/${org_name}/repos" \
|
|
-d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" >/dev/null 2>&1; then
|
|
# Fallback: create under the human user namespace (johba)
|
|
curl -sf -X POST \
|
|
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/users/${human_user}/repos" \
|
|
-d "{\"name\":\"${repo_name}\",\"auto_init\":false,\"default_branch\":\"main\"}" >/dev/null 2>&1 || true
|
|
fi
|
|
|
|
# Add all bot users as collaborators with appropriate permissions
|
|
# dev-bot: write (PR creation via lib/vault.sh)
|
|
# review-bot: read (PR review)
|
|
# planner-bot: write (prerequisites.md, memory)
|
|
# gardener-bot: write (backlog grooming)
|
|
# vault-bot: write (vault items)
|
|
# supervisor-bot: read (health monitoring)
|
|
# predictor-bot: read (pattern detection)
|
|
# architect-bot: write (sprint PRs)
|
|
local bot_user bot_perm
|
|
declare -A bot_permissions=(
|
|
[dev-bot]="write"
|
|
[review-bot]="read"
|
|
[planner-bot]="write"
|
|
[gardener-bot]="write"
|
|
[vault-bot]="write"
|
|
[supervisor-bot]="read"
|
|
[predictor-bot]="read"
|
|
[architect-bot]="write"
|
|
)
|
|
for bot_user in "${!bot_permissions[@]}"; do
|
|
bot_perm="${bot_permissions[$bot_user]}"
|
|
curl -sf -X PUT \
|
|
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/repos/${repo_slug}/collaborators/${bot_user}" \
|
|
-d "{\"permission\":\"${bot_perm}\"}" >/dev/null 2>&1 || true
|
|
done
|
|
|
|
# Add disinto-admin as admin collaborator
|
|
curl -sf -X PUT \
|
|
-H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/repos/${repo_slug}/collaborators/disinto-admin" \
|
|
-d '{"permission":"admin"}' >/dev/null 2>&1 || true
|
|
|
|
echo "Repo: ${repo_slug} created on Forgejo"
|
|
else
|
|
echo "Repo: ${repo_slug} (already exists on Forgejo)"
|
|
fi
|
|
|
|
echo "Forge: ${forge_url} (ready)"
|
|
}
|
|
|
|
# Create and seed the {project}-ops repo on Forgejo with initial directory structure.
# The ops repo holds operational data: vault items, knowledge, evidence, prerequisites.
#
# Arguments:
#   $1 forge_url      - base URL of the Forgejo instance
#   $2 ops_slug       - ops repo slug (org/name-ops)
#   $3 ops_root       - local checkout path for the ops repo
#   $4 primary_branch - default branch name (default: main)
# Globals:
#   FORGE_TOKEN (read) - dev-bot API token, also embedded in the clone URL
#   admin_token, human_user - read from the caller's scope via bash dynamic
#     scoping; NOTE(review): confirm every caller sets these before invoking.
setup_ops_repo() {
  local forge_url="$1" ops_slug="$2" ops_root="$3" primary_branch="${4:-main}"
  local org_name="${ops_slug%%/*}"
  local ops_name="${ops_slug##*/}"

  echo ""
  echo "── Ops repo setup ─────────────────────────────────────"

  # Check if ops repo already exists on Forgejo
  if curl -sf --max-time 5 \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${forge_url}/api/v1/repos/${ops_slug}" >/dev/null 2>&1; then
    echo "Ops repo: ${ops_slug} (already exists on Forgejo)"
  else
    # Create ops repo under org (or human user if org creation failed).
    # auto_init:true so the repo has an initial commit and default branch.
    if ! curl -sf -X POST \
      -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
      -H "Content-Type: application/json" \
      "${forge_url}/api/v1/orgs/${org_name}/repos" \
      -d "{\"name\":\"${ops_name}\",\"auto_init\":true,\"default_branch\":\"${primary_branch}\",\"description\":\"Operational data for ${org_name}/${ops_name%-ops}\"}" >/dev/null 2>&1; then
      # Fallback: create under the human user namespace
      curl -sf -X POST \
        -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
        -H "Content-Type: application/json" \
        "${forge_url}/api/v1/users/${human_user}/repos" \
        -d "{\"name\":\"${ops_name}\",\"auto_init\":true,\"default_branch\":\"${primary_branch}\",\"description\":\"Operational data\"}" >/dev/null 2>&1 || true
    fi

    # Add all bot users as collaborators with appropriate permissions
    # vault branch protection (#77) requires:
    #   - Admin-only merge to main (enforced by admin_enforced: true)
    #   - Bots can push branches and create PRs, but cannot merge
    local bot_user bot_perm
    # declare inside a function is function-scoped (same effect as local)
    declare -A bot_permissions=(
      [dev-bot]="write"
      [review-bot]="read"
      [planner-bot]="write"
      [gardener-bot]="write"
      [vault-bot]="write"
      [supervisor-bot]="read"
      [predictor-bot]="read"
      [architect-bot]="write"
    )
    for bot_user in "${!bot_permissions[@]}"; do
      bot_perm="${bot_permissions[$bot_user]}"
      # Best-effort: collaborator add failures are non-fatal (|| true)
      curl -sf -X PUT \
        -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
        -H "Content-Type: application/json" \
        "${forge_url}/api/v1/repos/${ops_slug}/collaborators/${bot_user}" \
        -d "{\"permission\":\"${bot_perm}\"}" >/dev/null 2>&1 || true
    done

    # Add disinto-admin as admin collaborator
    curl -sf -X PUT \
      -H "Authorization: token ${admin_token:-${FORGE_TOKEN}}" \
      -H "Content-Type: application/json" \
      "${forge_url}/api/v1/repos/${ops_slug}/collaborators/disinto-admin" \
      -d '{"permission":"admin"}' >/dev/null 2>&1 || true

    echo "Ops repo: ${ops_slug} created on Forgejo"
  fi

  # Clone ops repo locally if not present
  if [ ! -d "${ops_root}/.git" ]; then
    local auth_url
    # Splice dev-bot basic-auth credentials into the URL scheme separator
    auth_url=$(printf '%s' "$forge_url" | sed "s|://|://dev-bot:${FORGE_TOKEN}@|")
    local clone_url="${auth_url}/${ops_slug}.git"
    echo "Cloning: ops repo -> ${ops_root}"
    # If the clone fails (e.g. remote repo unreachable), fall back to a
    # fresh local init so seeding below can still proceed.
    git clone --quiet "$clone_url" "$ops_root" 2>/dev/null || {
      echo "Initializing: ops repo at ${ops_root}"
      mkdir -p "$ops_root"
      git -C "$ops_root" init --initial-branch="${primary_branch}" -q
    }
  else
    echo "Ops repo: ${ops_root} (already exists locally)"
  fi

  # Seed directory structure (idempotent: mkdir -p and existence checks)
  local seeded=false
  mkdir -p "${ops_root}/vault/pending"
  mkdir -p "${ops_root}/vault/approved"
  mkdir -p "${ops_root}/vault/fired"
  mkdir -p "${ops_root}/vault/rejected"
  mkdir -p "${ops_root}/knowledge"
  mkdir -p "${ops_root}/evidence/engagement"

  if [ ! -f "${ops_root}/README.md" ]; then
    cat > "${ops_root}/README.md" <<OPSEOF
# ${ops_name}

Operational data for the ${ops_name%-ops} project.

## Structure

\`\`\`
${ops_name}/
├── vault/
│   ├── pending/     # vault items awaiting approval
│   ├── approved/    # approved vault items
│   ├── fired/       # executed vault items
│   └── rejected/    # rejected vault items
├── knowledge/       # shared agent knowledge and best practices
├── evidence/        # engagement data, experiment results
├── portfolio.md     # addressables + observables
├── prerequisites.md # dependency graph
└── RESOURCES.md     # accounts, tokens (refs), infra inventory
\`\`\`

> **Note:** Journal directories (journal/planner/ and journal/supervisor/) have been removed from the ops repo. Agent journals are now stored in each agent's .profile repo on Forgejo.

## Branch protection

- \`main\`: 2 reviewers required for vault items
- Journal/evidence commits may use lighter rules
OPSEOF
    seeded=true
  fi

  # Create stub files if they don't exist
  [ -f "${ops_root}/portfolio.md" ] || { echo "# Portfolio" > "${ops_root}/portfolio.md"; seeded=true; }
  [ -f "${ops_root}/prerequisites.md" ] || { echo "# Prerequisite Tree" > "${ops_root}/prerequisites.md"; seeded=true; }
  [ -f "${ops_root}/RESOURCES.md" ] || { echo "# Resources" > "${ops_root}/RESOURCES.md"; seeded=true; }

  # Commit and push seed content
  if [ "$seeded" = true ] && [ -d "${ops_root}/.git" ]; then
    # Auto-configure repo-local git identity if missing (#778)
    if [ -z "$(git -C "$ops_root" config user.name 2>/dev/null)" ]; then
      git -C "$ops_root" config user.name "disinto-admin"
    fi
    if [ -z "$(git -C "$ops_root" config user.email 2>/dev/null)" ]; then
      git -C "$ops_root" config user.email "disinto-admin@localhost"
    fi

    git -C "$ops_root" add -A
    # Only commit when staging produced actual changes
    if ! git -C "$ops_root" diff --cached --quiet 2>/dev/null; then
      git -C "$ops_root" commit -m "chore: seed ops repo structure" -q
      # Push if remote exists (absent when we fell back to local init)
      if git -C "$ops_root" remote get-url origin >/dev/null 2>&1; then
        git -C "$ops_root" push origin "${primary_branch}" -q 2>/dev/null || true
      fi
    fi
    echo "Seeded: ops repo with initial structure"
  fi
}
|
|
|
|
# Push local clone to the Forgejo remote.
#
# Arguments:
#   $1 repo_root - local clone path
#   $2 forge_url - base URL of the Forgejo instance
#   $3 repo_slug - forge repo slug (org/repo)
# Globals: FORGE_TOKEN (read) - dev-bot token embedded in the push URL.
# Returns: 0 on success (or when there is nothing to push); 1 on missing
#   token, a failed push, or when the repo still reports empty afterwards.
push_to_forge() {
  local repo_root="$1" forge_url="$2" repo_slug="$3"

  # Build authenticated remote URL: http://dev-bot:<token>@host:port/org/repo.git
  if [ -z "${FORGE_TOKEN:-}" ]; then
    echo "Error: FORGE_TOKEN not set — cannot push to Forgejo" >&2
    return 1
  fi
  local auth_url
  # Splice credentials into the scheme separator (http:// -> http://user:tok@)
  auth_url=$(printf '%s' "$forge_url" | sed "s|://|://dev-bot:${FORGE_TOKEN}@|")
  local remote_url="${auth_url}/${repo_slug}.git"
  # Display URL without token (never print the authenticated URL)
  local display_url="${forge_url}/${repo_slug}.git"

  # Always set the remote URL to ensure credentials are current
  if git -C "$repo_root" remote get-url forgejo >/dev/null 2>&1; then
    git -C "$repo_root" remote set-url forgejo "$remote_url"
  else
    git -C "$repo_root" remote add forgejo "$remote_url"
  fi
  echo "Remote: forgejo -> ${display_url}"

  # Skip push if local repo has no commits (e.g. cloned from empty Forgejo repo)
  if ! git -C "$repo_root" rev-parse HEAD >/dev/null 2>&1; then
    echo "Push: skipped (local repo has no commits)"
    return 0
  fi

  # Push all branches and tags
  echo "Pushing: branches to forgejo"
  if ! git -C "$repo_root" push forgejo --all 2>&1; then
    echo "Error: failed to push branches to Forgejo" >&2
    return 1
  fi
  echo "Pushing: tags to forgejo"
  if ! git -C "$repo_root" push forgejo --tags 2>&1; then
    echo "Error: failed to push tags to Forgejo" >&2
    return 1
  fi

  # Verify the repo is no longer empty (Forgejo may need a moment to index pushed refs).
  # is_empty sentinel values: "true" (still empty), "skipped" (API unreachable,
  # verification abandoned without failing), anything else = confirmed non-empty.
  local is_empty="true"
  local verify_attempt
  for verify_attempt in $(seq 1 5); do
    local repo_info
    repo_info=$(curl -sf --max-time 10 \
      -H "Authorization: token ${FORGE_TOKEN}" \
      "${forge_url}/api/v1/repos/${repo_slug}" 2>/dev/null) || repo_info=""
    if [ -z "$repo_info" ]; then
      is_empty="skipped"
      break # API unreachable, skip verification
    fi
    is_empty=$(printf '%s' "$repo_info" | jq -r '.empty // "unknown"')
    if [ "$is_empty" != "true" ]; then
      echo "Verify: repo is not empty (push confirmed)"
      break
    fi
    # Back off briefly between attempts (but not after the last one)
    if [ "$verify_attempt" -lt 5 ]; then
      sleep 2
    fi
  done
  if [ "$is_empty" = "true" ]; then
    echo "Warning: Forgejo repo still reports empty after push" >&2
    return 1
  fi
}
|
|
|
|
# Preflight check — verify all factory requirements before proceeding.
#
# Arguments:
#   $1 repo_slug - forge repo slug (org/repo), used for the API auth probe
#   $2 forge_url - Forgejo base URL (default: $FORGE_URL or http://localhost:3000)
# Globals: FORGE_TOKEN (read, optional) — enables the forge API check.
# Exits 1 if any hard requirement fails; warnings alone do not fail.
preflight_check() {
  local repo_slug="${1:-}"
  local forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local errors=0

  # ── Required commands ──
  local -A hints=(
    [claude]="Install: https://docs.anthropic.com/en/docs/claude-code/overview"
    [tmux]="Install: apt install tmux / brew install tmux"
    [git]="Install: apt install git / brew install git"
    [jq]="Install: apt install jq / brew install jq"
    [python3]="Install: apt install python3 / brew install python3"
    [curl]="Install: apt install curl / brew install curl"
  )

  local cmd
  for cmd in claude tmux git jq python3 curl; do
    if ! command -v "$cmd" &>/dev/null; then
      echo "Error: ${cmd} not found" >&2
      echo "  ${hints[$cmd]}" >&2
      errors=$((errors + 1))
    fi
  done

  # ── Claude Code authentication ──
  if command -v claude &>/dev/null && command -v jq &>/dev/null; then
    # Run `claude auth status` ONCE, splitting stdout (JSON) from stderr.
    # The previous implementation invoked it twice — once for stderr, once
    # for stdout — which doubled the subprocess cost and could observe two
    # different auth states if they raced.
    local auth_json auth_stderr auth_rc=0 auth_err_file
    auth_err_file=$(mktemp)
    auth_json=$(claude auth status 2>"$auth_err_file") || auth_rc=$?
    auth_stderr=$(<"$auth_err_file")
    rm -f "$auth_err_file"

    # Only skip check if subcommand is unrecognized (old claude version)
    if printf '%s' "$auth_stderr" | grep -qi "unknown command"; then
      : # claude version doesn't support auth status — skip
    elif [ -z "$auth_json" ] || [ "$auth_rc" -ne 0 ]; then
      echo "Error: Claude Code is not authenticated (auth check failed)" >&2
      echo "  Run: claude auth login" >&2
      errors=$((errors + 1))
    else
      local logged_in
      logged_in=$(printf '%s' "$auth_json" | jq -r '.loggedIn // false' 2>/dev/null) || logged_in="false"
      if [ "$logged_in" != "true" ]; then
        echo "Error: Claude Code is not authenticated" >&2
        echo "  Run: claude auth login" >&2
        errors=$((errors + 1))
      fi
    fi
  fi

  # ── Forge API check (verify the forge is reachable and token works) ──
  if [ -n "${FORGE_TOKEN:-}" ] && command -v curl &>/dev/null; then
    if ! curl -sf --max-time 10 \
      -H "Authorization: token ${FORGE_TOKEN}" \
      "${forge_url}/api/v1/repos/${repo_slug}" >/dev/null 2>&1; then
      echo "Error: Forge API auth failed at ${forge_url}" >&2
      echo "  Verify your FORGE_TOKEN and that Forgejo is running" >&2
      errors=$((errors + 1))
    fi
  fi

  # ── Git identity check (warn only — init sets a repo-local identity) ──
  if command -v git &>/dev/null; then
    local git_name git_email
    git_name=$(git config user.name 2>/dev/null) || git_name=""
    git_email=$(git config user.email 2>/dev/null) || git_email=""
    if [ -z "$git_name" ] || [ -z "$git_email" ]; then
      echo "Warning: git user.name/user.email not configured" >&2
      echo "  Init will set a repo-local identity for ops commits" >&2
    fi
  fi

  # ── Optional tools (warn only) ──
  if ! command -v docker &>/dev/null; then
    echo "Warning: docker not found (needed for Forgejo provisioning)" >&2
  fi
  if ! command -v sops &>/dev/null; then
    echo "Warning: sops not found (secrets will be stored in plaintext .env)" >&2
    echo "  Install: https://github.com/getsops/sops/releases" >&2
  fi
  if ! command -v age-keygen &>/dev/null; then
    echo "Warning: age not found (needed for secret encryption with SOPS)" >&2
    echo "  Install: apt install age / brew install age" >&2
  fi

  if [ "$errors" -gt 0 ]; then
    echo "" >&2
    echo "${errors} preflight error(s) — fix the above before running disinto init" >&2
    exit 1
  fi
}
|
|
|
|
# Clone the repo if the target directory doesn't exist; validate if it does.
# Arguments:
#   $1 slug      - forge repo slug (org/repo)
#   $2 target    - local clone destination
#   $3 forge_url - Forgejo base URL (default: $FORGE_URL or localhost)
clone_or_validate() {
  local slug="$1" target="$2" forge_url="${3:-${FORGE_URL:-http://localhost:3000}}"
  if [ ! -d "${target}/.git" ]; then
    local clone_src
    clone_src=$(clone_url_from_slug "$slug" "$forge_url")
    echo "Cloning: ${clone_src} -> ${target}"
    git clone "$clone_src" "$target"
  else
    # Already cloned — nothing to do beyond reporting it
    echo "Repo: ${target} (existing clone)"
  fi
}
|
|
|
|
# Detect the primary branch from the remote HEAD or fallback to main/master.
# Arguments:
#   $1 repo_root - path to a local git clone
# Outputs: branch name on stdout (no trailing newline)
detect_branch() {
  local repo_dir="$1"
  local head_ref

  # Preferred source of truth: the symbolic remote HEAD, if git set one
  head_ref=$(git -C "$repo_dir" symbolic-ref refs/remotes/origin/HEAD 2>/dev/null \
    | sed 's|refs/remotes/origin/||') || true
  if [ -n "$head_ref" ]; then
    printf '%s' "$head_ref"
    return
  fi

  # No remote HEAD — prefer main if the remote tracks it, else master
  if git -C "$repo_dir" show-ref --verify --quiet refs/remotes/origin/main 2>/dev/null; then
    printf '%s' "main"
  else
    printf '%s' "master"
  fi
}
|
|
|
|
# Generate projects/<name>.toml config file.
#
# Arguments:
#   $1 path      - output path for the TOML file
#   $2 name      - project name
#   $3 repo      - forge repo slug (org/repo)
#   $4 root      - local clone path of the project repo
#   $5 branch    - primary branch name
#   $6 ci_id     - Woodpecker numeric repo id
#   $7 forge_url - base URL of the Forgejo instance
generate_toml() {
  local path="$1" name="$2" repo="$3" root="$4" branch="$5" ci_id="$6" forge_url="$7"
  # Heredoc is deliberately unquoted: ${name}, ${repo}, ... must expand.
  # ops_repo_root uses ${HOME} rather than the previous /home/${USER}:
  # /home is Linux-specific (macOS uses /Users, root uses /root) and $USER
  # may be unset under cron/containers, which would abort under set -u.
  cat > "$path" <<EOF
# projects/${name}.toml — Project config for ${repo}
#
# Generated by disinto init

name = "${name}"
repo = "${repo}"
ops_repo = "${repo}-ops"
forge_url = "${forge_url}"
repo_root = "${root}"
ops_repo_root = "${HOME}/${name}-ops"
primary_branch = "${branch}"

[ci]
woodpecker_repo_id = ${ci_id}
stale_minutes = 60

[services]
containers = []

[monitoring]
check_prs = true
check_dev_agent = true
check_pipeline_stall = false

# [mirrors]
# github = "git@github.com:user/repo.git"
# codeberg = "git@codeberg.org:user/repo.git"
EOF
}
|
|
|
|
# Create standard labels on the forge repo.
#
# Arguments:
#   $1 repo      - forge repo slug (org/repo)
#   $2 forge_url - Forgejo base URL (default: $FORGE_URL or localhost)
# Globals: FORGE_TOKEN (read) — API token used for label operations.
# Existing labels are skipped; individual creation failures are reported
# but do not abort the function.
create_labels() {
  local repo="$1"
  local forge_url="${2:-${FORGE_URL:-http://localhost:3000}}"
  local api="${forge_url}/api/v1/repos/${repo}"

  # Label name → hex color. The loop below iterates an explicit list
  # (not "${!labels[@]}") to keep output order stable — associative-array
  # iteration order is unspecified in bash.
  local -A labels=(
    ["backlog"]="#0075ca"
    ["in-progress"]="#e4e669"
    ["blocked"]="#d73a4a"
    ["tech-debt"]="#cfd3d7"
    ["underspecified"]="#fbca04"
    ["vision"]="#0e8a16"
    ["action"]="#1d76db"
  )

  echo "Creating labels on ${repo}..."

  # Fetch existing label names so we can skip duplicates.
  # Use jq (already a hard dependency of this script) instead of the former
  # grep/cut scrape, which broke on escaped quotes or reordered JSON keys.
  local existing
  existing=$(curl -sf \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${api}/labels?limit=50" 2>/dev/null \
    | jq -r '.[].name' 2>/dev/null) || existing=""

  local name color
  for name in backlog in-progress blocked tech-debt underspecified vision action; do
    # grep -qx: whole-line match against the newline-separated name list
    if echo "$existing" | grep -qx "$name"; then
      echo "  . ${name} (already exists)"
      continue
    fi
    color="${labels[$name]}"
    if curl -sf -X POST \
      -H "Authorization: token ${FORGE_TOKEN}" \
      -H "Content-Type: application/json" \
      "${api}/labels" \
      -d "{\"name\":\"${name}\",\"color\":\"${color}\"}" >/dev/null 2>&1; then
      echo "  + ${name}"
    else
      echo "  ! ${name} (failed to create)"
    fi
  done
}
|
|
|
|
# Generate a minimal VISION.md template in the target project.
# Arguments:
#   $1 repo_root - project checkout directory
#   $2 name      - project name interpolated into the template
# Existing VISION.md files are left untouched.
generate_vision() {
  local project_root="$1" project_name="$2"
  local vision_file="${project_root}/VISION.md"

  if [ -f "$vision_file" ]; then
    echo "VISION: ${vision_file} (already exists, skipping)"
    return
  fi

  cat > "$vision_file" <<EOF
# Vision

## What ${project_name} does

<!-- Describe the purpose of this project in one paragraph -->

## Who it's for

<!-- Describe the target audience -->

## Design principles

- <!-- Principle 1 -->
- <!-- Principle 2 -->
- <!-- Principle 3 -->

## Milestones

### Current
- <!-- What you're working on now -->

### Next
- <!-- What comes after -->
EOF
  echo "Created: ${vision_file}"
  echo "  Commit this to your repo when ready"
}
|
|
|
|
# Generate and optionally install cron entries for the project agents.
#
# Arguments:
#   $1 name     - project name (used in the crontab marker comment)
#   $2 toml     - path to the project TOML config
#   $3 auto_yes - when "false" and stdin is a TTY, prompt before installing
#   $4 bare     - bare-metal mode flag (default: false)
# Globals: FACTORY_ROOT (read) — used to build absolute agent script paths.
# Exits 1 when bare mode is requested but crontab is unavailable.
install_cron() {
  local name="$1" toml="$2" auto_yes="$3" bare="${4:-false}"

  # In compose mode, skip host cron — the agents container runs cron internally
  if [ "$bare" = false ]; then
    echo ""
    echo "Cron: skipped (agents container handles scheduling in compose mode)"
    return
  fi

  # Bare mode: crontab is required on the host
  if ! command -v crontab &>/dev/null; then
    echo "Error: crontab not found (required for bare-metal mode)" >&2
    echo "  Install: apt install cron / brew install cron" >&2
    exit 1
  fi

  # Use absolute path for the TOML in cron entries
  local abs_toml
  abs_toml="$(cd "$(dirname "$toml")" && pwd)/$(basename "$toml")"

  # Review/dev pollers run every 5 minutes on staggered offsets (minute 2
  # vs minute 4) so they don't contend; the gardener runs four times a day.
  local cron_block
  cron_block="# disinto: ${name}
2,7,12,17,22,27,32,37,42,47,52,57 * * * * ${FACTORY_ROOT}/review/review-poll.sh ${abs_toml} >/dev/null 2>&1
4,9,14,19,24,29,34,39,44,49,54,59 * * * * ${FACTORY_ROOT}/dev/dev-poll.sh ${abs_toml} >/dev/null 2>&1
0 0,6,12,18 * * * cd ${FACTORY_ROOT} && bash gardener/gardener-run.sh ${abs_toml} >/dev/null 2>&1"

  echo ""
  echo "Cron entries to install:"
  echo "$cron_block"
  echo ""

  # Prompt only when interactive (stdin is a TTY) and --yes wasn't given
  if [ "$auto_yes" = false ] && [ -t 0 ]; then
    read -rp "Install these cron entries? [y/N] " confirm
    if [[ ! "$confirm" =~ ^[Yy] ]]; then
      echo "Skipped cron install. Add manually with: crontab -e"
      return
    fi
  fi

  # Append to existing crontab; `|| true` keeps an empty crontab from
  # aborting the pipeline under set -e.
  { crontab -l 2>/dev/null || true; printf '%s\n' "$cron_block"; } | crontab -
  echo "Cron entries installed"
}
|
|
|
|
# Set up Woodpecker CI to use Forgejo as its forge backend.
# Creates an OAuth2 app on Forgejo for Woodpecker, activates the repo.
#
# Arguments:
#   $1 forge_url - base URL of the Forgejo instance
#   $2 repo_slug - forge repo slug (currently unused here; kept for
#                  signature stability with callers)
# Globals:
#   FORGE_TOKEN (read)  - API token used for the OAuth2 app endpoints
#   FACTORY_ROOT (read) - .env lives at ${FACTORY_ROOT}/.env
# Failures are reported as warnings and the function returns early; the
# caller can proceed without CI.
create_woodpecker_oauth() {
  local forge_url="$1" repo_slug="$2"

  echo ""
  echo "── Woodpecker OAuth2 setup ────────────────────────────"

  # Create OAuth2 application on Forgejo for Woodpecker
  local oauth2_name="woodpecker-ci"
  local redirect_uri="http://localhost:8000/authorize"
  local existing_app client_id client_secret

  # Check if OAuth2 app already exists (by name). Note: the secret of an
  # existing app cannot be re-read, so WP_FORGEJO_SECRET is only written
  # on first creation.
  existing_app=$(curl -sf \
    -H "Authorization: token ${FORGE_TOKEN}" \
    "${forge_url}/api/v1/user/applications/oauth2" 2>/dev/null \
    | jq -r --arg name "$oauth2_name" '.[] | select(.name == $name) | .client_id // empty' 2>/dev/null) || true

  if [ -n "$existing_app" ]; then
    echo "OAuth2: ${oauth2_name} (already exists, client_id=${existing_app})"
    client_id="$existing_app"
  else
    local oauth2_resp
    oauth2_resp=$(curl -sf -X POST \
      -H "Authorization: token ${FORGE_TOKEN}" \
      -H "Content-Type: application/json" \
      "${forge_url}/api/v1/user/applications/oauth2" \
      -d "{\"name\":\"${oauth2_name}\",\"redirect_uris\":[\"${redirect_uri}\"],\"confidential_client\":true}" \
      2>/dev/null) || oauth2_resp=""

    if [ -z "$oauth2_resp" ]; then
      echo "Warning: failed to create OAuth2 app on Forgejo" >&2
      return
    fi

    client_id=$(printf '%s' "$oauth2_resp" | jq -r '.client_id // empty')
    client_secret=$(printf '%s' "$oauth2_resp" | jq -r '.client_secret // empty')

    if [ -z "$client_id" ]; then
      echo "Warning: OAuth2 app creation returned no client_id" >&2
      return
    fi

    echo "OAuth2: ${oauth2_name} created (client_id=${client_id})"
  fi

  # Store Woodpecker forge config in .env
  # WP_FORGEJO_CLIENT/SECRET match the docker-compose.yml variable references
  local env_file="${FACTORY_ROOT}/.env"
  local wp_vars=(
    "WOODPECKER_FORGEJO=true"
    "WOODPECKER_FORGEJO_URL=${forge_url}"
  )
  if [ -n "${client_id:-}" ]; then
    wp_vars+=("WP_FORGEJO_CLIENT=${client_id}")
  fi
  if [ -n "${client_secret:-}" ]; then
    wp_vars+=("WP_FORGEJO_SECRET=${client_secret}")
  fi

  # Upsert each VAR=value line into .env. Both loop variables are declared
  # local — the previous version leaked var_line into the caller's scope.
  local var_line var_name
  for var_line in "${wp_vars[@]}"; do
    var_name="${var_line%%=*}"
    if grep -q "^${var_name}=" "$env_file" 2>/dev/null; then
      sed -i "s|^${var_name}=.*|${var_line}|" "$env_file"
    else
      printf '%s\n' "$var_line" >> "$env_file"
    fi
  done
  echo "Config: Woodpecker forge vars written to .env"
}
|
|
|
|
# Auto-generate WOODPECKER_TOKEN by driving the Forgejo OAuth2 login flow.
# Requires _FORGE_ADMIN_PASS (set by setup_forge when admin user was just created).
# Called after compose stack is up, before activate_woodpecker_repo.
#
# Arguments:
#   $1 forge_url - base URL of the Forgejo instance
# Globals:
#   WOODPECKER_SERVER (read, optional) - WP base URL, default http://localhost:8000
#   FACTORY_ROOT (read)                - .env lives at ${FACTORY_ROOT}/.env
#   _FORGE_ADMIN_PASS (read)           - Forgejo admin web password
#   WOODPECKER_TOKEN (written/exported) on success
# Returns: 0 when a token was saved (or already present); 1 on any step failure.
#
# Flow overview (all steps are best-effort with early returns):
#   1. log into Forgejo web UI to get a session cookie
#   2. start the Woodpecker OAuth flow to capture the authorize URL + state
#   3. approve the OAuth consent (or reuse a prior approval) to get a code
#   4. complete the Woodpecker callback to get a session token
#   5. mint a persistent PAT via the WP API, falling back to the session token
generate_woodpecker_token() {
  local forge_url="$1"
  local wp_server="${WOODPECKER_SERVER:-http://localhost:8000}"
  local env_file="${FACTORY_ROOT}/.env"
  local admin_user="disinto-admin"
  local admin_pass="${_FORGE_ADMIN_PASS:-}"

  # Skip if already set
  if grep -q '^WOODPECKER_TOKEN=' "$env_file" 2>/dev/null; then
    echo "Config: WOODPECKER_TOKEN already set in .env"
    return 0
  fi

  echo ""
  echo "── Woodpecker token generation ────────────────────────"

  if [ -z "$admin_pass" ]; then
    echo "Warning: Forgejo admin password not available — cannot generate WOODPECKER_TOKEN" >&2
    echo "  Log into Woodpecker at ${wp_server} and create a token manually" >&2
    return 1
  fi

  # Wait for Woodpecker to become ready (up to ~60s: 30 retries x 2s)
  echo -n "Waiting for Woodpecker"
  local retries=0
  while ! curl -sf --max-time 3 "${wp_server}/api/version" >/dev/null 2>&1; do
    retries=$((retries + 1))
    if [ "$retries" -gt 30 ]; then
      echo ""
      echo "Warning: Woodpecker not ready at ${wp_server} — skipping token generation" >&2
      return 1
    fi
    echo -n "."
    sleep 2
  done
  echo " ready"

  # Flow: Forgejo web login → OAuth2 authorize → Woodpecker callback → token
  local cookie_jar auth_body_file
  cookie_jar=$(mktemp /tmp/wp-auth-XXXXXX)
  auth_body_file=$(mktemp /tmp/wp-body-XXXXXX)

  # Step 1: Log into Forgejo web UI (session cookie needed for OAuth consent).
  # Scrape the CSRF token out of the login page HTML (either content= or value=).
  local csrf
  csrf=$(curl -sf -c "$cookie_jar" "${forge_url}/user/login" 2>/dev/null \
    | grep -o 'name="_csrf"[^>]*' | head -1 \
    | grep -oE '(content|value)="[^"]*"' | head -1 \
    | cut -d'"' -f2) || csrf=""

  if [ -z "$csrf" ]; then
    echo "Warning: could not get Forgejo CSRF token — skipping token generation" >&2
    rm -f "$cookie_jar" "$auth_body_file"
    return 1
  fi

  curl -sf -b "$cookie_jar" -c "$cookie_jar" -X POST \
    -o /dev/null \
    "${forge_url}/user/login" \
    --data-urlencode "_csrf=${csrf}" \
    --data-urlencode "user_name=${admin_user}" \
    --data-urlencode "password=${admin_pass}" \
    2>/dev/null || true

  # Step 2: Start Woodpecker OAuth2 flow (captures authorize URL with state param)
  local wp_redir
  wp_redir=$(curl -sf -o /dev/null -w '%{redirect_url}' \
    "${wp_server}/authorize" 2>/dev/null) || wp_redir=""

  if [ -z "$wp_redir" ]; then
    echo "Warning: Woodpecker did not provide OAuth redirect — skipping token generation" >&2
    rm -f "$cookie_jar" "$auth_body_file"
    return 1
  fi

  # Rewrite internal Docker network URLs to host-accessible URLs.
  # Handle both plain and URL-encoded forms of the internal hostnames.
  local forge_url_enc wp_server_enc
  forge_url_enc=$(printf '%s' "$forge_url" | sed 's|:|%3A|g; s|/|%2F|g')
  wp_server_enc=$(printf '%s' "$wp_server" | sed 's|:|%3A|g; s|/|%2F|g')
  wp_redir=$(printf '%s' "$wp_redir" \
    | sed "s|http://forgejo:3000|${forge_url}|g" \
    | sed "s|http%3A%2F%2Fforgejo%3A3000|${forge_url_enc}|g" \
    | sed "s|http://woodpecker:8000|${wp_server}|g" \
    | sed "s|http%3A%2F%2Fwoodpecker%3A8000|${wp_server_enc}|g")

  # Step 3: Hit Forgejo OAuth authorize endpoint with session
  # First time: shows consent page. Already approved: redirects with code.
  local auth_headers redirect_loc auth_code
  auth_headers=$(curl -sf -b "$cookie_jar" -c "$cookie_jar" \
    -D - -o "$auth_body_file" \
    "$wp_redir" 2>/dev/null) || auth_headers=""

  redirect_loc=$(printf '%s' "$auth_headers" \
    | grep -i '^location:' | head -1 | tr -d '\r' | awk '{print $2}')

  if printf '%s' "${redirect_loc:-}" | grep -q 'code='; then
    # Auto-approved: extract code from redirect
    auth_code=$(printf '%s' "$redirect_loc" | sed 's/.*code=\([^&]*\).*/\1/')
  else
    # Consent page: extract CSRF and all form fields, POST grant approval
    local consent_csrf form_client_id form_state form_redirect_uri
    consent_csrf=$(grep -o 'name="_csrf"[^>]*' "$auth_body_file" 2>/dev/null \
      | head -1 | grep -oE '(content|value)="[^"]*"' | head -1 \
      | cut -d'"' -f2) || consent_csrf=""
    form_client_id=$(grep 'name="client_id"' "$auth_body_file" 2>/dev/null \
      | grep -oE 'value="[^"]*"' | cut -d'"' -f2) || form_client_id=""
    form_state=$(grep 'name="state"' "$auth_body_file" 2>/dev/null \
      | grep -oE 'value="[^"]*"' | cut -d'"' -f2) || form_state=""
    form_redirect_uri=$(grep 'name="redirect_uri"' "$auth_body_file" 2>/dev/null \
      | grep -oE 'value="[^"]*"' | cut -d'"' -f2) || form_redirect_uri=""

    if [ -n "$consent_csrf" ]; then
      local grant_headers
      grant_headers=$(curl -sf -b "$cookie_jar" -c "$cookie_jar" \
        -D - -o /dev/null -X POST \
        "${forge_url}/login/oauth/grant" \
        --data-urlencode "_csrf=${consent_csrf}" \
        --data-urlencode "client_id=${form_client_id}" \
        --data-urlencode "state=${form_state}" \
        --data-urlencode "scope=" \
        --data-urlencode "nonce=" \
        --data-urlencode "redirect_uri=${form_redirect_uri}" \
        --data-urlencode "granted=true" \
        2>/dev/null) || grant_headers=""

      redirect_loc=$(printf '%s' "$grant_headers" \
        | grep -i '^location:' | head -1 | tr -d '\r' | awk '{print $2}')

      if printf '%s' "${redirect_loc:-}" | grep -q 'code='; then
        auth_code=$(printf '%s' "$redirect_loc" | sed 's/.*code=\([^&]*\).*/\1/')
      fi
    fi
  fi

  rm -f "$auth_body_file"

  if [ -z "${auth_code:-}" ]; then
    echo "Warning: could not obtain OAuth2 authorization code — skipping token generation" >&2
    rm -f "$cookie_jar"
    return 1
  fi

  # Step 4: Complete Woodpecker OAuth callback (exchanges code for session)
  local state
  state=$(printf '%s' "$wp_redir" | sed -n 's/.*[&?]state=\([^&]*\).*/\1/p')

  local wp_headers wp_token
  wp_headers=$(curl -sf -c "$cookie_jar" \
    -D - -o /dev/null \
    "${wp_server}/authorize?code=${auth_code}&state=${state:-}" \
    2>/dev/null) || wp_headers=""

  # Extract token from redirect URL (Woodpecker returns ?access_token=...)
  redirect_loc=$(printf '%s' "$wp_headers" \
    | grep -i '^location:' | head -1 | tr -d '\r' | awk '{print $2}')

  wp_token=""
  if printf '%s' "${redirect_loc:-}" | grep -q 'access_token='; then
    wp_token=$(printf '%s' "$redirect_loc" | sed 's/.*access_token=\([^&]*\).*/\1/')
  fi

  # Fallback: check for user_sess cookie
  if [ -z "$wp_token" ]; then
    wp_token=$(awk '/user_sess/{print $NF}' "$cookie_jar" 2>/dev/null) || wp_token=""
  fi

  rm -f "$cookie_jar"

  if [ -z "$wp_token" ]; then
    echo "Warning: could not obtain Woodpecker token — skipping token generation" >&2
    return 1
  fi

  # Step 5: Create persistent personal access token via Woodpecker API
  # WP v3 requires CSRF header for POST operations with session tokens.
  local wp_csrf
  wp_csrf=$(curl -sf -b "user_sess=${wp_token}" \
    "${wp_server}/web-config.js" 2>/dev/null \
    | sed -n 's/.*WOODPECKER_CSRF = "\([^"]*\)".*/\1/p') || wp_csrf=""

  local pat_resp final_token
  # ${wp_csrf:+...}: the CSRF header is only sent when a token was found
  pat_resp=$(curl -sf -X POST \
    -b "user_sess=${wp_token}" \
    ${wp_csrf:+-H "X-CSRF-Token: ${wp_csrf}"} \
    "${wp_server}/api/user/token" \
    2>/dev/null) || pat_resp=""

  final_token=""
  if [ -n "$pat_resp" ]; then
    final_token=$(printf '%s' "$pat_resp" \
      | jq -r 'if .token then .token elif .access_token then .access_token else empty end' \
      2>/dev/null) || final_token=""
  fi

  # Use persistent token if available, otherwise use session token
  final_token="${final_token:-$wp_token}"

  # Save to .env (upsert: replace existing line or append)
  if grep -q '^WOODPECKER_TOKEN=' "$env_file" 2>/dev/null; then
    sed -i "s|^WOODPECKER_TOKEN=.*|WOODPECKER_TOKEN=${final_token}|" "$env_file"
  else
    printf 'WOODPECKER_TOKEN=%s\n' "$final_token" >> "$env_file"
  fi
  export WOODPECKER_TOKEN="$final_token"
  echo "Config: WOODPECKER_TOKEN generated and saved to .env"
}
|
|
|
|
# Activate a repository in Woodpecker CI after the compose stack has started.
#
# Globals:
#   WOODPECKER_SERVER, WOODPECKER_TOKEN, FORGE_TOKEN, FORGE_URL  (read)
#   _WP_REPO_ID  (written — detected Woodpecker repo ID, consumed later by init
#                 for TOML generation)
# Arguments:
#   $1 - forge repo slug (org/name)
# Outputs:
#   Progress messages to stdout; warnings to stderr.
# Returns:
#   0 in all skip/warn paths — activation is best-effort.
activate_woodpecker_repo() {
  local forge_repo="$1"
  local wp_server="${WOODPECKER_SERVER:-http://localhost:8000}"

  # Wait for Woodpecker to become ready after stack start
  # (up to 10 probes × 2 s sleep ≈ 20 s, plus per-probe 3 s curl timeout).
  local retries=0
  while [ $retries -lt 10 ]; do
    if curl -sf --max-time 3 "${wp_server}/api/version" >/dev/null 2>&1; then
      break
    fi
    retries=$((retries + 1))
    sleep 2
  done

  # Final reachability check — bail out quietly if the server never came up.
  if ! curl -sf --max-time 5 "${wp_server}/api/version" >/dev/null 2>&1; then
    echo "Woodpecker: not reachable at ${wp_server} after stack start, skipping repo activation" >&2
    return
  fi

  echo ""
  echo "── Woodpecker repo activation ─────────────────────────"

  local wp_token="${WOODPECKER_TOKEN:-}"
  if [ -z "$wp_token" ]; then
    echo "Warning: WOODPECKER_TOKEN not set — cannot activate repo" >&2
    echo "  Activate manually: woodpecker-cli repo add ${forge_repo}" >&2
    return
  fi

  # Look up the repo in Woodpecker; a non-zero id means it is already active.
  local wp_repo_id
  wp_repo_id=$(curl -sf \
    -H "Authorization: Bearer ${wp_token}" \
    "${wp_server}/api/repos/lookup/${forge_repo}" 2>/dev/null \
    | jq -r '.id // empty' 2>/dev/null) || true

  if [ -n "$wp_repo_id" ] && [ "$wp_repo_id" != "0" ]; then
    echo "Repo: ${forge_repo} already active in Woodpecker (id=${wp_repo_id})"
  else
    # Get Forgejo repo numeric ID for WP activation.
    # NOTE(review): FORGE_TOKEN is expanded unguarded here; under `set -u` an
    # unset token would abort the script — assumed set by init. TODO confirm.
    local forge_repo_id
    forge_repo_id=$(curl -sf \
      -H "Authorization: token ${FORGE_TOKEN}" \
      "${FORGE_URL:-http://localhost:3000}/api/v1/repos/${forge_repo}" 2>/dev/null \
      | jq -r '.id // empty' 2>/dev/null) || forge_repo_id=""

    # Activate via POST /api/repos, keyed by the forge's remote repo ID
    # (falls back to 0 if the Forgejo lookup failed).
    local activate_resp
    activate_resp=$(curl -sf -X POST \
      -H "Authorization: Bearer ${wp_token}" \
      "${wp_server}/api/repos?forge_remote_id=${forge_repo_id:-0}" \
      2>/dev/null) || activate_resp=""

    wp_repo_id=$(printf '%s' "$activate_resp" | jq -r '.id // empty' 2>/dev/null) || true

    if [ -n "$wp_repo_id" ] && [ "$wp_repo_id" != "0" ]; then
      echo "Repo: ${forge_repo} activated in Woodpecker (id=${wp_repo_id})"

      # Set pipeline timeout to 5 minutes (default is 60)
      curl -sf -X PATCH -H "Authorization: Bearer ${wp_token}" -H "Content-Type: application/json" "${wp_server}/api/repos/${wp_repo_id}" -d '{"timeout": 5}' >/dev/null 2>&1 && echo "Config: pipeline timeout set to 5 minutes" || true
    else
      echo "Warning: could not activate repo in Woodpecker" >&2
      echo "  Activate manually: woodpecker-cli repo add ${forge_repo}" >&2
    fi
  fi

  # Store repo ID for later TOML generation (only when we actually got one).
  if [ -n "$wp_repo_id" ] && [ "$wp_repo_id" != "0" ]; then
    _WP_REPO_ID="$wp_repo_id"
  fi
}
|
|
|
|
# ── init command ─────────────────────────────────────────────────────────────
|
|
|
|
# Bootstrap a new project into the factory.
#
# Generates compose files, provisions the local Forgejo instance, clones and
# pushes the repo, sets up the ops repo + vault branch protection, configures
# Woodpecker CI, installs cron jobs, wires mirror remotes, and (in compose
# mode) starts the full stack.
#
# Arguments:
#   $1            repo URL or org/repo slug (required)
#   --branch      primary branch (default: auto-detect)
#   --repo-root   local clone path (default: ~/<name>)
#   --ci-id       Woodpecker repo ID (default: 0 = prompt or auto-detect)
#   --forge-url   forge base URL (default: $FORGE_URL or http://localhost:3000)
#   --bare        bare-metal mode — skip compose generation and stack start
#   --yes         non-interactive: accept all prompts
# Outputs: progress messages; warnings to stderr. Exits non-zero on bad args.
disinto_init() {
  local repo_url="${1:-}"
  if [ -z "$repo_url" ]; then
    echo "Error: repo URL required" >&2
    echo "Usage: disinto init <repo-url>" >&2
    exit 1
  fi
  shift

  # Parse flags
  local branch="" repo_root="" ci_id="0" auto_yes=false forge_url_flag="" bare=false
  while [ $# -gt 0 ]; do
    case "$1" in
      --branch) branch="$2"; shift 2 ;;
      --repo-root) repo_root="$2"; shift 2 ;;
      --ci-id) ci_id="$2"; shift 2 ;;
      --forge-url) forge_url_flag="$2"; shift 2 ;;
      --bare) bare=true; shift ;;
      --yes) auto_yes=true; shift ;;
      *) echo "Unknown option: $1" >&2; exit 1 ;;
    esac
  done

  # Export bare-metal flag for setup_forge
  export DISINTO_BARE="$bare"

  # Extract org/repo slug
  local forge_repo
  forge_repo=$(parse_repo_slug "$repo_url")
  local project_name="${forge_repo##*/}"
  local toml_path="${FACTORY_ROOT}/projects/${project_name}.toml"

  # Determine forge URL (flag > env > default)
  local forge_url="${forge_url_flag:-${FORGE_URL:-http://localhost:3000}}"

  echo "=== disinto init ==="
  echo "Project: ${forge_repo}"
  echo "Name: ${project_name}"
  echo "Forge: ${forge_url}"

  # Check for existing config — an existing TOML supplies defaults so that
  # re-running init on the same project is idempotent.
  local toml_exists=false
  if [ -f "$toml_path" ]; then
    toml_exists=true
    echo "Config: ${toml_path} (already exists, reusing)"

    # Read repo_root and branch from existing TOML
    local existing_root existing_branch
    existing_root=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    cfg = tomllib.load(f)
print(cfg.get('repo_root', ''))
" "$toml_path" 2>/dev/null) || existing_root=""
    existing_branch=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    cfg = tomllib.load(f)
print(cfg.get('primary_branch', ''))
" "$toml_path" 2>/dev/null) || existing_branch=""

    # Use existing values as defaults
    if [ -n "$existing_branch" ] && [ -z "$branch" ]; then
      branch="$existing_branch"
    fi

    # Handle repo_root: flag overrides TOML, prompt if they differ
    if [ -z "$repo_root" ]; then
      # Use ${HOME} (not /home/${USER}) so the documented ~/<name> default
      # also holds on hosts where home dirs live elsewhere (e.g. macOS).
      repo_root="${existing_root:-${HOME}/${project_name}}"
    elif [ -n "$existing_root" ] && [ "$repo_root" != "$existing_root" ]; then
      echo "Note: --repo-root (${repo_root}) differs from TOML (${existing_root})"
      local update_toml=false
      if [ "$auto_yes" = true ]; then
        update_toml=true
      elif [ -t 0 ]; then
        read -rp "Update repo_root in TOML to ${repo_root}? [y/N] " confirm
        if [[ "$confirm" =~ ^[Yy] ]]; then
          update_toml=true
        else
          repo_root="$existing_root"
        fi
      fi
      if [ "$update_toml" = true ]; then
        python3 -c "
import sys, re, pathlib
p = pathlib.Path(sys.argv[1])
text = p.read_text()
text = re.sub(r'^repo_root\s*=\s*.*$', 'repo_root = \"' + sys.argv[2] + '\"', text, flags=re.MULTILINE)
p.write_text(text)
" "$toml_path" "$repo_root"
        echo "Updated: repo_root in ${toml_path}"
      fi
    fi
  fi

  # Generate compose files (unless --bare)
  if [ "$bare" = false ]; then
    local forge_port
    # -n/p prints only on a successful match; previously a URL without an
    # explicit port fell through sed unchanged and the whole URL became the
    # "port", defeating the :-3000 default below.
    forge_port=$(printf '%s' "$forge_url" | sed -nE 's|.*:([0-9]+)/?$|\1|p')
    forge_port="${forge_port:-3000}"
    generate_compose "$forge_port"
    generate_agent_docker
    generate_caddyfile
    generate_staging_index
    # Create empty .env so docker compose can parse the agents service
    # env_file reference before setup_forge generates the real tokens (#769)
    touch "${FACTORY_ROOT}/.env"
  fi

  # Set up local Forgejo instance (provision if needed, create users/tokens/repo)
  setup_forge "$forge_url" "$forge_repo"

  # Preflight: verify factory requirements
  preflight_check "$forge_repo" "$forge_url"

  # Determine repo root (for new projects; ${HOME} matches documented default)
  repo_root="${repo_root:-${HOME}/${project_name}}"

  # Clone or validate (try origin first for initial clone from upstream)
  if [ ! -d "${repo_root}/.git" ]; then
    # For initial setup, clone from the provided URL directly
    echo "Cloning: ${repo_url} -> ${repo_root}"
    git clone "$repo_url" "$repo_root" 2>/dev/null || \
      clone_or_validate "$forge_repo" "$repo_root" "$forge_url"
  else
    echo "Repo: ${repo_root} (existing clone)"
  fi

  # Push to local Forgejo
  push_to_forge "$repo_root" "$forge_url" "$forge_repo"

  # Detect primary branch
  if [ -z "$branch" ]; then
    branch=$(detect_branch "$repo_root")
  fi
  echo "Branch: ${branch}"

  # Set up {project}-ops repo (#757)
  local ops_slug="${forge_repo}-ops"
  local ops_root="${HOME}/${project_name}-ops"
  setup_ops_repo "$forge_url" "$ops_slug" "$ops_root" "$branch"

  # Set up vault branch protection on ops repo (#77)
  # This ensures admin-only merge to main, blocking bots from merging vault PRs
  # Use HUMAN_TOKEN (johba) or FORGE_TOKEN (dev-bot) for admin operations
  export FORGE_OPS_REPO="$ops_slug"
  # Source env.sh to ensure FORGE_TOKEN is available
  source "${FACTORY_ROOT}/lib/env.sh"
  source "${FACTORY_ROOT}/lib/branch-protection.sh"
  if setup_vault_branch_protection "$branch"; then
    echo "Branch protection: vault protection configured on ${ops_slug}"
  else
    echo "Warning: failed to set up vault branch protection" >&2
  fi
  unset FORGE_OPS_REPO

  # Generate project TOML (skip if already exists)
  if [ "$toml_exists" = false ]; then
    # Prompt for CI ID if interactive and not already set via flag
    if [ "$ci_id" = "0" ] && [ "$auto_yes" = false ] && [ -t 0 ]; then
      read -rp "Woodpecker CI repo ID (0 to skip CI): " user_ci_id
      ci_id="${user_ci_id:-0}"
    fi

    generate_toml "$toml_path" "$project_name" "$forge_repo" "$repo_root" "$branch" "$ci_id" "$forge_url"
    echo "Created: ${toml_path}"
  fi

  # Create OAuth2 app on Forgejo for Woodpecker (before compose up)
  _WP_REPO_ID=""
  create_woodpecker_oauth "$forge_url" "$forge_repo"

  # Generate WOODPECKER_AGENT_SECRET for server↔agent auth
  local env_file="${FACTORY_ROOT}/.env"
  if ! grep -q '^WOODPECKER_AGENT_SECRET=' "$env_file" 2>/dev/null; then
    local agent_secret
    agent_secret="$(head -c 32 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 40)"
    printf 'WOODPECKER_AGENT_SECRET=%s\n' "$agent_secret" >> "$env_file"
    echo "Config: WOODPECKER_AGENT_SECRET generated and saved to .env"
  fi

  # Ensure Claude Code never auto-updates, phones home, or sends telemetry (#725)
  if ! grep -q '^CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=' "$env_file" 2>/dev/null; then
    printf 'CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1\n' >> "$env_file"
    echo "Config: CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 saved to .env"
  fi

  # Create labels on remote
  create_labels "$forge_repo" "$forge_url"

  # Generate VISION.md template
  generate_vision "$repo_root" "$project_name"

  # Generate template deployment pipeline configs in project repo
  generate_deploy_pipelines "$repo_root" "$project_name"

  # Install cron jobs
  install_cron "$project_name" "$toml_path" "$auto_yes" "$bare"

  # Set up mirror remotes if [mirrors] configured in TOML
  source "${FACTORY_ROOT}/lib/load-project.sh" "$toml_path"
  if [ -n "${MIRROR_NAMES:-}" ]; then
    echo "Mirrors: setting up remotes"
    local mname murl mvar
    for mname in $MIRROR_NAMES; do
      # Indirect expansion instead of eval — same MIRROR_<NAME> lookup without
      # re-parsing the value through the shell.
      mvar="MIRROR_$(echo "$mname" | tr '[:lower:]' '[:upper:]')"
      murl="${!mvar:-}"
      [ -z "$murl" ] && continue
      git -C "$repo_root" remote add "$mname" "$murl" 2>/dev/null \
        || git -C "$repo_root" remote set-url "$mname" "$murl" 2>/dev/null || true
      echo " + ${mname} -> ${murl}"
    done
    # Initial sync: push current primary branch to mirrors
    source "${FACTORY_ROOT}/lib/mirrors.sh"
    export PROJECT_REPO_ROOT="$repo_root"
    mirror_push
  fi

  # Encrypt secrets if SOPS + age are available
  write_secrets_encrypted

  # Bring up the full stack (compose mode only)
  if [ "$bare" = false ] && [ -f "${FACTORY_ROOT}/docker-compose.yml" ]; then
    echo ""
    echo "── Starting full stack ────────────────────────────────"
    docker compose -f "${FACTORY_ROOT}/docker-compose.yml" up -d
    echo "Stack: running (forgejo + woodpecker + agents)"

    # Generate WOODPECKER_TOKEN via Forgejo OAuth2 flow (#779)
    generate_woodpecker_token "$forge_url" || true

    # Activate repo in Woodpecker now that stack is running
    activate_woodpecker_repo "$forge_repo"

    # Use detected Woodpecker repo ID if ci_id was not explicitly set
    if [ "$ci_id" = "0" ] && [ -n "${_WP_REPO_ID:-}" ]; then
      ci_id="$_WP_REPO_ID"
      echo "CI ID: ${ci_id} (from Woodpecker)"
      # Update TOML with Woodpecker repo ID
      if [ -f "$toml_path" ]; then
        python3 -c "
import sys, re, pathlib
p = pathlib.Path(sys.argv[1])
text = p.read_text()
text = re.sub(r'^woodpecker_repo_id\s*=\s*.*$', 'woodpecker_repo_id = ' + sys.argv[2], text, flags=re.MULTILINE)
p.write_text(text)
" "$toml_path" "$ci_id"
      fi
    fi
  fi

  # Activate default agents (zero-cost when idle — they only invoke Claude
  # when there is actual work, so an empty project burns no LLM tokens)
  mkdir -p "${FACTORY_ROOT}/state"
  touch "${FACTORY_ROOT}/state/.dev-active"
  touch "${FACTORY_ROOT}/state/.reviewer-active"
  touch "${FACTORY_ROOT}/state/.gardener-active"

  echo ""
  echo "Done. Project ${project_name} is ready."
  echo " Config: ${toml_path}"
  echo " Clone: ${repo_root}"
  echo " Forge: ${forge_url}/${forge_repo}"
  if [ "$bare" = false ]; then
    echo " Stack: docker compose (use 'disinto up/down/logs/shell')"
  else
    echo " Mode: bare-metal"
  fi
  echo ""
  echo "── Claude authentication ──────────────────────────────"
  echo " OAuth (shared across containers):"
  echo "   Run 'claude auth login' on the host once."
  echo "   Credentials in ~/.claude are mounted into containers."
  echo " API key (alternative — metered billing, no rotation issues):"
  echo "   Set ANTHROPIC_API_KEY in .env to skip OAuth entirely."
  echo ""
  echo " Run 'disinto status' to verify."
}
|
|
|
|
# ── status command ───────────────────────────────────────────────────────────
|
|
|
|
# Show factory status for every configured project: active dev sessions and
# backlog/PR counts pulled from the forge API.
#
# Globals:  FACTORY_ROOT (read), FORGE_TOKEN, FORGE_URL (read, optional)
# Outputs:  human-readable status report to stdout
# Returns:  0
disinto_status() {
  local toml_dir="${FACTORY_ROOT}/projects"
  local found=false

  for toml in "${toml_dir}"/*.toml; do
    [ -f "$toml" ] || continue
    found=true

    # Parse name, repo, forge_url from TOML in a single python3 call
    # (previously three separate interpreter launches parsed the same file).
    # A missing 'name' or 'repo' key raises KeyError -> non-zero -> continue,
    # matching the old per-key behavior.
    local meta
    meta=$(python3 -c "
import sys, tomllib
with open(sys.argv[1], 'rb') as f:
    cfg = tomllib.load(f)
print(cfg['name'])
print(cfg['repo'])
print(cfg.get('forge_url', ''))
" "$toml" 2>/dev/null) || continue
    local pname prepo pforge_url
    {
      IFS= read -r pname
      IFS= read -r prepo
      # Last read may hit EOF when forge_url is empty (command substitution
      # strips the trailing blank line) — that still leaves pforge_url empty.
      IFS= read -r pforge_url || true
    } <<< "$meta"
    pforge_url="${pforge_url:-${FORGE_URL:-http://localhost:3000}}"

    echo "== ${pname} (${prepo}) =="

    # Active dev sessions (one .phase file per in-flight issue)
    local has_sessions=false
    for pf in /tmp/dev-session-"${pname}"-*.phase; do
      [ -f "$pf" ] || continue
      has_sessions=true
      local issue phase_line
      issue=$(basename "$pf" | sed "s/dev-session-${pname}-//;s/\.phase//")
      phase_line=$(head -1 "$pf" 2>/dev/null || echo "unknown")
      echo " Session #${issue}: ${phase_line}"
    done
    if [ "$has_sessions" = false ]; then
      echo " Sessions: none"
    fi

    # Backlog depth via API — HEAD request; count comes from x-total-count
    if [ -n "${FORGE_TOKEN:-}" ]; then
      local api="${pforge_url}/api/v1/repos/${prepo}"
      local backlog_count pr_count

      backlog_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/issues?state=open&labels=backlog&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || backlog_count="?"
      echo " Backlog: ${backlog_count:-0} issues"

      pr_count=$(curl -sf -I \
        -H "Authorization: token ${FORGE_TOKEN}" \
        "${api}/pulls?state=open&limit=1" 2>/dev/null \
        | grep -i 'x-total-count' | tr -d '\r' | awk '{print $2}') || pr_count="?"
      echo " Open PRs: ${pr_count:-0}"
    else
      echo " Backlog: (no FORGE_TOKEN)"
      echo " Open PRs: (no FORGE_TOKEN)"
    fi

    echo ""
  done

  if [ "$found" = false ]; then
    echo "No projects configured."
    echo "Run 'disinto init <repo-url>' to get started."
  fi
}
|
|
|
|
# ── secrets command ────────────────────────────────────────────────────────────
|
|
|
|
# Manage encrypted secrets (SOPS + age).
#
# Subcommands (dispatched on $1):
#   add/show <NAME>       individual secrets under secrets/<NAME>.enc
#   edit/show/migrate     agent secrets in .env.enc
#   *-vault variants      vault secrets in .env.vault.enc
#
# Globals:  FACTORY_ROOT, HOME, AGE_PUBLIC_KEY (read/written by helpers)
# Returns:  exits non-zero on usage/tooling errors; helpers also exit directly.
disinto_secrets() {
  local subcmd="${1:-}"
  local enc_file="${FACTORY_ROOT}/.env.enc"
  local env_file="${FACTORY_ROOT}/.env"
  local vault_enc_file="${FACTORY_ROOT}/.env.vault.enc"
  local vault_env_file="${FACTORY_ROOT}/.env.vault"

  # Shared helper: ensure sops+age and .sops.yaml exist.
  # NOTE: defined inside the function so it sees the locals above; it becomes
  # globally visible after the first call to disinto_secrets.
  _secrets_ensure_sops() {
    if ! command -v sops &>/dev/null || ! command -v age-keygen &>/dev/null; then
      echo "Error: sops and age are required." >&2
      echo "  Install sops: https://github.com/getsops/sops/releases" >&2
      echo "  Install age: apt install age / brew install age" >&2
      exit 1
    fi
    # ensure_age_key / write_sops_yaml come from lib/env.sh (sourced at top
    # of the script); ensure_age_key exports AGE_PUBLIC_KEY on success.
    if ! ensure_age_key; then
      echo "Error: failed to generate age key" >&2
      exit 1
    fi
    if [ ! -f "${FACTORY_ROOT}/.sops.yaml" ]; then
      write_sops_yaml "$AGE_PUBLIC_KEY"
      echo "Created: .sops.yaml"
    fi
  }

  local secrets_dir="${FACTORY_ROOT}/secrets"
  local age_key_file="${HOME}/.config/sops/age/keys.txt"

  # Shared helper: ensure age key exists and export AGE_PUBLIC_KEY.
  # Unlike _secrets_ensure_sops, this never generates a key — it only reads
  # the existing one (used by 'add', which needs the public key to encrypt).
  _secrets_ensure_age_key() {
    if ! command -v age &>/dev/null; then
      echo "Error: age is required." >&2
      echo "  Install age: apt install age / brew install age" >&2
      exit 1
    fi
    if [ ! -f "$age_key_file" ]; then
      echo "Error: age key not found at ${age_key_file}" >&2
      echo "  Run 'disinto init' to generate one, or create manually with:" >&2
      echo "  mkdir -p ~/.config/sops/age && age-keygen -o ${age_key_file}" >&2
      exit 1
    fi
    # Derive the public key from the private key file.
    AGE_PUBLIC_KEY="$(age-keygen -y "$age_key_file" 2>/dev/null)"
    if [ -z "$AGE_PUBLIC_KEY" ]; then
      echo "Error: failed to read public key from ${age_key_file}" >&2
      exit 1
    fi
    export AGE_PUBLIC_KEY
  }

  case "$subcmd" in
    add)
      # Prompt for a value (no echo) and store it age-encrypted under
      # secrets/<NAME>.enc, confirming before overwriting an existing file.
      local name="${2:-}"
      if [ -z "$name" ]; then
        echo "Usage: disinto secrets add <NAME>" >&2
        exit 1
      fi
      _secrets_ensure_age_key
      mkdir -p "$secrets_dir"

      printf 'Enter value for %s: ' "$name" >&2
      local value
      # -s: no terminal echo; IFS= preserves leading/trailing whitespace
      IFS= read -rs value
      echo >&2
      if [ -z "$value" ]; then
        echo "Error: empty value" >&2
        exit 1
      fi

      local enc_path="${secrets_dir}/${name}.enc"
      if [ -f "$enc_path" ]; then
        printf 'Secret %s already exists. Overwrite? [y/N] ' "$name" >&2
        local confirm
        read -r confirm
        if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then
          echo "Aborted." >&2
          exit 1
        fi
      fi
      if ! printf '%s' "$value" | age -r "$AGE_PUBLIC_KEY" -o "$enc_path"; then
        echo "Error: encryption failed" >&2
        exit 1
      fi
      echo "Stored: ${enc_path}"
      ;;
    show)
      local name="${2:-}"
      if [ -n "$name" ]; then
        # Show individual secret: disinto secrets show <NAME>
        local enc_path="${secrets_dir}/${name}.enc"
        if [ ! -f "$enc_path" ]; then
          echo "Error: ${enc_path} not found" >&2
          exit 1
        fi
        if [ ! -f "$age_key_file" ]; then
          echo "Error: age key not found at ${age_key_file}" >&2
          exit 1
        fi
        # Decrypted value goes straight to stdout.
        age -d -i "$age_key_file" "$enc_path"
      else
        # Show all agent secrets: disinto secrets show
        if [ ! -f "$enc_file" ]; then
          echo "Error: ${enc_file} not found." >&2
          exit 1
        fi
        sops -d "$enc_file"
      fi
      ;;
    edit)
      # Interactive sops edit of the agent secrets file.
      if [ ! -f "$enc_file" ]; then
        echo "Error: ${enc_file} not found. Run 'disinto secrets migrate' first." >&2
        exit 1
      fi
      sops "$enc_file"
      ;;
    migrate)
      # Encrypt plaintext .env -> .env.enc, verify round-trip, then delete
      # the plaintext. On verification failure the partial .enc is removed
      # and the plaintext is left untouched.
      if [ ! -f "$env_file" ]; then
        echo "Error: ${env_file} not found — nothing to migrate." >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$env_file" "$enc_file"
      # Verify decryption works
      if ! sops -d "$enc_file" >/dev/null 2>&1; then
        echo "Error: failed to verify .env.enc decryption" >&2
        rm -f "$enc_file"
        exit 1
      fi
      rm -f "$env_file"
      echo "Migrated: .env -> .env.enc (plaintext removed)"
      ;;
    edit-vault)
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found. Run 'disinto secrets migrate-vault' first." >&2
        exit 1
      fi
      sops "$vault_enc_file"
      ;;
    show-vault)
      if [ ! -f "$vault_enc_file" ]; then
        echo "Error: ${vault_enc_file} not found." >&2
        exit 1
      fi
      sops -d "$vault_enc_file"
      ;;
    migrate-vault)
      # Same migrate flow, but for the vault secrets file (.env.vault).
      if [ ! -f "$vault_env_file" ]; then
        echo "Error: ${vault_env_file} not found — nothing to migrate." >&2
        echo "  Create .env.vault with vault secrets (GITHUB_TOKEN, deploy keys, etc.)" >&2
        exit 1
      fi
      _secrets_ensure_sops
      encrypt_env_file "$vault_env_file" "$vault_enc_file"
      # Verify decryption works before removing plaintext
      if ! sops -d "$vault_enc_file" >/dev/null 2>&1; then
        echo "Error: failed to verify .env.vault.enc decryption" >&2
        rm -f "$vault_enc_file"
        exit 1
      fi
      rm -f "$vault_env_file"
      echo "Migrated: .env.vault -> .env.vault.enc (plaintext removed)"
      ;;
    *)
      # Unknown/missing subcommand: print usage to stderr and fail.
      cat <<EOF >&2
Usage: disinto secrets <subcommand>

Individual secrets (secrets/<NAME>.enc):
  add <NAME>       Prompt for value, encrypt, store in secrets/<NAME>.enc
  show <NAME>      Decrypt and print an individual secret

Agent secrets (.env.enc):
  edit             Edit agent secrets (FORGE_TOKEN, CLAUDE_API_KEY, etc.)
  show             Show decrypted agent secrets (no argument)
  migrate          Encrypt .env -> .env.enc

Vault secrets (.env.vault.enc):
  edit-vault       Edit vault secrets (GITHUB_TOKEN, deploy keys, etc.)
  show-vault       Show decrypted vault secrets
  migrate-vault    Encrypt .env.vault -> .env.vault.enc
EOF
      exit 1
      ;;
  esac
}
|
|
|
|
# ── run command ───────────────────────────────────────────────────────────────
|
|
|
|
# Execute one action inside an ephemeral runner container.
#
# Vault secrets are decrypted from .env.vault.enc into a mktemp file that
# lives only for the duration of the run — removed explicitly after the
# container exits and, as a backstop, by an EXIT trap.
#
# Arguments: $1 - action id (required)
# Returns:   the runner container's exit status
disinto_run() {
  local action_id="${1:?Usage: disinto run <action-id>}"
  local compose="${FACTORY_ROOT}/docker-compose.yml"
  local vault_enc="${FACTORY_ROOT}/.env.vault.enc"

  if [[ ! -f "$compose" ]]; then
    echo "Error: docker-compose.yml not found" >&2
    echo "  Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  fi

  if [[ ! -f "$vault_enc" ]]; then
    echo "Error: .env.vault.enc not found — create vault secrets first" >&2
    echo "  Run 'disinto secrets migrate-vault' after creating .env.vault" >&2
    exit 1
  fi

  command -v sops &>/dev/null || {
    echo "Error: sops not found — required to decrypt vault secrets" >&2
    exit 1
  }

  # Decrypt vault secrets into a private temp file; the trap guarantees
  # cleanup even if docker compose aborts the whole script.
  local secrets_env
  secrets_env=$(mktemp /tmp/disinto-vault-XXXXXX)
  trap 'rm -f "$secrets_env"' EXIT

  if ! sops -d --output-type dotenv "$vault_enc" > "$secrets_env" 2>/dev/null; then
    rm -f "$secrets_env"
    echo "Error: failed to decrypt .env.vault.enc" >&2
    exit 1
  fi

  echo "Vault secrets decrypted to tmpfile"

  # Run the action in an ephemeral runner container.
  local status=0
  docker compose -f "$compose" \
    run --rm --env-file "$secrets_env" \
    runner "$action_id" || status=$?

  # Remove the plaintext secrets as soon as the container is done.
  rm -f "$secrets_env"
  echo "Run tmpfile removed"

  if [[ "$status" -eq 0 ]]; then
    echo "Run action ${action_id} completed successfully"
  else
    echo "Run action ${action_id} failed (exit ${status})" >&2
  fi
  return "$status"
}
|
|
|
|
# ── up command ────────────────────────────────────────────────────────────────
|
|
|
|
# Bring the compose stack up.
#
# If agent secrets exist only encrypted (.env.enc), no plaintext .env is
# present, and sops is installed, decrypt them to a transient .env for the
# `docker compose up` call and remove the plaintext afterwards (an EXIT trap
# also covers the failure path).
#
# Arguments: extra args are forwarded to `docker compose up -d`
disinto_up() {
  local compose="${FACTORY_ROOT}/docker-compose.yml"
  [[ -f "$compose" ]] || {
    echo "Error: docker-compose.yml not found" >&2
    echo "  Run 'disinto init <repo-url>' first (without --bare)" >&2
    exit 1
  }

  local encrypted="${FACTORY_ROOT}/.env.enc"
  local plain="${FACTORY_ROOT}/.env"
  local tmp_env=""
  if [[ -f "$encrypted" && ! -f "$plain" ]] && command -v sops &>/dev/null; then
    tmp_env="$plain"
    sops -d --output-type dotenv "$encrypted" > "$tmp_env"
    trap '[ -n "${tmp_env:-}" ] && rm -f "$tmp_env"' EXIT
    echo "Decrypted secrets for compose"
  fi

  docker compose -f "$compose" up -d "$@"
  echo "Stack is up"

  # Remove the transient plaintext .env (the EXIT trap is the backstop).
  if [[ -n "$tmp_env" && -f "$tmp_env" ]]; then
    rm -f "$tmp_env"
    echo "Removed temporary .env"
  fi
}
|
|
|
|
# ── down command ──────────────────────────────────────────────────────────────
|
|
|
|
# Stop the compose stack. Extra arguments are forwarded to `docker compose down`.
disinto_down() {
  local cf="${FACTORY_ROOT}/docker-compose.yml"
  [[ -f "$cf" ]] || {
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  }
  docker compose -f "$cf" down "$@"
  echo "Stack is down"
}
|
|
|
|
# ── logs command ──────────────────────────────────────────────────────────────
|
|
|
|
# Tail service logs. Extra arguments (e.g. a service name) are forwarded to
# `docker compose logs -f`.
disinto_logs() {
  local cf="${FACTORY_ROOT}/docker-compose.yml"
  [[ -f "$cf" ]] || {
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  }
  docker compose -f "$cf" logs -f "$@"
}
|
|
|
|
# ── shell command ─────────────────────────────────────────────────────────────
|
|
|
|
# Open an interactive bash shell inside the running agents container.
disinto_shell() {
  local cf="${FACTORY_ROOT}/docker-compose.yml"
  [[ -f "$cf" ]] || {
    echo "Error: docker-compose.yml not found" >&2
    exit 1
  }
  docker compose -f "$cf" exec agents bash
}
|
|
|
|
# ── hire-an-agent command ─────────────────────────────────────────────────────
|
|
|
|
# Creates a Forgejo user and .profile repo for an agent.
|
|
# Usage: disinto hire-an-agent <agent-name> <role> [--formula <path>]
|
|
disinto_hire_an_agent() {
|
|
local agent_name="${1:-}"
|
|
local role="${2:-}"
|
|
local formula_path=""
|
|
|
|
if [ -z "$agent_name" ] || [ -z "$role" ]; then
|
|
echo "Error: agent-name and role required" >&2
|
|
echo "Usage: disinto hire-an-agent <agent-name> <role> [--formula <path>]" >&2
|
|
exit 1
|
|
fi
|
|
shift 2
|
|
|
|
# Parse flags
|
|
while [ $# -gt 0 ]; do
|
|
case "$1" in
|
|
--formula)
|
|
formula_path="$2"
|
|
shift 2
|
|
;;
|
|
*)
|
|
echo "Unknown option: $1" >&2
|
|
exit 1
|
|
;;
|
|
esac
|
|
done
|
|
|
|
# Default formula path
|
|
if [ -z "$formula_path" ]; then
|
|
formula_path="${FACTORY_ROOT}/formulas/${role}.toml"
|
|
fi
|
|
|
|
# Validate formula exists
|
|
if [ ! -f "$formula_path" ]; then
|
|
echo "Error: formula not found at ${formula_path}" >&2
|
|
exit 1
|
|
fi
|
|
|
|
echo "── Hiring agent: ${agent_name} (${role}) ───────────────────────"
|
|
echo "Formula: ${formula_path}"
|
|
|
|
# Ensure FORGE_TOKEN is set
|
|
if [ -z "${FORGE_TOKEN:-}" ]; then
|
|
echo "Error: FORGE_TOKEN not set" >&2
|
|
exit 1
|
|
fi
|
|
|
|
# Get Forge URL
|
|
local forge_url="${FORGE_URL:-http://localhost:3000}"
|
|
echo "Forge: ${forge_url}"
|
|
|
|
# Step 1: Create user via API (skip if exists)
|
|
echo ""
|
|
echo "Step 1: Creating user '${agent_name}' (if not exists)..."
|
|
|
|
local user_exists=false
|
|
if curl -sf --max-time 5 "${forge_url}/api/v1/users/${agent_name}" >/dev/null 2>&1; then
|
|
user_exists=true
|
|
echo " User '${agent_name}' already exists"
|
|
else
|
|
# Create user using admin token
|
|
local admin_user="disinto-admin"
|
|
local admin_pass="${_FORGE_ADMIN_PASS:-admin}"
|
|
|
|
# Try to get admin token first
|
|
local admin_token
|
|
admin_token=$(curl -sf -X POST \
|
|
-u "${admin_user}:${admin_pass}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/users/${admin_user}/tokens" \
|
|
-d '{"name":"temp-token","scopes":["all"]}' 2>/dev/null \
|
|
| jq -r '.sha1 // empty') || admin_token=""
|
|
|
|
if [ -z "$admin_token" ]; then
|
|
# Token might already exist — try listing
|
|
admin_token=$(curl -sf \
|
|
-u "${admin_user}:${admin_pass}" \
|
|
"${forge_url}/api/v1/users/${admin_user}/tokens" 2>/dev/null \
|
|
| jq -r '.[0].sha1 // empty') || admin_token=""
|
|
fi
|
|
|
|
if [ -z "$admin_token" ]; then
|
|
echo " Warning: could not obtain admin token, trying FORGE_TOKEN..."
|
|
admin_token="${FORGE_TOKEN}"
|
|
fi
|
|
|
|
# Create the user
|
|
local user_pass="agent-$(head -c 16 /dev/urandom | base64 | tr -dc 'a-zA-Z0-9' | head -c 20)"
|
|
if curl -sf -X POST \
|
|
-H "Authorization: token ${admin_token}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/admin/users" \
|
|
-d "{\"username\":\"${agent_name}\",\"password\":\"${user_pass}\",\"email\":\"${agent_name}@${PROJECT_NAME:-disinto}.local\",\"full_name\":\"${agent_name}\",\"active\":true,\"admin\":false,\"must_change_password\":false}" >/dev/null 2>&1; then
|
|
echo " Created user '${agent_name}'"
|
|
else
|
|
echo " Warning: failed to create user via admin API" >&2
|
|
# Try alternative: user might already exist
|
|
if curl -sf --max-time 5 "${forge_url}/api/v1/users/${agent_name}" >/dev/null 2>&1; then
|
|
user_exists=true
|
|
echo " User '${agent_name}' exists (confirmed)"
|
|
else
|
|
echo " Error: failed to create user '${agent_name}'" >&2
|
|
exit 1
|
|
fi
|
|
fi
|
|
fi
|
|
|
|
# Step 2: Create .profile repo on Forgejo
|
|
echo ""
|
|
echo "Step 2: Creating '${agent_name}/.profile' repo (if not exists)..."
|
|
|
|
local repo_exists=false
|
|
if curl -sf --max-time 5 "${forge_url}/api/v1/repos/${agent_name}/.profile" >/dev/null 2>&1; then
|
|
repo_exists=true
|
|
echo " Repo '${agent_name}/.profile' already exists"
|
|
else
|
|
# Get user token for creating repo
|
|
local user_token=""
|
|
if [ "$user_exists" = true ]; then
|
|
# Try to get token for the new user
|
|
# Note: user_pass was set in Step 1; for existing users this will fail (unknown password)
|
|
user_token=$(curl -sf -X POST \
|
|
-u "${agent_name}:${user_pass}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/users/${agent_name}/tokens" \
|
|
-d "{\"name\":\".profile-repo-token\",\"scopes\":[\"repository\"]}" 2>/dev/null \
|
|
| jq -r '.sha1 // empty') || user_token=""
|
|
|
|
if [ -z "$user_token" ]; then
|
|
# Try listing existing tokens
|
|
user_token=$(curl -sf \
|
|
-u "${agent_name}:${user_pass}" \
|
|
"${forge_url}/api/v1/users/${agent_name}/tokens" 2>/dev/null \
|
|
| jq -r '.[0].sha1 // empty') || user_token=""
|
|
fi
|
|
fi
|
|
|
|
# Fall back to admin token if user token not available
|
|
if [ -z "$user_token" ]; then
|
|
echo " Using admin token to create repo"
|
|
user_token="${admin_token:-${FORGE_TOKEN}}"
|
|
fi
|
|
|
|
# Create the repo
|
|
if curl -sf -X POST \
|
|
-H "Authorization: token ${user_token}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/user/repos" \
|
|
-d "{\"name\":\".profile\",\"description\":\"${agent_name}'s .profile repo\",\"private\":true,\"auto_init\":false}" >/dev/null 2>&1; then
|
|
echo " Created repo '${agent_name}/.profile'"
|
|
else
|
|
# Try with org path
|
|
if curl -sf -X POST \
|
|
-H "Authorization: token ${user_token}" \
|
|
-H "Content-Type: application/json" \
|
|
"${forge_url}/api/v1/orgs/${agent_name}/repos" \
|
|
-d "{\"name\":\".profile\",\"description\":\"${agent_name}'s .profile repo\",\"private\":true,\"auto_init\":false}" >/dev/null 2>&1; then
|
|
echo " Created repo '${agent_name}/.profile' (in org)"
|
|
else
|
|
echo " Error: failed to create repo '${agent_name}/.profile'" >&2
|
|
exit 1
|
|
fi
|
|
fi
|
|
fi
|
|
|
|
# Step 3: Clone repo and create initial commit
|
|
echo ""
|
|
echo "Step 3: Cloning repo and creating initial commit..."
|
|
|
|
local clone_dir="/tmp/.profile-clone-${agent_name}"
|
|
rm -rf "$clone_dir"
|
|
mkdir -p "$clone_dir"
|
|
|
|
# Build clone URL (unauthenticated version for display)
|
|
local clone_url="${forge_url}/${agent_name}/.profile.git"
|
|
local auth_url
|
|
auth_url=$(printf '%s' "$forge_url" | sed "s|://|://${agent_name}:${user_token:-${FORGE_TOKEN}}@|")
|
|
clone_url="${auth_url}/.profile.git"
|
|
|
|
# Display unauthenticated URL (auth token only in actual git clone command)
|
|
echo " Cloning: ${forge_url}/${agent_name}/.profile.git"
|
|
|
|
if ! git clone --quiet "$clone_url" "$clone_dir" 2>/dev/null; then
|
|
# Try without auth (might work for public repos or with FORGE_TOKEN)
|
|
clone_url="${forge_url}/${agent_name}/.profile.git"
|
|
if ! git clone --quiet "$clone_url" "$clone_dir" 2>/dev/null; then
|
|
echo " Error: failed to clone repo" >&2
|
|
rm -rf "$clone_dir"
|
|
exit 1
|
|
fi
|
|
fi
|
|
|
|
# Configure git
|
|
git -C "$clone_dir" config user.name "disinto-admin"
|
|
git -C "$clone_dir" config user.email "disinto-admin@localhost"
|
|
|
|
# Create directory structure
|
|
echo " Creating directory structure..."
|
|
mkdir -p "${clone_dir}/journal"
|
|
mkdir -p "${clone_dir}/knowledge"
|
|
touch "${clone_dir}/journal/.gitkeep"
|
|
touch "${clone_dir}/knowledge/.gitkeep"
|
|
|
|
# Copy formula
|
|
echo " Copying formula..."
|
|
cp "$formula_path" "${clone_dir}/formula.toml"
|
|
|
|
# Create README
|
|
if [ ! -f "${clone_dir}/README.md" ]; then
|
|
cat > "${clone_dir}/README.md" <<EOF
|
|
# ${agent_name}'s .profile
|
|
|
|
Agent profile repository for ${agent_name}.
|
|
|
|
## Structure
|
|
|
|
\`\`\`
|
|
${agent_name}/.profile/
|
|
├── formula.toml # Agent's role formula
|
|
├── journal/ # Issue-by-issue log files (journal branch)
|
|
│ └── .gitkeep
|
|
├── knowledge/ # Shared knowledge and best practices
|
|
│ └── .gitkeep
|
|
└── README.md
|
|
\`\`\`
|
|
|
|
## Branches
|
|
|
|
- \`main\` — Admin-only merge for formula changes (requires 1 approval)
|
|
- \`journal\` — Agent branch for direct journal entries
|
|
- Agent can push directly to this branch
|
|
- Formula changes must go through PR to \`main\`
|
|
|
|
## Branch protection
|
|
|
|
- \`main\`: Protected — requires 1 admin approval for merges
|
|
- \`journal\`: Unprotected — agent can push directly
|
|
EOF
|
|
fi
|
|
|
|
# Commit and push
|
|
echo " Committing and pushing..."
|
|
git -C "$clone_dir" add -A
|
|
if ! git -C "$clone_dir" diff --cached --quiet 2>/dev/null; then
|
|
git -C "$clone_dir" commit -m "chore: initial .profile setup" -q
|
|
git -C "$clone_dir" push origin main 2>&1 >/dev/null || \
|
|
git -C "$clone_dir" push origin master 2>&1 >/dev/null || true
|
|
echo " Committed: initial .profile setup"
|
|
else
|
|
echo " No changes to commit"
|
|
fi
|
|
|
|
rm -rf "$clone_dir"
|
|
|
|
# Step 4: Set up branch protection
|
|
echo ""
|
|
echo "Step 4: Setting up branch protection..."
|
|
|
|
# Source branch-protection.sh helper
|
|
local bp_script="${FACTORY_ROOT}/lib/branch-protection.sh"
|
|
if [ -f "$bp_script" ]; then
|
|
# Source required environment
|
|
if [ -f "${FACTORY_ROOT}/lib/env.sh" ]; then
|
|
source "${FACTORY_ROOT}/lib/env.sh"
|
|
fi
|
|
|
|
# Set up branch protection for .profile repo
|
|
if source "$bp_script" 2>/dev/null && setup_profile_branch_protection "${agent_name}/.profile" "main"; then
|
|
echo " Branch protection configured for main branch"
|
|
echo " - Requires 1 approval before merge"
|
|
echo " - Admin-only merge enforcement"
|
|
echo " - Journal branch created for direct agent pushes"
|
|
else
|
|
echo " Warning: could not configure branch protection (Forgejo API may not be available)"
|
|
echo " Note: Branch protection can be set up manually later"
|
|
fi
|
|
else
|
|
echo " Warning: branch-protection.sh not found at ${bp_script}"
|
|
fi
|
|
|
|
# Step 5: Create state marker
|
|
echo ""
|
|
echo "Step 5: Creating state marker..."
|
|
|
|
local state_dir="${FACTORY_ROOT}/state"
|
|
mkdir -p "$state_dir"
|
|
local state_file="${state_dir}/.${role}-active"
|
|
|
|
if [ ! -f "$state_file" ]; then
|
|
touch "$state_file"
|
|
echo " Created: ${state_file}"
|
|
else
|
|
echo " State marker already exists: ${state_file}"
|
|
fi
|
|
|
|
echo ""
|
|
echo "Done! Agent '${agent_name}' hired for role '${role}'."
|
|
echo " User: ${forge_url}/${agent_name}"
|
|
echo " Repo: ${forge_url}/${agent_name}/.profile"
|
|
echo " Formula: ${role}.toml"
|
|
}
|
|
|
|
# ── release command ───────────────────────────────────────────────────────────
#
# Creates a vault PR for the release. This is a convenience wrapper that
# creates the vault item TOML and submits it as a PR to the ops repo.
#
# Usage: disinto release <version>
# Example: disinto release v1.2.0
|
disinto_release() {
  # Create a vault item TOML for <version> in the ops repo and submit it as
  # a PR. Once the PR is approved and merged, the vault runner executes the
  # "release" formula for that version.
  #
  # Globals (read): FACTORY_ROOT, FORGE_URL, FORGE_TOKEN, PROJECT_REPO
  # Arguments:      $1 - release version, semver with 'v' prefix (e.g. v1.2.0)
  # Outputs:        progress messages on stdout, diagnostics on stderr
  # Returns:        exits non-zero on validation, git, or API failure
  local version="${1:-}"
  local formula_path="${FACTORY_ROOT}/formulas/release.toml"

  if [ -z "$version" ]; then
    echo "Error: version required" >&2
    echo "Usage: disinto release <version>" >&2
    echo "Example: disinto release v1.2.0" >&2
    exit 1
  fi

  # Validate version format (must start with 'v' followed by semver)
  if ! echo "$version" | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+$'; then
    echo "Error: version must be in format v1.2.3 (semver with 'v' prefix)" >&2
    exit 1
  fi

  # The release formula must exist before we promise the runner can use it.
  if [ ! -f "$formula_path" ]; then
    echo "Error: release formula not found at ${formula_path}" >&2
    exit 1
  fi

  # Get the ops repo root (expected as a sibling of FACTORY_ROOT).
  local ops_root="${FACTORY_ROOT}/../disinto-ops"
  if [ ! -d "${ops_root}/.git" ]; then
    echo "Error: ops repo not found at ${ops_root}" >&2
    echo " Run 'disinto init' to set up the ops repo first" >&2
    exit 1
  fi

  # Generate a unique ID for the vault item.
  # NOTE(review): stripping dots can collide (e.g. v12.0.0 and v1.20.0 both
  # map to "release-v1200"); kept as-is for compatibility with existing vault
  # item naming — confirm with the vault runner before changing.
  local id="release-${version//./}"
  local vault_toml="${ops_root}/vault/pending/${id}.toml"

  # Create vault TOML with the specific version.
  cat > "$vault_toml" <<EOF
# vault/pending/${id}.toml
# Release vault item for ${version}
# Auto-generated by disinto release

id = "${id}"
formula = "release"
context = "Release ${version}"
secrets = []
EOF

  echo "Created vault item: ${vault_toml}"

  # Branch and PR metadata for submitting the vault item to the ops repo.
  local branch_name="release/${version//./}"
  local pr_title="release: ${version}"
  local pr_body="Release ${version}

This PR creates a vault item for the release of version ${version}.

## Changes
- Added vault item: ${id}.toml

## Next Steps
1. Review this PR
2. Approve and merge
3. The vault runner will execute the release formula
"

  # Create (or reset) the release branch. 'checkout -B' handles both the
  # fresh and the re-run case, so no fallback checkout is needed.
  cd "$ops_root" || exit 1
  git checkout -q -B "$branch_name"

  # Commit the vault item. An empty diff (e.g. a re-run) is not an error.
  git add -A
  git commit -m "$pr_title" -m "$pr_body" 2>/dev/null || true

  # Push the branch; surface git's own diagnostics on failure.
  git push -u origin "$branch_name" || {
    echo "Error: failed to push branch" >&2
    exit 1
  }

  # Build the PR payload with jq so title/body are JSON-escaped correctly
  # (the previous sed-based newline escaping broke on quotes/backslashes;
  # jq is already a hard dependency for parsing the response below).
  local payload
  payload=$(jq -n \
    --arg title "$pr_title" \
    --arg head "$branch_name" \
    --arg body "$pr_body" \
    '{title: $title, head: $head, base: "main", body: $body}')

  # Create the PR. No '-f' here: on an API error we want the response body
  # so we can show it to the operator instead of an empty diagnostic.
  local pr_response
  pr_response=$(curl -s -X POST \
    -H "Authorization: token ${FORGE_TOKEN}" \
    -H "Content-Type: application/json" \
    "${FORGE_URL}/api/v1/repos/${PROJECT_REPO}/pulls" \
    -d "$payload") || {
    echo "Error: failed to create PR" >&2
    exit 1
  }

  # A successful create returns a numeric PR number; anything else is an
  # API error whose body we surface.
  local pr_number
  pr_number=$(printf '%s' "$pr_response" | jq -r '.number // empty')
  if [ -z "$pr_number" ]; then
    echo "Error: failed to create PR" >&2
    echo "Response: ${pr_response}" >&2
    exit 1
  fi

  local pr_url="${FORGE_URL}/${PROJECT_REPO}/pulls/${pr_number}"

  echo ""
  echo "Release PR created: ${pr_url}"
  echo ""
  echo "Next steps:"
  echo " 1. Review the PR"
  echo " 2. Approve and merge (requires 2 reviewers for vault items)"
  echo " 3. The vault runner will execute the release formula"
  echo ""
  echo "After merge, the release will:"
  echo " 1. Tag Forgejo main with ${version}"
  echo " 2. Push tag to mirrors (Codeberg, GitHub)"
  echo " 3. Build and tag the agents Docker image"
  echo " 4. Restart agent containers"
}
|
|
|
|
# ── Main dispatch ────────────────────────────────────────────────────────────
|
case "${1:-}" in
  init)          shift; disinto_init "$@" ;;
  up)            shift; disinto_up "$@" ;;
  down)          shift; disinto_down "$@" ;;
  logs)          shift; disinto_logs "$@" ;;
  shell)         shift; disinto_shell ;;
  status)        shift; disinto_status "$@" ;;
  secrets)       shift; disinto_secrets "$@" ;;
  run)           shift; disinto_run "$@" ;;
  release)       shift; disinto_release "$@" ;;
  hire-an-agent) shift; disinto_hire_an_agent "$@" ;;
  -h|--help|"")  usage ;;
  *)
    # Unknown command: previously this printed usage and exited 0, so
    # callers/scripts could not detect misuse. Report it and exit non-zero.
    printf 'disinto: unknown command: %s\n\n' "$1" >&2
    usage >&2
    exit 2
    ;;
esac