Compare commits

..

No commits in common. "main" and "v0.1.0" have entirely different histories.
main ... v0.1.0

17 changed files with 430 additions and 1322 deletions

4
.gitignore vendored
View file

@ -25,6 +25,4 @@ gardener/dust.jsonl
# Individual encrypted secrets (managed by disinto secrets add)
secrets/
# Pre-built binaries for Docker builds (avoid network calls during build)
docker/agents/bin/
.woodpecker/smoke-init.yml

View file

@ -1,17 +0,0 @@
when:
- event: pull_request
path:
- "bin/disinto"
- "lib/load-project.sh"
- "lib/env.sh"
- "tests/**"
- ".woodpecker/smoke-init.yml"
steps:
- name: smoke-init
image: python:3-alpine
commands:
- apk add --no-cache bash curl jq git coreutils
- python3 tests/mock-forgejo.py &
- sleep 2
- bash tests/smoke-init.sh

View file

@ -11,7 +11,6 @@
# disinto status Show factory status
# disinto secrets <subcommand> Manage encrypted secrets
# disinto run <action-id> Run action in ephemeral runner container
# disinto ci-logs <pipeline> [--step <name>] Read CI logs from Woodpecker SQLite
#
# Usage:
# disinto init https://github.com/user/repo
@ -41,8 +40,6 @@ Usage:
disinto status Show factory status
disinto secrets <subcommand> Manage encrypted secrets
disinto run <action-id> Run action in ephemeral runner container
disinto ci-logs <pipeline> [--step <name>]
Read CI logs from Woodpecker SQLite
disinto release <version> Create vault PR for release (e.g., v1.2.0)
disinto hire-an-agent <agent-name> <role> [--formula <path>]
Hire a new agent (create user + .profile repo)
@ -57,9 +54,6 @@ Init options:
Hire an agent options:
--formula <path> Path to role formula TOML (default: formulas/<role>.toml)
CI logs options:
--step <name> Filter logs to a specific step (e.g., smoke-init)
EOF
exit 1
}
@ -232,9 +226,7 @@ services:
- woodpecker
agents:
build:
context: .
dockerfile: docker/agents/Dockerfile
build: ./docker/agents
restart: unless-stopped
security_opt:
- apparmor=unconfined
@ -246,13 +238,11 @@ services:
- CLAUDE_BIN_PLACEHOLDER:/usr/local/bin/claude:ro
- ${HOME}/.ssh:/home/agent/.ssh:ro
- ${HOME}/.config/sops/age:/home/agent/.config/sops/age:ro
- woodpecker-data:/woodpecker-data:ro
environment:
FORGE_URL: http://forgejo:3000
WOODPECKER_SERVER: http://woodpecker:8000
DISINTO_CONTAINER: "1"
PROJECT_REPO_ROOT: /home/agent/repos/${PROJECT_NAME:-project}
WOODPECKER_DATA_DIR: /woodpecker-data
env_file:
- .env
# IMPORTANT: agents get .env only (forge tokens, CI tokens, config).
@ -266,9 +256,7 @@ services:
- disinto-net
runner:
build:
context: .
dockerfile: docker/agents/Dockerfile
build: ./docker/agents
profiles: ["vault"]
security_opt:
- apparmor=unconfined
@ -290,19 +278,9 @@ services:
ports:
- "80:80"
- "443:443"
environment:
- DISINTO_VERSION=${DISINTO_VERSION:-main}
- FORGE_URL=http://forgejo:3000
- FORGE_REPO=johba/disinto
- FORGE_OPS_REPO=johba/disinto-ops
- FORGE_TOKEN=${FORGE_TOKEN:-}
- FORGE_ADMIN_USERS=${FORGE_ADMIN_USERS:-disinto-admin,johba}
- FORGE_ADMIN_TOKEN=${FORGE_ADMIN_TOKEN:-}
- OPS_REPO_ROOT=/opt/disinto-ops
- PROJECT_REPO_ROOT=/opt/disinto
- PRIMARY_BRANCH=main
volumes:
- ./docker/Caddyfile:/etc/caddy/Caddyfile
- ./docker/edge/dispatcher.sh:/usr/local/bin/dispatcher.sh:ro
- caddy_data:/data
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
@ -746,7 +724,6 @@ setup_forge() {
# Get or create human user token
local human_token
local env_file="${FACTORY_ROOT}/.env"
if curl -sf --max-time 5 "${forge_url}/api/v1/users/${human_user}" >/dev/null 2>&1; then
human_token=$(curl -sf -X POST \
-u "${human_user}:${human_pass}" \
@ -786,9 +763,9 @@ setup_forge() {
[vault-bot]="FORGE_VAULT_TOKEN"
[supervisor-bot]="FORGE_SUPERVISOR_TOKEN"
[predictor-bot]="FORGE_PREDICTOR_TOKEN"
[architect-bot]="FORGE_ARCHITECT_TOKEN"
)
local env_file="${FACTORY_ROOT}/.env"
local bot_user bot_pass token token_var
for bot_user in dev-bot review-bot planner-bot gardener-bot vault-bot supervisor-bot predictor-bot architect-bot; do
@ -1922,10 +1899,8 @@ p.write_text(text)
echo "Repo: ${repo_root} (existing clone)"
fi
# Push to local Forgejo (skip if SKIP_PUSH is set)
if [ "${SKIP_PUSH:-false}" = "false" ]; then
# Push to local Forgejo
push_to_forge "$repo_root" "$forge_url" "$forge_repo"
fi
# Detect primary branch
if [ -z "$branch" ]; then
@ -2390,55 +2365,6 @@ disinto_run() {
return "$rc"
}
# ── Pre-build: download binaries to docker/agents/bin/ ────────────────────────
# This avoids network calls during docker build (needed for Docker-in-LXD builds)
# Returns 0 on success, 1 on failure
download_agent_binaries() {
local bin_dir="${FACTORY_ROOT}/docker/agents/bin"
mkdir -p "$bin_dir"
echo "Downloading agent binaries to ${bin_dir}..."
# Download SOPS
local sops_file="${bin_dir}/sops"
if [ ! -f "$sops_file" ]; then
echo " Downloading SOPS v3.9.4..."
curl -sL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.linux.amd64 -o "$sops_file"
if [ ! -f "$sops_file" ]; then
echo "Error: failed to download SOPS" >&2
return 1
fi
fi
# Verify checksum
echo " Verifying SOPS checksum..."
if ! echo "5488e32bc471de7982ad895dd054bbab3ab91c417a118426134551e9626e4e85 ${sops_file}" | sha256sum -c - >/dev/null 2>&1; then
echo "Error: SOPS checksum verification failed" >&2
return 1
fi
chmod +x "$sops_file"
# Download tea CLI
local tea_file="${bin_dir}/tea"
if [ ! -f "$tea_file" ]; then
echo " Downloading tea CLI v0.9.2..."
curl -sL https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64 -o "$tea_file"
if [ ! -f "$tea_file" ]; then
echo "Error: failed to download tea CLI" >&2
return 1
fi
fi
# Verify checksum
echo " Verifying tea CLI checksum..."
if ! echo "be10cdf9a619e3c0f121df874960ed19b53e62d1c7036cf60313a28b5227d54d ${tea_file}" | sha256sum -c - >/dev/null 2>&1; then
echo "Error: tea CLI checksum verification failed" >&2
return 1
fi
chmod +x "$tea_file"
echo "Binaries downloaded and verified successfully"
return 0
}
# ── up command ────────────────────────────────────────────────────────────────
disinto_up() {
@ -2449,14 +2375,6 @@ disinto_up() {
exit 1
fi
# Pre-build: download binaries to docker/agents/bin/ to avoid network calls during docker build
echo "── Pre-build: downloading agent binaries ────────────────────────"
if ! download_agent_binaries; then
echo "Error: failed to download agent binaries" >&2
exit 1
fi
echo ""
# Decrypt secrets to temp .env if SOPS available and .env.enc exists
local tmp_env=""
local enc_file="${FACTORY_ROOT}/.env.enc"
@ -2936,59 +2854,6 @@ This PR creates a vault item for the release of version ${version}.
echo " 4. Restart agent containers"
}
# ── ci-logs command ──────────────────────────────────────────────────────────
# Reads CI logs from the Woodpecker SQLite database.
# Usage: disinto ci-logs <pipeline> [--step <name>]
disinto_ci_logs() {
local pipeline_number="" step_name=""
if [ $# -lt 1 ]; then
echo "Error: pipeline number required" >&2
echo "Usage: disinto ci-logs <pipeline> [--step <name>]" >&2
exit 1
fi
# Parse arguments
while [ $# -gt 0 ]; do
case "$1" in
--step|-s)
step_name="$2"
shift 2
;;
-*)
echo "Unknown option: $1" >&2
exit 1
;;
*)
if [ -z "$pipeline_number" ]; then
pipeline_number="$1"
else
echo "Unexpected argument: $1" >&2
exit 1
fi
shift
;;
esac
done
if [ -z "$pipeline_number" ] || ! [[ "$pipeline_number" =~ ^[0-9]+$ ]]; then
echo "Error: pipeline number must be a positive integer" >&2
exit 1
fi
local log_reader="${FACTORY_ROOT}/lib/ci-log-reader.py"
if [ ! -f "$log_reader" ]; then
echo "Error: ci-log-reader.py not found at $log_reader" >&2
exit 1
fi
if [ -n "$step_name" ]; then
python3 "$log_reader" "$pipeline_number" --step "$step_name"
else
python3 "$log_reader" "$pipeline_number"
fi
}
# ── Main dispatch ────────────────────────────────────────────────────────────
case "${1:-}" in
@ -3000,7 +2865,6 @@ case "${1:-}" in
status) shift; disinto_status "$@" ;;
secrets) shift; disinto_secrets "$@" ;;
run) shift; disinto_run "$@" ;;
ci-logs) shift; disinto_ci_logs "$@" ;;
release) shift; disinto_release "$@" ;;
hire-an-agent) shift; disinto_hire_an_agent "$@" ;;
-h|--help) usage ;;

View file

@ -41,7 +41,7 @@ REPO_ROOT="${PROJECT_REPO_ROOT}"
LOCKFILE="/tmp/dev-agent-${PROJECT_NAME:-default}.lock"
STATUSFILE="/tmp/dev-agent-status-${PROJECT_NAME:-default}"
BRANCH="fix/issue-${ISSUE}" # Default; will be updated after FORGE_REMOTE is known
BRANCH="fix/issue-${ISSUE}"
WORKTREE="/tmp/${PROJECT_NAME}-worktree-${ISSUE}"
SID_FILE="/tmp/dev-session-${PROJECT_NAME}-${ISSUE}.sid"
PREFLIGHT_RESULT="/tmp/dev-agent-preflight.json"
@ -263,19 +263,6 @@ FORGE_REMOTE="${FORGE_REMOTE:-origin}"
export FORGE_REMOTE
log "forge remote: ${FORGE_REMOTE}"
# Generate unique branch name per attempt to avoid collision with failed attempts
# Only apply when not in recovery mode (RECOVERY_MODE branch is already set from existing PR)
# First attempt: fix/issue-N, subsequent: fix/issue-N-1, fix/issue-N-2, etc.
if [ "$RECOVERY_MODE" = false ]; then
# Count only branches matching fix/issue-N, fix/issue-N-1, fix/issue-N-2, etc. (exact prefix match)
ATTEMPT=$(git ls-remote --heads "$FORGE_REMOTE" "refs/heads/fix/issue-${ISSUE}" 2>/dev/null | grep -c "refs/heads/fix/issue-${ISSUE}$" || echo 0)
ATTEMPT=$((ATTEMPT + $(git ls-remote --heads "$FORGE_REMOTE" "refs/heads/fix/issue-${ISSUE}-*" 2>/dev/null | wc -l)))
if [ "$ATTEMPT" -gt 0 ]; then
BRANCH="fix/issue-${ISSUE}-${ATTEMPT}"
fi
fi
log "using branch: ${BRANCH}"
if [ "$RECOVERY_MODE" = true ]; then
if ! worktree_recover "$WORKTREE" "$BRANCH" "$FORGE_REMOTE"; then
log "ERROR: worktree recovery failed"
@ -588,8 +575,11 @@ else
outcome="blocked_${_PR_WALK_EXIT_REASON:-agent_failed}"
profile_write_journal "$ISSUE" "$ISSUE_TITLE" "$outcome" "$FILES_CHANGED" || true
# Cleanup on failure: preserve remote branch and PR for debugging, clean up local worktree
# Remote state (PR and branch) stays open for inspection of CI logs and review comments
# Cleanup on failure: close PR, delete remote branch, clean up worktree
if [ -n "$PR_NUMBER" ]; then
pr_close "$PR_NUMBER"
fi
git push "$FORGE_REMOTE" --delete "$BRANCH" 2>/dev/null || true
worktree_cleanup "$WORKTREE"
rm -f "$SID_FILE" "$IMPL_SUMMARY_FILE"
CLAIMED=false

View file

@ -339,26 +339,6 @@ if [ "$ORPHAN_COUNT" -gt 0 ]; then
'.[] | select(.head.ref == $branch) | .number' | head -1) || true
if [ -n "$HAS_PR" ]; then
# Check if branch is stale (behind primary branch)
BRANCH="fix/issue-${ISSUE_NUM}"
AHEAD=$(git rev-list --count "origin/${BRANCH}..origin/${PRIMARY_BRANCH}" 2>/dev/null || echo "999")
if [ "$AHEAD" -gt 0 ]; then
log "issue #${ISSUE_NUM} PR #${HAS_PR} is $AHEAD commits behind ${PRIMARY_BRANCH} — abandoning stale PR"
# Close the PR via API
curl -sf -X PATCH \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${API}/pulls/${HAS_PR}" \
-d '{"state":"closed"}' >/dev/null 2>&1 || true
# Delete the branch via git push
git -C "${PROJECT_REPO_ROOT:-}" push origin --delete "${BRANCH}" 2>/dev/null || true
# Reset to fresh start on primary branch
git -C "${PROJECT_REPO_ROOT:-}" checkout "${PRIMARY_BRANCH}" 2>/dev/null || true
git -C "${PROJECT_REPO_ROOT:-}" pull --ff-only origin "${PRIMARY_BRANCH}" 2>/dev/null || true
# Exit to restart poll cycle (issue will be picked up fresh)
exit 0
fi
PR_SHA=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
"${API}/pulls/${HAS_PR}" | jq -r '.head.sha') || true
CI_STATE=$(ci_commit_status "$PR_SHA") || true
@ -582,26 +562,6 @@ for i in $(seq 0 $((BACKLOG_COUNT - 1))); do
'.[] | select((.head.ref == $branch) or (.title | contains($num))) | .number' | head -1) || true
if [ -n "$EXISTING_PR" ]; then
# Check if branch is stale (behind primary branch)
BRANCH="fix/issue-${ISSUE_NUM}"
AHEAD=$(git rev-list --count "origin/${BRANCH}..origin/${PRIMARY_BRANCH}" 2>/dev/null || echo "999")
if [ "$AHEAD" -gt 0 ]; then
log "issue #${ISSUE_NUM} PR #${EXISTING_PR} is $AHEAD commits behind ${PRIMARY_BRANCH} — abandoning stale PR"
# Close the PR via API
curl -sf -X PATCH \
-H "Authorization: token ${FORGE_TOKEN}" \
-H "Content-Type: application/json" \
"${API}/pulls/${EXISTING_PR}" \
-d '{"state":"closed"}' >/dev/null 2>&1 || true
# Delete the branch via git push
git -C "${PROJECT_REPO_ROOT:-}" push origin --delete "${BRANCH}" 2>/dev/null || true
# Reset to fresh start on primary branch
git -C "${PROJECT_REPO_ROOT:-}" checkout "${PRIMARY_BRANCH}" 2>/dev/null || true
git -C "${PROJECT_REPO_ROOT:-}" pull --ff-only origin "${PRIMARY_BRANCH}" 2>/dev/null || true
# Continue to find another ready issue
continue
fi
PR_SHA=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
"${API}/pulls/${EXISTING_PR}" | jq -r '.head.sha') || true
CI_STATE=$(ci_commit_status "$PR_SHA") || true

View file

@ -3,16 +3,20 @@ FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y --no-install-recommends \
bash curl git jq tmux cron python3 python3-pip openssh-client ca-certificates age shellcheck \
&& pip3 install --break-system-packages networkx \
&& curl -sL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.linux.amd64 \
-o /usr/local/bin/sops \
&& curl -sL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.checksums.txt \
-o /tmp/sops-checksums.txt \
&& sha256sum -c --ignore-missing /tmp/sops-checksums.txt \
&& rm -f /tmp/sops-checksums.txt \
&& chmod +x /usr/local/bin/sops \
&& rm -rf /var/lib/apt/lists/*
# Pre-built binaries (copied from docker/agents/bin/)
# SOPS — encrypted data decryption tool
COPY docker/agents/bin/sops /usr/local/bin/sops
RUN chmod +x /usr/local/bin/sops
# tea CLI — official Gitea/Forgejo CLI for issue/label/comment operations
COPY docker/agents/bin/tea /usr/local/bin/tea
RUN chmod +x /usr/local/bin/tea
# Checksum from https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64.sha256
RUN curl -sL https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64 -o /usr/local/bin/tea \
&& echo "be10cdf9a619e3c0f121df874960ed19b53e62d1c7036cf60313a28b5227d54d /usr/local/bin/tea" | sha256sum -c - \
&& chmod +x /usr/local/bin/tea
# Claude CLI is mounted from the host via docker-compose volume.
# No internet access to cli.anthropic.com required at build time.
@ -23,7 +27,7 @@ RUN useradd -m -u 1000 -s /bin/bash agent
# Copy disinto code into the image
COPY . /home/agent/disinto
COPY docker/agents/entrypoint.sh /entrypoint.sh
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Entrypoint runs as root to start the cron daemon;

View file

@ -1,4 +1,3 @@
FROM caddy:alpine
RUN apk add --no-cache bash jq curl git docker-cli
COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh
ENTRYPOINT ["bash", "/usr/local/bin/entrypoint-edge.sh"]
COPY dispatcher.sh /usr/local/bin/dispatcher.sh

View file

@ -63,12 +63,8 @@ is_user_admin() {
local username="$1"
local user_json
# Use admin token for API check (Forgejo only exposes is_admin: true
# when the requesting user is also a site admin)
local admin_token="${FORGE_ADMIN_TOKEN:-${FORGE_TOKEN}}"
# Fetch user info from Forgejo API
user_json=$(curl -sf -H "Authorization: token ${admin_token}" \
user_json=$(curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
"${FORGE_URL}/api/v1/users/${username}" 2>/dev/null) || return 1
# Forgejo uses .is_admin for site-wide admin users
@ -113,34 +109,33 @@ get_pr_for_file() {
local file_name
file_name=$(basename "$file_path")
# Step 1: find the commit that added the file
local add_commit
add_commit=$(git -C "$OPS_REPO_ROOT" log --diff-filter=A --format="%H" \
-- "vault/actions/${file_name}" 2>/dev/null | head -1)
# Get recent commits that added this specific file
local commits
commits=$(git -C "$OPS_REPO_ROOT" log --oneline --diff-filter=A -- "vault/actions/${file_name}" 2>/dev/null | head -20) || true
if [ -z "$add_commit" ]; then
if [ -z "$commits" ]; then
return 1
fi
# Step 2: find the merge commit that contains it via ancestry path
local merge_line
# Use --reverse to get the oldest (direct PR merge) first, not the newest
merge_line=$(git -C "$OPS_REPO_ROOT" log --merges --ancestry-path \
--reverse "${add_commit}..HEAD" --oneline 2>/dev/null | head -1)
# For each commit, check if it's a merge commit from a PR
while IFS= read -r commit; do
local commit_sha commit_msg
if [ -z "$merge_line" ]; then
return 1
fi
commit_sha=$(echo "$commit" | awk '{print $1}')
commit_msg=$(git -C "$OPS_REPO_ROOT" log -1 --format="%B" "$commit_sha" 2>/dev/null) || continue
# Step 3: extract PR number from merge commit message
# Forgejo format: "Merge pull request 'title' (#N) from branch into main"
# Check if this is a merge commit (has "Merge pull request" in message)
if [[ "$commit_msg" =~ "Merge pull request" ]]; then
# Extract PR number from merge message (e.g., "Merge pull request #123")
local pr_num
pr_num=$(echo "$merge_line" | grep -oE '#[0-9]+' | head -1 | tr -d '#')
pr_num=$(echo "$commit_msg" | grep -oP '#\d+' | head -1 | tr -d '#') || true
if [ -n "$pr_num" ]; then
echo "$pr_num"
return 0
fi
fi
done <<< "$commits"
return 1
}
@ -151,11 +146,8 @@ get_pr_for_file() {
get_pr_merger() {
local pr_number="$1"
# Use ops repo API URL for PR lookups (not disinto repo)
local ops_api="${FORGE_URL}/api/v1/repos/${FORGE_OPS_REPO}"
curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
"${ops_api}/pulls/${pr_number}" 2>/dev/null | jq -r '{
"${FORGE_API}/pulls/${pr_number}" 2>/dev/null | jq -r '{
username: .merge_user?.login // .user?.login,
merged: .merged,
merged_at: .merged_at // empty
@ -298,11 +290,16 @@ launch_runner() {
local secrets_array
secrets_array="${VAULT_ACTION_SECRETS:-}"
if [ -z "$secrets_array" ]; then
log "ERROR: Action ${action_id} has no secrets declared"
write_result "$action_id" 1 "No secrets declared in TOML"
return 1
fi
# Build command array (safe from shell injection)
local -a cmd=(docker compose run --rm runner)
# Add environment variables for secrets (if any declared)
if [ -n "$secrets_array" ]; then
# Add environment variables for secrets
for secret in $secrets_array; do
secret=$(echo "$secret" | xargs)
if [ -n "$secret" ]; then
@ -315,9 +312,6 @@ launch_runner() {
cmd+=(-e "$secret")
fi
done
else
log "Action ${action_id} has no secrets declared — runner will execute without extra env vars"
fi
# Add formula and action id as arguments (after service name)
local formula="${VAULT_ACTION_FORMULA:-}"

View file

@ -1,19 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Set USER before sourcing env.sh (Alpine doesn't set USER)
export USER="${USER:-root}"
DISINTO_VERSION="${DISINTO_VERSION:-main}"
DISINTO_REPO="${FORGE_URL:-http://forgejo:3000}/johba/disinto.git"
# Shallow clone at the pinned version
if [ ! -d /opt/disinto/.git ]; then
git clone --depth 1 --branch "$DISINTO_VERSION" "$DISINTO_REPO" /opt/disinto
fi
# Start dispatcher in background
bash /opt/disinto/docker/edge/dispatcher.sh &
# Caddy as main process
exec caddy run --config /etc/caddy/Caddyfile --adapter caddyfile

View file

@ -1,15 +1,16 @@
# formulas/run-gardener.toml — Gardener housekeeping formula
#
# Defines the gardener's complete run: grooming (Claude session via
# gardener-run.sh) + AGENTS.md maintenance + final commit-and-pr.
# gardener-run.sh) + blocked-review + AGENTS.md maintenance + final
# commit-and-pr.
#
# Gardener has journaling via .profile (issue #97), so it learns from
# past runs and improves over time.
# No memory, no journal. The gardener does mechanical housekeeping
# based on current state — it doesn't need to remember past runs.
#
# Steps: preflight -> grooming -> dust-bundling -> agents-update -> commit-and-pr
# Steps: preflight → grooming → dust-bundling → blocked-review → stale-pr-recycle → agents-update → commit-and-pr
name = "run-gardener"
description = "Mechanical housekeeping: grooming, dust bundling, docs update"
description = "Mechanical housekeeping: grooming, blocked review, docs update"
version = 1
[context]
@ -119,17 +120,15 @@ DUST (trivial — single-line edit, rename, comment, style, whitespace):
of 3+ into one backlog issue.
VAULT (needs human decision or external resource):
File a vault procurement item using vault_request():
source "$(dirname "$0")/../lib/vault.sh"
TOML_CONTENT="# Vault action: <action_id>
context = \"<description of what decision/resource is needed>\"
unblocks = [\"#NNN\"]
[execution]
# Commands to run after approval
"
PR_NUM=$(vault_request "<action_id>" "$TOML_CONTENT")
echo "VAULT: filed PR #${PR_NUM} for #NNN — <reason>" >> "$RESULT_FILE"
File a vault procurement item at $OPS_REPO_ROOT/vault/pending/<id>.md:
# <What decision or resource is needed>
## What
<description>
## Why
<which issue this unblocks>
## Unblocks
- #NNN — <title>
Log: echo "VAULT: filed $OPS_REPO_ROOT/vault/pending/<id>.md for #NNN — <reason>" >> "$RESULT_FILE"
CLEAN (only if truly nothing to do):
echo 'CLEAN' >> "$RESULT_FILE"
@ -143,7 +142,25 @@ Sibling dependency rule (CRITICAL):
NEVER add bidirectional ## Dependencies between siblings (creates deadlocks).
Use ## Related for cross-references: "## Related\n- #NNN (sibling)"
6. Quality gate backlog label enforcement:
7. Architecture decision alignment check (AD check):
For each open issue labeled 'backlog', check whether the issue
contradicts any architecture decision listed in the
## Architecture Decisions section of AGENTS.md.
Read AGENTS.md and extract the AD table. For each backlog issue,
compare the issue title and body against each AD. If an issue
clearly violates an AD:
a. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"Closing: violates AD-NNN (<decision summary>). See AGENTS.md § Architecture Decisions."}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a close action to the manifest:
echo '{"action":"close","issue":NNN,"reason":"violates AD-NNN"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
c. Log to the result file:
echo "ACTION: closed #NNN — violates AD-NNN" >> "$RESULT_FILE"
Only close for clear, unambiguous violations. If the issue is
borderline or could be interpreted as compatible, leave it open
and file a VAULT item for human decision instead.
8. Quality gate backlog label enforcement:
For each open issue labeled 'backlog', verify it has the required
sections for dev-agent pickup:
a. Acceptance criteria body must contain at least one checkbox
@ -164,11 +181,28 @@ Sibling dependency rule (CRITICAL):
Well-structured issues (both sections present) are left untouched
they are ready for dev-agent pickup.
9. Portfolio lifecycle maintain ## Addressables and ## Observables in AGENTS.md:
Read the current Addressables and Observables tables from AGENTS.md.
a. ADD: if a recently closed issue shipped a new deployment, listing,
package, or external presence not yet in the table, add a row.
b. PROMOTE: if an addressable now has measurement wired (an evidence
process reads from it), move it to the Observables section.
c. REMOVE: if an addressable was decommissioned (vision change
invalidated it, service shut down), remove the row and log why.
d. FLAG: if an addressable has been live > 2 weeks with Observable? = No
and no evidence process is planned, add a comment to the result file:
echo "ACTION: flagged addressable '<name>' — live >2 weeks, no observation path" >> "$RESULT_FILE"
Stage AGENTS.md if changed the commit-and-pr step handles the actual commit.
Processing order:
1. Handle PRIORITY_blockers_starving_factory first promote or resolve
2. Quality gate strip backlog from issues missing acceptance criteria or affected files
3. Process tech-debt issues by score (impact/effort)
4. Classify remaining items as dust or route to vault
2. AD alignment check close backlog issues that violate architecture decisions
3. Quality gate strip backlog from issues missing acceptance criteria or affected files
4. Process tech-debt issues by score (impact/effort)
5. Classify remaining items as dust or route to vault
6. Portfolio lifecycle update addressables/observables tables
Do NOT bundle dust yourself the dust-bundling step handles accumulation,
dedup, TTL expiry, and bundling into backlog issues.
@ -223,12 +257,126 @@ session, so changes there would be lost.
5. If no DUST items were emitted and no groups are ripe, skip this step.
CRITICAL: If this step fails, log the failure and move on.
CRITICAL: If this step fails, log the failure and move on to blocked-review.
"""
needs = ["grooming"]
# ─────────────────────────────────────────────────────────────────────
# Step 4: agents-update — AGENTS.md watermark staleness + size enforcement
# Step 4: blocked-review — triage blocked issues
# ─────────────────────────────────────────────────────────────────────
[[steps]]
id = "blocked-review"
title = "Review issues labeled blocked"
description = """
Review all issues labeled 'blocked' and decide their fate.
(See issue #352 for the blocked label convention.)
1. Fetch all blocked issues:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues?state=open&type=issues&labels=blocked&limit=50"
2. For each blocked issue, read the full body and comments:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues/<number>"
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues/<number>/comments"
3. Check dependencies extract issue numbers from ## Dependencies /
## Depends on / ## Blocked by sections. For each dependency:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues/<dep_number>"
Check if the dependency is now closed.
4. For each blocked issue, choose ONE action:
UNBLOCK all dependencies are now closed or the blocking condition resolved:
a. Write a remove_label action to the manifest:
echo '{"action":"remove_label","issue":NNN,"label":"blocked"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"Unblocked: <explanation of what resolved the blocker>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
NEEDS HUMAN blocking condition is ambiguous, requires architectural
decision, or involves external factors:
a. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"<diagnostic: what you found and what decision is needed>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Leave the 'blocked' label in place
CLOSE issue is stale (blocked 30+ days with no progress on blocker),
the blocker is wontfix, or the issue is no longer relevant:
a. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"Closing: <reason — stale blocker, no longer relevant, etc.>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a close action to the manifest:
echo '{"action":"close","issue":NNN,"reason":"<stale blocker / no longer relevant / etc.>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
CRITICAL: If this step fails, log the failure and move on.
"""
needs = ["dust-bundling"]
# ─────────────────────────────────────────────────────────────────────
# Step 5: stale-pr-recycle — recycle stale failed PRs back to backlog
# ─────────────────────────────────────────────────────────────────────
[[steps]]
id = "stale-pr-recycle"
title = "Recycle stale failed PRs back to backlog"
description = """
Detect open PRs where CI has failed and no work has happened in 24+ hours.
These represent abandoned dev-agent attempts recycle them so the pipeline
can retry with a fresh session.
1. Fetch all open PRs:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/pulls?state=open&limit=50"
2. For each PR, check all four conditions before recycling:
a. CI failed get the HEAD SHA from the PR's head.sha field, then:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/commits/<head_sha>/status"
Only proceed if the combined state is "failure" or "error".
Skip PRs with "success", "pending", or no CI status.
b. Last push > 24 hours ago get the commit details:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/git/commits/<head_sha>"
Parse the committer.date field. Only proceed if it is older than:
$(date -u -d '24 hours ago' +%Y-%m-%dT%H:%M:%SZ)
c. Linked issue exists extract the issue number from the PR body.
Look for "Fixes #NNN" or "ixes #NNN" patterns (case-insensitive).
If no linked issue found, skip this PR (cannot reset labels).
d. No active tmux session check:
tmux has-session -t "dev-${PROJECT_NAME}-<issue_number>" 2>/dev/null
If a session exists, someone may still be working skip this PR.
3. For each PR that passes all checks (failed CI, 24+ hours stale,
linked issue found, no active session):
a. Write a comment on the PR explaining the recycle:
echo '{"action":"comment","issue":<pr_number>,"body":"Recycling stale CI failure for fresh attempt. Previous PR: #<pr_number>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a close_pr action:
echo '{"action":"close_pr","pr":<pr_number>}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
c. Remove the in-progress label from the linked issue:
echo '{"action":"remove_label","issue":<issue_number>,"label":"in-progress"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
d. Add the backlog label to the linked issue:
echo '{"action":"add_label","issue":<issue_number>,"label":"backlog"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
e. Log to result file:
echo "ACTION: recycled PR #<pr_number> (linked issue #<issue_number>) — stale CI failure" >> "$RESULT_FILE"
4. If no stale failed PRs found, skip this step.
CRITICAL: If this step fails, log the failure and move on to agents-update.
"""
needs = ["blocked-review"]
# ─────────────────────────────────────────────────────────────────────
# Step 6: agents-update — AGENTS.md watermark staleness + size enforcement
# ─────────────────────────────────────────────────────────────────────
[[steps]]
@ -349,10 +497,10 @@ needed. You wouldn't dump a 500-page wiki on a new hire's first morning.
CRITICAL: If this step fails for any reason, log the failure and move on.
Do NOT let an AGENTS.md failure prevent the commit-and-pr step.
"""
needs = ["dust-bundling"]
needs = ["stale-pr-recycle"]
# ─────────────────────────────────────────────────────────────────────
# Step 5: commit-and-pr — single commit with all file changes
# Step 7: commit-and-pr — single commit with all file changes
# ─────────────────────────────────────────────────────────────────────
[[steps]]
@ -406,14 +554,16 @@ executes them after the PR merges.
PR_NUMBER=$(echo "$PR_RESPONSE" | jq -r '.number')
h. Save PR number for orchestrator tracking:
echo "$PR_NUMBER" > /tmp/gardener-pr-${PROJECT_NAME}.txt
i. The orchestrator handles CI/review via pr_walk_to_merge.
The gardener stays alive to inject CI results and review feedback
as they come in, then executes the pending-actions manifest after merge.
i. Signal the orchestrator to monitor CI:
echo "PHASE:awaiting_ci" > "$PHASE_FILE"
j. STOP and WAIT. Do NOT return to the primary branch.
The orchestrator polls CI, injects results and review feedback.
When you receive injected CI or review feedback, follow its
instructions, then write PHASE:awaiting_ci and wait again.
4. If no file changes existed (step 2 found nothing):
# Nothing to commit — the gardener has no work to do this run.
exit 0
echo "PHASE:done" > "$PHASE_FILE"
5. If PR creation fails, log the error and exit.
5. If PR creation fails, log the error and write PHASE:failed.
"""
needs = ["agents-update"]

View file

@ -22,8 +22,7 @@ directly from cron like the planner, predictor, and supervisor.
`PHASE:awaiting_ci` — injects CI results and review feedback, re-signals
`PHASE:awaiting_ci` after fixes, signals `PHASE:awaiting_review` on CI pass.
Executes pending-actions manifest after PR merge.
- `formulas/run-gardener.toml` — Execution spec: preflight, grooming, dust-bundling,
agents-update, commit-and-pr
- `formulas/run-gardener.toml` — Execution spec: preflight, grooming, dust-bundling, blocked-review, agents-update, commit-and-pr
- `gardener/pending-actions.json` — Manifest of deferred repo actions (label changes,
closures, comments, issue creation). Written during grooming steps, committed to the
PR, reviewed alongside AGENTS.md changes, executed by gardener-run.sh after merge.
@ -35,7 +34,7 @@ directly from cron like the planner, predictor, and supervisor.
**Lifecycle**: gardener-run.sh (cron 0,6,12,18) → `check_active gardener` → lock + memory guard →
load formula + context → create tmux session →
Claude grooms backlog (writes proposed actions to manifest), bundles dust,
updates AGENTS.md, commits manifest + docs to PR →
reviews blocked issues, updates AGENTS.md, commits manifest + docs to PR →
`PHASE:awaiting_ci` (stays alive) → CI pass → `PHASE:awaiting_review`
review feedback → address + re-signal → merge → gardener-run.sh executes
manifest actions via API → `PHASE:done`. When blocked on external resources

View file

@ -7,7 +7,7 @@ sourced as needed.
| File | What it provides | Sourced by |
|---|---|---|
| `lib/env.sh` | Loads `.env`, sets `FACTORY_ROOT`, exports project config (`FORGE_REPO`, `PROJECT_NAME`, etc.), defines `log()`, `forge_api()`, `forge_api_all()` (accepts optional second TOKEN parameter, defaults to `$FORGE_TOKEN`), `woodpecker_api()`, `wpdb()`, `memory_guard()` (skips agent if RAM < threshold). Auto-loads project TOML if `PROJECT_TOML` is set. Exports per-agent tokens (`FORGE_PLANNER_TOKEN`, `FORGE_GARDENER_TOKEN`, `FORGE_VAULT_TOKEN`, `FORGE_SUPERVISOR_TOKEN`, `FORGE_PREDICTOR_TOKEN`) each falls back to `$FORGE_TOKEN` if not set. **Vault-only token guard (AD-006)**: `unset GITHUB_TOKEN CLAWHUB_TOKEN` so agents never hold external-action tokens only the runner container receives them. **Container note**: when `DISINTO_CONTAINER=1`, `.env` is NOT re-sourced compose already injects env vars (including `FORGE_URL=http://forgejo:3000`) and re-sourcing would clobber them. | Every agent |
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). `ci_get_logs <pipeline_number> [--step <name>]` — reads CI logs from Woodpecker SQLite database; outputs last 200 lines to stdout. Requires mounted woodpecker-data volume at /woodpecker-data. | dev-poll, review-poll, review-pr, supervisor-poll |
| `lib/ci-helpers.sh` | `ci_passed()` — returns 0 if CI state is "success" (or no CI configured). `ci_required_for_pr()` — returns 0 if PR has code files (CI required), 1 if non-code only (CI not required). `is_infra_step()` — returns 0 if a single CI step failure matches infra heuristics (clone/git exit 128, any exit 137, log timeout patterns). `classify_pipeline_failure()` — returns "infra \<reason>" if any failed Woodpecker step matches infra heuristics via `is_infra_step()`, else "code". `ensure_priority_label()` — looks up (or creates) the `priority` label and returns its ID; caches in `_PRIORITY_LABEL_ID`. `ci_commit_status <sha>` — queries Woodpecker directly for CI state, falls back to forge commit status API. `ci_pipeline_number <sha>` — returns the Woodpecker pipeline number for a commit, falls back to parsing forge status `target_url`. `ci_promote <repo_id> <pipeline_num> <environment>` — promotes a pipeline to a named Woodpecker environment (vault-gated deployment: vault approves, vault-fire calls this — vault redesign in progress, see #73-#77). | dev-poll, review-poll, review-pr, supervisor-poll |
| `lib/ci-debug.sh` | CLI tool for Woodpecker CI: `list`, `status`, `logs`, `failures` subcommands. Not sourced — run directly. | Humans / dev-agent (tool access) |
| `lib/load-project.sh` | Parses a `projects/*.toml` file into env vars (`PROJECT_NAME`, `FORGE_REPO`, `WOODPECKER_REPO_ID`, monitoring toggles, mirror config, etc.). | env.sh (when `PROJECT_TOML` is set), supervisor-poll (per-project iteration) |
| `lib/parse-deps.sh` | Extracts dependency issue numbers from an issue body (stdin → stdout, one number per line). Matches `## Dependencies` / `## Depends on` / `## Blocked by` sections and inline `depends on #N` / `blocked by #N` patterns. Inline scan skips fenced code blocks to prevent false positives from code examples in issue bodies. Not sourced — executed via `bash lib/parse-deps.sh`. | dev-poll, supervisor-poll |

View file

@ -267,42 +267,3 @@ ci_promote() {
echo "$new_num"
}
# ci_get_logs <pipeline_number> [--step <step_name>]
# Reads CI logs from the Woodpecker SQLite database via lib/ci-log-reader.py.
# Requires: WOODPECKER_DATA_DIR env var or mounted volume at /woodpecker-data
# Returns: 0 on success, 1 on failure. Outputs log text to stdout.
#
# Usage:
#   ci_get_logs 346                      # Get all failed step logs
#   ci_get_logs 346 --step smoke-init    # Get logs for specific step
ci_get_logs() {
    local pipeline_number="$1"
    shift || true
    local step_name=""
    while [ $# -gt 0 ]; do
        case "$1" in
            --step|-s)
                # Fail loudly if the step name is missing instead of
                # tripping set -u (or silently mis-parsing) on "$2".
                if [ $# -lt 2 ] || [ -z "${2:-}" ]; then
                    echo "ERROR: $1 requires a step name argument" >&2
                    return 1
                fi
                step_name="$2"
                shift 2
                ;;
            *)
                echo "Unknown option: $1" >&2
                return 1
                ;;
        esac
    done
    local log_reader="${FACTORY_ROOT:-/home/agent/disinto}/lib/ci-log-reader.py"
    # Guard-first: bail before invoking python3 if the helper is missing.
    if [ ! -f "$log_reader" ]; then
        echo "ERROR: ci-log-reader.py not found at $log_reader" >&2
        return 1
    fi
    if [ -n "$step_name" ]; then
        python3 "$log_reader" "$pipeline_number" --step "$step_name"
    else
        python3 "$log_reader" "$pipeline_number"
    fi
}

View file

@ -1,125 +0,0 @@
#!/usr/bin/env python3
"""
ci-log-reader.py Read CI logs from Woodpecker SQLite database.
Usage:
ci-log-reader.py <pipeline_number> [--step <step_name>]
Reads log entries from the Woodpecker SQLite database and outputs them to stdout.
If --step is specified, filters to that step only. Otherwise returns logs from
all failed steps, truncated to the last 200 lines to avoid context bloat.
Environment:
WOODPECKER_DATA_DIR - Path to Woodpecker data directory (default: /woodpecker-data)
The SQLite database is located at: $WOODPECKER_DATA_DIR/woodpecker.sqlite
"""
import argparse
import sqlite3
import sys
import os
# Conventional mount point for the Woodpecker data volume, the SQLite
# database inside it, and the output cap that keeps log dumps small.
# NOTE(review): DEFAULT_DB_PATH appears unused — get_db_path() rebuilds
# the path from DEFAULT_WOODPECKER_DATA_DIR instead; confirm before removal.
DEFAULT_DB_PATH = "/woodpecker-data/woodpecker.sqlite"
DEFAULT_WOODPECKER_DATA_DIR = "/woodpecker-data"
MAX_OUTPUT_LINES = 200


def get_db_path():
    """Return the absolute path of the Woodpecker SQLite database.

    Honors the WOODPECKER_DATA_DIR environment variable when set,
    falling back to the conventional /woodpecker-data mount point.
    """
    data_dir = os.environ.get("WOODPECKER_DATA_DIR", DEFAULT_WOODPECKER_DATA_DIR)
    return os.path.join(data_dir, "woodpecker.sqlite")
def query_logs(pipeline_number: int, step_name: str | None = None) -> list[str]:
    """Fetch raw log data for a pipeline from the Woodpecker SQLite database.

    Args:
        pipeline_number: The pipeline number to query (the user-facing
            pipelines.number column, not the internal row id).
        step_name: Optional step name to filter by. When omitted, only
            steps in a terminal failure state ('failure', 'error',
            'killed') are included.

    Returns:
        List of log data strings, ordered by log-entry id.

    Side effects:
        Exits the process with status 1 (after printing to stderr) if the
        database file does not exist.
    """
    db_path = get_db_path()
    if not os.path.exists(db_path):
        print(f"ERROR: Woodpecker database not found at {db_path}", file=sys.stderr)
        print(f"Set WOODPECKER_DATA_DIR or mount volume to {DEFAULT_WOODPECKER_DATA_DIR}", file=sys.stderr)
        sys.exit(1)
    conn = sqlite3.connect(db_path)
    conn.row_factory = sqlite3.Row  # allow column access by name below
    cursor = conn.cursor()
    if step_name:
        # Query logs for a specific step
        query = """
            SELECT le.data
            FROM log_entries le
            JOIN steps s ON le.step_id = s.id
            JOIN pipelines p ON s.pipeline_id = p.id
            WHERE p.number = ? AND s.name = ?
            ORDER BY le.id
        """
        cursor.execute(query, (pipeline_number, step_name))
    else:
        # Query logs for all failed steps in the pipeline
        query = """
            SELECT le.data
            FROM log_entries le
            JOIN steps s ON le.step_id = s.id
            JOIN pipelines p ON s.pipeline_id = p.id
            WHERE p.number = ? AND s.state IN ('failure', 'error', 'killed')
            ORDER BY le.id
        """
        cursor.execute(query, (pipeline_number,))
    logs = [row["data"] for row in cursor.fetchall()]
    conn.close()
    return logs
def main():
    """CLI entry point: parse args, query logs, print truncated output.

    Exits 0 even when no logs are found — an empty pipeline is
    informational for callers (e.g. ci_get_logs), not an error.
    """
    parser = argparse.ArgumentParser(
        description="Read CI logs from Woodpecker SQLite database"
    )
    parser.add_argument(
        "pipeline_number",
        type=int,
        help="Pipeline number to query"
    )
    parser.add_argument(
        "--step", "-s",
        dest="step_name",
        default=None,
        help="Filter to a specific step name"
    )
    args = parser.parse_args()
    logs = query_logs(args.pipeline_number, args.step_name)
    if not logs:
        # Diagnostics go to stderr so stdout stays clean for log content.
        if args.step_name:
            print(f"No logs found for pipeline #{args.pipeline_number}, step '{args.step_name}'", file=sys.stderr)
        else:
            print(f"No failed steps found in pipeline #{args.pipeline_number}", file=sys.stderr)
        sys.exit(0)
    # Join all log data and output
    full_output = "\n".join(logs)
    # Truncate to last N lines to avoid context bloat
    lines = full_output.split("\n")
    if len(lines) > MAX_OUTPUT_LINES:
        # Keep last N lines — the tail is where the failure usually is.
        truncated = lines[-MAX_OUTPUT_LINES:]
        print("\n".join(truncated))
    else:
        print(full_output)


if __name__ == "__main__":
    main()

View file

@ -414,23 +414,6 @@ pr_walk_to_merge() {
fi
_prl_log "CI failed — invoking agent (attempt ${ci_fix_count}/${max_ci_fixes})"
# Get CI logs from SQLite database if available
local ci_logs=""
if [ -n "$_PR_CI_PIPELINE" ] && [ -n "${FACTORY_ROOT:-}" ]; then
ci_logs=$(ci_get_logs "$_PR_CI_PIPELINE" 2>/dev/null | tail -50) || ci_logs=""
fi
local logs_section=""
if [ -n "$ci_logs" ]; then
logs_section="
CI Log Output (last 50 lines):
\`\`\`
${ci_logs}
\`\`\`
"
fi
agent_run --resume "$session_id" --worktree "$worktree" \
"CI failed on PR #${pr_num} (attempt ${ci_fix_count}/${max_ci_fixes}).
@ -438,7 +421,7 @@ Pipeline: #${_PR_CI_PIPELINE:-?}
Failure type: ${_PR_CI_FAILURE_TYPE:-unknown}
Error log:
${_PR_CI_ERROR_LOG:-No logs available.}${logs_section}
${_PR_CI_ERROR_LOG:-No logs available.}
Fix the issue, run tests, commit, rebase on ${PRIMARY_BRANCH}, and push:
git fetch ${remote} ${PRIMARY_BRANCH} && git rebase ${remote}/${PRIMARY_BRANCH}

View file

@ -1,748 +0,0 @@
#!/usr/bin/env python3
"""Mock Forgejo API server for CI smoke tests.
Implements 15 Forgejo API endpoints that disinto init calls.
State stored in-memory (dicts), responds instantly.
"""
import base64
import hashlib
import json
import os
import re
import signal
import socket
import sys
import threading
import uuid
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from urllib.parse import parse_qs, urlparse
# Global in-memory state: one top-level key per entity kind. Everything is
# lost when the process exits, which is fine for a smoke-test mock.
# NOTE(review): mutated from ThreadingMixIn handler threads without locking —
# acceptable at smoke-test concurrency, not for real workloads.
state = {
    "users": {},          # key: username -> user object
    "tokens": {},         # key: token_sha1 -> token object
    "repos": {},          # key: "owner/repo" -> repo object
    "orgs": {},           # key: orgname -> org object
    "labels": {},         # key: "owner/repo" -> list of labels
    "collaborators": {},  # key: "owner/repo" -> set of usernames
    "protections": {},    # key: "owner/repo" -> list of protections
    "oauth2_apps": [],    # list of oauth2 app objects
}
# Monotonic per-kind id counters (mock equivalent of DB autoincrement).
next_ids = {"users": 1, "tokens": 1, "repos": 1, "orgs": 1, "labels": 1, "oauth2_apps": 1}
# Set by GET /mock/shutdown and by the SIGTERM/SIGINT handler.
SHUTDOWN_REQUESTED = False
def log_request(handler, method, path, status):
    """Write one request line (timestamp, method, path, status) to stderr."""
    stamp = handler.log_date_time_string()
    print(f"[{stamp}] {method} {path} {status}", file=sys.stderr)
def json_response(handler, status, data):
    """Serialize *data* as JSON and send it with the given HTTP status.

    Emits Content-Type and Content-Length headers, then the UTF-8 body.
    """
    payload = json.dumps(data).encode("utf-8")
    handler.send_response(status)
    handler.send_header("Content-Type", "application/json")
    handler.send_header("Content-Length", len(payload))
    handler.end_headers()
    handler.wfile.write(payload)
def basic_auth_user(handler):
    """Return the username from a Basic auth header, or None.

    None is returned when the header is absent, uses a different scheme,
    fails base64/UTF-8 decoding, or lacks a ':' separator.
    """
    header = handler.headers.get("Authorization", "")
    if not header.startswith("Basic "):
        return None
    try:
        creds = base64.b64decode(header[6:]).decode("utf-8")
        return creds.split(":", 1)[0] if ":" in creds else None
    except Exception:
        return None
def token_auth_valid(handler):
    """True when the Authorization header uses the 'token ' scheme.

    The token value itself is never validated — any token is accepted
    by this mock.
    """
    return handler.headers.get("Authorization", "").startswith("token ")
def require_token(handler):
    """Return a truthy value when token auth is present, else None.

    Any token value passes — the mock only checks the auth scheme, never
    the token itself.
    """
    if token_auth_valid(handler):
        return True
    return None
def require_basic_auth(handler, required_user=None):
    """Resolve Basic auth to a known username, or None.

    The decoded username must exist in state["users"]; when a truthy
    required_user is given, it must also match exactly.
    """
    username = basic_auth_user(handler)
    if username is None or username not in state["users"]:
        return None
    if required_user and username != required_user:
        return None
    return username
class ForgejoHandler(BaseHTTPRequestHandler):
    """HTTP request handler for mock Forgejo API.

    Dispatch is name-based: METHOD /api/v1/a/b is routed to a method
    named handle_METHOD_a_b when one exists; otherwise a regex table in
    _handle_patterned_route maps routes with dynamic segments (usernames,
    repo names, ...) onto the equivalent handler name. Handlers re-parse
    dynamic path segments out of self.path themselves.
    """

    def log_message(self, format, *args):
        """Override to use our logging."""
        pass  # We log in do_request

    def do_request(self, method):
        """Route request to appropriate handler.

        Any exception raised inside a handler is converted into a JSON
        500 response rather than killing the worker thread.
        """
        parsed = urlparse(self.path)
        path = parsed.path
        query = parse_qs(parsed.query)
        log_request(self, method, self.path, "PENDING")
        # Strip /api/v1/ prefix for routing (or leading slash for other routes)
        route_path = path
        if route_path.startswith("/api/v1/"):
            route_path = route_path[8:]
        elif route_path.startswith("/"):
            route_path = route_path.lstrip("/")
        # Route to handler
        try:
            # First try exact match (with / replaced by _)
            handler_path = route_path.replace("/", "_")
            handler_name = f"handle_{method}_{handler_path}"
            handler = getattr(self, handler_name, None)
            if handler:
                handler(query)
            else:
                # Try pattern matching for routes with dynamic segments
                self._handle_patterned_route(method, route_path, query)
        except Exception as e:
            log_request(self, method, self.path, 500)
            json_response(self, 500, {"message": str(e)})

    def _handle_patterned_route(self, method, route_path, query):
        """Handle routes with dynamic segments using pattern matching."""
        # Define patterns: (regex, handler_name). First match wins; a match
        # without a corresponding handler method falls through to 404.
        patterns = [
            # Users patterns
            (r"^users/([^/]+)$", f"handle_{method}_users_username"),
            (r"^users/([^/]+)/tokens$", f"handle_{method}_users_username_tokens"),
            (r"^users/([^/]+)/repos$", f"handle_{method}_users_username_repos"),
            # Repos patterns
            (r"^repos/([^/]+)/([^/]+)$", f"handle_{method}_repos_owner_repo"),
            (r"^repos/([^/]+)/([^/]+)/labels$", f"handle_{method}_repos_owner_repo_labels"),
            (r"^repos/([^/]+)/([^/]+)/branch_protections$", f"handle_{method}_repos_owner_repo_branch_protections"),
            (r"^repos/([^/]+)/([^/]+)/collaborators/([^/]+)$", f"handle_{method}_repos_owner_repo_collaborators_collaborator"),
            # Org patterns
            (r"^orgs/([^/]+)/repos$", f"handle_{method}_orgs_org_repos"),
            # User patterns
            (r"^user/repos$", f"handle_{method}_user_repos"),
            (r"^user/applications/oauth2$", f"handle_{method}_user_applications_oauth2"),
            # Admin patterns
            (r"^admin/users$", f"handle_{method}_admin_users"),
            (r"^admin/users/([^/]+)$", f"handle_{method}_admin_users_username"),
            # Org patterns
            (r"^orgs$", f"handle_{method}_orgs"),
        ]
        for pattern, handler_name in patterns:
            if re.match(pattern, route_path):
                handler = getattr(self, handler_name, None)
                if handler:
                    handler(query)
                    return
        self.handle_404()

    # Standard BaseHTTPRequestHandler entry points, funneled into do_request.
    def do_GET(self):
        self.do_request("GET")

    def do_POST(self):
        self.do_request("POST")

    def do_PATCH(self):
        self.do_request("PATCH")

    def do_PUT(self):
        self.do_request("PUT")

    def handle_GET_version(self, query):
        """GET /api/v1/version"""
        json_response(self, 200, {"version": "11.0.0-mock"})

    def handle_GET_users_username(self, query):
        """GET /api/v1/users/{username} — no auth required."""
        # Extract username from path
        parts = self.path.split("/")
        if len(parts) >= 5:
            username = parts[4]
        else:
            json_response(self, 404, {"message": "user does not exist"})
            return
        if username in state["users"]:
            json_response(self, 200, state["users"][username])
        else:
            json_response(self, 404, {"message": "user does not exist"})

    def handle_GET_users_username_repos(self, query):
        """GET /api/v1/users/{username}/repos"""
        if not require_token(self):
            json_response(self, 401, {"message": "invalid authentication"})
            return
        parts = self.path.split("/")
        if len(parts) >= 5:
            username = parts[4]
        else:
            json_response(self, 404, {"message": "user not found"})
            return
        if username not in state["users"]:
            json_response(self, 404, {"message": "user not found"})
            return
        # Return repos owned by this user
        user_repos = [r for r in state["repos"].values() if r["owner"]["login"] == username]
        json_response(self, 200, user_repos)

    def handle_GET_repos_owner_repo(self, query):
        """GET /api/v1/repos/{owner}/{repo} — no auth required."""
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        key = f"{owner}/{repo}"
        if key in state["repos"]:
            json_response(self, 200, state["repos"][key])
        else:
            json_response(self, 404, {"message": "repository not found"})

    def handle_GET_repos_owner_repo_labels(self, query):
        """GET /api/v1/repos/{owner}/{repo}/labels"""
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        # NOTE(review): auth result is ignored here — unauthenticated
        # requests are served anyway; confirm that is intended.
        require_token(self)
        key = f"{owner}/{repo}"
        if key in state["labels"]:
            json_response(self, 200, state["labels"][key])
        else:
            json_response(self, 200, [])

    def handle_GET_user_applications_oauth2(self, query):
        """GET /api/v1/user/applications/oauth2"""
        require_token(self)
        json_response(self, 200, state["oauth2_apps"])

    def handle_GET_mock_shutdown(self, query):
        """GET /mock/shutdown — test-only control endpoint.

        NOTE(review): only sets the flag; nothing visible here polls
        SHUTDOWN_REQUESTED to actually stop the server — confirm callers
        rely on the signal handler instead.
        """
        global SHUTDOWN_REQUESTED
        SHUTDOWN_REQUESTED = True
        json_response(self, 200, {"status": "shutdown"})

    def handle_POST_admin_users(self, query):
        """POST /api/v1/admin/users — create a user."""
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        username = data.get("username")
        email = data.get("email")
        if not username or not email:
            json_response(self, 400, {"message": "username and email are required"})
            return
        user_id = next_ids["users"]
        next_ids["users"] += 1
        user = {
            "id": user_id,
            "login": username,
            "email": email,
            "full_name": data.get("full_name", ""),
            "is_admin": data.get("admin", False),
            "must_change_password": data.get("must_change_password", False),
            "login_name": data.get("login_name", username),
            "visibility": data.get("visibility", "public"),
            # Gravatar-style avatar URL derived from the email hash.
            "avatar_url": f"https://seccdn.libravatar.org/avatar/{hashlib.md5(email.encode()).hexdigest()}",
        }
        state["users"][username] = user
        json_response(self, 201, user)

    def handle_GET_users_username_tokens(self, query):
        """GET /api/v1/users/{username}/tokens

        NOTE(review): require_token() returns True (never a username), so
        the filter below compares against True and always yields [] —
        likely should use the {username} path segment instead. Confirm.
        """
        username = require_token(self)
        if not username:
            json_response(self, 401, {"message": "invalid authentication"})
            return
        # Return list of tokens for this user
        tokens = [t for t in state["tokens"].values() if t.get("username") == username]
        json_response(self, 200, tokens)

    def handle_POST_users_username_tokens(self, query):
        """POST /api/v1/users/{username}/tokens — create an access token.

        Uses Basic auth (token creation happens before a token exists).
        """
        username = require_basic_auth(self)
        if not username:
            json_response(self, 401, {"message": "invalid authentication"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        token_name = data.get("name")
        if not token_name:
            json_response(self, 400, {"message": "name is required"})
            return
        token_id = next_ids["tokens"]
        next_ids["tokens"] += 1
        # Deterministic token: sha256(username + name)[:40] — stable across
        # runs so tests can predict token values.
        token_str = hashlib.sha256(f"{username}{token_name}".encode()).hexdigest()[:40]
        token = {
            "id": token_id,
            "name": token_name,
            "sha1": token_str,
            "scopes": data.get("scopes", ["all"]),
            "created_at": "2026-04-01T00:00:00Z",
            "expires_at": None,
            "username": username,  # Store username for lookup
        }
        state["tokens"][token_str] = token
        json_response(self, 201, token)

    def handle_GET_orgs(self, query):
        """GET /api/v1/orgs"""
        if not require_token(self):
            json_response(self, 401, {"message": "invalid authentication"})
            return
        json_response(self, 200, list(state["orgs"].values()))

    def handle_POST_orgs(self, query):
        """POST /api/v1/orgs — create an organization."""
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        username = data.get("username")
        if not username:
            json_response(self, 400, {"message": "username is required"})
            return
        org_id = next_ids["orgs"]
        next_ids["orgs"] += 1
        org = {
            "id": org_id,
            "username": username,
            "full_name": username,
            "avatar_url": f"https://seccdn.libravatar.org/avatar/{hashlib.md5(username.encode()).hexdigest()}",
            "visibility": data.get("visibility", "public"),
        }
        state["orgs"][username] = org
        json_response(self, 201, org)

    def handle_POST_orgs_org_repos(self, query):
        """POST /api/v1/orgs/{org}/repos — create a repo under an org.

        NOTE(review): an unknown org raises KeyError on state["orgs"][org]
        below, surfaced as a JSON 500 by do_request — confirm a 404 isn't
        expected here.
        """
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 6:
            org = parts[4]
        else:
            json_response(self, 404, {"message": "organization not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        repo_name = data.get("name")
        if not repo_name:
            json_response(self, 400, {"message": "name is required"})
            return
        repo_id = next_ids["repos"]
        next_ids["repos"] += 1
        key = f"{org}/{repo_name}"
        repo = {
            "id": repo_id,
            "full_name": key,
            "name": repo_name,
            "owner": {"id": state["orgs"][org]["id"], "login": org},
            "empty": False,
            "default_branch": data.get("default_branch", "main"),
            "description": data.get("description", ""),
            "private": data.get("private", False),
            "html_url": f"https://example.com/{key}",
            "ssh_url": f"git@example.com:{key}.git",
            "clone_url": f"https://example.com/{key}.git",
            "created_at": "2026-04-01T00:00:00Z",
        }
        state["repos"][key] = repo
        json_response(self, 201, repo)

    def handle_POST_users_username_repos(self, query):
        """POST /api/v1/users/{username}/repos — create a repo for a user."""
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 5:
            username = parts[4]
        else:
            json_response(self, 400, {"message": "username required"})
            return
        if username not in state["users"]:
            json_response(self, 404, {"message": "user not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        repo_name = data.get("name")
        if not repo_name:
            json_response(self, 400, {"message": "name is required"})
            return
        repo_id = next_ids["repos"]
        next_ids["repos"] += 1
        key = f"{username}/{repo_name}"
        repo = {
            "id": repo_id,
            "full_name": key,
            "name": repo_name,
            "owner": {"id": state["users"][username]["id"], "login": username},
            # "empty" mirrors Forgejo: an auto_init'd repo has content.
            "empty": not data.get("auto_init", False),
            "default_branch": data.get("default_branch", "main"),
            "description": data.get("description", ""),
            "private": data.get("private", False),
            "html_url": f"https://example.com/{key}",
            "ssh_url": f"git@example.com:{key}.git",
            "clone_url": f"https://example.com/{key}.git",
            "created_at": "2026-04-01T00:00:00Z",
        }
        state["repos"][key] = repo
        json_response(self, 201, repo)

    def handle_POST_user_repos(self, query):
        """POST /api/v1/user/repos — create a repo for the token's owner.

        The owner is resolved by looking the bearer token up in
        state["tokens"]; an unknown token gets 401.
        """
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        repo_name = data.get("name")
        if not repo_name:
            json_response(self, 400, {"message": "name is required"})
            return
        # Get authenticated user from token
        auth_header = self.headers.get("Authorization", "")
        token = auth_header.split(" ", 1)[1] if " " in auth_header else ""
        # Find user by token (use stored username field)
        owner = None
        for tok_sha1, tok in state["tokens"].items():
            if tok_sha1 == token:
                owner = tok.get("username")
                break
        if not owner:
            json_response(self, 401, {"message": "invalid token"})
            return
        repo_id = next_ids["repos"]
        next_ids["repos"] += 1
        key = f"{owner}/{repo_name}"
        repo = {
            "id": repo_id,
            "full_name": key,
            "name": repo_name,
            "owner": {"id": state["users"].get(owner, {}).get("id", 0), "login": owner},
            "empty": False,
            "default_branch": data.get("default_branch", "main"),
            "description": data.get("description", ""),
            "private": data.get("private", False),
            "html_url": f"https://example.com/{key}",
            "ssh_url": f"git@example.com:{key}.git",
            "clone_url": f"https://example.com/{key}.git",
            "created_at": "2026-04-01T00:00:00Z",
        }
        state["repos"][key] = repo
        json_response(self, 201, repo)

    def handle_POST_repos_owner_repo_labels(self, query):
        """POST /api/v1/repos/{owner}/{repo}/labels — create a label."""
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        label_name = data.get("name")
        label_color = data.get("color")
        if not label_name or not label_color:
            json_response(self, 400, {"message": "name and color are required"})
            return
        label_id = next_ids["labels"]
        next_ids["labels"] += 1
        key = f"{owner}/{repo}"
        label = {
            "id": label_id,
            "name": label_name,
            "color": label_color,
            "description": data.get("description", ""),
            "url": f"https://example.com/api/v1/repos/{key}/labels/{label_id}",
        }
        if key not in state["labels"]:
            state["labels"][key] = []
        state["labels"][key].append(label)
        json_response(self, 201, label)

    def handle_POST_repos_owner_repo_branch_protections(self, query):
        """POST /api/v1/repos/{owner}/{repo}/branch_protections"""
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        branch_name = data.get("branch_name", "main")
        key = f"{owner}/{repo}"
        # Generate unique ID for protection (1-based per repo)
        if key in state["protections"]:
            protection_id = len(state["protections"][key]) + 1
        else:
            protection_id = 1
        protection = {
            "id": protection_id,
            "repo_id": state["repos"].get(key, {}).get("id", 0),
            "branch_name": branch_name,
            "rule_name": data.get("rule_name", branch_name),
            "enable_push": data.get("enable_push", False),
            "enable_merge_whitelist": data.get("enable_merge_whitelist", True),
            "merge_whitelist_usernames": data.get("merge_whitelist_usernames", ["admin"]),
            "required_approvals": data.get("required_approvals", 1),
            "apply_to_admins": data.get("apply_to_admins", True),
        }
        if key not in state["protections"]:
            state["protections"][key] = []
        state["protections"][key].append(protection)
        json_response(self, 201, protection)

    def handle_POST_user_applications_oauth2(self, query):
        """POST /api/v1/user/applications/oauth2 — register an OAuth2 app."""
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        app_name = data.get("name")
        if not app_name:
            json_response(self, 400, {"message": "name is required"})
            return
        app_id = next_ids["oauth2_apps"]
        next_ids["oauth2_apps"] += 1
        app = {
            "id": app_id,
            "name": app_name,
            # Random credentials — unlike access tokens, these are not
            # deterministic across runs.
            "client_id": str(uuid.uuid4()),
            "client_secret": hashlib.sha256(str(uuid.uuid4()).encode()).hexdigest(),
            "redirect_uris": data.get("redirect_uris", []),
            "confidential_client": data.get("confidential_client", True),
            "created_at": "2026-04-01T00:00:00Z",
        }
        state["oauth2_apps"].append(app)
        json_response(self, 201, app)

    def handle_PATCH_admin_users_username(self, query):
        """PATCH /api/v1/admin/users/{username} — update existing fields."""
        if not require_token(self):
            json_response(self, 401, {"message": "invalid authentication"})
            return
        parts = self.path.split("/")
        if len(parts) >= 6:
            username = parts[5]
        else:
            json_response(self, 404, {"message": "user does not exist"})
            return
        if username not in state["users"]:
            json_response(self, 404, {"message": "user does not exist"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        user = state["users"][username]
        # Only keys already present on the user object are updated;
        # unknown fields in the payload are silently ignored.
        for key, value in data.items():
            # Map 'admin' to 'is_admin' for consistency
            update_key = 'is_admin' if key == 'admin' else key
            if update_key in user:
                user[update_key] = value
        json_response(self, 200, user)

    def handle_PUT_repos_owner_repo_collaborators_collaborator(self, query):
        """PUT /api/v1/repos/{owner}/{repo}/collaborators/{collaborator}

        Adds the collaborator and replies 204 with an empty body.
        """
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 8:
            owner = parts[4]
            repo = parts[5]
            collaborator = parts[7]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        key = f"{owner}/{repo}"
        if key not in state["collaborators"]:
            state["collaborators"][key] = set()
        state["collaborators"][key].add(collaborator)
        self.send_response(204)
        self.send_header("Content-Length", 0)
        self.end_headers()

    def handle_GET_repos_owner_repo_collaborators_collaborator(self, query):
        """GET /api/v1/repos/{owner}/{repo}/collaborators/{collaborator}

        204 when the collaborator is present, 404 otherwise (mirrors the
        Forgejo "is collaborator" check).
        """
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 8:
            owner = parts[4]
            repo = parts[5]
            collaborator = parts[7]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        key = f"{owner}/{repo}"
        if key in state["collaborators"] and collaborator in state["collaborators"][key]:
            self.send_response(204)
            self.send_header("Content-Length", 0)
            self.end_headers()
        else:
            json_response(self, 404, {"message": "collaborator not found"})

    def handle_404(self):
        """Return 404 for unknown routes."""
        json_response(self, 404, {"message": "route not found"})
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    """Threaded HTTP server for handling concurrent requests."""

    # Worker threads die with the main thread, so shutdown is never
    # blocked by an in-flight request.
    daemon_threads = True
def main():
    """Start the mock server on MOCK_FORGE_PORT (default 3000).

    Serves until SIGTERM/SIGINT or KeyboardInterrupt, then shuts down
    cleanly. Exits 1 if the port cannot be bound.
    """
    global SHUTDOWN_REQUESTED
    port = int(os.environ.get("MOCK_FORGE_PORT", 3000))
    try:
        server = ThreadingHTTPServer(("0.0.0.0", port), ForgejoHandler)
        # NOTE(review): SO_REUSEADDR is applied after the constructor has
        # already bound the socket, so it has no effect on this bind;
        # HTTPServer.allow_reuse_address (set pre-bind) is the usual way.
        try:
            server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except OSError:
            pass  # Not all platforms support this
    except OSError as e:
        print(f"Error: Failed to start server on port {port}: {e}", file=sys.stderr)
        sys.exit(1)
    print(f"Mock Forgejo server starting on port {port}", file=sys.stderr)
    sys.stderr.flush()

    def shutdown_handler(signum, frame):
        global SHUTDOWN_REQUESTED
        SHUTDOWN_REQUESTED = True
        # Can't call server.shutdown() directly from signal handler in threaded server
        threading.Thread(target=server.shutdown, daemon=True).start()

    signal.signal(signal.SIGTERM, shutdown_handler)
    signal.signal(signal.SIGINT, shutdown_handler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # Idempotent: safe even when the signal handler already shut down.
        server.shutdown()
    print("Mock Forgejo server stopped", file=sys.stderr)


if __name__ == "__main__":
    main()

View file

@ -1,31 +1,32 @@
#!/usr/bin/env bash
# tests/smoke-init.sh — End-to-end smoke test for disinto init with mock Forgejo
# tests/smoke-init.sh — End-to-end smoke test for disinto init
#
# Validates the full init flow using mock Forgejo server:
# 1. Verify mock Forgejo is ready
# 2. Set up mock binaries (docker, claude, tmux)
# 3. Run disinto init
# 4. Verify Forgejo state (users, repo)
# 5. Verify local state (TOML, .env, repo clone)
# 6. Verify cron setup
# Expects a running Forgejo at SMOKE_FORGE_URL with a bootstrap admin
# user already created (see .woodpecker/smoke-init.yml for CI setup).
# Validates the full init flow: Forgejo API, user/token creation,
# repo setup, labels, TOML generation, and cron installation.
#
# Required env: FORGE_URL (default: http://localhost:3000)
# Required env: SMOKE_FORGE_URL (default: http://localhost:3000)
# Required tools: bash, curl, jq, python3, git
set -euo pipefail
FACTORY_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
FORGE_URL="${FORGE_URL:-http://localhost:3000}"
MOCK_BIN="/tmp/smoke-mock-bin"
FORGE_URL="${SMOKE_FORGE_URL:-http://localhost:3000}"
SETUP_ADMIN="setup-admin"
SETUP_PASS="SetupPass-789xyz"
TEST_SLUG="smoke-org/smoke-repo"
MOCK_BIN="/tmp/smoke-mock-bin"
MOCK_STATE="/tmp/smoke-mock-state"
FAILED=0
fail() { printf 'FAIL: %s\n' "$*" >&2; FAILED=1; }
pass() { printf 'PASS: %s\n' "$*"; }
cleanup() {
rm -rf "$MOCK_BIN" /tmp/smoke-test-repo \
"${FACTORY_ROOT}/projects/smoke-repo.toml"
rm -rf "$MOCK_BIN" "$MOCK_STATE" /tmp/smoke-test-repo \
"${FACTORY_ROOT}/projects/smoke-repo.toml" \
"${FACTORY_ROOT}/docker-compose.yml"
# Restore .env only if we created the backup
if [ -f "${FACTORY_ROOT}/.env.smoke-backup" ]; then
mv "${FACTORY_ROOT}/.env.smoke-backup" "${FACTORY_ROOT}/.env"
@ -39,11 +40,11 @@ trap cleanup EXIT
if [ -f "${FACTORY_ROOT}/.env" ]; then
cp "${FACTORY_ROOT}/.env" "${FACTORY_ROOT}/.env.smoke-backup"
fi
# Start with a clean .env
# Start with a clean .env (setup_forge writes tokens here)
printf '' > "${FACTORY_ROOT}/.env"
# ── 1. Verify mock Forgejo is ready ─────────────────────────────────────────
echo "=== 1/6 Verifying mock Forgejo at ${FORGE_URL} ==="
# ── 1. Verify Forgejo is ready ──────────────────────────────────────────────
echo "=== 1/6 Verifying Forgejo at ${FORGE_URL} ==="
retries=0
api_version=""
while true; do
@ -54,64 +55,163 @@ while true; do
fi
retries=$((retries + 1))
if [ "$retries" -gt 30 ]; then
fail "Mock Forgejo API not responding after 30s"
fail "Forgejo API not responding after 30s"
exit 1
fi
sleep 1
done
pass "Mock Forgejo API v${api_version} (${retries}s)"
pass "Forgejo API v${api_version} (${retries}s)"
# Verify bootstrap admin user exists
if curl -sf --max-time 5 "${FORGE_URL}/api/v1/users/${SETUP_ADMIN}" >/dev/null 2>&1; then
pass "Bootstrap admin '${SETUP_ADMIN}' exists"
else
fail "Bootstrap admin '${SETUP_ADMIN}' not found — was Forgejo set up?"
exit 1
fi
# ── 2. Set up mock binaries ─────────────────────────────────────────────────
echo "=== 2/6 Setting up mock binaries ==="
mkdir -p "$MOCK_BIN"
mkdir -p "$MOCK_BIN" "$MOCK_STATE"
# Store bootstrap admin credentials for the docker mock
printf '%s:%s' "${SETUP_ADMIN}" "${SETUP_PASS}" > "$MOCK_STATE/bootstrap_creds"
# ── Mock: docker ──
# Intercepts docker exec calls that disinto init --bare makes to Forgejo CLI
# Routes 'docker exec' user-creation calls to the Forgejo admin API,
# using the bootstrap admin's credentials.
cat > "$MOCK_BIN/docker" << 'DOCKERMOCK'
#!/usr/bin/env bash
set -euo pipefail
FORGE_URL="${SMOKE_FORGE_URL:-${FORGE_URL:-http://localhost:3000}}"
if [ "${1:-}" = "ps" ]; then exit 0; fi
FORGE_URL="${SMOKE_FORGE_URL:-http://localhost:3000}"
MOCK_STATE="/tmp/smoke-mock-state"
if [ ! -f "$MOCK_STATE/bootstrap_creds" ]; then
echo "mock-docker: bootstrap credentials not found" >&2
exit 1
fi
BOOTSTRAP_CREDS="$(cat "$MOCK_STATE/bootstrap_creds")"
# docker ps — return empty (no containers running)
if [ "${1:-}" = "ps" ]; then
exit 0
fi
# docker exec — route to Forgejo API
if [ "${1:-}" = "exec" ]; then
shift
shift # remove 'exec'
# Skip docker exec flags (-u VALUE, -T, -i, etc.)
while [ $# -gt 0 ] && [ "${1#-}" != "$1" ]; do
case "$1" in -u|-w|-e) shift 2 ;; *) shift ;; esac
done
shift # container name
if [ "${1:-}" = "forgejo" ] && [ "${2:-}" = "admin" ] && [ "${3:-}" = "user" ]; then
subcmd="${4:-}"
if [ "$subcmd" = "list" ]; then echo "ID Username Email"; exit 0; fi
if [ "$subcmd" = "create" ]; then
shift 4; username="" password="" email="" is_admin="false"
while [ $# -gt 0 ]; do
case "$1" in
--admin) is_admin="true"; shift ;; --username) username="$2"; shift 2 ;;
--password) password="$2"; shift 2 ;; --email) email="$2"; shift 2 ;;
--must-change-password*) shift ;; *) shift ;;
-u|-w|-e) shift 2 ;;
*) shift ;;
esac
done
curl -sf -X POST -H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users" \
-d "{\"username\":\"${username}\",\"password\":\"${password}\",\"email\":\"${email}\",\"must_change_password\":false}" >/dev/null 2>&1
if [ "$is_admin" = "true" ]; then
curl -sf -X PATCH -H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users/${username}" \
-d "{\"admin\":true,\"must_change_password\":false}" >/dev/null 2>&1 || true
shift # remove container name (e.g. disinto-forgejo)
# $@ is now: forgejo admin user list|create [flags]
if [ "${1:-}" = "forgejo" ] && [ "${2:-}" = "admin" ] && [ "${3:-}" = "user" ]; then
subcmd="${4:-}"
if [ "$subcmd" = "list" ]; then
echo "ID Username Email"
exit 0
fi
echo "New user '${username}' has been successfully created!"; exit 0
fi
if [ "$subcmd" = "change-password" ]; then
shift 4; username=""
if [ "$subcmd" = "create" ]; then
shift 4 # skip 'forgejo admin user create'
username="" password="" email="" is_admin="false"
while [ $# -gt 0 ]; do
case "$1" in --username) username="$2"; shift 2 ;; --password) shift 2 ;; --must-change-password*|--config*) shift ;; *) shift ;; esac
case "$1" in
--admin) is_admin="true"; shift ;;
--username) username="$2"; shift 2 ;;
--password) password="$2"; shift 2 ;;
--email) email="$2"; shift 2 ;;
--must-change-password*) shift ;;
*) shift ;;
esac
done
curl -sf -X PATCH -H "Content-Type: application/json" \
if [ -z "$username" ] || [ -z "$password" ] || [ -z "$email" ]; then
echo "mock-docker: missing required args" >&2
exit 1
fi
# Create user via Forgejo admin API
if ! curl -sf -X POST \
-u "$BOOTSTRAP_CREDS" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users" \
-d "{\"username\":\"${username}\",\"password\":\"${password}\",\"email\":\"${email}\",\"must_change_password\":false,\"login_name\":\"${username}\",\"source_id\":0}" \
>/dev/null 2>&1; then
echo "mock-docker: failed to create user '${username}'" >&2
exit 1
fi
# Patch user: ensure must_change_password is false (Forgejo admin
# API POST may ignore it) and promote to admin if requested
patch_body="{\"must_change_password\":false,\"login_name\":\"${username}\",\"source_id\":0"
if [ "$is_admin" = "true" ]; then
patch_body="${patch_body},\"admin\":true"
fi
patch_body="${patch_body}}"
curl -sf -X PATCH \
-u "$BOOTSTRAP_CREDS" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users/${username}" \
-d "{\"must_change_password\":false}" >/dev/null 2>&1 || true
-d "${patch_body}" \
>/dev/null 2>&1 || true
echo "New user '${username}' has been successfully created!"
exit 0
fi
if [ "$subcmd" = "change-password" ]; then
shift 4 # skip 'forgejo admin user change-password'
username="" password=""
while [ $# -gt 0 ]; do
case "$1" in
--username) username="$2"; shift 2 ;;
--password) password="$2"; shift 2 ;;
--must-change-password*) shift ;;
--config*) shift ;;
*) shift ;;
esac
done
if [ -z "$username" ]; then
echo "mock-docker: change-password missing --username" >&2
exit 1
fi
# PATCH user via Forgejo admin API to clear must_change_password
patch_body="{\"must_change_password\":false,\"login_name\":\"${username}\",\"source_id\":0"
if [ -n "$password" ]; then
patch_body="${patch_body},\"password\":\"${password}\""
fi
patch_body="${patch_body}}"
if ! curl -sf -X PATCH \
-u "$BOOTSTRAP_CREDS" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/admin/users/${username}" \
-d "${patch_body}" \
>/dev/null 2>&1; then
echo "mock-docker: failed to change-password for '${username}'" >&2
exit 1
fi
exit 0
fi
fi
echo "mock-docker: unhandled exec: $*" >&2
exit 1
fi
echo "mock-docker: unhandled command: $*" >&2
exit 1
DOCKERMOCK
chmod +x "$MOCK_BIN/docker"
@ -131,8 +231,11 @@ chmod +x "$MOCK_BIN/claude"
printf '#!/usr/bin/env bash\nexit 0\n' > "$MOCK_BIN/tmux"
chmod +x "$MOCK_BIN/tmux"
# No crontab mock — use real BusyBox crontab (available in the Forgejo
# Alpine image). Cron entries are verified via 'crontab -l' in step 6.
export PATH="$MOCK_BIN:$PATH"
pass "Mock binaries installed"
pass "Mock binaries installed (docker, claude, tmux)"
# ── 3. Run disinto init ─────────────────────────────────────────────────────
echo "=== 3/6 Running disinto init ==="
@ -142,26 +245,9 @@ rm -f "${FACTORY_ROOT}/projects/smoke-repo.toml"
git config --global user.email "smoke@test.local"
git config --global user.name "Smoke Test"
# USER needs to be set twice: assignment then export (SC2155)
USER=$(whoami)
export USER
# Create mock git repo to avoid clone failure (mock server has no git support)
mkdir -p "/tmp/smoke-test-repo"
cd "/tmp/smoke-test-repo"
git init --quiet
git config user.email "smoke@test.local"
git config user.name "Smoke Test"
echo "# smoke-repo" > README.md
git add README.md
git commit --quiet -m "Initial commit"
export SMOKE_FORGE_URL="$FORGE_URL"
export FORGE_URL
# Skip push to mock server (no git support)
export SKIP_PUSH=true
if bash "${FACTORY_ROOT}/bin/disinto" init \
"${TEST_SLUG}" \
--bare --yes \
@ -204,6 +290,35 @@ if [ "$repo_found" = false ]; then
fail "Repo not found on Forgejo under any expected path"
fi
# Labels exist on repo — use bootstrap admin to check
setup_token=$(curl -sf -X POST \
-u "${SETUP_ADMIN}:${SETUP_PASS}" \
-H "Content-Type: application/json" \
"${FORGE_URL}/api/v1/users/${SETUP_ADMIN}/tokens" \
-d '{"name":"smoke-verify","scopes":["all"]}' 2>/dev/null \
| jq -r '.sha1 // empty') || setup_token=""
if [ -n "$setup_token" ]; then
label_count=0
for repo_path in "${TEST_SLUG}" "dev-bot/smoke-repo" "disinto-admin/smoke-repo"; do
label_count=$(curl -sf \
-H "Authorization: token ${setup_token}" \
"${FORGE_URL}/api/v1/repos/${repo_path}/labels?limit=50" 2>/dev/null \
| jq 'length' 2>/dev/null) || label_count=0
if [ "$label_count" -gt 0 ]; then
break
fi
done
if [ "$label_count" -ge 5 ]; then
pass "Labels created on repo (${label_count} labels)"
else
fail "Expected >= 5 labels, found ${label_count}"
fi
else
fail "Could not obtain verification token from bootstrap admin"
fi
# ── 5. Verify local state ───────────────────────────────────────────────────
echo "=== 5/6 Verifying local state ==="
@ -242,7 +357,7 @@ else
fail ".env not found"
fi
# Repo was cloned (mock git repo created before disinto init)
# Repo was cloned
if [ -d "/tmp/smoke-test-repo/.git" ]; then
pass "Repo cloned to /tmp/smoke-test-repo"
else