Compare commits

..

No commits in common. "main" and "v0.1.0" have entirely different histories.
main ... v0.1.0

10 changed files with 244 additions and 830 deletions

4
.gitignore vendored
View file

@ -25,6 +25,4 @@ gardener/dust.jsonl
# Individual encrypted secrets (managed by disinto secrets add)
secrets/
.woodpecker/smoke-init.yml
# Pre-built binaries for Docker builds (avoid network calls during build)
docker/agents/bin/

View file

@ -226,9 +226,7 @@ services:
- woodpecker - woodpecker
agents: agents:
build: build: ./docker/agents
context: .
dockerfile: docker/agents/Dockerfile
restart: unless-stopped restart: unless-stopped
security_opt: security_opt:
- apparmor=unconfined - apparmor=unconfined
@ -258,9 +256,7 @@ services:
- disinto-net - disinto-net
runner: runner:
build: build: ./docker/agents
context: .
dockerfile: docker/agents/Dockerfile
profiles: ["vault"] profiles: ["vault"]
security_opt: security_opt:
- apparmor=unconfined - apparmor=unconfined
@ -282,17 +278,9 @@ services:
ports: ports:
- "80:80" - "80:80"
- "443:443" - "443:443"
environment:
- DISINTO_VERSION=${DISINTO_VERSION:-main}
- FORGE_URL=http://forgejo:3000
- FORGE_REPO=johba/disinto
- FORGE_OPS_REPO=johba/disinto-ops
- FORGE_TOKEN=${FORGE_TOKEN:-}
- OPS_REPO_ROOT=/opt/disinto-ops
- PROJECT_REPO_ROOT=/opt/disinto
- PRIMARY_BRANCH=main
volumes: volumes:
- ./docker/Caddyfile:/etc/caddy/Caddyfile - ./docker/Caddyfile:/etc/caddy/Caddyfile
- ./docker/edge/dispatcher.sh:/usr/local/bin/dispatcher.sh:ro
- caddy_data:/data - caddy_data:/data
- /var/run/docker.sock:/var/run/docker.sock - /var/run/docker.sock:/var/run/docker.sock
depends_on: depends_on:
@ -2377,55 +2365,6 @@ disinto_run() {
return "$rc" return "$rc"
} }
# ── Pre-build: download binaries to docker/agents/bin/ ────────────────────────
# This avoids network calls during docker build (needed for Docker-in-LXD builds)
# Returns 0 on success, 1 on failure
download_agent_binaries() {
local bin_dir="${FACTORY_ROOT}/docker/agents/bin"
mkdir -p "$bin_dir"
echo "Downloading agent binaries to ${bin_dir}..."
# Download SOPS
local sops_file="${bin_dir}/sops"
if [ ! -f "$sops_file" ]; then
echo " Downloading SOPS v3.9.4..."
curl -sL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.linux.amd64 -o "$sops_file"
if [ ! -f "$sops_file" ]; then
echo "Error: failed to download SOPS" >&2
return 1
fi
fi
# Verify checksum
echo " Verifying SOPS checksum..."
if ! echo "5488e32bc471de7982ad895dd054bbab3ab91c417a118426134551e9626e4e85 ${sops_file}" | sha256sum -c - >/dev/null 2>&1; then
echo "Error: SOPS checksum verification failed" >&2
return 1
fi
chmod +x "$sops_file"
# Download tea CLI
local tea_file="${bin_dir}/tea"
if [ ! -f "$tea_file" ]; then
echo " Downloading tea CLI v0.9.2..."
curl -sL https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64 -o "$tea_file"
if [ ! -f "$tea_file" ]; then
echo "Error: failed to download tea CLI" >&2
return 1
fi
fi
# Verify checksum
echo " Verifying tea CLI checksum..."
if ! echo "be10cdf9a619e3c0f121df874960ed19b53e62d1c7036cf60313a28b5227d54d ${tea_file}" | sha256sum -c - >/dev/null 2>&1; then
echo "Error: tea CLI checksum verification failed" >&2
return 1
fi
chmod +x "$tea_file"
echo "Binaries downloaded and verified successfully"
return 0
}
# ── up command ──────────────────────────────────────────────────────────────── # ── up command ────────────────────────────────────────────────────────────────
disinto_up() { disinto_up() {
@ -2436,14 +2375,6 @@ disinto_up() {
exit 1 exit 1
fi fi
# Pre-build: download binaries to docker/agents/bin/ to avoid network calls during docker build
echo "── Pre-build: downloading agent binaries ────────────────────────"
if ! download_agent_binaries; then
echo "Error: failed to download agent binaries" >&2
exit 1
fi
echo ""
# Decrypt secrets to temp .env if SOPS available and .env.enc exists # Decrypt secrets to temp .env if SOPS available and .env.enc exists
local tmp_env="" local tmp_env=""
local enc_file="${FACTORY_ROOT}/.env.enc" local enc_file="${FACTORY_ROOT}/.env.enc"

View file

@ -41,7 +41,7 @@ REPO_ROOT="${PROJECT_REPO_ROOT}"
LOCKFILE="/tmp/dev-agent-${PROJECT_NAME:-default}.lock" LOCKFILE="/tmp/dev-agent-${PROJECT_NAME:-default}.lock"
STATUSFILE="/tmp/dev-agent-status-${PROJECT_NAME:-default}" STATUSFILE="/tmp/dev-agent-status-${PROJECT_NAME:-default}"
BRANCH="fix/issue-${ISSUE}" # Default; will be updated after FORGE_REMOTE is known BRANCH="fix/issue-${ISSUE}"
WORKTREE="/tmp/${PROJECT_NAME}-worktree-${ISSUE}" WORKTREE="/tmp/${PROJECT_NAME}-worktree-${ISSUE}"
SID_FILE="/tmp/dev-session-${PROJECT_NAME}-${ISSUE}.sid" SID_FILE="/tmp/dev-session-${PROJECT_NAME}-${ISSUE}.sid"
PREFLIGHT_RESULT="/tmp/dev-agent-preflight.json" PREFLIGHT_RESULT="/tmp/dev-agent-preflight.json"
@ -263,19 +263,6 @@ FORGE_REMOTE="${FORGE_REMOTE:-origin}"
export FORGE_REMOTE export FORGE_REMOTE
log "forge remote: ${FORGE_REMOTE}" log "forge remote: ${FORGE_REMOTE}"
# Generate unique branch name per attempt to avoid collision with failed attempts
# Only apply when not in recovery mode (RECOVERY_MODE branch is already set from existing PR)
# First attempt: fix/issue-N, subsequent: fix/issue-N-1, fix/issue-N-2, etc.
if [ "$RECOVERY_MODE" = false ]; then
# Count only branches matching fix/issue-N, fix/issue-N-1, fix/issue-N-2, etc. (exact prefix match)
ATTEMPT=$(git ls-remote --heads "$FORGE_REMOTE" "refs/heads/fix/issue-${ISSUE}" 2>/dev/null | grep -c "refs/heads/fix/issue-${ISSUE}$" || echo 0)
ATTEMPT=$((ATTEMPT + $(git ls-remote --heads "$FORGE_REMOTE" "refs/heads/fix/issue-${ISSUE}-*" 2>/dev/null | wc -l)))
if [ "$ATTEMPT" -gt 0 ]; then
BRANCH="fix/issue-${ISSUE}-${ATTEMPT}"
fi
fi
log "using branch: ${BRANCH}"
if [ "$RECOVERY_MODE" = true ]; then if [ "$RECOVERY_MODE" = true ]; then
if ! worktree_recover "$WORKTREE" "$BRANCH" "$FORGE_REMOTE"; then if ! worktree_recover "$WORKTREE" "$BRANCH" "$FORGE_REMOTE"; then
log "ERROR: worktree recovery failed" log "ERROR: worktree recovery failed"
@ -588,8 +575,11 @@ else
outcome="blocked_${_PR_WALK_EXIT_REASON:-agent_failed}" outcome="blocked_${_PR_WALK_EXIT_REASON:-agent_failed}"
profile_write_journal "$ISSUE" "$ISSUE_TITLE" "$outcome" "$FILES_CHANGED" || true profile_write_journal "$ISSUE" "$ISSUE_TITLE" "$outcome" "$FILES_CHANGED" || true
# Cleanup on failure: preserve remote branch and PR for debugging, clean up local worktree # Cleanup on failure: close PR, delete remote branch, clean up worktree
# Remote state (PR and branch) stays open for inspection of CI logs and review comments if [ -n "$PR_NUMBER" ]; then
pr_close "$PR_NUMBER"
fi
git push "$FORGE_REMOTE" --delete "$BRANCH" 2>/dev/null || true
worktree_cleanup "$WORKTREE" worktree_cleanup "$WORKTREE"
rm -f "$SID_FILE" "$IMPL_SUMMARY_FILE" rm -f "$SID_FILE" "$IMPL_SUMMARY_FILE"
CLAIMED=false CLAIMED=false

View file

@ -3,16 +3,20 @@ FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
bash curl git jq tmux cron python3 python3-pip openssh-client ca-certificates age shellcheck \ bash curl git jq tmux cron python3 python3-pip openssh-client ca-certificates age shellcheck \
&& pip3 install --break-system-packages networkx \ && pip3 install --break-system-packages networkx \
&& curl -sL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.linux.amd64 \
-o /usr/local/bin/sops \
&& curl -sL https://github.com/getsops/sops/releases/download/v3.9.4/sops-v3.9.4.checksums.txt \
-o /tmp/sops-checksums.txt \
&& sha256sum -c --ignore-missing /tmp/sops-checksums.txt \
&& rm -f /tmp/sops-checksums.txt \
&& chmod +x /usr/local/bin/sops \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Pre-built binaries (copied from docker/agents/bin/)
# SOPS — encrypted data decryption tool
COPY docker/agents/bin/sops /usr/local/bin/sops
RUN chmod +x /usr/local/bin/sops
# tea CLI — official Gitea/Forgejo CLI for issue/label/comment operations # tea CLI — official Gitea/Forgejo CLI for issue/label/comment operations
COPY docker/agents/bin/tea /usr/local/bin/tea # Checksum from https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64.sha256
RUN chmod +x /usr/local/bin/tea RUN curl -sL https://dl.gitea.com/tea/0.9.2/tea-0.9.2-linux-amd64 -o /usr/local/bin/tea \
&& echo "be10cdf9a619e3c0f121df874960ed19b53e62d1c7036cf60313a28b5227d54d /usr/local/bin/tea" | sha256sum -c - \
&& chmod +x /usr/local/bin/tea
# Claude CLI is mounted from the host via docker-compose volume. # Claude CLI is mounted from the host via docker-compose volume.
# No internet access to cli.anthropic.com required at build time. # No internet access to cli.anthropic.com required at build time.
@ -23,7 +27,7 @@ RUN useradd -m -u 1000 -s /bin/bash agent
# Copy disinto code into the image # Copy disinto code into the image
COPY . /home/agent/disinto COPY . /home/agent/disinto
COPY docker/agents/entrypoint.sh /entrypoint.sh COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh RUN chmod +x /entrypoint.sh
# Entrypoint runs as root to start the cron daemon; # Entrypoint runs as root to start the cron daemon;

View file

@ -1,4 +1,3 @@
FROM caddy:alpine FROM caddy:alpine
RUN apk add --no-cache bash jq curl git docker-cli RUN apk add --no-cache bash jq curl git docker-cli
COPY entrypoint-edge.sh /usr/local/bin/entrypoint-edge.sh COPY dispatcher.sh /usr/local/bin/dispatcher.sh
ENTRYPOINT ["bash", "/usr/local/bin/entrypoint-edge.sh"]

View file

@ -109,34 +109,33 @@ get_pr_for_file() {
local file_name local file_name
file_name=$(basename "$file_path") file_name=$(basename "$file_path")
# Step 1: find the commit that added the file # Get recent commits that added this specific file
local add_commit local commits
add_commit=$(git -C "$OPS_REPO_ROOT" log --diff-filter=A --format="%H" \ commits=$(git -C "$OPS_REPO_ROOT" log --oneline --diff-filter=A -- "vault/actions/${file_name}" 2>/dev/null | head -20) || true
-- "vault/actions/${file_name}" 2>/dev/null | head -1)
if [ -z "$add_commit" ]; then if [ -z "$commits" ]; then
return 1 return 1
fi fi
# Step 2: find the merge commit that contains it via ancestry path # For each commit, check if it's a merge commit from a PR
local merge_line while IFS= read -r commit; do
# Use --reverse to get the oldest (direct PR merge) first, not the newest local commit_sha commit_msg
merge_line=$(git -C "$OPS_REPO_ROOT" log --merges --ancestry-path \
--reverse "${add_commit}..HEAD" --oneline 2>/dev/null | head -1)
if [ -z "$merge_line" ]; then commit_sha=$(echo "$commit" | awk '{print $1}')
return 1 commit_msg=$(git -C "$OPS_REPO_ROOT" log -1 --format="%B" "$commit_sha" 2>/dev/null) || continue
fi
# Step 3: extract PR number from merge commit message # Check if this is a merge commit (has "Merge pull request" in message)
# Forgejo format: "Merge pull request 'title' (#N) from branch into main" if [[ "$commit_msg" =~ "Merge pull request" ]]; then
# Extract PR number from merge message (e.g., "Merge pull request #123")
local pr_num local pr_num
pr_num=$(echo "$merge_line" | grep -oP '#\d+' | head -1 | tr -d '#') pr_num=$(echo "$commit_msg" | grep -oP '#\d+' | head -1 | tr -d '#') || true
if [ -n "$pr_num" ]; then if [ -n "$pr_num" ]; then
echo "$pr_num" echo "$pr_num"
return 0 return 0
fi fi
fi
done <<< "$commits"
return 1 return 1
} }
@ -147,11 +146,8 @@ get_pr_for_file() {
get_pr_merger() { get_pr_merger() {
local pr_number="$1" local pr_number="$1"
# Use ops repo API URL for PR lookups (not disinto repo)
local ops_api="${FORGE_URL}/api/v1/repos/${FORGE_OPS_REPO}"
curl -sf -H "Authorization: token ${FORGE_TOKEN}" \ curl -sf -H "Authorization: token ${FORGE_TOKEN}" \
"${ops_api}/pulls/${pr_number}" 2>/dev/null | jq -r '{ "${FORGE_API}/pulls/${pr_number}" 2>/dev/null | jq -r '{
username: .merge_user?.login // .user?.login, username: .merge_user?.login // .user?.login,
merged: .merged, merged: .merged,
merged_at: .merged_at // empty merged_at: .merged_at // empty
@ -294,11 +290,16 @@ launch_runner() {
local secrets_array local secrets_array
secrets_array="${VAULT_ACTION_SECRETS:-}" secrets_array="${VAULT_ACTION_SECRETS:-}"
if [ -z "$secrets_array" ]; then
log "ERROR: Action ${action_id} has no secrets declared"
write_result "$action_id" 1 "No secrets declared in TOML"
return 1
fi
# Build command array (safe from shell injection) # Build command array (safe from shell injection)
local -a cmd=(docker compose run --rm runner) local -a cmd=(docker compose run --rm runner)
# Add environment variables for secrets (if any declared) # Add environment variables for secrets
if [ -n "$secrets_array" ]; then
for secret in $secrets_array; do for secret in $secrets_array; do
secret=$(echo "$secret" | xargs) secret=$(echo "$secret" | xargs)
if [ -n "$secret" ]; then if [ -n "$secret" ]; then
@ -311,9 +312,6 @@ launch_runner() {
cmd+=(-e "$secret") cmd+=(-e "$secret")
fi fi
done done
else
log "Action ${action_id} has no secrets declared — runner will execute without extra env vars"
fi
# Add formula and action id as arguments (after service name) # Add formula and action id as arguments (after service name)
local formula="${VAULT_ACTION_FORMULA:-}" local formula="${VAULT_ACTION_FORMULA:-}"

View file

@ -1,19 +0,0 @@
#!/usr/bin/env bash
# Edge container entrypoint: clone disinto at the pinned version, start the
# dispatcher loop in the background, then hand PID 1 over to Caddy.
set -euo pipefail

# env.sh expects USER to be set; Alpine base images leave it undefined.
export USER="${USER:-root}"

DISINTO_VERSION="${DISINTO_VERSION:-main}"
repo_url="${FORGE_URL:-http://forgejo:3000}/johba/disinto.git"

# One-time shallow clone pinned to DISINTO_VERSION; container restarts reuse
# the existing checkout. Under set -e a failed clone aborts the entrypoint.
[[ -d /opt/disinto/.git ]] || git clone --depth 1 --branch "$DISINTO_VERSION" "$repo_url" /opt/disinto

# Dispatcher runs alongside Caddy in the background (not waited on).
bash /opt/disinto/docker/edge/dispatcher.sh &

# Replace the shell with Caddy so it receives container signals directly.
exec caddy run --config /etc/caddy/Caddyfile --adapter caddyfile

View file

@ -1,15 +1,16 @@
# formulas/run-gardener.toml — Gardener housekeeping formula # formulas/run-gardener.toml — Gardener housekeeping formula
# #
# Defines the gardener's complete run: grooming (Claude session via # Defines the gardener's complete run: grooming (Claude session via
# gardener-run.sh) + AGENTS.md maintenance + final commit-and-pr. # gardener-run.sh) + blocked-review + AGENTS.md maintenance + final
# commit-and-pr.
# #
# Gardener has journaling via .profile (issue #97), so it learns from # No memory, no journal. The gardener does mechanical housekeeping
# past runs and improves over time. # based on current state — it doesn't need to remember past runs.
# #
# Steps: preflight -> grooming -> dust-bundling -> agents-update -> commit-and-pr # Steps: preflight → grooming → dust-bundling → blocked-review → stale-pr-recycle → agents-update → commit-and-pr
name = "run-gardener" name = "run-gardener"
description = "Mechanical housekeeping: grooming, dust bundling, docs update" description = "Mechanical housekeeping: grooming, blocked review, docs update"
version = 1 version = 1
[context] [context]
@ -119,17 +120,15 @@ DUST (trivial — single-line edit, rename, comment, style, whitespace):
of 3+ into one backlog issue. of 3+ into one backlog issue.
VAULT (needs human decision or external resource): VAULT (needs human decision or external resource):
File a vault procurement item using vault_request(): File a vault procurement item at $OPS_REPO_ROOT/vault/pending/<id>.md:
source "$(dirname "$0")/../lib/vault.sh" # <What decision or resource is needed>
TOML_CONTENT="# Vault action: <action_id> ## What
context = \"<description of what decision/resource is needed>\" <description>
unblocks = [\"#NNN\"] ## Why
<which issue this unblocks>
[execution] ## Unblocks
# Commands to run after approval - #NNN — <title>
" Log: echo "VAULT: filed $OPS_REPO_ROOT/vault/pending/<id>.md for #NNN — <reason>" >> "$RESULT_FILE"
PR_NUM=$(vault_request "<action_id>" "$TOML_CONTENT")
echo "VAULT: filed PR #${PR_NUM} for #NNN — <reason>" >> "$RESULT_FILE"
CLEAN (only if truly nothing to do): CLEAN (only if truly nothing to do):
echo 'CLEAN' >> "$RESULT_FILE" echo 'CLEAN' >> "$RESULT_FILE"
@ -143,7 +142,25 @@ Sibling dependency rule (CRITICAL):
NEVER add bidirectional ## Dependencies between siblings (creates deadlocks). NEVER add bidirectional ## Dependencies between siblings (creates deadlocks).
Use ## Related for cross-references: "## Related\n- #NNN (sibling)" Use ## Related for cross-references: "## Related\n- #NNN (sibling)"
6. Quality gate backlog label enforcement: 7. Architecture decision alignment check (AD check):
For each open issue labeled 'backlog', check whether the issue
contradicts any architecture decision listed in the
## Architecture Decisions section of AGENTS.md.
Read AGENTS.md and extract the AD table. For each backlog issue,
compare the issue title and body against each AD. If an issue
clearly violates an AD:
a. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"Closing: violates AD-NNN (<decision summary>). See AGENTS.md § Architecture Decisions."}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a close action to the manifest:
echo '{"action":"close","issue":NNN,"reason":"violates AD-NNN"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
c. Log to the result file:
echo "ACTION: closed #NNN — violates AD-NNN" >> "$RESULT_FILE"
Only close for clear, unambiguous violations. If the issue is
borderline or could be interpreted as compatible, leave it open
and file a VAULT item for human decision instead.
8. Quality gate backlog label enforcement:
For each open issue labeled 'backlog', verify it has the required For each open issue labeled 'backlog', verify it has the required
sections for dev-agent pickup: sections for dev-agent pickup:
a. Acceptance criteria body must contain at least one checkbox a. Acceptance criteria body must contain at least one checkbox
@ -164,11 +181,28 @@ Sibling dependency rule (CRITICAL):
Well-structured issues (both sections present) are left untouched Well-structured issues (both sections present) are left untouched
they are ready for dev-agent pickup. they are ready for dev-agent pickup.
9. Portfolio lifecycle maintain ## Addressables and ## Observables in AGENTS.md:
Read the current Addressables and Observables tables from AGENTS.md.
a. ADD: if a recently closed issue shipped a new deployment, listing,
package, or external presence not yet in the table, add a row.
b. PROMOTE: if an addressable now has measurement wired (an evidence
process reads from it), move it to the Observables section.
c. REMOVE: if an addressable was decommissioned (vision change
invalidated it, service shut down), remove the row and log why.
d. FLAG: if an addressable has been live > 2 weeks with Observable? = No
and no evidence process is planned, add a comment to the result file:
echo "ACTION: flagged addressable '<name>' — live >2 weeks, no observation path" >> "$RESULT_FILE"
Stage AGENTS.md if changed the commit-and-pr step handles the actual commit.
Processing order: Processing order:
1. Handle PRIORITY_blockers_starving_factory first promote or resolve 1. Handle PRIORITY_blockers_starving_factory first promote or resolve
2. Quality gate strip backlog from issues missing acceptance criteria or affected files 2. AD alignment check close backlog issues that violate architecture decisions
3. Process tech-debt issues by score (impact/effort) 3. Quality gate strip backlog from issues missing acceptance criteria or affected files
4. Classify remaining items as dust or route to vault 4. Process tech-debt issues by score (impact/effort)
5. Classify remaining items as dust or route to vault
6. Portfolio lifecycle update addressables/observables tables
Do NOT bundle dust yourself the dust-bundling step handles accumulation, Do NOT bundle dust yourself the dust-bundling step handles accumulation,
dedup, TTL expiry, and bundling into backlog issues. dedup, TTL expiry, and bundling into backlog issues.
@ -223,12 +257,126 @@ session, so changes there would be lost.
5. If no DUST items were emitted and no groups are ripe, skip this step. 5. If no DUST items were emitted and no groups are ripe, skip this step.
CRITICAL: If this step fails, log the failure and move on. CRITICAL: If this step fails, log the failure and move on to blocked-review.
""" """
needs = ["grooming"] needs = ["grooming"]
# ───────────────────────────────────────────────────────────────────── # ─────────────────────────────────────────────────────────────────────
# Step 4: agents-update — AGENTS.md watermark staleness + size enforcement # Step 4: blocked-review — triage blocked issues
# ─────────────────────────────────────────────────────────────────────
[[steps]]
id = "blocked-review"
title = "Review issues labeled blocked"
description = """
Review all issues labeled 'blocked' and decide their fate.
(See issue #352 for the blocked label convention.)
1. Fetch all blocked issues:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues?state=open&type=issues&labels=blocked&limit=50"
2. For each blocked issue, read the full body and comments:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues/<number>"
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues/<number>/comments"
3. Check dependencies extract issue numbers from ## Dependencies /
## Depends on / ## Blocked by sections. For each dependency:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/issues/<dep_number>"
Check if the dependency is now closed.
4. For each blocked issue, choose ONE action:
UNBLOCK all dependencies are now closed or the blocking condition resolved:
a. Write a remove_label action to the manifest:
echo '{"action":"remove_label","issue":NNN,"label":"blocked"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"Unblocked: <explanation of what resolved the blocker>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
NEEDS HUMAN blocking condition is ambiguous, requires architectural
decision, or involves external factors:
a. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"<diagnostic: what you found and what decision is needed>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Leave the 'blocked' label in place
CLOSE issue is stale (blocked 30+ days with no progress on blocker),
the blocker is wontfix, or the issue is no longer relevant:
a. Write a comment action to the manifest:
echo '{"action":"comment","issue":NNN,"body":"Closing: <reason — stale blocker, no longer relevant, etc.>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a close action to the manifest:
echo '{"action":"close","issue":NNN,"reason":"<stale blocker / no longer relevant / etc.>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
CRITICAL: If this step fails, log the failure and move on.
"""
needs = ["dust-bundling"]
# ─────────────────────────────────────────────────────────────────────
# Step 5: stale-pr-recycle — recycle stale failed PRs back to backlog
# ─────────────────────────────────────────────────────────────────────
[[steps]]
id = "stale-pr-recycle"
title = "Recycle stale failed PRs back to backlog"
description = """
Detect open PRs where CI has failed and no work has happened in 24+ hours.
These represent abandoned dev-agent attempts recycle them so the pipeline
can retry with a fresh session.
1. Fetch all open PRs:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/pulls?state=open&limit=50"
2. For each PR, check all four conditions before recycling:
a. CI failed get the HEAD SHA from the PR's head.sha field, then:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/commits/<head_sha>/status"
Only proceed if the combined state is "failure" or "error".
Skip PRs with "success", "pending", or no CI status.
b. Last push > 24 hours ago get the commit details:
curl -sf -H "Authorization: token $FORGE_TOKEN" \
"$FORGE_API/git/commits/<head_sha>"
Parse the committer.date field. Only proceed if it is older than:
$(date -u -d '24 hours ago' +%Y-%m-%dT%H:%M:%SZ)
c. Linked issue exists extract the issue number from the PR body.
Look for "Fixes #NNN" or "ixes #NNN" patterns (case-insensitive).
If no linked issue found, skip this PR (cannot reset labels).
d. No active tmux session check:
tmux has-session -t "dev-${PROJECT_NAME}-<issue_number>" 2>/dev/null
If a session exists, someone may still be working skip this PR.
3. For each PR that passes all checks (failed CI, 24+ hours stale,
linked issue found, no active session):
a. Write a comment on the PR explaining the recycle:
echo '{"action":"comment","issue":<pr_number>,"body":"Recycling stale CI failure for fresh attempt. Previous PR: #<pr_number>"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
b. Write a close_pr action:
echo '{"action":"close_pr","pr":<pr_number>}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
c. Remove the in-progress label from the linked issue:
echo '{"action":"remove_label","issue":<issue_number>,"label":"in-progress"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
d. Add the backlog label to the linked issue:
echo '{"action":"add_label","issue":<issue_number>,"label":"backlog"}' >> "$PROJECT_REPO_ROOT/gardener/pending-actions.jsonl"
e. Log to result file:
echo "ACTION: recycled PR #<pr_number> (linked issue #<issue_number>) — stale CI failure" >> "$RESULT_FILE"
4. If no stale failed PRs found, skip this step.
CRITICAL: If this step fails, log the failure and move on to agents-update.
"""
needs = ["blocked-review"]
# ─────────────────────────────────────────────────────────────────────
# Step 6: agents-update — AGENTS.md watermark staleness + size enforcement
# ───────────────────────────────────────────────────────────────────── # ─────────────────────────────────────────────────────────────────────
[[steps]] [[steps]]
@ -349,10 +497,10 @@ needed. You wouldn't dump a 500-page wiki on a new hire's first morning.
CRITICAL: If this step fails for any reason, log the failure and move on. CRITICAL: If this step fails for any reason, log the failure and move on.
Do NOT let an AGENTS.md failure prevent the commit-and-pr step. Do NOT let an AGENTS.md failure prevent the commit-and-pr step.
""" """
needs = ["dust-bundling"] needs = ["stale-pr-recycle"]
# ───────────────────────────────────────────────────────────────────── # ─────────────────────────────────────────────────────────────────────
# Step 5: commit-and-pr — single commit with all file changes # Step 7: commit-and-pr — single commit with all file changes
# ───────────────────────────────────────────────────────────────────── # ─────────────────────────────────────────────────────────────────────
[[steps]] [[steps]]
@ -406,14 +554,16 @@ executes them after the PR merges.
PR_NUMBER=$(echo "$PR_RESPONSE" | jq -r '.number') PR_NUMBER=$(echo "$PR_RESPONSE" | jq -r '.number')
h. Save PR number for orchestrator tracking: h. Save PR number for orchestrator tracking:
echo "$PR_NUMBER" > /tmp/gardener-pr-${PROJECT_NAME}.txt echo "$PR_NUMBER" > /tmp/gardener-pr-${PROJECT_NAME}.txt
i. The orchestrator handles CI/review via pr_walk_to_merge. i. Signal the orchestrator to monitor CI:
The gardener stays alive to inject CI results and review feedback echo "PHASE:awaiting_ci" > "$PHASE_FILE"
as they come in, then executes the pending-actions manifest after merge. j. STOP and WAIT. Do NOT return to the primary branch.
The orchestrator polls CI, injects results and review feedback.
When you receive injected CI or review feedback, follow its
instructions, then write PHASE:awaiting_ci and wait again.
4. If no file changes existed (step 2 found nothing): 4. If no file changes existed (step 2 found nothing):
# Nothing to commit — the gardener has no work to do this run. echo "PHASE:done" > "$PHASE_FILE"
exit 0
5. If PR creation fails, log the error and exit. 5. If PR creation fails, log the error and write PHASE:failed.
""" """
needs = ["agents-update"] needs = ["agents-update"]

View file

@ -22,8 +22,7 @@ directly from cron like the planner, predictor, and supervisor.
`PHASE:awaiting_ci` — injects CI results and review feedback, re-signals `PHASE:awaiting_ci` — injects CI results and review feedback, re-signals
`PHASE:awaiting_ci` after fixes, signals `PHASE:awaiting_review` on CI pass. `PHASE:awaiting_ci` after fixes, signals `PHASE:awaiting_review` on CI pass.
Executes pending-actions manifest after PR merge. Executes pending-actions manifest after PR merge.
- `formulas/run-gardener.toml` — Execution spec: preflight, grooming, dust-bundling, - `formulas/run-gardener.toml` — Execution spec: preflight, grooming, dust-bundling, blocked-review, agents-update, commit-and-pr
agents-update, commit-and-pr
- `gardener/pending-actions.json` — Manifest of deferred repo actions (label changes, - `gardener/pending-actions.json` — Manifest of deferred repo actions (label changes,
closures, comments, issue creation). Written during grooming steps, committed to the closures, comments, issue creation). Written during grooming steps, committed to the
PR, reviewed alongside AGENTS.md changes, executed by gardener-run.sh after merge. PR, reviewed alongside AGENTS.md changes, executed by gardener-run.sh after merge.
@ -35,7 +34,7 @@ directly from cron like the planner, predictor, and supervisor.
**Lifecycle**: gardener-run.sh (cron 0,6,12,18) → `check_active gardener` → lock + memory guard → **Lifecycle**: gardener-run.sh (cron 0,6,12,18) → `check_active gardener` → lock + memory guard →
load formula + context → create tmux session → load formula + context → create tmux session →
Claude grooms backlog (writes proposed actions to manifest), bundles dust, Claude grooms backlog (writes proposed actions to manifest), bundles dust,
updates AGENTS.md, commits manifest + docs to PR → reviews blocked issues, updates AGENTS.md, commits manifest + docs to PR →
`PHASE:awaiting_ci` (stays alive) → CI pass → `PHASE:awaiting_review` `PHASE:awaiting_ci` (stays alive) → CI pass → `PHASE:awaiting_review`
review feedback → address + re-signal → merge → gardener-run.sh executes review feedback → address + re-signal → merge → gardener-run.sh executes
manifest actions via API → `PHASE:done`. When blocked on external resources manifest actions via API → `PHASE:done`. When blocked on external resources

View file

@ -1,636 +0,0 @@
#!/usr/bin/env python3
"""Mock Forgejo API server for CI smoke tests.
Implements 15 Forgejo API endpoints that disinto init calls.
State stored in-memory (dicts), responds instantly.
"""
import base64
import hashlib
import json
import os
import re
import signal
import socket
import sys
import threading
import uuid
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from urllib.parse import parse_qs, urlparse
# Global in-memory state for the mock Forgejo API. Nothing is persisted:
# each process start begins empty and endpoints mutate these dicts directly.
state = {
    "users": {},          # key: username -> user object
    "tokens": {},         # key: token_sha1 -> token object
    "repos": {},          # key: "owner/repo" -> repo object
    "orgs": {},           # key: orgname -> org object
    "labels": {},         # key: "owner/repo" -> list of labels
    "collaborators": {},  # key: "owner/repo" -> set of usernames
    "protections": {},    # key: "owner/repo" -> list of protections
    "oauth2_apps": [],    # list of oauth2 app objects
}

# Monotonic per-kind id counters for newly created objects.
next_ids = {"users": 1, "tokens": 1, "repos": 1, "orgs": 1, "labels": 1, "oauth2_apps": 1}

# Graceful-shutdown flag — presumably flipped by a signal handler (the signal
# module is imported above, but the handler is not visible in this chunk).
SHUTDOWN_REQUESTED = False
def log_request(handler, method, path, status):
    """Write one access-log line ("[timestamp] METHOD path status") to stderr."""
    stamp = handler.log_date_time_string()
    print("[%s] %s %s %s" % (stamp, method, path, status), file=sys.stderr)
def json_response(handler, status, data):
    """Serialize `data` to JSON and emit it as a complete HTTP response.

    Sends the status line, Content-Type and Content-Length headers, then
    writes the UTF-8 encoded body to handler.wfile.
    """
    encoded = json.dumps(data).encode("utf-8")
    handler.send_response(status)
    for name, value in (("Content-Type", "application/json"),
                        ("Content-Length", len(encoded))):
        handler.send_header(name, value)
    handler.end_headers()
    handler.wfile.write(encoded)
def basic_auth_user(handler):
    """Return the username from a Basic Authorization header, or None.

    None is returned when the header is missing, uses another scheme,
    fails to base64-decode, or carries no ``user:password`` separator.
    """
    header = handler.headers.get("Authorization", "")
    prefix = "Basic "
    if not header.startswith(prefix):
        return None
    try:
        credentials = base64.b64decode(header[len(prefix):]).decode("utf-8")
    except Exception:
        return None
    if ":" not in credentials:
        return None
    return credentials.split(":", 1)[0]
def token_auth_valid(handler):
    """True when the Authorization header uses the ``token`` scheme.

    Only the scheme prefix is inspected; the token value is not validated.
    """
    return handler.headers.get("Authorization", "").startswith("token ")
def require_token(handler):
    """Mock token check: any ``token <value>`` header is accepted.

    Returns True when a token-scheme Authorization header is present,
    None otherwise. The token value itself is never validated.
    """
    auth_header = handler.headers.get("Authorization", "")
    if auth_header.startswith("token "):
        return True
    return None
def require_basic_auth(handler, required_user=None):
    """Resolve Basic credentials to a registered user.

    Returns the username when the header decodes, that user exists in
    ``state["users"]``, and (when *required_user* is given) it matches;
    otherwise None.
    """
    username = basic_auth_user(handler)
    known = username is not None and username in state["users"]
    if not known:
        return None
    if required_user and username != required_user:
        return None
    return username
class ForgejoHandler(BaseHTTPRequestHandler):
    """HTTP request handler for mock Forgejo API.

    Dispatch is name-based: METHOD /api/v1/a/b first looks for an exact
    method named ``handle_<METHOD>_a_b``; failing that, the regex patterns
    in ``_handle_patterned_route`` map paths with dynamic segments
    ({owner}, {repo}, ...) to handler names. Unknown routes get a JSON 404.
    """
    def log_message(self, format, *args):
        """Suppress BaseHTTPRequestHandler's default logging."""
        pass  # We log once per request in do_request instead
    def do_request(self, method):
        """Route a request to the appropriate handler method.

        Any exception escaping a handler is converted to a JSON 500 so a
        bug in one endpoint cannot kill the worker thread.
        """
        parsed = urlparse(self.path)
        path = parsed.path
        query = parse_qs(parsed.query)
        # Logged once, up front, before the outcome is known.
        log_request(self, method, self.path, "PENDING")
        # Strip /api/v1/ prefix for routing (or leading slash for other routes)
        route_path = path
        if route_path.startswith("/api/v1/"):
            route_path = route_path[8:]
        elif route_path.startswith("/"):
            route_path = route_path.lstrip("/")
        # Route to handler
        try:
            # First try exact match (with / replaced by _)
            handler_path = route_path.replace("/", "_")
            handler_name = f"handle_{method}_{handler_path}"
            handler = getattr(self, handler_name, None)
            if handler:
                handler(query)
            else:
                # Try pattern matching for routes with dynamic segments
                self._handle_patterned_route(method, route_path, query)
        except Exception as e:
            # NOTE(review): if the failing handler already sent headers,
            # this appends a second response to the stream — tolerable
            # for a mock used only in CI smoke tests.
            log_request(self, method, self.path, 500)
            json_response(self, 500, {"message": str(e)})
    def _handle_patterned_route(self, method, route_path, query):
        """Handle routes with dynamic segments using pattern matching.

        Patterns are tried in order; the first match whose handler method
        exists wins. No match (or no such handler) ends in a JSON 404.
        """
        # Define patterns: (regex, handler_name)
        patterns = [
            # Users patterns
            (r"^users/([^/]+)$", f"handle_{method}_users_username"),
            (r"^users/([^/]+)/tokens$", f"handle_{method}_users_username_tokens"),
            # Repos patterns
            (r"^repos/([^/]+)/([^/]+)$", f"handle_{method}_repos_owner_repo"),
            (r"^repos/([^/]+)/([^/]+)/labels$", f"handle_{method}_repos_owner_repo_labels"),
            (r"^repos/([^/]+)/([^/]+)/branch_protections$", f"handle_{method}_repos_owner_repo_branch_protections"),
            (r"^repos/([^/]+)/([^/]+)/collaborators/([^/]+)$", f"handle_{method}_repos_owner_repo_collaborators_collaborator"),
            # Org patterns
            (r"^orgs/([^/]+)/repos$", f"handle_{method}_orgs_org_repos"),
            # User patterns
            (r"^user/repos$", f"handle_{method}_user_repos"),
            (r"^user/applications/oauth2$", f"handle_{method}_user_applications_oauth2"),
            # Admin patterns
            (r"^admin/users$", f"handle_{method}_admin_users"),
            (r"^admin/users/([^/]+)$", f"handle_{method}_admin_users_username"),
            # Org patterns
            (r"^orgs$", f"handle_{method}_orgs"),
        ]
        for pattern, handler_name in patterns:
            if re.match(pattern, route_path):
                handler = getattr(self, handler_name, None)
                if handler:
                    handler(query)
                    return
        self.handle_404()
    # Verb entry points required by BaseHTTPRequestHandler.
    def do_GET(self):
        self.do_request("GET")
    def do_POST(self):
        self.do_request("POST")
    def do_PATCH(self):
        self.do_request("PATCH")
    def do_PUT(self):
        self.do_request("PUT")
    def handle_GET_version(self, query):
        """GET /api/v1/version — fixed fake version string, no auth."""
        json_response(self, 200, {"version": "11.0.0-mock"})
    def handle_GET_users_username(self, query):
        """GET /api/v1/users/{username}"""
        # Extract username from path:
        # "/api/v1/users/{u}" splits to ['', 'api', 'v1', 'users', '{u}']
        parts = self.path.split("/")
        if len(parts) >= 5:
            username = parts[4]
        else:
            json_response(self, 404, {"message": "user does not exist"})
            return
        if username in state["users"]:
            json_response(self, 200, state["users"][username])
        else:
            json_response(self, 404, {"message": "user does not exist"})
    def handle_GET_repos_owner_repo(self, query):
        """GET /api/v1/repos/{owner}/{repo}"""
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        key = f"{owner}/{repo}"
        if key in state["repos"]:
            json_response(self, 200, state["repos"][key])
        else:
            json_response(self, 404, {"message": "repository not found"})
    def handle_GET_repos_owner_repo_labels(self, query):
        """GET /api/v1/repos/{owner}/{repo}/labels

        Returns the stored labels, or an empty list for unknown repos.
        """
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        # NOTE(review): return value ignored — unauthenticated requests
        # are effectively accepted here.
        require_token(self)
        key = f"{owner}/{repo}"
        if key in state["labels"]:
            json_response(self, 200, state["labels"][key])
        else:
            json_response(self, 200, [])
    def handle_GET_user_applications_oauth2(self, query):
        """GET /api/v1/user/applications/oauth2 — list all OAuth2 apps."""
        # NOTE(review): return value ignored — auth is not enforced.
        require_token(self)
        json_response(self, 200, state["oauth2_apps"])
    def handle_GET_mock_shutdown(self, query):
        """GET /mock/shutdown — raise the shutdown flag (test hook).

        Only sets the module-level flag; the server itself is stopped via
        the signal handlers installed in main().
        """
        global SHUTDOWN_REQUESTED
        SHUTDOWN_REQUESTED = True
        json_response(self, 200, {"status": "shutdown"})
    def handle_POST_admin_users(self, query):
        """POST /api/v1/admin/users — create a user (201) from JSON body."""
        # NOTE(review): return value ignored — auth is not enforced.
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        username = data.get("username")
        email = data.get("email")
        if not username or not email:
            json_response(self, 400, {"message": "username and email are required"})
            return
        user_id = next_ids["users"]
        next_ids["users"] += 1
        user = {
            "id": user_id,
            "login": username,
            "email": email,
            "full_name": data.get("full_name", ""),
            "is_admin": data.get("admin", False),
            "must_change_password": data.get("must_change_password", False),
            "login_name": data.get("login_name", username),
            "visibility": data.get("visibility", "public"),
            # Deterministic avatar derived from the email, as Forgejo does.
            "avatar_url": f"https://seccdn.libravatar.org/avatar/{hashlib.md5(email.encode()).hexdigest()}",
        }
        state["users"][username] = user
        json_response(self, 201, user)
    def handle_POST_users_username_tokens(self, query):
        """POST /api/v1/users/{username}/tokens — create an access token.

        NOTE(review): the {username} path segment is ignored; the token
        owner is taken from the Basic auth credentials.
        """
        username = require_basic_auth(self)
        if not username:
            json_response(self, 401, {"message": "invalid authentication"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        token_name = data.get("name")
        if not token_name:
            json_response(self, 400, {"message": "name is required"})
            return
        token_id = next_ids["tokens"]
        next_ids["tokens"] += 1
        # Deterministic token: sha256(username + name)[:40]
        token_str = hashlib.sha256(f"{username}{token_name}".encode()).hexdigest()[:40]
        token = {
            "id": token_id,
            "name": token_name,
            "sha1": token_str,
            "scopes": data.get("scopes", ["all"]),
            "created_at": "2026-04-01T00:00:00Z",
            "expires_at": None,
            "username": username,  # Store username for lookup
        }
        state["tokens"][token_str] = token
        json_response(self, 201, token)
    def handle_POST_orgs(self, query):
        """POST /api/v1/orgs — create an organization (201)."""
        # NOTE(review): return value ignored — auth is not enforced.
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        username = data.get("username")
        if not username:
            json_response(self, 400, {"message": "username is required"})
            return
        org_id = next_ids["orgs"]
        next_ids["orgs"] += 1
        org = {
            "id": org_id,
            "username": username,
            "full_name": username,
            "avatar_url": f"https://seccdn.libravatar.org/avatar/{hashlib.md5(username.encode()).hexdigest()}",
            "visibility": data.get("visibility", "public"),
        }
        state["orgs"][username] = org
        json_response(self, 201, org)
    def handle_POST_orgs_org_repos(self, query):
        """POST /api/v1/orgs/{org}/repos — create a repo owned by an org.

        NOTE(review): an unknown org raises KeyError on the owner lookup
        below, which surfaces as a JSON 500 via do_request's catch-all.
        """
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 6:
            org = parts[4]
        else:
            json_response(self, 404, {"message": "organization not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        repo_name = data.get("name")
        if not repo_name:
            json_response(self, 400, {"message": "name is required"})
            return
        repo_id = next_ids["repos"]
        next_ids["repos"] += 1
        key = f"{org}/{repo_name}"
        repo = {
            "id": repo_id,
            "full_name": key,
            "name": repo_name,
            "owner": {"id": state["orgs"][org]["id"], "login": org},
            "empty": False,
            "default_branch": data.get("default_branch", "main"),
            "description": data.get("description", ""),
            "private": data.get("private", False),
            "html_url": f"https://example.com/{key}",
            "ssh_url": f"git@example.com:{key}.git",
            "clone_url": f"https://example.com/{key}.git",
            "created_at": "2026-04-01T00:00:00Z",
        }
        state["repos"][key] = repo
        json_response(self, 201, repo)
    def handle_POST_user_repos(self, query):
        """POST /api/v1/user/repos — create a repo for the token's owner.

        This is the one endpoint that actually resolves the token value:
        the owner is whoever the presented token was minted for.
        """
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        repo_name = data.get("name")
        if not repo_name:
            json_response(self, 400, {"message": "name is required"})
            return
        # Get authenticated user from token
        auth_header = self.headers.get("Authorization", "")
        token = auth_header.split(" ", 1)[1] if " " in auth_header else ""
        # Find user by token (use stored username field)
        # NOTE(review): this loop is equivalent to state["tokens"].get(token).
        owner = None
        for tok_sha1, tok in state["tokens"].items():
            if tok_sha1 == token:
                owner = tok.get("username")
                break
        if not owner:
            json_response(self, 401, {"message": "invalid token"})
            return
        repo_id = next_ids["repos"]
        next_ids["repos"] += 1
        key = f"{owner}/{repo_name}"
        repo = {
            "id": repo_id,
            "full_name": key,
            "name": repo_name,
            "owner": {"id": state["users"].get(owner, {}).get("id", 0), "login": owner},
            "empty": False,
            "default_branch": data.get("default_branch", "main"),
            "description": data.get("description", ""),
            "private": data.get("private", False),
            "html_url": f"https://example.com/{key}",
            "ssh_url": f"git@example.com:{key}.git",
            "clone_url": f"https://example.com/{key}.git",
            "created_at": "2026-04-01T00:00:00Z",
        }
        state["repos"][key] = repo
        json_response(self, 201, repo)
    def handle_POST_repos_owner_repo_labels(self, query):
        """POST /api/v1/repos/{owner}/{repo}/labels — create a label."""
        # NOTE(review): return value ignored — auth is not enforced.
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        label_name = data.get("name")
        label_color = data.get("color")
        if not label_name or not label_color:
            json_response(self, 400, {"message": "name and color are required"})
            return
        label_id = next_ids["labels"]
        next_ids["labels"] += 1
        key = f"{owner}/{repo}"
        label = {
            "id": label_id,
            "name": label_name,
            "color": label_color,
            "description": data.get("description", ""),
            "url": f"https://example.com/api/v1/repos/{key}/labels/{label_id}",
        }
        if key not in state["labels"]:
            state["labels"][key] = []
        state["labels"][key].append(label)
        json_response(self, 201, label)
    def handle_POST_repos_owner_repo_branch_protections(self, query):
        """POST /api/v1/repos/{owner}/{repo}/branch_protections"""
        # NOTE(review): return value ignored — auth is not enforced.
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 6:
            owner = parts[4]
            repo = parts[5]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        branch_name = data.get("branch_name", "main")
        key = f"{owner}/{repo}"
        # Generate unique ID for protection (per-repo counter, not global)
        if key in state["protections"]:
            protection_id = len(state["protections"][key]) + 1
        else:
            protection_id = 1
        protection = {
            "id": protection_id,
            "repo_id": state["repos"].get(key, {}).get("id", 0),
            "branch_name": branch_name,
            "rule_name": data.get("rule_name", branch_name),
            "enable_push": data.get("enable_push", False),
            "enable_merge_whitelist": data.get("enable_merge_whitelist", True),
            "merge_whitelist_usernames": data.get("merge_whitelist_usernames", ["admin"]),
            "required_approvals": data.get("required_approvals", 1),
            "apply_to_admins": data.get("apply_to_admins", True),
        }
        if key not in state["protections"]:
            state["protections"][key] = []
        state["protections"][key].append(protection)
        json_response(self, 201, protection)
    def handle_POST_user_applications_oauth2(self, query):
        """POST /api/v1/user/applications/oauth2 — register an OAuth2 app.

        client_id/client_secret are freshly randomized per call (uuid4),
        unlike the deterministic access tokens above.
        """
        require_token(self)
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        app_name = data.get("name")
        if not app_name:
            json_response(self, 400, {"message": "name is required"})
            return
        app_id = next_ids["oauth2_apps"]
        next_ids["oauth2_apps"] += 1
        app = {
            "id": app_id,
            "name": app_name,
            "client_id": str(uuid.uuid4()),
            "client_secret": hashlib.sha256(str(uuid.uuid4()).encode()).hexdigest(),
            "redirect_uris": data.get("redirect_uris", []),
            "confidential_client": data.get("confidential_client", True),
            "created_at": "2026-04-01T00:00:00Z",
        }
        state["oauth2_apps"].append(app)
        json_response(self, 201, app)
    def handle_PATCH_admin_users_username(self, query):
        """PATCH /api/v1/admin/users/{username} — update known user fields.

        Only keys already present on the stored user object are applied;
        unknown keys in the body are silently ignored.
        """
        if not require_token(self):
            json_response(self, 401, {"message": "invalid authentication"})
            return
        # "/api/v1/admin/users/{u}" -> ['', 'api', 'v1', 'admin', 'users', '{u}']
        parts = self.path.split("/")
        if len(parts) >= 6:
            username = parts[5]
        else:
            json_response(self, 404, {"message": "user does not exist"})
            return
        if username not in state["users"]:
            json_response(self, 404, {"message": "user does not exist"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        user = state["users"][username]
        for key, value in data.items():
            # Map 'admin' to 'is_admin' for consistency
            update_key = 'is_admin' if key == 'admin' else key
            if update_key in user:
                user[update_key] = value
        json_response(self, 200, user)
    def handle_PUT_repos_owner_repo_collaborators_collaborator(self, query):
        """PUT /api/v1/repos/{owner}/{repo}/collaborators/{collaborator}

        Records membership only; the permission payload in the body is
        read and discarded. Responds 204 with an empty body.
        """
        # NOTE(review): return value ignored — auth is not enforced.
        require_token(self)
        parts = self.path.split("/")
        if len(parts) >= 8:
            owner = parts[4]
            repo = parts[5]
            collaborator = parts[7]
        else:
            json_response(self, 404, {"message": "repository not found"})
            return
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length).decode("utf-8")
        data = json.loads(body) if body else {}
        key = f"{owner}/{repo}"
        if key not in state["collaborators"]:
            state["collaborators"][key] = set()
        state["collaborators"][key].add(collaborator)
        self.send_response(204)
        self.send_header("Content-Length", 0)
        self.end_headers()
    def handle_404(self):
        """Return 404 for unknown routes."""
        json_response(self, 404, {"message": "route not found"})
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    """Threaded HTTP server for handling concurrent requests.

    NOTE(review): Python 3.7+ ships http.server.ThreadingHTTPServer with
    the same composition; this local class keeps the script compatible and
    explicit about daemon threads.
    """
    # Worker threads die with the main thread instead of delaying exit.
    daemon_threads = True
def main():
    """Start the mock server.

    Reads MOCK_FORGE_PORT from the environment (default 3000), binds on
    all interfaces, and serves until SIGTERM/SIGINT (or Ctrl-C) arrives.
    Side effects: installs signal handlers, sets SHUTDOWN_REQUESTED, and
    logs start/stop lines to stderr.
    """
    port = int(os.environ.get("MOCK_FORGE_PORT", 3000))
    # Fix: SO_REUSEADDR must be applied before bind(), which happens inside
    # the constructor — the previous setsockopt() call after construction
    # had no effect. HTTPServer already defaults allow_reuse_address to
    # true; setting it here makes the intent explicit and bind-safe.
    ThreadingHTTPServer.allow_reuse_address = True
    server = ThreadingHTTPServer(("0.0.0.0", port), ForgejoHandler)
    print(f"Mock Forgejo server starting on port {port}", file=sys.stderr)

    def shutdown_handler(signum, frame):
        global SHUTDOWN_REQUESTED
        SHUTDOWN_REQUESTED = True
        # serve_forever() must be stopped from another thread; calling
        # server.shutdown() inline in a signal handler would deadlock.
        threading.Thread(target=server.shutdown, daemon=True).start()

    signal.signal(signal.SIGTERM, shutdown_handler)
    signal.signal(signal.SIGINT, shutdown_handler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # Fix: actually release the listening socket. The previous code
        # called server.shutdown() here, a no-op once serve_forever() has
        # returned, and never closed the socket.
        server.server_close()
        print("Mock Forgejo server stopped", file=sys.stderr)
# Entry point: run the server only when executed directly, not on import.
if __name__ == "__main__":
    main()