Merge pull request 'fix: config: gardener=1h, architect=9m, planner=11m for disinto factory (+ add PLANNER_INTERVAL env var) (#682)' (#683) from fix/issue-682 into main
All checks were successful
ci/woodpecker/push/ci Pipeline was successful

This commit is contained in:
dev-qwen 2026-04-11 18:03:04 +00:00
commit 526928dca8
4 changed files with 14 additions and 7 deletions

View file

@@ -32,6 +32,9 @@ services:
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- FORGE_ADMIN_PASS=${FORGE_ADMIN_PASS:-}
- DISINTO_AGENTS=review,gardener
- GARDENER_INTERVAL=3600
- ARCHITECT_INTERVAL=540
- PLANNER_INTERVAL=660
depends_on:
- forgejo

View file

@@ -13,7 +13,7 @@ set -euo pipefail
# - dev-poll: every 5 minutes (offset by 2 minutes)
# - gardener: every GARDENER_INTERVAL seconds (default: 21600 = 6 hours)
# - architect: every ARCHITECT_INTERVAL seconds (default: 21600 = 6 hours)
# - planner: every 12 hours (144 iterations * 5 min)
# - planner: every PLANNER_INTERVAL seconds (default: 43200 = 12 hours)
# - predictor: every 24 hours (288 iterations * 5 min)
DISINTO_BAKED="/home/agent/disinto"
@@ -332,9 +332,10 @@ POLL_INTERVAL="${POLL_INTERVAL:-300}"
# Gardener and architect intervals (default 6 hours = 21600 seconds)
GARDENER_INTERVAL="${GARDENER_INTERVAL:-21600}"
ARCHITECT_INTERVAL="${ARCHITECT_INTERVAL:-21600}"
PLANNER_INTERVAL="${PLANNER_INTERVAL:-43200}"
log "Entering polling loop (interval: ${POLL_INTERVAL}s, roles: ${AGENT_ROLES})"
log "Gardener interval: ${GARDENER_INTERVAL}s, Architect interval: ${ARCHITECT_INTERVAL}s"
log "Gardener interval: ${GARDENER_INTERVAL}s, Architect interval: ${ARCHITECT_INTERVAL}s, Planner interval: ${PLANNER_INTERVAL}s"
# Main polling loop using iteration counter for gardener scheduling
iteration=0
@@ -424,13 +425,12 @@ print(cfg.get('primary_branch', 'main'))
fi
fi
# Planner (every 12 hours = 144 iterations * 5 min = 43200 seconds)
# Planner (interval configurable via PLANNER_INTERVAL env var)
if [[ ",${AGENT_ROLES}," == *",planner,"* ]]; then
planner_iteration=$((iteration * POLL_INTERVAL))
planner_interval=$((12 * 60 * 60)) # 12 hours in seconds
if [ $((planner_iteration % planner_interval)) -eq 0 ] && [ "$now" -ge "$planner_iteration" ]; then
if [ $((planner_iteration % PLANNER_INTERVAL)) -eq 0 ] && [ "$now" -ge "$planner_iteration" ]; then
if ! pgrep -f "planner-run.sh" >/dev/null; then
log "Running planner (iteration ${iteration}, 12-hour interval) for ${toml}"
log "Running planner (iteration ${iteration}, ${PLANNER_INTERVAL}s interval) for ${toml}"
gosu agent bash -c "cd ${DISINTO_DIR} && bash planner/planner-run.sh \"${toml}\"" >> "${DISINTO_LOG_DIR}/planner.log" 2>&1 &
else
log "Skipping planner — already running"

View file

@@ -141,6 +141,7 @@ _generate_local_model_services() {
POLL_INTERVAL: "${poll_interval_val}"
GARDENER_INTERVAL: "${GARDENER_INTERVAL:-21600}"
ARCHITECT_INTERVAL: "${ARCHITECT_INTERVAL:-21600}"
PLANNER_INTERVAL: "${PLANNER_INTERVAL:-43200}"
depends_on:
- forgejo
- woodpecker
@@ -357,6 +358,7 @@ services:
POLL_INTERVAL: ${POLL_INTERVAL:-300}
GARDENER_INTERVAL: ${GARDENER_INTERVAL:-21600}
ARCHITECT_INTERVAL: ${ARCHITECT_INTERVAL:-21600}
PLANNER_INTERVAL: ${PLANNER_INTERVAL:-43200}
# IMPORTANT: agents get explicit environment variables (forge tokens, CI tokens, config).
# Vault-only secrets (GITHUB_TOKEN, CLAWHUB_TOKEN, deploy keys) live in
# .env.vault.enc and are NEVER injected here — only the runner

View file

@@ -31,13 +31,15 @@ check_pipeline_stall = false
# configure shorter intervals:
#
# GARDENER_INTERVAL=3600 # 1 hour (default: 21600 = 6 hours)
# ARCHITECT_INTERVAL=600 # 10 minutes (default: 21600 = 6 hours)
# ARCHITECT_INTERVAL=540 # 9 minutes (default: 21600 = 6 hours)
# PLANNER_INTERVAL=660 # 11 minutes (default: 43200 = 12 hours)
#
# These can be set in docker-compose.yml environment section or in a .env file.
#
# [agents.schedule]
# gardener_interval = 21600 # seconds (default: 21600 = 6 hours)
# architect_interval = 21600 # seconds (default: 21600 = 6 hours)
# planner_interval = 43200 # seconds (default: 43200 = 12 hours)
# Local-model agents (optional) — configure to use llama-server or similar
# for local LLM inference. Each agent gets its own container with isolated