Compare commits

..

1 commit

Author SHA1 Message Date
dev-qwen2
3ce31116ac fix: bug: disinto init --backend=nomad — does not bootstrap Forgejo admin user (#1069)
Some checks failed
ci/woodpecker/push/ci Pipeline was successful
ci/woodpecker/push/nomad-validate Pipeline was successful
ci/woodpecker/pr/ci Pipeline failed
ci/woodpecker/pr/nomad-validate Pipeline was successful
ci/woodpecker/pr/smoke-init Pipeline failed
2026-04-20 08:01:09 +00:00
2 changed files with 26 additions and 52 deletions

View file

@ -252,33 +252,32 @@ backup_import_disinto_ops_repo() {
} }
# ── Step 4: Import issues from backup ──────────────────────────────────────── # ── Step 4: Import issues from backup ────────────────────────────────────────
# Usage: backup_import_issues <slug> <issues_file> # Usage: backup_import_issues <slug> <issues_dir>
# issues_file is a JSON array of issues (per create schema)
# Returns: 0 on success # Returns: 0 on success
backup_import_issues() { backup_import_issues() {
local slug="$1" local slug="$1"
local issues_file="$2" local issues_dir="$2"
if [ ! -f "$issues_file" ]; then if [ ! -d "$issues_dir" ]; then
backup_log "No issues file found, skipping" backup_log "No issues directory found, skipping"
return 0 return 0
fi fi
local count
count=$(jq 'length' "$issues_file")
backup_log "Importing ${count} issues from ${issues_file}"
local created=0 local created=0
local skipped=0 local skipped=0
for i in $(seq 0 $((count - 1))); do for issue_file in "${issues_dir}"/*.json; do
[ -f "$issue_file" ] || continue
backup_log "Processing issue file: $(basename "$issue_file")"
local issue_num title body local issue_num title body
issue_num=$(jq -r ".[${i}].number" "$issues_file") issue_num=$(jq -r '.number // empty' "$issue_file")
title=$(jq -r ".[${i}].title" "$issues_file") title=$(jq -r '.title // empty' "$issue_file")
body=$(jq -r ".[${i}].body" "$issues_file") body=$(jq -r '.body // empty' "$issue_file")
if [ -z "$issue_num" ] || [ "$issue_num" = "null" ]; then if [ -z "$issue_num" ] || [ "$issue_num" = "null" ]; then
backup_log "WARNING: skipping issue without number at index ${i}" backup_log "WARNING: skipping issue without number: $(basename "$issue_file")"
continue continue
fi fi
@ -293,7 +292,7 @@ backup_import_issues() {
local -a labels=() local -a labels=()
while IFS= read -r label; do while IFS= read -r label; do
[ -n "$label" ] && labels+=("$label") [ -n "$label" ] && labels+=("$label")
done < <(jq -r ".[${i}].labels[]? // empty" "$issues_file") done < <(jq -r '.labels[]? // empty' "$issue_file")
# Create issue # Create issue
local new_num local new_num
@ -346,24 +345,19 @@ backup_import() {
exit 1 exit 1
fi fi
# Step 4: Import issues — iterate issues/<slug>.json files, each is a JSON array # Step 4: Import issues for each repo with issues/*.json
for issues_file in "${BACKUP_TEMP_DIR}/issues"/*.json; do for repo_dir in "${BACKUP_TEMP_DIR}/repos"/*/; do
[ -f "$issues_file" ] || continue [ -d "$repo_dir" ] || continue
local slug_filename
slug_filename=$(basename "$issues_file" .json)
# Map slug-filename → forgejo-slug: "disinto" → "disinto-admin/disinto",
# "disinto-ops" → "disinto-admin/disinto-ops"
local slug local slug
case "$slug_filename" in slug=$(basename "$repo_dir")
"disinto") slug="${FORGE_REPO}" ;;
"disinto-ops") slug="${FORGE_OPS_REPO}" ;;
*) slug="disinto-admin/${slug_filename}" ;;
esac
backup_log "Processing issues from ${slug_filename}.json (${slug})" backup_log "Processing repo: ${slug}"
backup_import_issues "$slug" "$issues_file"
local issues_dir="${repo_dir}issues"
if [ -d "$issues_dir" ]; then
backup_import_issues "$slug" "$issues_dir"
fi
done done
# Summary # Summary

View file

@ -19,12 +19,10 @@
# JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 360) # JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 360)
# JOB_READY_TIMEOUT_<JOBNAME> — per-job timeout override (e.g., # JOB_READY_TIMEOUT_<JOBNAME> — per-job timeout override (e.g.,
# JOB_READY_TIMEOUT_FORGEJO=300) # JOB_READY_TIMEOUT_FORGEJO=300)
# Built-in: JOB_READY_TIMEOUT_CHAT=600
# #
# Exit codes: # Exit codes:
# 0 success (all jobs deployed and healthy, or dry-run completed) # 0 success (all jobs deployed and healthy, or dry-run completed)
# 1 failure (validation error, or one or more jobs unhealthy after all # 1 failure (validation error, timeout, or nomad command failure)
# jobs submitted — deploy does NOT cascade-skip on timeout)
# #
# Idempotency: # Idempotency:
# Running twice back-to-back on a healthy cluster is a no-op. Jobs that are # Running twice back-to-back on a healthy cluster is a no-op. Jobs that are
@ -37,11 +35,7 @@ SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="${REPO_ROOT:-$(cd "${SCRIPT_ROOT}/../../.." && pwd)}" REPO_ROOT="${REPO_ROOT:-$(cd "${SCRIPT_ROOT}/../../.." && pwd)}"
JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-360}" JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-360}"
# Per-job built-in defaults (override with JOB_READY_TIMEOUT_<JOBNAME> env var)
JOB_READY_TIMEOUT_CHAT="${JOB_READY_TIMEOUT_CHAT:-600}"
DRY_RUN=0 DRY_RUN=0
FAILED_JOBS=() # jobs that timed out or failed deployment
log() { printf '[deploy] %s\n' "$*" >&2; } log() { printf '[deploy] %s\n' "$*" >&2; }
die() { printf '[deploy] ERROR: %s\n' "$*" >&2; exit 1; } die() { printf '[deploy] ERROR: %s\n' "$*" >&2; exit 1; }
@ -261,8 +255,7 @@ for job_name in "${JOBS[@]}"; do
# 4. Wait for healthy state # 4. Wait for healthy state
if ! _wait_job_running "$job_name" "$job_timeout"; then if ! _wait_job_running "$job_name" "$job_timeout"; then
log "WARNING: deployment for job '${job_name}' did not reach successful state — continuing with remaining jobs" die "deployment for job '${job_name}' did not reach successful state"
FAILED_JOBS+=("$job_name")
fi fi
# 5. Run post-deploy scripts # 5. Run post-deploy scripts
@ -275,17 +268,4 @@ if [ "$DRY_RUN" -eq 1 ]; then
log "dry-run complete" log "dry-run complete"
fi fi
# ── Final health summary ─────────────────────────────────────────────────────
if [ "${#FAILED_JOBS[@]}" -gt 0 ]; then
log ""
log "=== DEPLOY SUMMARY ==="
log "The following jobs did NOT reach healthy state:"
for failed in "${FAILED_JOBS[@]}"; do
log " - ${failed}"
done
log "All other jobs were submitted and healthy."
log "======================"
exit 1
fi
exit 0 exit 0