diff --git a/lib/disinto/backup.sh b/lib/disinto/backup.sh
index 2c34bba..6e25e83 100644
--- a/lib/disinto/backup.sh
+++ b/lib/disinto/backup.sh
@@ -252,32 +252,33 @@ backup_import_disinto_ops_repo() {
 }
 
 # ── Step 4: Import issues from backup ────────────────────────────────────────
-# Usage: backup_import_issues <slug> <issues_dir>
+# Usage: backup_import_issues <slug> <issues_file>
+#        issues_file is a JSON array of issues (per create schema)
 # Returns: 0 on success
 backup_import_issues() {
   local slug="$1"
-  local issues_dir="$2"
+  local issues_file="$2"
 
-  if [ ! -d "$issues_dir" ]; then
-    backup_log "No issues directory found, skipping"
+  if [ ! -f "$issues_file" ]; then
+    backup_log "No issues file found, skipping"
     return 0
   fi
 
+  local count
+  count=$(jq 'length' "$issues_file")
+  backup_log "Importing ${count} issues from ${issues_file}"
+
   local created=0
   local skipped=0
 
-  for issue_file in "${issues_dir}"/*.json; do
-    [ -f "$issue_file" ] || continue
-
-    backup_log "Processing issue file: $(basename "$issue_file")"
-
+  for i in $(seq 0 $((count - 1))); do
     local issue_num title body
-    issue_num=$(jq -r '.number // empty' "$issue_file")
-    title=$(jq -r '.title // empty' "$issue_file")
-    body=$(jq -r '.body // empty' "$issue_file")
+    issue_num=$(jq -r ".[${i}].number" "$issues_file")
+    title=$(jq -r ".[${i}].title" "$issues_file")
+    body=$(jq -r ".[${i}].body" "$issues_file")
 
     if [ -z "$issue_num" ] || [ "$issue_num" = "null" ]; then
-      backup_log "WARNING: skipping issue without number: $(basename "$issue_file")"
+      backup_log "WARNING: skipping issue without number at index ${i}"
       continue
     fi
 
@@ -292,7 +293,7 @@ backup_import_issues() {
     local -a labels=()
     while IFS= read -r label; do
       [ -n "$label" ] && labels+=("$label")
-    done < <(jq -r '.labels[]? // empty' "$issue_file")
+    done < <(jq -r ".[${i}].labels[]? // empty" "$issues_file")
 
     # Create issue
     local new_num
@@ -345,19 +346,24 @@ backup_import() {
     exit 1
   fi
 
-  # Step 4: Import issues for each repo with issues/*.json
-  for repo_dir in "${BACKUP_TEMP_DIR}/repos"/*/; do
-    [ -d "$repo_dir" ] || continue
-
+  # Step 4: Import issues — iterate issues/<slug>.json files, each is a JSON array
+  for issues_file in "${BACKUP_TEMP_DIR}/issues"/*.json; do
+    [ -f "$issues_file" ] || continue
+
+    local slug_filename
+    slug_filename=$(basename "$issues_file" .json)
+
+    # Map slug-filename → forgejo-slug: "disinto" → "disinto-admin/disinto",
+    # "disinto-ops" → "disinto-admin/disinto-ops"
     local slug
-    slug=$(basename "$repo_dir")
+    case "$slug_filename" in
+      "disinto") slug="${FORGE_REPO}" ;;
+      "disinto-ops") slug="${FORGE_OPS_REPO}" ;;
+      *) slug="disinto-admin/${slug_filename}" ;;
+    esac
 
-    backup_log "Processing repo: ${slug}"
-
-    local issues_dir="${repo_dir}issues"
-    if [ -d "$issues_dir" ]; then
-      backup_import_issues "$slug" "$issues_dir"
-    fi
+    backup_log "Processing issues from ${slug_filename}.json (${slug})"
+    backup_import_issues "$slug" "$issues_file"
   done
 
   # Summary
diff --git a/lib/init/nomad/deploy.sh b/lib/init/nomad/deploy.sh
index f1f1b72..453b122 100755
--- a/lib/init/nomad/deploy.sh
+++ b/lib/init/nomad/deploy.sh
@@ -19,10 +19,12 @@
 #   JOB_READY_TIMEOUT_SECS — poll timeout in seconds (default: 360)
 #   JOB_READY_TIMEOUT_<JOB> — per-job timeout override (e.g.,
 #     JOB_READY_TIMEOUT_FORGEJO=300)
+#   Built-in: JOB_READY_TIMEOUT_CHAT=600
 #
 # Exit codes:
 #   0  success (all jobs deployed and healthy, or dry-run completed)
-#   1  failure (validation error, timeout, or nomad command failure)
+#   1  failure (validation error, or one or more jobs unhealthy after all
+#      jobs submitted — deploy does NOT cascade-skip on timeout)
 #
 # Idempotency:
 #   Running twice back-to-back on a healthy cluster is a no-op. Jobs that are
@@ -35,7 +37,11 @@ SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 REPO_ROOT="${REPO_ROOT:-$(cd "${SCRIPT_ROOT}/../../.." && pwd)}"
 JOB_READY_TIMEOUT_SECS="${JOB_READY_TIMEOUT_SECS:-360}"
+# Per-job built-in defaults (override with JOB_READY_TIMEOUT_<JOB> env var)
+JOB_READY_TIMEOUT_CHAT="${JOB_READY_TIMEOUT_CHAT:-600}"
+
 DRY_RUN=0
+FAILED_JOBS=()  # jobs that timed out or failed deployment
 
 log() { printf '[deploy] %s\n' "$*" >&2; }
 die() { printf '[deploy] ERROR: %s\n' "$*" >&2; exit 1; }
 
@@ -255,7 +261,8 @@ for job_name in "${JOBS[@]}"; do
 
   # 4. Wait for healthy state
   if ! _wait_job_running "$job_name" "$job_timeout"; then
-    die "deployment for job '${job_name}' did not reach successful state"
+    log "WARNING: deployment for job '${job_name}' did not reach successful state — continuing with remaining jobs"
+    FAILED_JOBS+=("$job_name")
   fi
 
   # 5. Run post-deploy scripts
@@ -268,4 +275,17 @@ if [ "$DRY_RUN" -eq 1 ]; then
   log "dry-run complete"
 fi
 
+# ── Final health summary ─────────────────────────────────────────────────────
+if [ "${#FAILED_JOBS[@]}" -gt 0 ]; then
+  log ""
+  log "=== DEPLOY SUMMARY ==="
+  log "The following jobs did NOT reach healthy state:"
+  for failed in "${FAILED_JOBS[@]}"; do
+    log "  - ${failed}"
+  done
+  log "All other jobs were submitted and healthy."
+  log "======================"
+  exit 1
+fi
+
 exit 0