Files
Sankofa/scripts/verify-backups.sh
defiQUG 9daf1fd378 Apply Composer changes: comprehensive API updates, migrations, middleware, and infrastructure improvements
- Add comprehensive database migrations (001-024) for schema evolution
- Enhance API schema with expanded type definitions and resolvers
- Add new middleware: audit logging, rate limiting, MFA enforcement, security, tenant auth
- Implement new services: AI optimization, billing, blockchain, compliance, marketplace
- Add adapter layer for cloud integrations (Cloudflare, Kubernetes, Proxmox, storage)
- Update Crossplane provider with enhanced VM management capabilities
- Add comprehensive test suite for API endpoints and services
- Update frontend components with improved GraphQL subscriptions and real-time updates
- Enhance security configurations and headers (CSP, CORS, etc.)
- Update documentation and configuration files
- Add new CI/CD workflows and validation scripts
- Implement design system improvements and UI enhancements
2025-12-12 18:01:35 -08:00

202 lines
5.7 KiB
Bash
Executable File

#!/bin/bash
# Backup Verification Script
# Verifies database backups are working correctly.
#
# Environment overrides (defaults set below in the Configuration section):
#   BACKUP_DIR     - directory containing backup files
#   RETENTION_DAYS - backup age (days) before a retention warning
#   DB_NAME        - target database name
set -e

# ANSI color codes for log output; readonly since they are constants.
readonly GREEN='\033[0;32m'
readonly RED='\033[0;31m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'  # No Color (reset)
# --- Logging / check-result helpers ------------------------------------------
# Each helper takes a single message argument and prints a tagged line.

log_info() {
  echo -e "${GREEN}[INFO]${NC} $1"
}

log_error() {
  echo -e "${RED}[ERROR]${NC} $1"
}

log_warn() {
  echo -e "${YELLOW}[WARN]${NC} $1"
}

# Record a passing check. These two helpers are called by every verify_*/
# check_* function in this script but were previously never defined, so each
# check aborted with "command not found".
test_pass() {
  echo -e "${GREEN}[PASS]${NC} $1"
}

# Record a failing check. Callers issue their own 'return 1' after calling
# this, so it deliberately just prints and returns 0.
test_fail() {
  echo -e "${RED}[FAIL]${NC} $1"
}
# Configuration (each value can be overridden via the environment).
BACKUP_DIR="${BACKUP_DIR:-/backups/postgres}"   # where backup files live
RETENTION_DAYS="${RETENTION_DAYS:-7}"           # max age before warning
DB_NAME="${DB_NAME:-sankofa}"                   # target database name

# Later checks shell out to kubectl, so bail out early if it is missing.
command -v kubectl &> /dev/null || {
  log_error "kubectl not found. This script requires kubectl access."
  exit 1
}
# Confirm the configured backup directory exists on disk.
# Returns 0 when $BACKUP_DIR is a directory, 1 otherwise.
verify_backup_directory() {
  log_info "Verifying backup directory..."
  if [ ! -d "$BACKUP_DIR" ]; then
    test_fail "Backup directory does not exist: $BACKUP_DIR"
    return 1
  fi
  test_pass "Backup directory exists"
}
# Check that at least one backup (*.sql or *.dump) was written within the
# last 24 hours. Returns 1 when none are found.
#
# Bug fix: the original find expression
#   -name "*.sql" -o -name "*.dump" -mtime -1
# parses as  -name "*.sql"  OR  ( -name "*.dump" AND -mtime -1 ),
# so *.sql files of ANY age counted as "recent". Parentheses make
# -mtime -1 apply to both name patterns.
check_recent_backups() {
  log_info "Checking for recent backups..."
  local recent_backups
  recent_backups=$(find "$BACKUP_DIR" \( -name "*.sql" -o -name "*.dump" \) -mtime -1 2>/dev/null | wc -l)
  if [ "$recent_backups" -gt 0 ]; then
    log_info "Found $recent_backups backup(s) from last 24 hours"
    test_pass "Recent backups exist"
  else
    test_fail "No backups found in last 24 hours"
    return 1
  fi
}
# Validate the newest backup file: must be non-empty and readable; for .sql
# dumps, also sanity-check that it contains SQL statements.
# Returns 1 when no backup exists or a hard check fails.
#
# Bug fix: the original find expression
#   -name "*.sql" -o -name "*.dump" -type f -printf ...
# attached -type/-printf only to the *.dump branch, so *.sql files were
# never printed and the newest .sql backup could never be located.
# Parentheses group the name alternation so the action applies to both.
verify_backup_integrity() {
  log_info "Verifying backup integrity..."
  local latest_backup
  latest_backup=$(find "$BACKUP_DIR" \( -name "*.sql" -o -name "*.dump" \) -type f -printf '%T@ %p\n' | sort -n | tail -1 | cut -d' ' -f2-)
  if [ -z "$latest_backup" ]; then
    test_fail "No backup file found"
    return 1
  fi
  log_info "Testing backup: $latest_backup"
  # Hard check: file must not be empty.
  if [ -s "$latest_backup" ]; then
    test_pass "Backup file is not empty"
  else
    test_fail "Backup file is empty"
    return 1
  fi
  # Hard check: file must be readable by this user.
  if [ -r "$latest_backup" ]; then
    test_pass "Backup file is readable"
  else
    test_fail "Backup file is not readable"
    return 1
  fi
  # Soft check (SQL dumps only): look for basic SQL structure; a miss is
  # only a warning, not a failure.
  if [[ "$latest_backup" == *.sql ]]; then
    if grep -q "CREATE TABLE\|INSERT INTO" "$latest_backup" 2>/dev/null; then
      test_pass "Backup file contains SQL data"
    else
      log_warn "Backup file may be incomplete (no SQL statements found)"
    fi
  fi
}
# Warn when backups older than $RETENTION_DAYS days are still on disk.
# Always returns 0: stale files are a warning, not a failure.
#
# Bug fix: parenthesized the -name alternation so -mtime +N applies to both
# patterns (previously -mtime bound only to *.dump, so every *.sql file was
# counted as "old" regardless of age).
check_backup_retention() {
  log_info "Checking backup retention policy..."
  local old_backups
  old_backups=$(find "$BACKUP_DIR" \( -name "*.sql" -o -name "*.dump" \) -mtime +"$RETENTION_DAYS" 2>/dev/null | wc -l)
  if [ "$old_backups" -gt 0 ]; then
    log_warn "Found $old_backups backup(s) older than $RETENTION_DAYS days"
    log_warn "Consider cleaning up old backups"
  else
    test_pass "Backup retention policy is being followed"
  fi
}
# Dry-run restore test: locate the newest backup and, for .sql dumps, check
# the header for a PostgreSQL / pg_dump signature. Returns 1 only when no
# backup file exists; an unexpected format is just a warning.
#
# Bug fix: parenthesized the find -name alternation so -type f / -printf
# apply to *.sql files too (previously .sql backups were silently skipped
# and the newest one was never tested).
test_backup_restore() {
  log_info "Testing backup restoration (dry run)..."
  local latest_backup
  latest_backup=$(find "$BACKUP_DIR" \( -name "*.sql" -o -name "*.dump" \) -type f -printf '%T@ %p\n' | sort -n | tail -1 | cut -d' ' -f2-)
  if [ -z "$latest_backup" ]; then
    test_fail "No backup file found for restore test"
    return 1
  fi
  # For SQL dumps, sniff the first 100 lines for a pg_dump signature.
  if [[ "$latest_backup" == *.sql ]]; then
    if head -100 "$latest_backup" | grep -q "PostgreSQL\|pg_dump" 2>/dev/null; then
      test_pass "Backup file appears to be a valid PostgreSQL dump"
    else
      log_warn "Backup file format may be unexpected"
    fi
  fi
}
# Confirm backup automation exists as a Kubernetes CronJob named
# postgres-backup in the "api" namespace. A missing CronJob only produces
# warnings; the function still returns 0.
check_backup_schedule() {
  log_info "Checking backup automation..."
  # Guard clause: no CronJob means backups may be manual-only.
  if ! kubectl get cronjob -n api postgres-backup &> /dev/null; then
    log_warn "No backup CronJob found. Backups may not be automated."
    log_warn "Consider creating a CronJob for automated backups."
    return 0
  fi
  # CronJob present: report its cron schedule expression.
  local schedule=$(kubectl get cronjob -n api postgres-backup -o jsonpath='{.spec.schedule}')
  log_info "Found backup CronJob with schedule: $schedule"
  test_pass "Backup automation configured"
}
# Prove the database is reachable by running a trivial query from inside the
# API deployment's pod. Returns 1 when the query cannot be executed.
# NOTE(review): relies on DATABASE_URL being set in the environment — this
# script never sets it; confirm the caller exports it.
verify_db_connectivity() {
  log_info "Verifying database connectivity..."
  if ! kubectl exec -n api deployment/api -- \
      psql "${DATABASE_URL}" -c "SELECT 1" > /dev/null 2>&1; then
    test_fail "Cannot connect to database"
    return 1
  fi
  test_pass "Database is accessible"
}
# Main execution: run every verification, tally pass/fail counts, print a
# summary, and exit 0 only when all checks passed.
main() {
  echo "=========================================="
  echo "Sankofa Phoenix Backup Verification"
  echo "=========================================="
  echo ""
  local passed=0
  local failed=0
  # Bug fix: the original tallied with `check && ((passed++)) || ((failed++))`.
  # Post-increment from 0 makes ((passed++)) evaluate to 0, which bash treats
  # as a failed command, so the FIRST passing check also incremented `failed`
  # (and the script always exited 1). Plain arithmetic assignment inside an
  # explicit if/else always succeeds.
  local check
  for check in \
    verify_backup_directory \
    check_recent_backups \
    verify_backup_integrity \
    check_backup_retention \
    test_backup_restore \
    check_backup_schedule \
    verify_db_connectivity; do
    if "$check"; then
      passed=$((passed + 1))
    else
      failed=$((failed + 1))
    fi
  done
  echo ""
  echo "=========================================="
  echo "Verification Summary"
  echo "=========================================="
  # Bug fix: use echo -e so the color escapes are rendered instead of being
  # printed literally (plain echo does not interpret \033 sequences).
  echo -e "Passed: ${GREEN}${passed}${NC}"
  echo -e "Failed: ${RED}${failed}${NC}"
  echo ""
  if [ "$failed" -eq 0 ]; then
    log_info "All backup verifications passed!"
    exit 0
  else
    log_error "Some verifications failed. Please review backup configuration."
    exit 1
  fi
}
main "$@"