Initial commit: loc_az_hci (smom-dbis-138 excluded via .gitignore)
Some checks failed
Test / test (push) Has been cancelled

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
defiQUG
2026-02-08 09:04:46 -08:00
commit c39465c2bd
386 changed files with 50649 additions and 0 deletions

View File

@@ -0,0 +1,200 @@
#!/bin/bash
source ~/.bashrc
# Add SSH Keys to VMs that are already using DHCP
# Since VMs are already on DHCP, we just need to add SSH keys via cloud-init
# NOTE(review): sourcing ~/.bashrc in a non-interactive script is unusual and
# happens before strict mode is enabled — confirm it is actually required.
set -euo pipefail
# Resolve this script's directory and the repository root relative to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
# Keeps only KEY=VALUE lines (drops comment/blank lines, strips trailing
# comments); `set -a` exports everything sourced from the filtered .env.
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
source <(grep -v '^#' "$PROJECT_ROOT/.env" | grep -v '^$' | sed 's/#.*$//' | grep '=')
set +a
fi
# Colors
# ANSI escape sequences consumed by the log_* helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Print an informational message with a green [INFO] tag.
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
# Print a warning with a yellow [WARN] tag.
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
# Print an error with a red [ERROR] tag.
# NOTE(review): output goes to stdout, not stderr.
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Print a banner-framed section header in blue.
log_step() {
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}$1${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
}
# Proxmox connection settings — overridable via .env / environment.
PVE_USERNAME="${PVE_USERNAME:-root@pam}"
# Empty default: API authentication will fail if PVE_ROOT_PASS is unset.
PVE_PASSWORD="${PVE_ROOT_PASS:-}"
PROXMOX_URL="${PROXMOX_ML110_URL:-https://192.168.1.206:8006}"
PROXMOX_NODE="${PROXMOX_NODE:-pve}"
# Public key that cloud-init will install on each VM.
SSH_KEY_FILE="$HOME/.ssh/id_ed25519_proxmox.pub"
# VM definitions: vmid name
VMS=(
"100 cloudflare-tunnel"
"101 k3s-master"
"102 git-server"
"103 observability"
)
# Authenticate against the Proxmox API.
# Globals:  PVE_USERNAME, PVE_PASSWORD, PROXMOX_URL (read)
# Outputs:  "ticket|csrf_token" on stdout, or the empty string on failure
#           (callers test with [ -z ... ]).
get_api_token() {
  local response ticket csrf_token
  # --data-urlencode protects credentials containing &, =, @ or spaces;
  # the previous raw "username=...&password=..." string broke for such
  # passwords. Declaration is split from assignment so curl's exit status
  # is not masked by `local`.
  response=$(curl -s -k --connect-timeout 10 --max-time 15 \
    --data-urlencode "username=$PVE_USERNAME" \
    --data-urlencode "password=$PVE_PASSWORD" \
    "$PROXMOX_URL/api2/json/access/ticket" 2>&1)
  if echo "$response" | grep -q '"data"'; then
    # NOTE(review): grep-based JSON parsing assumes the ticket contains no
    # escaped quotes; python3/jq would be more robust.
    ticket=$(echo "$response" | grep -o '"ticket":"[^"]*' | cut -d'"' -f4)
    csrf_token=$(echo "$response" | grep -o '"CSRFPreventionToken":"[^"]*' | cut -d'"' -f4)
    echo "$ticket|$csrf_token"
  else
    echo ""
  fi
}
# Configure cloud-init on one VM so that SSH_KEY_FILE is installed for the
# 'ubuntu' user.
# Arguments: $1 - vmid, $2 - human-readable VM name (logging only)
# Returns:   1 when the key file is missing or API auth fails.
add_ssh_key_to_vm() {
  local vmid=$1
  local name=$2
  log_info "Adding SSH key to VM $vmid ($name)..."
  if [ ! -f "$SSH_KEY_FILE" ]; then
    log_error "SSH key file not found: $SSH_KEY_FILE"
    return 1
  fi
  local tokens
  tokens=$(get_api_token)
  if [ -z "$tokens" ]; then
    log_error "Failed to authenticate with Proxmox"
    return 1
  fi
  local ticket csrf_token
  ticket=$(echo "$tokens" | cut -d'|' -f1)
  csrf_token=$(echo "$tokens" | cut -d'|' -f2)
  # The Proxmox 'sshkeys' config value must itself be URL-encoded (the API
  # stores/expects an encoded value, so one extra pass beyond curl's own
  # --data-urlencode encoding is required). The previous code sent base64,
  # which cloud-init would have written verbatim into authorized_keys as an
  # unusable blob.
  local ssh_key_enc
  ssh_key_enc=$(python3 -c 'import sys, urllib.parse; print(urllib.parse.quote(sys.stdin.read().strip(), safe=""))' < "$SSH_KEY_FILE")
  # Add SSH key via cloud-init
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    --data-urlencode "sshkeys=$ssh_key_enc" \
    --data-urlencode "ciuser=ubuntu" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null
  log_info "✓ SSH key added to VM $vmid"
}
# Reboot all VMs so cloud-init re-applies the newly-set SSH keys, then use
# the QEMU guest agent (via scripts/lib/proxmox_vm_helpers.sh) to discover
# each VM's DHCP address and smoke-test SSH as 'ubuntu'.
# Returns 1 if the helper library cannot be sourced.
discover_vm_ips() {
log_step "Discovering VM IPs via QEMU Guest Agent"
log_info "Waiting for VMs to apply cloud-init changes..."
sleep 10
log_info "Rebooting VMs to apply SSH keys..."
# NOTE(review): an empty $tokens (auth failure) is not checked here; the
# reboot requests below would then be sent without a valid ticket.
local tokens=$(get_api_token)
local ticket=$(echo "$tokens" | cut -d'|' -f1)
local csrf_token=$(echo "$tokens" | cut -d'|' -f2)
for vm_spec in "${VMS[@]}"; do
read -r vmid name <<< "$vm_spec"
log_info "Rebooting VM $vmid..."
# Best-effort: reboot failures (e.g. VM stopped) are ignored.
curl -s -k -X POST \
-H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/reboot" > /dev/null 2>&1 || true
done
log_info "Waiting 90 seconds for VMs to reboot and apply cloud-init..."
sleep 90
log_info "Discovering IPs via QEMU Guest Agent..."
# Provides get_vm_ip_from_guest_agent; abort discovery if it is missing.
source "$PROJECT_ROOT/scripts/lib/proxmox_vm_helpers.sh" 2>/dev/null || {
log_error "Helper library not found"
return 1
}
local all_ok=true
for vm_spec in "${VMS[@]}"; do
read -r vmid name <<< "$vm_spec"
local ip
ip="$(get_vm_ip_from_guest_agent "$vmid" 2>/dev/null || true)"
if [[ -n "$ip" ]]; then
log_info " ✓ VM $vmid ($name): $ip"
# Test SSH
# Uses the private half of SSH_KEY_FILE (path with .pub stripped).
if ssh -i "${SSH_KEY_FILE%.pub}" -o ConnectTimeout=5 -o StrictHostKeyChecking=no ubuntu@$ip "echo 'SSH OK'" &>/dev/null; then
log_info " ✓ SSH working!"
else
log_warn " ✗ SSH not working yet (may need more time)"
all_ok=false
fi
else
log_warn " ✗ VM $vmid ($name): IP not discovered (guest agent may need more time)"
all_ok=false
fi
done
if [ "$all_ok" = true ]; then
log_info ""
log_info "✓ All VMs have SSH access!"
else
log_warn ""
log_warn "Some VMs may need more time. Wait a few minutes and test again."
fi
}
# Entry point: push the SSH key to every VM via cloud-init, then reboot and
# verify connectivity. Exits 1 if the local public key is missing.
main() {
log_step "Add SSH Keys to DHCP VMs"
log_info "Your VMs are already configured for DHCP - no IP conflicts!"
log_info "We just need to add SSH keys via cloud-init."
echo ""
if [ ! -f "$SSH_KEY_FILE" ]; then
log_error "SSH key file not found: $SSH_KEY_FILE"
exit 1
fi
log_step "Step 1: Adding SSH Keys via Cloud-Init"
for vm_spec in "${VMS[@]}"; do
read -r vmid name <<< "$vm_spec"
# Per-VM failures are downgraded to warnings so the loop continues.
add_ssh_key_to_vm "$vmid" "$name" || log_warn "Failed to add SSH key to VM $vmid"
done
discover_vm_ips
log_step "Summary"
log_info "✓ SSH keys added via cloud-init"
log_info "✓ VMs are using DHCP (no IP conflicts)"
log_info "✓ IPs discovered via QEMU Guest Agent"
log_info ""
log_info "Your scripts already support dynamic IP discovery!"
log_info "Test SSH: ./scripts/ops/ssh-test-all.sh"
}
main "$@"

View File

@@ -0,0 +1,119 @@
#!/bin/bash
source ~/.bashrc
# Fix VM SSH Access via Proxmox Console
# Instructions for manual console access to fix SSH keys
# NOTE(review): plain `set -e` here, while sibling scripts use
# `set -euo pipefail` — consider aligning.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
# Keeps only KEY=VALUE lines from .env; `set -a` exports them all.
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
source <(grep -v '^#' "$PROJECT_ROOT/.env" | grep -v '^$' | sed 's/#.*$//' | grep '=')
set +a
fi
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Green [INFO] tag to stdout.
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
# Yellow [WARN] tag to stdout.
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
# Red [ERROR] tag to stdout (not stderr).
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Public key line the operator is instructed to paste into authorized_keys.
SSH_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBGrtqePuHm2bJLNnQbuzYrpcXoHHhwWv5s2RmqEezbz proxmox-access"
# VMID NAME (IPs will be discovered via guest agent or shown from Proxmox Summary)
VMS=(
"100 cloudflare-tunnel"
"101 k3s-master"
"102 git-server"
"103 observability"
)
# Fallback IPs for reference (when guest agent not available)
# NOTE(review): this vmid→IP mapping disagrees with the recreate scripts
# (there 100→.188, 101→.60) — verify which assignment is current.
declare -A FALLBACK_IPS=(
["100"]="192.168.1.60"
["101"]="192.168.1.188"
["102"]="192.168.1.121"
["103"]="192.168.1.82"
)
# Entry point: print step-by-step console instructions for every VM so an
# operator can manually install the SSH key and the QEMU guest agent.
# Purely informational — makes no changes itself.
main() {
echo "========================================="
echo "Fix VM SSH Access via Console"
echo "========================================="
echo ""
log_info "Since SSH is not working, use Proxmox Console to fix:"
echo ""
for vm_spec in "${VMS[@]}"; do
read -r vmid name <<< "$vm_spec"
# Try to get IP from guest agent (if available)
local ip="${FALLBACK_IPS[$vmid]:-}"
# NOTE(review): the helper library is re-sourced on every loop iteration;
# sourcing once before the loop would suffice.
if [ -f "$PROJECT_ROOT/scripts/lib/proxmox_vm_helpers.sh" ]; then
source "$PROJECT_ROOT/scripts/lib/proxmox_vm_helpers.sh" 2>/dev/null || true
local discovered_ip
discovered_ip="$(get_vm_ip_from_guest_agent "$vmid" 2>/dev/null || true)"
# Prefer a live guest-agent answer over the static fallback table.
[[ -n "$discovered_ip" ]] && ip="$discovered_ip"
fi
echo "VM $vmid: $name"
if [[ -n "$ip" ]]; then
echo "  Expected IP: $ip (check Proxmox Summary if different)"
else
echo "  IP: Check Proxmox Summary for current IP"
fi
echo "  1. Access Proxmox Web UI: https://192.168.1.206:8006"
echo "  2. Navigate to: VM $vmid ($name) → Console"
echo "  3. Login as: ubuntu"
echo "  4. Run these commands:"
echo ""
echo "     mkdir -p ~/.ssh"
echo "     chmod 700 ~/.ssh"
echo "     echo '$SSH_KEY' >> ~/.ssh/authorized_keys"
echo "     chmod 600 ~/.ssh/authorized_keys"
echo ""
echo "  5. Install QEMU Guest Agent:"
echo ""
echo "     sudo apt update"
echo "     sudo apt install -y qemu-guest-agent"
echo "     sudo systemctl enable qemu-guest-agent"
echo "     sudo systemctl start qemu-guest-agent"
echo ""
if [[ -n "$ip" ]]; then
echo "  6. Test SSH from workstation:"
echo ""
echo "     ssh -i ~/.ssh/id_ed25519_proxmox ubuntu@$ip"
else
echo "  6. Test SSH from workstation (use IP from Proxmox Summary):"
echo ""
echo "     ssh -i ~/.ssh/id_ed25519_proxmox ubuntu@<VM_IP>"
fi
echo ""
echo "----------------------------------------"
echo ""
done
log_info "After fixing SSH, you can:"
echo "  - Deploy services via SSH"
echo "  - Use QEMU Guest Agent for automation"
echo "  - Complete remaining tasks"
}
main "$@"

View File

@@ -0,0 +1,128 @@
#!/bin/bash
# Fix VM 100 Guest Agent Restart Issues
# This version uses qm guest exec (no SSH to VM required)
# Use this if you cannot access VM 100 via console or SSH
set -euo pipefail
# NOTE(review): SCRIPT_DIR/PROJECT_ROOT are computed but never used below.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
VM_ID=100
# Private key used to SSH to the Proxmox host (not to the VM).
SSH_KEY="${SSH_KEY:-$HOME/.ssh/id_ed25519_proxmox}"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.1.206}"
echo "=== Fixing VM 100 Guest Agent Restart Issues (via Guest Agent) ==="
echo ""
# Test guest agent connection first
# A failing `qm guest exec ... hostname` means the agent is down or the VM
# is stopped; abort early with guidance.
echo "Testing guest agent connection..."
if ! ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" "qm guest exec $VM_ID -- /bin/hostname" > /dev/null 2>&1; then
echo "ERROR: Guest agent is not responding. Please ensure:"
echo "  1. Guest agent is enabled in VM configuration"
echo "  2. qemu-guest-agent service is running inside VM 100"
echo "  3. VM 100 is running"
exit 1
fi
echo "✅ Guest agent is responding"
echo ""
# Function to execute command via guest agent and extract output
# Run a command inside VM $VM_ID via the QEMU guest agent and print its
# stdout (the "out-data" field of the JSON reply, with \n and \" unescaped).
# Best-effort: the trailing `|| true` keeps failures from aborting the script.
# Arguments: $1 - shell command to run inside the VM
exec_via_qga() {
  local cmd="$1"
  local escaped
  # Escape embedded single quotes as '\'' so the command survives the remote
  # shell's single-quoted `bash -c '...'`. The original ${cmd//\'/\\\'}
  # produced \' inside single quotes, which escapes nothing in shell and
  # broke any command containing a quote.
  escaped=${cmd//\'/\'\\\'\'}
  # Execute and parse JSON output, extract out-data field
  ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "qm guest exec $VM_ID -- /bin/bash -c '${escaped}'" 2>&1 | \
    grep -oP '"out-data"\s*:\s*"[^"]*"' | \
    sed 's/"out-data"\s*:\s*"//;s/"$//' | \
    sed 's/\\n/\n/g' | \
    sed 's/\\"/"/g' || true
}
# Function to execute command and get exit code
# Run a command inside VM $VM_ID via the guest agent, discarding output.
# Arguments: $1 - shell command to run inside the VM
# Returns:   0 iff the remote command's exit code (per the JSON reply) was 0.
exec_via_qga_silent() {
  local cmd="$1"
  local escaped result
  # ' -> '\'' so the command survives the remote shell's single quotes
  # (the original \' substitution did not escape anything — see exec_via_qga).
  escaped=${cmd//\'/\'\\\'\'}
  result=$(ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "qm guest exec $VM_ID -- /bin/bash -c '${escaped}'" 2>&1)
  # Check if exitcode is 0 in JSON output
  echo "$result" | grep -q '"exitcode"\s*:\s*0' && return 0 || return 1
}
# Main sequence: install a systemd drop-in inside VM 100 (RestartSec=5,
# TimeoutStartSec=30) to stop qemu-guest-agent restart loops, then restart
# and verify the service — all through `qm guest exec`, no SSH into the VM.
echo "=== Current Guest Agent Status ==="
exec_via_qga "systemctl status qemu-guest-agent --no-pager | head -10" || true
echo ""
echo "=== Creating systemd override directory ==="
exec_via_qga "sudo mkdir -p /etc/systemd/system/qemu-guest-agent.service.d/"
echo "✅ Directory created"
echo ""
echo "=== Creating override configuration ==="
# Create the override file using echo (heredoc doesn't work well via qm guest exec)
exec_via_qga "sudo bash -c 'echo \"[Service]\" > /etc/systemd/system/qemu-guest-agent.service.d/override.conf'"
exec_via_qga "sudo bash -c 'echo \"# Add 5 second delay before restart to prevent restart loops\" >> /etc/systemd/system/qemu-guest-agent.service.d/override.conf'"
exec_via_qga "sudo bash -c 'echo \"RestartSec=5\" >> /etc/systemd/system/qemu-guest-agent.service.d/override.conf'"
exec_via_qga "sudo bash -c 'echo \"# Increase timeout for service start\" >> /etc/systemd/system/qemu-guest-agent.service.d/override.conf'"
exec_via_qga "sudo bash -c 'echo \"TimeoutStartSec=30\" >> /etc/systemd/system/qemu-guest-agent.service.d/override.conf'"
echo "✅ Override configuration created"
echo ""
echo "=== Reloading systemd daemon ==="
exec_via_qga "sudo systemctl daemon-reload"
echo "✅ Systemd daemon reloaded"
echo ""
echo "=== Verifying override configuration ==="
exec_via_qga "systemctl cat qemu-guest-agent.service | grep -A 5 override.conf || echo 'Override not found in output'"
echo ""
echo "=== Restarting guest agent service ==="
# NOTE(review): restarting the agent tears down the very channel used to
# issue the command; the sleep below gives it time to come back.
exec_via_qga "sudo systemctl restart qemu-guest-agent"
echo "✅ Service restarted"
echo ""
echo "=== Waiting for service to stabilize ==="
sleep 5
echo ""
echo "=== Checking service status ==="
exec_via_qga "systemctl status qemu-guest-agent --no-pager | head -15" || true
echo ""
echo "=== Verifying service is running ==="
if exec_via_qga_silent "systemctl is-active --quiet qemu-guest-agent"; then
echo "✅ Guest agent service is active"
else
echo "⚠️  Guest agent service status check failed (may still be starting)"
# Try to get actual status
exec_via_qga "systemctl is-active qemu-guest-agent" || true
fi
echo ""
echo "=== Checking restart configuration ==="
exec_via_qga "systemctl show qemu-guest-agent | grep -E 'RestartSec|Restart=' || true"
echo ""
echo "=== Testing guest agent from Proxmox host ==="
HOSTNAME_OUTPUT=$(ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" "qm guest exec $VM_ID -- /bin/hostname" 2>&1)
if echo "$HOSTNAME_OUTPUT" | grep -q '"exitcode"\s*:\s*0'; then
echo "✅ Guest agent is responding"
HOSTNAME=$(echo "$HOSTNAME_OUTPUT" | grep -oP '"out-data"\s*:\s*"[^"]*"' | sed 's/"out-data"\s*:\s*"//;s/"$//' | sed 's/\\n/\n/g' | head -1)
echo "   VM hostname: $HOSTNAME"
else
echo "⚠️  Guest agent test failed (may need a moment to stabilize)"
fi
echo ""
echo "=== Fix Complete ==="
echo "The guest agent service now has a 5-second restart delay."
echo "This should prevent restart loops and connection timeouts."
echo ""
echo "Monitor the service with:"
echo "  ssh root@${PROXMOX_HOST} 'qm guest exec $VM_ID -- systemctl status qemu-guest-agent'"
echo ""
echo "Or check logs with:"
echo "  ssh root@${PROXMOX_HOST} 'qm guest exec $VM_ID -- journalctl -u qemu-guest-agent -n 20'"

View File

@@ -0,0 +1,106 @@
#!/bin/bash
# Fix VM 100 Guest Agent Restart Issues
# This script adds a restart delay to prevent restart loops
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Source helper functions
# Provides get_vm_ip_or_warn; under set -e a missing library aborts here.
source "$PROJECT_ROOT/scripts/lib/proxmox_vm_helpers.sh"
VM_ID=100
VM_USER="ubuntu"
SSH_KEY="${SSH_KEY:-$HOME/.ssh/id_ed25519_proxmox}"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.1.206}"
echo "=== Fixing VM 100 Guest Agent Restart Issues ==="
echo ""
# Get VM IP
echo "Getting VM 100 IP address..."
ip=$(get_vm_ip_or_warn "$VM_ID" "$PROXMOX_HOST" "$SSH_KEY")
if [ -z "$ip" ]; then
echo "ERROR: Could not get IP for VM $VM_ID"
exit 1
fi
echo "VM 100 IP: $ip"
echo ""
# Two-hop fix: SSH to the Proxmox host, and from there SSH into VM 100 to
# install a systemd drop-in (RestartSec=5, TimeoutStartSec=30) for
# qemu-guest-agent.
# NOTE(review): the outer heredoc is UNQUOTED, so "$SSH_KEY" and "$ip"
# expand on the WORKSTATION before the script is sent; the inner ssh then
# looks for the key at the workstation's path *on the Proxmox host*.
# Confirm that path exists there (e.g. under /root/.ssh) or this hop fails.
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" <<EOF
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "${VM_USER}@${ip}" <<'VMEOF'
set -euo pipefail
echo "=== Current Guest Agent Status ==="
systemctl status qemu-guest-agent --no-pager | head -10 || true
echo ""
echo "=== Creating systemd override directory ==="
sudo mkdir -p /etc/systemd/system/qemu-guest-agent.service.d/
echo "=== Creating override configuration ==="
sudo tee /etc/systemd/system/qemu-guest-agent.service.d/override.conf > /dev/null <<'OVERRIDE'
[Service]
# Add 5 second delay before restart to prevent restart loops
RestartSec=5
# Increase timeout for service start
TimeoutStartSec=30
OVERRIDE
echo "=== Reloading systemd daemon ==="
sudo systemctl daemon-reload
echo "=== Verifying override configuration ==="
systemctl cat qemu-guest-agent.service | grep -A 5 "override.conf" || true
echo ""
echo "=== Restarting guest agent service ==="
sudo systemctl restart qemu-guest-agent
echo "=== Waiting for service to stabilize ==="
sleep 3
echo "=== Checking service status ==="
systemctl status qemu-guest-agent --no-pager | head -15 || true
echo ""
echo "=== Verifying service is running ==="
if systemctl is-active --quiet qemu-guest-agent; then
echo "✅ Guest agent service is active"
else
echo "❌ Guest agent service is not active"
exit 1
fi
echo ""
echo "=== Checking restart configuration ==="
systemctl show qemu-guest-agent | grep -E "RestartSec|Restart=" || true
echo ""
echo "✅ Guest agent restart fix completed successfully"
VMEOF
EOF
echo ""
echo "=== Testing guest agent from Proxmox host ==="
# Verify from the Proxmox host that the restarted agent answers
# `qm guest exec`. Unquoted heredoc: $VM_ID expands locally, which is
# intentional here.
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" <<EOF
echo "Testing guest agent connection..."
if qm guest exec $VM_ID -- hostname > /dev/null 2>&1; then
echo "✅ Guest agent is responding"
qm guest exec $VM_ID -- hostname
else
echo "⚠️  Guest agent test failed (may need a moment to stabilize)"
fi
EOF
echo ""
echo "=== Fix Complete ==="
echo "The guest agent service now has a 5-second restart delay."
echo "This should prevent restart loops and connection timeouts."
echo ""
echo "Monitor the service with:"
echo "  ssh root@${PROXMOX_HOST} 'qm guest exec $VM_ID -- systemctl status qemu-guest-agent'"
View File

@@ -0,0 +1,448 @@
#!/bin/bash
source ~/.bashrc
# Recreate Template VM 9000 with Proper Cloud-Init
# Then Recreate VMs 100-103 from the new template
# NOTE(review): sourcing ~/.bashrc before strict mode is unusual for a
# non-interactive script — confirm it is required.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
# Keeps only KEY=VALUE lines from .env; `set -a` exports them all.
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
source <(grep -v '^#' "$PROJECT_ROOT/.env" | grep -v '^$' | sed 's/#.*$//' | grep '=')
set +a
fi
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Green [INFO] tag to stdout.
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
# Yellow [WARN] tag to stdout.
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
# Red [ERROR] tag to stdout (not stderr).
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Banner-framed section header in blue.
log_step() {
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}$1${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
}
# Proxmox connection settings — overridable via .env / environment.
PVE_USERNAME="${PVE_USERNAME:-root@pam}"
PVE_PASSWORD="${PVE_ROOT_PASS:-}"
PROXMOX_URL="${PROXMOX_ML110_URL:-https://192.168.1.206:8006}"
PROXMOX_NODE="${PROXMOX_NODE:-pve}"
PROXMOX_HOST="${PROXMOX_ML110_IP:-192.168.1.206}"
# Private key for SSH to the Proxmox host; its .pub half feeds cloud-init.
SSH_KEY="${SSH_KEY:-$HOME/.ssh/id_ed25519_proxmox}"
SSH_KEY_FILE="$SSH_KEY.pub"
TEMPLATE_VMID=9000
STORAGE="${STORAGE:-local-lvm}"
# VM definitions: vmid name ip cores memory disk_size
# NOTE(review): the ip column here (100→.188, 101→.60) is reversed relative
# to the console-fix script's fallback table — verify which is intended.
# The ip column is informational only in this script (VMs boot with DHCP).
VMS=(
"100 cloudflare-tunnel 192.168.1.188 2 2048 20"
"101 k3s-master 192.168.1.60 4 4096 40"
"102 git-server 192.168.1.121 2 2048 30"
"103 observability 192.168.1.82 2 2048 30"
)
# Authenticate against the Proxmox API.
# Globals:  PVE_USERNAME, PVE_PASSWORD, PROXMOX_URL (read)
# Outputs:  "ticket|csrf_token" on stdout, or the empty string on failure.
get_api_token() {
  local response ticket csrf_token
  # --data-urlencode protects credentials containing &, =, @ or spaces;
  # the previous raw form body broke for such passwords. Declaration split
  # from assignment so curl's exit status is not masked.
  response=$(curl -s -k --connect-timeout 10 --max-time 15 \
    --data-urlencode "username=$PVE_USERNAME" \
    --data-urlencode "password=$PVE_PASSWORD" \
    "$PROXMOX_URL/api2/json/access/ticket" 2>&1)
  if echo "$response" | grep -q '"data"'; then
    ticket=$(echo "$response" | grep -o '"ticket":"[^"]*' | cut -d'"' -f4)
    csrf_token=$(echo "$response" | grep -o '"CSRFPreventionToken":"[^"]*' | cut -d'"' -f4)
    echo "$ticket|$csrf_token"
  else
    echo ""
  fi
}
# Destroy and rebuild template VM 9000 from the Ubuntu 24.04 cloud image,
# configured via cloud-init: user 'ubuntu', DHCP, our SSH public key.
# Exits 1 if the local public key is missing; prompts for confirmation when
# interactive.
recreate_template() {
  log_step "Step 1: Recreating Template VM 9000"
  if [ ! -f "$SSH_KEY_FILE" ]; then
    log_error "SSH key file not found: $SSH_KEY_FILE"
    exit 1
  fi
  log_info "This will destroy and recreate template VM 9000"
  log_warn "All VMs cloned from this template will need to be recreated"
  echo ""
  # Auto-confirm if running non-interactively
  if [ -t 0 ]; then
    read -p "Continue? (yes/no): " confirm
    if [ "$confirm" != "yes" ]; then
      log_info "Cancelled"
      exit 0
    fi
  else
    log_info "Non-interactive mode: auto-confirming"
  fi
  # Copy the public key BEFORE running the remote build — `qm set --sshkeys`
  # reads it from /tmp on the Proxmox host. (The original ran the template
  # build once before the key was copied — guaranteed to fail under the
  # remote `set -e` — and then ran a second, duplicated copy of the same
  # script. This version copies first and builds exactly once.)
  log_info "Copying SSH key to Proxmox host..."
  scp -i "$SSH_KEY" "$SSH_KEY_FILE" root@$PROXMOX_HOST:/tmp/id_ed25519_proxmox.pub
  log_info "Connecting to Proxmox host to recreate template..."
  # Quoted heredoc: nothing expands locally; STORAGE is forwarded via env.
  ssh -i "$SSH_KEY" root@$PROXMOX_HOST "STORAGE=$STORAGE bash" <<'TEMPLATE_SCRIPT'
set -e
TEMPLATE_VMID=9000
STORAGE="${STORAGE:-local-lvm}"
SSH_KEY_FILE="/tmp/id_ed25519_proxmox.pub"
# Check if template exists and destroy it
if qm status $TEMPLATE_VMID &>/dev/null; then
echo "Stopping and destroying existing template VM $TEMPLATE_VMID..."
qm stop $TEMPLATE_VMID 2>/dev/null || true
sleep 5
qm destroy $TEMPLATE_VMID 2>/dev/null || true
sleep 2
fi
# Download Ubuntu 24.04 cloud image (cached in /tmp across runs)
echo "Downloading Ubuntu 24.04 cloud image..."
IMAGE_URL="https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img"
IMAGE_FILE="/tmp/ubuntu-24.04-server-cloudimg-amd64.img"
if [ ! -f "$IMAGE_FILE" ]; then
wget -q --show-progress -O "$IMAGE_FILE" "$IMAGE_URL" || {
echo "Failed to download image"
exit 1
}
fi
# Create VM
echo "Creating template VM $TEMPLATE_VMID..."
qm create $TEMPLATE_VMID \
--name ubuntu-24.04-cloud-init \
--memory 2048 \
--cores 2 \
--net0 virtio,bridge=vmbr0 \
--scsihw virtio-scsi-pci \
--scsi0 $STORAGE:0,import-from=$IMAGE_FILE,discard=on \
--ide2 $STORAGE:cloudinit \
--boot order=scsi0 \
--serial0 socket \
--vga serial0 \
--agent enabled=1 \
--ostype l26
# Resize disk to 32GB
echo "Resizing disk to 32GB..."
qm disk resize $TEMPLATE_VMID scsi0 32G
# Configure cloud-init with SSH key
echo "Configuring cloud-init..."
qm set $TEMPLATE_VMID \
--ciuser ubuntu \
--cipassword "" \
--sshkeys $SSH_KEY_FILE \
--ipconfig0 ip=dhcp
# Convert to template
echo "Converting to template..."
qm template $TEMPLATE_VMID
echo "✓ Template VM $TEMPLATE_VMID created successfully"
TEMPLATE_SCRIPT
  log_info "✓ Template VM 9000 recreated with proper cloud-init"
}
# Stop (if running) and delete every VM listed in VMS via the Proxmox API.
# Returns 1 on authentication failure.
destroy_existing_vms() {
log_step "Step 2: Destroying Existing VMs"
local tokens=$(get_api_token)
if [ -z "$tokens" ]; then
log_error "Failed to authenticate with Proxmox"
return 1
fi
local ticket=$(echo "$tokens" | cut -d'|' -f1)
local csrf_token=$(echo "$tokens" | cut -d'|' -f2)
for vm_spec in "${VMS[@]}"; do
# Only vmid/name are used here; the remaining columns are consumed to
# keep the read aligned with the VMS spec format.
read -r vmid name ip cores memory disk_size <<< "$vm_spec"
log_info "Destroying VM $vmid ($name)..."
# Stop VM if running
# python3 parses the JSON status; defaults to "stopped" on any error.
local status=$(curl -s -k -H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/current" | \
python3 -c "import sys, json; print(json.load(sys.stdin).get('data', {}).get('status', 'stopped'))" 2>/dev/null || echo "stopped")
if [ "$status" = "running" ]; then
log_info "Stopping VM $vmid..."
curl -s -k -X POST \
-H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/stop" > /dev/null
# NOTE(review): fixed 5s wait rather than polling the stop task —
# deletion below may race a slow shutdown.
sleep 5
fi
# Delete VM
curl -s -k -X DELETE \
-H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid" > /dev/null
log_info "✓ VM $vmid destroyed"
done
}
# Clone each VMS entry from TEMPLATE_VMID, apply cores/memory/disk sizing,
# configure cloud-init (DHCP + SSH key for user 'ubuntu') and start the VM.
# Returns 1 on authentication failure; per-VM clone failures are logged and
# skipped.
create_vms_from_template() {
  log_step "Step 3: Creating VMs from Template"
  local tokens
  tokens=$(get_api_token)
  if [ -z "$tokens" ]; then
    log_error "Failed to authenticate with Proxmox"
    return 1
  fi
  local ticket csrf_token
  ticket=$(echo "$tokens" | cut -d'|' -f1)
  csrf_token=$(echo "$tokens" | cut -d'|' -f2)
  # The Proxmox 'sshkeys' config value must itself be URL-encoded (one extra
  # pass beyond curl's --data-urlencode). The previous base64 encoding would
  # have been installed verbatim as an unusable authorized_keys entry.
  local ssh_key_enc
  ssh_key_enc=$(python3 -c 'import sys, urllib.parse; print(urllib.parse.quote(sys.stdin.read().strip(), safe=""))' < "$SSH_KEY_FILE")
  local vm_spec vmid name ip cores memory disk_size clone_response
  for vm_spec in "${VMS[@]}"; do
    read -r vmid name ip cores memory disk_size <<< "$vm_spec"
    log_info "Creating VM $vmid: $name"
    # Clone from template
    log_info "Cloning from template $TEMPLATE_VMID..."
    clone_response=$(curl -s -k -X POST \
      -H "Cookie: PVEAuthCookie=$ticket" \
      -H "CSRFPreventionToken: $csrf_token" \
      -d "newid=$vmid" \
      -d "name=$name" \
      "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$TEMPLATE_VMID/clone" 2>&1)
    if ! echo "$clone_response" | grep -q '"data"'; then
      log_error "Failed to clone VM: $clone_response"
      continue
    fi
    log_info "Waiting for clone to complete..."
    sleep 10
    # Configure VM
    log_info "Configuring VM $vmid..."
    # Set resources
    curl -s -k -X POST \
      -H "Cookie: PVEAuthCookie=$ticket" \
      -H "CSRFPreventionToken: $csrf_token" \
      -d "cores=$cores" \
      -d "memory=$memory" \
      "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null
    # Resize disk if needed (the template disk is 32G)
    if [ "$disk_size" != "32" ]; then
      log_info "Resizing disk to ${disk_size}G..."
      ssh -i "$SSH_KEY" root@$PROXMOX_HOST "qm disk resize $vmid scsi0 ${disk_size}G" 2>/dev/null || true
    fi
    # Configure cloud-init with SSH keys and DHCP
    log_info "Configuring cloud-init with SSH keys..."
    curl -s -k -X POST \
      -H "Cookie: PVEAuthCookie=$ticket" \
      -H "CSRFPreventionToken: $csrf_token" \
      --data-urlencode "ipconfig0=ip=dhcp" \
      --data-urlencode "ciuser=ubuntu" \
      --data-urlencode "sshkeys=$ssh_key_enc" \
      --data-urlencode "agent=1" \
      "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null
    # Start VM
    log_info "Starting VM $vmid..."
    curl -s -k -X POST \
      -H "Cookie: PVEAuthCookie=$ticket" \
      -H "CSRFPreventionToken: $csrf_token" \
      "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/start" > /dev/null
    log_info "✓ VM $vmid created and started"
  done
}
# Wait for the freshly created VMs to boot, discover their DHCP addresses
# via the QEMU guest agent (run remotely on the Proxmox host) and smoke-test
# SSH as 'ubuntu'. Purely diagnostic — warns rather than fails.
wait_and_test() {
log_step "Step 4: Waiting for VMs to Boot and Testing SSH"
log_info "Waiting 90 seconds for VMs to boot and apply cloud-init..."
sleep 90
log_info "Discovering IPs via QEMU Guest Agent..."
source "$PROJECT_ROOT/scripts/lib/proxmox_vm_helpers.sh" 2>/dev/null || {
log_warn "Helper library not found, will test SSH manually"
}
local all_ok=true
for vm_spec in "${VMS[@]}"; do
read -r vmid name ip cores memory disk_size <<< "$vm_spec"
# Try to get IP from guest agent
local discovered_ip=""
# command -v reports sourced shell functions, so this gates on whether
# the helper library loaded above.
if command -v get_vm_ip_from_guest_agent &>/dev/null; then
# NOTE(review): the remote source path /home/intlc/projects/loc_az_hci
# is hard-coded to one user's checkout on the Proxmox host — verify it
# exists there.
discovered_ip=$(ssh -i "$SSH_KEY" root@$PROXMOX_HOST \
"source /home/intlc/projects/loc_az_hci/scripts/lib/proxmox_vm_helpers.sh 2>/dev/null && \
get_vm_ip_from_guest_agent $vmid 2>/dev/null || echo ''")
fi
if [[ -n "$discovered_ip" ]]; then
log_info "VM $vmid ($name): $discovered_ip"
# Test SSH
if ssh -i "$SSH_KEY" -o ConnectTimeout=5 -o StrictHostKeyChecking=no ubuntu@$discovered_ip "echo 'SSH OK'" &>/dev/null; then
log_info " ✓ SSH working!"
else
log_warn " ✗ SSH not working yet (may need more time)"
all_ok=false
fi
else
log_warn "VM $vmid ($name): IP not discovered yet"
log_info " Try checking router DHCP leases or wait a bit longer"
all_ok=false
fi
done
if [ "$all_ok" = true ]; then
log_info ""
log_info "✓ All VMs recreated successfully with SSH access!"
log_info "You can now run: ./scripts/deploy/complete-all-next-steps.sh"
else
log_warn ""
log_warn "Some VMs may need more time. Wait a few minutes and test again."
log_info "Use: ./scripts/ops/ssh-test-all.sh to test SSH access"
fi
}
# Entry point: confirm (interactive only), then rebuild the template,
# destroy and recreate VMs 100-103, and verify SSH access.
main() {
log_step "Recreate Template and VMs with Proper Cloud-Init"
if [ ! -f "$SSH_KEY_FILE" ]; then
log_error "SSH key file not found: $SSH_KEY_FILE"
exit 1
fi
log_warn "This will:"
log_warn "  1. Destroy and recreate template VM 9000"
log_warn "  2. Destroy existing VMs 100-103"
log_warn "  3. Recreate VMs 100-103 from new template"
log_warn "  4. Configure all VMs with SSH keys via cloud-init"
echo ""
# Auto-confirm if running non-interactively
# (a bare read under set -e would abort when stdin is closed, e.g. in CI)
if [ -t 0 ]; then
read -p "Continue? (yes/no): " confirm
if [ "$confirm" != "yes" ]; then
log_info "Cancelled"
exit 0
fi
else
log_info "Non-interactive mode: auto-confirming"
fi
recreate_template
destroy_existing_vms
create_vms_from_template
wait_and_test
log_step "Summary"
log_info "✓ Template VM 9000 recreated with proper cloud-init"
log_info "✓ VMs 100-103 recreated from template"
log_info "✓ SSH keys configured via cloud-init"
log_info "✓ VMs using DHCP (no IP conflicts)"
log_info ""
log_info "Next: Test SSH access and install QEMU Guest Agent"
}
main "$@"

View File

@@ -0,0 +1,269 @@
#!/bin/bash
source ~/.bashrc
# Recreate VMs with Proper SSH Key Configuration
# Destroys existing VMs and recreates them with cloud-init SSH keys
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
# Keeps only KEY=VALUE lines from .env; `set -a` exports them all.
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
source <(grep -v '^#' "$PROJECT_ROOT/.env" | grep -v '^$' | sed 's/#.*$//' | grep '=')
set +a
fi
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Green [INFO] tag to stdout.
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
# Yellow [WARN] tag to stdout.
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
# Red [ERROR] tag to stdout (not stderr).
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Banner-framed section header in blue.
log_step() {
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}$1${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
}
# Proxmox connection settings — overridable via .env / environment.
PVE_USERNAME="${PVE_USERNAME:-root@pam}"
PVE_PASSWORD="${PVE_ROOT_PASS:-}"
PROXMOX_URL="${PROXMOX_ML110_URL:-https://192.168.1.206:8006}"
PROXMOX_NODE="${PROXMOX_NODE:-pve}"
TEMPLATE_VMID="${TEMPLATE_VMID:-9000}"
SSH_KEY_FILE="$HOME/.ssh/id_ed25519_proxmox.pub"
GATEWAY="${GATEWAY:-192.168.1.254}"
# VM definitions: vmid name ip cores memory disk_size
# Unlike the template-recreation script, the ip column here is used for
# STATIC cloud-init addressing (ip=$ip/24,gw=$GATEWAY).
VMS=(
"100 cloudflare-tunnel 192.168.1.188 2 2048 20"
"101 k3s-master 192.168.1.60 4 4096 40"
"102 git-server 192.168.1.121 2 2048 30"
"103 observability 192.168.1.82 2 2048 30"
)
# Authenticate against the Proxmox API.
# Globals:  PVE_USERNAME, PVE_PASSWORD, PROXMOX_URL (read)
# Outputs:  "ticket|csrf_token" on stdout, or the empty string on failure.
get_api_token() {
  local response ticket csrf_token
  # --data-urlencode protects credentials containing &, =, @ or spaces;
  # the previous raw form body broke for such passwords. Declaration split
  # from assignment so curl's exit status is not masked.
  response=$(curl -s -k --connect-timeout 10 --max-time 15 \
    --data-urlencode "username=$PVE_USERNAME" \
    --data-urlencode "password=$PVE_PASSWORD" \
    "$PROXMOX_URL/api2/json/access/ticket" 2>&1)
  if echo "$response" | grep -q '"data"'; then
    ticket=$(echo "$response" | grep -o '"ticket":"[^"]*' | cut -d'"' -f4)
    csrf_token=$(echo "$response" | grep -o '"CSRFPreventionToken":"[^"]*' | cut -d'"' -f4)
    echo "$ticket|$csrf_token"
  else
    echo ""
  fi
}
# Stop (if running) and delete one VM via the Proxmox API.
# Arguments: $1 - vmid, $2 - human-readable name (logging only)
# Returns:   1 on authentication failure.
destroy_vm() {
local vmid=$1
local name=$2
log_info "Destroying VM $vmid ($name)..."
local tokens=$(get_api_token)
if [ -z "$tokens" ]; then
log_error "Failed to authenticate with Proxmox"
return 1
fi
local ticket=$(echo "$tokens" | cut -d'|' -f1)
local csrf_token=$(echo "$tokens" | cut -d'|' -f2)
# Stop VM if running
# python3 parses the JSON status; defaults to "stopped" on any error.
local status=$(curl -s -k -H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/current" | \
python3 -c "import sys, json; print(json.load(sys.stdin).get('data', {}).get('status', 'stopped'))" 2>/dev/null || echo "stopped")
if [ "$status" = "running" ]; then
log_info "Stopping VM $vmid..."
curl -s -k -X POST \
-H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/stop" > /dev/null
# NOTE(review): fixed 5s wait rather than polling the stop task.
sleep 5
fi
# Delete VM
log_info "Deleting VM $vmid..."
curl -s -k -X DELETE \
-H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid" > /dev/null
log_info "VM $vmid destroyed"
}
# Clone one VM from TEMPLATE_VMID and configure it via cloud-init with a
# STATIC address and our SSH key, then start it.
# Arguments: $1 vmid, $2 name, $3 ip, $4 cores, $5 memory(MB), $6 disk_size(GB)
# Returns:   1 on missing key, auth failure, missing template or clone failure.
# NOTE(review): $6 (disk_size) is accepted but not applied — no disk resize
# is performed in this script.
create_vm_with_ssh() {
  local vmid=$1
  local name=$2
  local ip=$3
  local cores=$4
  local memory=$5
  local disk_size=$6
  log_info "Creating VM $vmid: $name with SSH keys"
  local tokens
  tokens=$(get_api_token)
  if [ -z "$tokens" ]; then
    log_error "Failed to authenticate with Proxmox"
    return 1
  fi
  local ticket csrf_token
  ticket=$(echo "$tokens" | cut -d'|' -f1)
  csrf_token=$(echo "$tokens" | cut -d'|' -f2)
  # Read SSH public key
  if [ ! -f "$SSH_KEY_FILE" ]; then
    log_error "SSH key file not found: $SSH_KEY_FILE"
    return 1
  fi
  # The Proxmox 'sshkeys' config value must itself be URL-encoded (one extra
  # pass beyond curl's --data-urlencode). The previous base64 encoding would
  # have been installed verbatim as an unusable authorized_keys entry.
  local ssh_key_enc
  ssh_key_enc=$(python3 -c 'import sys, urllib.parse; print(urllib.parse.quote(sys.stdin.read().strip(), safe=""))' < "$SSH_KEY_FILE")
  # Check if template exists
  local template_check
  template_check=$(curl -s -k -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$TEMPLATE_VMID/status/current" 2>&1)
  if ! echo "$template_check" | grep -q '"data"'; then
    log_error "Template VM $TEMPLATE_VMID not found"
    return 1
  fi
  # Clone VM from template
  log_info "Cloning from template $TEMPLATE_VMID..."
  local clone_response
  clone_response=$(curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    -d "newid=$vmid" \
    -d "name=$name" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$TEMPLATE_VMID/clone" 2>&1)
  if ! echo "$clone_response" | grep -q '"data"'; then
    log_error "Failed to clone VM: $clone_response"
    return 1
  fi
  log_info "VM cloned, waiting for clone to complete..."
  sleep 10
  # Configure VM
  log_info "Configuring VM $vmid..."
  # Set resources
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    -d "cores=$cores" \
    -d "memory=$memory" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null
  # Configure cloud-init with SSH keys and a static address
  log_info "Configuring cloud-init with SSH keys..."
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    --data-urlencode "ipconfig0=ip=$ip/24,gw=$GATEWAY" \
    --data-urlencode "ciuser=ubuntu" \
    --data-urlencode "sshkeys=$ssh_key_enc" \
    --data-urlencode "agent=1" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null
  log_info "✓ VM $vmid configured with SSH keys"
  # Start VM
  log_info "Starting VM $vmid..."
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/start" > /dev/null
  log_info "✓ VM $vmid started"
}
# Entry point: confirm, destroy VMs 100-103, recreate them from the template
# with static IPs + SSH keys, wait for boot and smoke-test SSH.
main() {
  log_step "Recreate VMs with SSH Key Configuration"
  if [ ! -f "$SSH_KEY_FILE" ]; then
    log_error "SSH key file not found: $SSH_KEY_FILE"
    exit 1
  fi
  log_warn "This will DESTROY and RECREATE all 4 VMs (100-103)"
  log_warn "All data on these VMs will be lost!"
  echo ""
  # Auto-confirm if running non-interactively — consistent with the
  # template-recreation script. A bare `read -p` aborts under `set -e`
  # when stdin is closed (e.g. in CI or cron).
  if [ -t 0 ]; then
    read -p "Are you sure you want to continue? (yes/no): " confirm
    if [ "$confirm" != "yes" ]; then
      log_info "Cancelled"
      exit 0
    fi
  else
    log_info "Non-interactive mode: auto-confirming"
  fi
  # Destroy existing VMs
  log_step "Step 1: Destroying Existing VMs"
  for vm_spec in "${VMS[@]}"; do
    read -r vmid name ip cores memory disk_size <<< "$vm_spec"
    destroy_vm "$vmid" "$name" || log_warn "Failed to destroy VM $vmid"
  done
  sleep 5
  # Create new VMs with SSH keys
  log_step "Step 2: Creating VMs with SSH Keys"
  for vm_spec in "${VMS[@]}"; do
    read -r vmid name ip cores memory disk_size <<< "$vm_spec"
    create_vm_with_ssh "$vmid" "$name" "$ip" "$cores" "$memory" "$disk_size" || {
      log_error "Failed to create VM $vmid"
      continue
    }
  done
  log_step "Step 3: Waiting for VMs to Boot"
  log_info "Waiting 60 seconds for VMs to boot and apply cloud-init..."
  sleep 60
  log_step "Step 4: Testing SSH Access"
  log_info "Testing SSH access to VMs..."
  local all_ok=true
  for vm_spec in "${VMS[@]}"; do
    read -r vmid name ip cores memory disk_size <<< "$vm_spec"
    # Private key path = public key path with .pub stripped.
    if ssh -i "${SSH_KEY_FILE%.pub}" -o ConnectTimeout=10 -o StrictHostKeyChecking=no ubuntu@$ip "echo 'SSH OK' && hostname" &>/dev/null; then
      log_info " ✓ VM $vmid ($name) at $ip: SSH working"
    else
      log_error " ✗ VM $vmid ($name) at $ip: SSH not working"
      all_ok=false
    fi
  done
  if [ "$all_ok" = true ]; then
    log_info ""
    log_info "✓ All VMs recreated successfully with SSH access!"
    log_info "You can now run: ./scripts/deploy/complete-all-next-steps.sh"
  else
    log_warn ""
    log_warn "Some VMs may need more time to boot. Wait a few minutes and test again."
  fi
}
main "$@"

275
scripts/fix/setup-nat-for-vms.sh Executable file
View File

@@ -0,0 +1,275 @@
#!/bin/bash
# NOTE(review): sourcing ~/.bashrc in a non-interactive script is unusual -
# most .bashrc files return early when not interactive. Confirm it is needed.
source ~/.bashrc
# Setup NAT for VMs - Make VMs accessible via Proxmox host
# Creates a NAT network so VMs can be accessed via Proxmox host IP
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
# Exports every assignment line of .env (set -a), after dropping comment and
# blank lines. NOTE(review): the inline "sed 's/#.*$//'" also truncates values
# that contain a literal '#' - confirm no .env value uses one.
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
source <(grep -v '^#' "$PROJECT_ROOT/.env" | grep -v '^$' | sed 's/#.*$//' | grep '=')
set +a
fi
# ANSI escape sequences used by the log_* helpers below; NC resets the color.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Print an informational message with a green [INFO] prefix to stdout.
log_info() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${GREEN}[INFO]${NC} $1"
}
# Print a warning message with a yellow [WARN] prefix to stdout.
log_warn() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${YELLOW}[WARN]${NC} $1"
}
# Print an error message with a red [ERROR] prefix to stdout.
log_error() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Print a section banner: blank line, ruled heading in blue, blank line.
log_step() {
  local rule='========================================'
  printf '\n%b\n%b\n%b\n\n' \
    "${BLUE}${rule}${NC}" \
    "${BLUE}$1${NC}" \
    "${BLUE}${rule}${NC}"
}
# Target Proxmox host and key used for root SSH; both overridable via env.
PROXMOX_HOST="${PROXMOX_ML110_IP:-192.168.1.206}"
SSH_KEY="${SSH_KEY:-$HOME/.ssh/id_ed25519_proxmox}"
# NAT network configuration
NAT_NETWORK="10.0.0.0/24"
NAT_BRIDGE="vmbr1"
NAT_GATEWAY="10.0.0.1"
# VM definitions: vmid name nat_ip
VMS=(
"100 cloudflare-tunnel 10.0.0.10"
"101 k3s-master 10.0.0.11"
"102 git-server 10.0.0.12"
"103 observability 10.0.0.13"
)
# Create the NAT bridge on the Proxmox host (persisted in
# /etc/network/interfaces), enable IP forwarding, and install idempotent
# MASQUERADE/FORWARD rules. Safe to re-run: the bridge is only appended once
# and iptables rules are checked (-C) before being added.
# Globals (read): SSH_KEY PROXMOX_HOST NAT_BRIDGE NAT_GATEWAY NAT_NETWORK
setup_nat_bridge() {
log_step "Step 1: Setting up NAT Bridge"
log_info "Creating NAT bridge $NAT_BRIDGE on Proxmox host..."
# Unquoted EOF: NAT_* variables expand locally before the script runs remotely.
ssh -i "$SSH_KEY" root@$PROXMOX_HOST <<EOF
set -e
# Check if bridge already exists
if ip link show $NAT_BRIDGE &>/dev/null; then
echo "Bridge $NAT_BRIDGE already exists"
else
# Create bridge
cat >> /etc/network/interfaces <<INTERFACES
# NAT bridge for VMs
auto $NAT_BRIDGE
iface $NAT_BRIDGE inet static
address $NAT_GATEWAY
netmask 255.255.255.0
bridge_ports none
bridge_stp off
bridge_fd 0
post-up echo 1 > /proc/sys/net/ipv4/ip_forward
post-up iptables -t nat -A POSTROUTING -s $NAT_NETWORK -o vmbr0 -j MASQUERADE
post-up iptables -A FORWARD -s $NAT_NETWORK -j ACCEPT
post-up iptables -A FORWARD -d $NAT_NETWORK -j ACCEPT
INTERFACES
# Bring up bridge
ifup $NAT_BRIDGE
echo "✓ NAT bridge $NAT_BRIDGE created"
fi
# Enable IP forwarding
echo 1 > /proc/sys/net/ipv4/ip_forward
# Setup iptables rules (idempotent)
iptables -t nat -C POSTROUTING -s $NAT_NETWORK -o vmbr0 -j MASQUERADE 2>/dev/null || \
iptables -t nat -A POSTROUTING -s $NAT_NETWORK -o vmbr0 -j MASQUERADE
iptables -C FORWARD -s $NAT_NETWORK -j ACCEPT 2>/dev/null || \
iptables -A FORWARD -s $NAT_NETWORK -j ACCEPT
iptables -C FORWARD -d $NAT_NETWORK -j ACCEPT 2>/dev/null || \
iptables -A FORWARD -d $NAT_NETWORK -j ACCEPT
echo "✓ NAT rules configured"
EOF
log_info "✓ NAT bridge configured"
}
# Point one VM's NIC at the NAT bridge and set its static NAT address via
# cloud-init. Args: $1 vmid, $2 vm name (logging only), $3 NAT IP.
configure_vm_nat() {
local vmid=$1
local name=$2
local nat_ip=$3
log_info "Configuring VM $vmid ($name) with NAT IP $nat_ip..."
# Unquoted EOF: vmid/bridge/IP values expand locally before the remote run.
ssh -i "$SSH_KEY" root@$PROXMOX_HOST <<EOF
# Update VM network to use NAT bridge
# NOTE(review): setting net0 without a MAC lets Proxmox generate a new one
# each run - confirm the guests tolerate a changing MAC address.
qm set $vmid --net0 virtio,bridge=$NAT_BRIDGE
# Configure cloud-init with NAT IP
qm set $vmid --ipconfig0 ip=$nat_ip/24,gw=$NAT_GATEWAY
echo "✓ VM $vmid configured for NAT"
EOF
}
# Install DNAT + FORWARD rules on the Proxmox host so selected VM ports are
# reachable through the host's address, then persist the ruleset.
# The authoritative port table (vmid external_port internal_port) lives inside
# the remote heredoc below. A local PORT_MAPPINGS copy used to be declared
# here but was never read - removed so the two tables cannot drift apart.
# Globals (read): SSH_KEY PROXMOX_HOST
setup_port_forwarding() {
log_step "Step 3: Setting up Port Forwarding"
log_info "Setting up port forwarding rules..."
# Quoted 'EOF': nothing below expands locally; the script runs remotely.
ssh -i "$SSH_KEY" root@$PROXMOX_HOST <<'EOF'
set -e
# Get NAT IPs for VMs
declare -A VM_NAT_IPS=(
["100"]="10.0.0.10"
["101"]="10.0.0.11"
["102"]="10.0.0.12"
["103"]="10.0.0.13"
)
# Port forwarding rules
# Format: vmid external_port internal_port
PORT_MAPPINGS=(
"100 2222 22"
"101 2223 22"
"102 2224 22"
"103 2225 22"
"102 3000 3000"
"103 9090 9090"
"103 3001 3000"
)
for mapping in "${PORT_MAPPINGS[@]}"; do
read -r vmid ext_port int_port <<< "$mapping"
nat_ip="${VM_NAT_IPS[$vmid]}"
# Check if rule exists
if iptables -t nat -C PREROUTING -p tcp --dport $ext_port -j DNAT --to-destination $nat_ip:$int_port 2>/dev/null; then
echo "Port forwarding $ext_port -> $nat_ip:$int_port already exists"
else
# Add port forwarding
iptables -t nat -A PREROUTING -p tcp --dport $ext_port -j DNAT --to-destination $nat_ip:$int_port
iptables -A FORWARD -p tcp -d $nat_ip --dport $int_port -j ACCEPT
# NOTE(review): $PROXMOX_HOST is not defined on the remote host, so the
# line below prints an empty host part - confirm this is acceptable.
echo "✓ Port forwarding: $PROXMOX_HOST:$ext_port -> $nat_ip:$int_port"
fi
done
# Save iptables rules
if command -v netfilter-persistent &>/dev/null; then
netfilter-persistent save
elif [ -f /etc/iptables/rules.v4 ]; then
iptables-save > /etc/iptables/rules.v4
fi
echo "✓ Port forwarding configured"
EOF
log_info "✓ Port forwarding configured"
}
# Print how to reach each VM: host port-forwarded SSH, exposed web services,
# and the NAT-internal route usable from the Proxmox host itself.
show_access_info() {
  log_step "Access Information"
  log_info "VM Access via NAT:"
  # One block per VM, emitted verbatim (unquoted EOF expands SSH_KEY/host).
  cat <<EOF

 VM 100 (cloudflare-tunnel):
 SSH: ssh -i $SSH_KEY ubuntu@$PROXMOX_HOST -p 2222
 Direct NAT: ssh -i $SSH_KEY ubuntu@10.0.0.10 (from Proxmox host)

 VM 101 (k3s-master):
 SSH: ssh -i $SSH_KEY ubuntu@$PROXMOX_HOST -p 2223
 Direct NAT: ssh -i $SSH_KEY ubuntu@10.0.0.11 (from Proxmox host)

 VM 102 (git-server):
 SSH: ssh -i $SSH_KEY ubuntu@$PROXMOX_HOST -p 2224
 Gitea: http://$PROXMOX_HOST:3000
 Direct NAT: ssh -i $SSH_KEY ubuntu@10.0.0.12 (from Proxmox host)

 VM 103 (observability):
 SSH: ssh -i $SSH_KEY ubuntu@$PROXMOX_HOST -p 2225
 Prometheus: http://$PROXMOX_HOST:9090
 Grafana: http://$PROXMOX_HOST:3001
 Direct NAT: ssh -i $SSH_KEY ubuntu@10.0.0.13 (from Proxmox host)

EOF
  log_info "To access VMs from Proxmox host:"
  cat <<EOF
 ssh -i $SSH_KEY ubuntu@10.0.0.10 # VM 100
 ssh -i $SSH_KEY ubuntu@10.0.0.11 # VM 101
 ssh -i $SSH_KEY ubuntu@10.0.0.12 # VM 102
 ssh -i $SSH_KEY ubuntu@10.0.0.13 # VM 103
EOF
}
# Entry point: confirm with the operator, build the NAT bridge, re-home the
# VMs onto it, add port forwarding, reboot the VMs, and smoke-test SSH
# through the host's forwarded port.
main() {
log_step "Setup NAT for VMs"
log_warn "This will:"
log_warn " 1. Create a NAT bridge (vmbr1) on Proxmox host"
log_warn " 2. Reconfigure VMs to use NAT network"
log_warn " 3. Setup port forwarding for SSH and services"
echo ""
# Interactive safety gate: only the literal answer "yes" proceeds.
read -p "Continue? (yes/no): " confirm
if [ "$confirm" != "yes" ]; then
log_info "Cancelled"
exit 0
fi
setup_nat_bridge
log_step "Step 2: Configuring VMs for NAT"
for vm_spec in "${VMS[@]}"; do
read -r vmid name nat_ip <<< "$vm_spec"
configure_vm_nat "$vmid" "$name" "$nat_ip" || log_warn "Failed to configure VM $vmid"
done
setup_port_forwarding
log_info "Rebooting VMs to apply network changes..."
# NOTE(review): 'qm reboot' relies on the guest agent; failures are swallowed.
ssh -i "$SSH_KEY" root@$PROXMOX_HOST "for vmid in 100 101 102 103; do qm reboot \$vmid 2>/dev/null || true; done"
log_info "Waiting 60 seconds for VMs to reboot..."
sleep 60
show_access_info
log_step "Testing NAT Access"
log_info "Testing SSH via port forwarding..."
# Only VM 100 (port 2222) is probed as a representative sample.
if ssh -i "$SSH_KEY" -o ConnectTimeout=10 -p 2222 ubuntu@$PROXMOX_HOST "echo 'SSH OK' && hostname" &>/dev/null; then
log_info "✓ SSH via NAT is working!"
else
log_warn "SSH may need more time. Wait a few minutes and test again."
fi
}
main "$@"

View File

@@ -0,0 +1,307 @@
#!/bin/bash
# NOTE(review): sourcing ~/.bashrc in a non-interactive script is unusual -
# most .bashrc files return early when not interactive. Confirm it is needed.
source ~/.bashrc
# Setup NAT for VMs AND Reconfigure with SSH Keys
# Combines NAT setup with cloud-init SSH key injection
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
# Exports every assignment line of .env (set -a), after dropping comment and
# blank lines. NOTE(review): the inline "sed 's/#.*$//'" also truncates values
# that contain a literal '#' - confirm no .env value uses one.
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
source <(grep -v '^#' "$PROJECT_ROOT/.env" | grep -v '^$' | sed 's/#.*$//' | grep '=')
set +a
fi
# ANSI escape sequences used by the log_* helpers below; NC resets the color.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Print an informational message with a green [INFO] prefix to stdout.
log_info() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${GREEN}[INFO]${NC} $1"
}
# Print a warning message with a yellow [WARN] prefix to stdout.
log_warn() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${YELLOW}[WARN]${NC} $1"
}
# Print an error message with a red [ERROR] prefix to stdout.
log_error() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Print a section banner: blank line, ruled heading in blue, blank line.
log_step() {
  local rule='========================================'
  printf '\n%b\n%b\n%b\n\n' \
    "${BLUE}${rule}${NC}" \
    "${BLUE}$1${NC}" \
    "${BLUE}${rule}${NC}"
}
# Proxmox host, SSH key pair, and API connection settings; all overridable
# via environment. SSH_KEY_FILE is the public half used for cloud-init.
PROXMOX_HOST="${PROXMOX_ML110_IP:-192.168.1.206}"
SSH_KEY="${SSH_KEY:-$HOME/.ssh/id_ed25519_proxmox}"
SSH_KEY_FILE="$SSH_KEY.pub"
PVE_USERNAME="${PVE_USERNAME:-root@pam}"
PVE_PASSWORD="${PVE_ROOT_PASS:-}"
PROXMOX_URL="${PROXMOX_ML110_URL:-https://192.168.1.206:8006}"
PROXMOX_NODE="${PROXMOX_NODE:-pve}"
# NAT network configuration
NAT_NETWORK="10.0.0.0/24"
NAT_BRIDGE="vmbr1"
NAT_GATEWAY="10.0.0.1"
# VM definitions: vmid name nat_ip
VMS=(
"100 cloudflare-tunnel 10.0.0.10"
"101 k3s-master 10.0.0.11"
"102 git-server 10.0.0.12"
"103 observability 10.0.0.13"
)
# Authenticate against the Proxmox API and print "ticket|csrf_token" on
# stdout; prints an empty string on any failure (network error, bad creds).
# Globals (read): PVE_USERNAME PVE_PASSWORD PROXMOX_URL
get_api_token() {
  local response ticket csrf_token
  # Declaration split from assignment so curl's exit status is not masked by
  # 'local'; on failure we blank $response and fall through to the empty echo.
  # --data-urlencode keeps credentials containing '&' or '=' intact on the wire.
  response=$(curl -s -k --connect-timeout 10 --max-time 15 \
    --data-urlencode "username=$PVE_USERNAME" \
    --data-urlencode "password=$PVE_PASSWORD" \
    "$PROXMOX_URL/api2/json/access/ticket" 2>&1) || response=""
  if echo "$response" | grep -q '"data"'; then
    ticket=$(echo "$response" | grep -o '"ticket":"[^"]*' | cut -d'"' -f4)
    csrf_token=$(echo "$response" | grep -o '"CSRFPreventionToken":"[^"]*' | cut -d'"' -f4)
    echo "$ticket|$csrf_token"
  else
    echo ""
  fi
}
# Create the NAT bridge on the Proxmox host (persisted in
# /etc/network/interfaces), enable IP forwarding, and install idempotent
# MASQUERADE/FORWARD rules. Safe to re-run: the bridge is only appended once
# and iptables rules are checked (-C) before being added.
# Globals (read): SSH_KEY PROXMOX_HOST NAT_BRIDGE NAT_GATEWAY NAT_NETWORK
setup_nat_bridge() {
log_step "Step 1: Setting up NAT Bridge"
log_info "Creating NAT bridge $NAT_BRIDGE on Proxmox host..."
# Unquoted EOF: NAT_* variables expand locally before the script runs remotely.
ssh -i "$SSH_KEY" root@$PROXMOX_HOST <<EOF
set -e
# Check if bridge already exists
if ip link show $NAT_BRIDGE &>/dev/null; then
echo "Bridge $NAT_BRIDGE already exists"
else
# Create bridge
cat >> /etc/network/interfaces <<INTERFACES
# NAT bridge for VMs
auto $NAT_BRIDGE
iface $NAT_BRIDGE inet static
address $NAT_GATEWAY
netmask 255.255.255.0
bridge_ports none
bridge_stp off
bridge_fd 0
post-up echo 1 > /proc/sys/net/ipv4/ip_forward
post-up iptables -t nat -A POSTROUTING -s $NAT_NETWORK -o vmbr0 -j MASQUERADE
post-up iptables -A FORWARD -s $NAT_NETWORK -j ACCEPT
post-up iptables -A FORWARD -d $NAT_NETWORK -j ACCEPT
INTERFACES
# Bring up bridge
ifup $NAT_BRIDGE
echo "✓ NAT bridge $NAT_BRIDGE created"
fi
# Enable IP forwarding
echo 1 > /proc/sys/net/ipv4/ip_forward
# Setup iptables rules (idempotent)
iptables -t nat -C POSTROUTING -s $NAT_NETWORK -o vmbr0 -j MASQUERADE 2>/dev/null || \
iptables -t nat -A POSTROUTING -s $NAT_NETWORK -o vmbr0 -j MASQUERADE
iptables -C FORWARD -s $NAT_NETWORK -j ACCEPT 2>/dev/null || \
iptables -A FORWARD -s $NAT_NETWORK -j ACCEPT
iptables -C FORWARD -d $NAT_NETWORK -j ACCEPT 2>/dev/null || \
iptables -A FORWARD -d $NAT_NETWORK -j ACCEPT
echo "✓ NAT rules configured"
EOF
log_info "✓ NAT bridge configured"
}
# Reattach one VM's NIC to the NAT bridge and push its NAT IP, cloud-init
# user, SSH public key, and agent flag via the Proxmox API.
# Args: $1 vmid, $2 vm name (logging only), $3 NAT IP for the VM.
# Globals (read): SSH_KEY_FILE NAT_BRIDGE NAT_GATEWAY PROXMOX_URL PROXMOX_NODE
# Returns: 1 when the key file is missing or API auth fails.
configure_vm_nat_with_ssh() {
  local vmid=$1
  local name=$2
  local nat_ip=$3
  log_info "Configuring VM $vmid ($name) with NAT IP $nat_ip and SSH keys..."
  if [ ! -f "$SSH_KEY_FILE" ]; then
    log_error "SSH key file not found: $SSH_KEY_FILE"
    return 1
  fi
  # Declarations split from assignments so command failures are not masked.
  local ssh_key_content tokens ticket csrf_token
  ssh_key_content=$(cat "$SSH_KEY_FILE")
  tokens=$(get_api_token)
  if [ -z "$tokens" ]; then
    log_error "Failed to authenticate with Proxmox"
    return 1
  fi
  ticket=$(echo "$tokens" | cut -d'|' -f1)
  csrf_token=$(echo "$tokens" | cut -d'|' -f2)
  # Switch the NIC onto the NAT bridge.
  # NOTE(review): specifying net0 without a MAC lets Proxmox generate a new
  # one, which can rename the guest interface - confirm acceptable.
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    -d "net0=virtio,bridge=$NAT_BRIDGE" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null
  # Cloud-init settings. BUGFIX: 'sshkeys' expects the key text itself
  # (URL-encoded on the wire, which --data-urlencode performs); the previous
  # base64 encoding stored an unusable authorized_keys entry.
  # NOTE(review): some Proxmox releases require the value pre-urlencoded a
  # second time - verify against the target API version.
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    --data-urlencode "ipconfig0=ip=$nat_ip/24,gw=$NAT_GATEWAY" \
    --data-urlencode "ciuser=ubuntu" \
    --data-urlencode "sshkeys=$ssh_key_content" \
    --data-urlencode "agent=1" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null
  log_info "✓ VM $vmid configured for NAT with SSH keys"
}
# Install DNAT + FORWARD rules on the Proxmox host so each VM's SSH (and
# selected service ports) are reachable via the host address, then persist
# the ruleset. The port table (vmid external_port internal_port) lives inside
# the remote heredoc. Idempotent: rules are checked (-C) before being added.
# Globals (read): SSH_KEY PROXMOX_HOST
setup_port_forwarding() {
log_step "Step 3: Setting up Port Forwarding"
log_info "Setting up port forwarding rules..."
# Quoted 'EOF': nothing below expands locally; the script runs remotely.
ssh -i "$SSH_KEY" root@$PROXMOX_HOST <<'EOF'
set -e
# Get NAT IPs for VMs
declare -A VM_NAT_IPS=(
["100"]="10.0.0.10"
["101"]="10.0.0.11"
["102"]="10.0.0.12"
["103"]="10.0.0.13"
)
# Port forwarding rules
# Format: vmid external_port internal_port
PORT_MAPPINGS=(
"100 2222 22"
"101 2223 22"
"102 2224 22"
"103 2225 22"
"102 3000 3000"
"103 9090 9090"
"103 3001 3000"
)
for mapping in "${PORT_MAPPINGS[@]}"; do
read -r vmid ext_port int_port <<< "$mapping"
nat_ip="${VM_NAT_IPS[$vmid]}"
# Check if rule exists
if iptables -t nat -C PREROUTING -p tcp --dport $ext_port -j DNAT --to-destination $nat_ip:$int_port 2>/dev/null; then
echo "Port forwarding $ext_port -> $nat_ip:$int_port already exists"
else
# Add port forwarding
iptables -t nat -A PREROUTING -p tcp --dport $ext_port -j DNAT --to-destination $nat_ip:$int_port
iptables -A FORWARD -p tcp -d $nat_ip --dport $int_port -j ACCEPT
# NOTE(review): $PROXMOX_HOST is not defined on the remote host, so the
# line below prints an empty host part - confirm this is acceptable.
echo "✓ Port forwarding: $PROXMOX_HOST:$ext_port -> $nat_ip:$int_port"
fi
done
# Save iptables rules
if command -v netfilter-persistent &>/dev/null; then
netfilter-persistent save
elif [ -f /etc/iptables/rules.v4 ]; then
iptables-save > /etc/iptables/rules.v4
fi
echo "✓ Port forwarding configured"
EOF
log_info "✓ Port forwarding configured"
}
main() {
log_step "Setup NAT with SSH Keys"
if [ ! -f "$SSH_KEY_FILE" ]; then
log_error "SSH key file not found: $SSH_KEY_FILE"
exit 1
fi
log_warn "This will:"
log_warn " 1. Create a NAT bridge (vmbr1) on Proxmox host"
log_warn " 2. Reconfigure VMs to use NAT network"
log_warn " 3. Inject SSH keys via cloud-init"
log_warn " 4. Setup port forwarding for SSH and services"
echo ""
read -p "Continue? (yes/no): " confirm
if [ "$confirm" != "yes" ]; then
log_info "Cancelled"
exit 0
fi
setup_nat_bridge
log_step "Step 2: Configuring VMs for NAT with SSH Keys"
for vm_spec in "${VMS[@]}"; do
read -r vmid name nat_ip <<< "$vm_spec"
configure_vm_nat_with_ssh "$vmid" "$name" "$nat_ip" || log_warn "Failed to configure VM $vmid"
done
setup_port_forwarding
log_info "Rebooting VMs to apply network and SSH key changes..."
ssh -i "$SSH_KEY" root@$PROXMOX_HOST "for vmid in 100 101 102 103; do qm reboot \$vmid 2>/dev/null || true; done"
log_info "Waiting 90 seconds for VMs to reboot and apply cloud-init..."
sleep 90
log_step "Testing Access"
log_info "Testing SSH via port forwarding..."
local all_ok=true
for port in 2222 2223 2224 2225; do
local vm_name=$(echo $port | sed 's/2222/cloudflare-tunnel/;s/2223/k3s-master/;s/2224/git-server/;s/2225/observability/')
if ssh -i "$SSH_KEY" -o ConnectTimeout=10 -p $port ubuntu@$PROXMOX_HOST "echo 'SSH OK' && hostname" &>/dev/null; then
log_info "$vm_name (port $port): SSH working"
else
log_warn "$vm_name (port $port): SSH not working yet (may need more time)"
all_ok=false
fi
done
if [ "$all_ok" = true ]; then
log_info ""
log_info "✓ All VMs accessible via NAT with SSH!"
else
log_warn ""
log_warn "Some VMs may need more time. Wait a few minutes and test again."
fi
log_step "Access Information"
log_info "VM Access:"
echo " VM 100: ssh -i $SSH_KEY -p 2222 ubuntu@$PROXMOX_HOST"
echo " VM 101: ssh -i $SSH_KEY -p 2223 ubuntu@$PROXMOX_HOST"
echo " VM 102: ssh -i $SSH_KEY -p 2224 ubuntu@$PROXMOX_HOST"
echo " VM 103: ssh -i $SSH_KEY -p 2225 ubuntu@$PROXMOX_HOST"
echo ""
log_info "From Proxmox host:"
echo " ssh -i $SSH_KEY ubuntu@10.0.0.10 # VM 100"
echo " ssh -i $SSH_KEY ubuntu@10.0.0.11 # VM 101"
echo " ssh -i $SSH_KEY ubuntu@10.0.0.12 # VM 102"
echo " ssh -i $SSH_KEY ubuntu@10.0.0.13 # VM 103"
}
main "$@"

213
scripts/fix/switch-vms-to-dhcp.sh Executable file
View File

@@ -0,0 +1,213 @@
#!/bin/bash
# NOTE(review): sourcing ~/.bashrc in a non-interactive script is unusual -
# most .bashrc files return early when not interactive. Confirm it is needed.
source ~/.bashrc
# Switch VMs from Static IPs to DHCP
# Removes static IP configuration and lets VMs get IPs from DHCP
# Then uses QEMU Guest Agent to discover IPs dynamically
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
# Exports every assignment line of .env (set -a), after dropping comment and
# blank lines. NOTE(review): the inline "sed 's/#.*$//'" also truncates values
# that contain a literal '#' - confirm no .env value uses one.
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
source <(grep -v '^#' "$PROJECT_ROOT/.env" | grep -v '^$' | sed 's/#.*$//' | grep '=')
set +a
fi
# ANSI escape sequences used by the log_* helpers below; NC resets the color.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Print an informational message with a green [INFO] prefix to stdout.
log_info() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${GREEN}[INFO]${NC} $1"
}
# Print a warning message with a yellow [WARN] prefix to stdout.
log_warn() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${YELLOW}[WARN]${NC} $1"
}
# Print an error message with a red [ERROR] prefix to stdout.
log_error() {
  # %b expands backslash escapes, matching the former `echo -e` behavior.
  printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Print a section banner: blank line, ruled heading in blue, blank line.
log_step() {
  local rule='========================================'
  printf '\n%b\n%b\n%b\n\n' \
    "${BLUE}${rule}${NC}" \
    "${BLUE}$1${NC}" \
    "${BLUE}${rule}${NC}"
}
# Proxmox API credentials/endpoint and SSH key pair; overridable via env.
# SSH_KEY_FILE is the public half injected through cloud-init.
PVE_USERNAME="${PVE_USERNAME:-root@pam}"
PVE_PASSWORD="${PVE_ROOT_PASS:-}"
PROXMOX_URL="${PROXMOX_ML110_URL:-https://192.168.1.206:8006}"
PROXMOX_NODE="${PROXMOX_NODE:-pve}"
SSH_KEY="${SSH_KEY:-$HOME/.ssh/id_ed25519_proxmox}"
SSH_KEY_FILE="$SSH_KEY.pub"
# VM definitions: vmid name
VMS=(
"100 cloudflare-tunnel"
"101 k3s-master"
"102 git-server"
"103 observability"
)
# Authenticate against the Proxmox API and print "ticket|csrf_token" on
# stdout; prints an empty string on any failure (network error, bad creds).
# Globals (read): PVE_USERNAME PVE_PASSWORD PROXMOX_URL
get_api_token() {
  local response ticket csrf_token
  # Declaration split from assignment so curl's exit status is not masked by
  # 'local'; on failure we blank $response and fall through to the empty echo.
  # --data-urlencode keeps credentials containing '&' or '=' intact on the wire.
  response=$(curl -s -k --connect-timeout 10 --max-time 15 \
    --data-urlencode "username=$PVE_USERNAME" \
    --data-urlencode "password=$PVE_PASSWORD" \
    "$PROXMOX_URL/api2/json/access/ticket" 2>&1) || response=""
  if echo "$response" | grep -q '"data"'; then
    ticket=$(echo "$response" | grep -o '"ticket":"[^"]*' | cut -d'"' -f4)
    csrf_token=$(echo "$response" | grep -o '"CSRFPreventionToken":"[^"]*' | cut -d'"' -f4)
    echo "$ticket|$csrf_token"
  else
    echo ""
  fi
}
# Reconfigure one VM for DHCP via cloud-init and (re)inject the SSH key.
# Args: $1 vmid, $2 vm name (logging only).
# Globals (read): SSH_KEY_FILE PROXMOX_URL PROXMOX_NODE
# Returns: 1 when API authentication fails.
switch_vm_to_dhcp() {
  local vmid=$1
  local name=$2
  log_info "Switching VM $vmid ($name) to DHCP..."
  # Declarations split from assignments so failures are not masked by 'local'.
  local tokens ticket csrf_token
  tokens=$(get_api_token)
  if [ -z "$tokens" ]; then
    log_error "Failed to authenticate with Proxmox"
    return 1
  fi
  ticket=$(echo "$tokens" | cut -d'|' -f1)
  csrf_token=$(echo "$tokens" | cut -d'|' -f2)
  # BUGFIX: the Proxmox API has no "DELETE .../config/ipconfig0" endpoint, so
  # the old call always failed and was hidden by "|| true". Requesting
  # ip=dhcp explicitly both clears the static address and guarantees DHCP.
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    --data-urlencode "ipconfig0=ip=dhcp" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null 2>&1 || true
  # Ensure the cloud-init login user exists.
  curl -s -k -X POST \
    -H "Cookie: PVEAuthCookie=$ticket" \
    -H "CSRFPreventionToken: $csrf_token" \
    -d "ciuser=ubuntu" \
    "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null 2>&1 || true
  # Inject the SSH public key when available (best-effort, like the rest).
  if [ -f "$SSH_KEY_FILE" ]; then
    local ssh_key_content
    ssh_key_content=$(cat "$SSH_KEY_FILE")
    # BUGFIX: 'sshkeys' expects the key text itself (URL-encoded on the wire,
    # which --data-urlencode performs); the previous base64 encoding stored an
    # unusable authorized_keys entry.
    # NOTE(review): some Proxmox releases require the value pre-urlencoded a
    # second time - verify against the target API version.
    curl -s -k -X POST \
      -H "Cookie: PVEAuthCookie=$ticket" \
      -H "CSRFPreventionToken: $csrf_token" \
      --data-urlencode "sshkeys=$ssh_key_content" \
      "$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/config" > /dev/null 2>&1 || true
  fi
  log_info "✓ VM $vmid configured for DHCP"
}
# Poll each VM's QEMU guest agent for its DHCP-assigned address and report.
# Sources get_vm_ip_from_guest_agent from scripts/lib/proxmox_vm_helpers.sh;
# returns 1 if that helper library cannot be loaded.
discover_vm_ips() {
log_step "Step 3: Discovering VM IPs via QEMU Guest Agent"
log_info "Waiting for VMs to get DHCP IPs and start guest agent..."
sleep 30
log_info "Discovering IPs..."
source "$PROJECT_ROOT/scripts/lib/proxmox_vm_helpers.sh" 2>/dev/null || {
log_error "Helper library not found"
return 1
}
local all_ok=true
for vm_spec in "${VMS[@]}"; do
read -r vmid name <<< "$vm_spec"
local ip
# Best-effort lookup; an empty result means the agent is not up yet.
ip="$(get_vm_ip_from_guest_agent "$vmid" 2>/dev/null || true)"
if [[ -n "$ip" ]]; then
log_info " ✓ VM $vmid ($name): $ip"
else
log_warn " ✗ VM $vmid ($name): IP not discovered yet (guest agent may need more time)"
all_ok=false
fi
done
if [ "$all_ok" = false ]; then
log_warn ""
log_warn "Some VMs may need more time. Wait a few minutes and check again:"
log_info " ssh root@192.168.1.206"
# NOTE(review): hard-coded absolute project path below - confirm it matches
# the layout on the Proxmox host.
log_info " source /home/intlc/projects/loc_az_hci/scripts/lib/proxmox_vm_helpers.sh"
log_info " get_vm_ip_from_guest_agent 100"
fi
}
# Entry point: confirm with the operator, switch every VM to DHCP with SSH
# keys via cloud-init, reboot them through the API, then discover their new
# addresses via the QEMU guest agent.
main() {
log_step "Switch VMs from Static IPs to DHCP"
log_warn "This will:"
log_warn " 1. Remove static IP configuration from all VMs"
log_warn " 2. Configure VMs to use DHCP"
log_warn " 3. Add SSH keys via cloud-init"
log_warn " 4. Reboot VMs to apply changes"
log_warn ""
log_warn "VMs will get IPs from your router's DHCP server"
log_warn "IPs will be discovered via QEMU Guest Agent"
echo ""
# Interactive safety gate: only the literal answer "yes" proceeds.
read -p "Continue? (yes/no): " confirm
if [ "$confirm" != "yes" ]; then
log_info "Cancelled"
exit 0
fi
log_step "Step 1: Switching VMs to DHCP"
for vm_spec in "${VMS[@]}"; do
read -r vmid name <<< "$vm_spec"
switch_vm_to_dhcp "$vmid" "$name" || log_warn "Failed to configure VM $vmid"
done
log_step "Step 2: Rebooting VMs"
log_info "Rebooting VMs to apply DHCP configuration..."
# NOTE(review): if authentication fails here, tokens is empty and every
# reboot call silently no-ops (the curl errors are discarded) - consider
# aborting when get_api_token returns nothing.
local tokens=$(get_api_token)
local ticket=$(echo "$tokens" | cut -d'|' -f1)
local csrf_token=$(echo "$tokens" | cut -d'|' -f2)
for vm_spec in "${VMS[@]}"; do
read -r vmid name <<< "$vm_spec"
log_info "Rebooting VM $vmid..."
curl -s -k -X POST \
-H "Cookie: PVEAuthCookie=$ticket" \
-H "CSRFPreventionToken: $csrf_token" \
"$PROXMOX_URL/api2/json/nodes/$PROXMOX_NODE/qemu/$vmid/status/reboot" > /dev/null 2>&1 || true
done
discover_vm_ips
log_step "Summary"
log_info "✓ VMs switched to DHCP"
log_info "✓ SSH keys configured via cloud-init"
log_info "✓ IPs will be discovered via QEMU Guest Agent"
log_info ""
log_info "All your scripts already support dynamic IP discovery!"
log_info "They use get_vm_ip_from_guest_agent() to find IPs automatically."
log_info ""
log_info "Test SSH access (after IPs are discovered):"
log_info " ./scripts/ops/ssh-test-all.sh"
}
main "$@"