Files
Sankofa/crossplane-provider-proxmox/pkg/scaling/instance-manager.go
defiQUG 9daf1fd378 Apply Composer changes: comprehensive API updates, migrations, middleware, and infrastructure improvements
- Add comprehensive database migrations (001-024) for schema evolution
- Enhance API schema with expanded type definitions and resolvers
- Add new middleware: audit logging, rate limiting, MFA enforcement, security, tenant auth
- Implement new services: AI optimization, billing, blockchain, compliance, marketplace
- Add adapter layer for cloud integrations (Cloudflare, Kubernetes, Proxmox, storage)
- Update Crossplane provider with enhanced VM management capabilities
- Add comprehensive test suite for API endpoints and services
- Update frontend components with improved GraphQL subscriptions and real-time updates
- Enhance security configurations and headers (CSP, CORS, etc.)
- Update documentation and configuration files
- Add new CI/CD workflows and validation scripts
- Implement design system improvements and UI enhancements
2025-12-12 18:01:35 -08:00

161 lines
4.1 KiB
Go

package scaling
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/sankofa/crossplane-provider-proxmox/apis/v1alpha1"
"github.com/sankofa/crossplane-provider-proxmox/pkg/proxmox"
)
// InstanceManager manages VM instances in a scale set
type InstanceManager struct {
	// proxmoxClient is the Proxmox API client used for all VM
	// create/delete/status operations performed by this manager.
	proxmoxClient *proxmox.Client
}
// NewInstanceManager constructs an InstanceManager backed by the given
// Proxmox API client.
func NewInstanceManager(client *proxmox.Client) *InstanceManager {
	im := &InstanceManager{proxmoxClient: client}
	return im
}
// CreateInstance creates a new VM instance from the given template.
//
// If instanceName is empty, a name is derived from the template name and
// the current Unix timestamp. After the VM is created, the call waits a
// short grace period before querying its status; unlike the previous
// bare time.Sleep, the wait now honors ctx cancellation.
//
// NOTE(review): the fixed 5s wait is a simplified placeholder — a proper
// readiness/health poll against the VM status would be more reliable.
func (m *InstanceManager) CreateInstance(
	ctx context.Context,
	template v1alpha1.ProxmoxVMParameters,
	instanceName string,
) (*v1alpha1.VMInstance, error) {
	// Generate a unique name if the caller did not supply one.
	if instanceName == "" {
		instanceName = fmt.Sprintf("%s-%d", template.Name, time.Now().Unix())
	}

	// Build the Proxmox VM spec from the template parameters.
	vmSpec := proxmox.VMSpec{
		Node:    template.Node,
		Name:    instanceName,
		CPU:     template.CPU,
		Memory:  template.Memory,
		Disk:    template.Disk,
		Storage: template.Storage,
		Network: template.Network,
		Image:   template.Image,
	}

	vm, err := m.proxmoxClient.CreateVM(ctx, vmSpec)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create VM instance")
	}

	// Grace period before the first status query (simplified readiness
	// wait). Returns promptly if the context is cancelled instead of
	// blocking unconditionally for the full duration.
	select {
	case <-ctx.Done():
		return nil, errors.Wrap(ctx.Err(), "cancelled while waiting for VM to become ready")
	case <-time.After(5 * time.Second):
	}

	status, err := m.proxmoxClient.GetVMStatus(ctx, vm.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get VM status")
	}

	return &v1alpha1.VMInstance{
		VMID:      vm.ID,
		Name:      instanceName,
		State:     status.State,
		IPAddress: status.IPAddress,
		CreatedAt: metav1.Now(),
	}, nil
}
// DeleteInstance deletes the VM instance identified by vmID via the
// Proxmox client; any error from the client is returned unchanged.
func (m *InstanceManager) DeleteInstance(ctx context.Context, vmID int) error {
	err := m.proxmoxClient.DeleteVM(ctx, vmID)
	return err
}
// GetInstanceStatus gets the status of a VM instance by its VM ID and
// returns it as a VMInstance with state and IP address populated.
//
// NOTE(review): node is currently unused — the status lookup is keyed by
// vmID alone. Name is left empty; filling it in would require an
// additional API call for the VM details.
func (m *InstanceManager) GetInstanceStatus(
	ctx context.Context,
	vmID int,
	node string,
) (*v1alpha1.VMInstance, error) {
	vmStatus, err := m.proxmoxClient.GetVMStatus(ctx, vmID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get VM status")
	}
	return &v1alpha1.VMInstance{
		VMID:      vmID,
		State:     vmStatus.State,
		IPAddress: vmStatus.IPAddress,
	}, nil
}
// HealthCheck reports whether a VM instance is healthy. An instance is
// considered healthy when its status can be fetched and its state is
// "running"; any status-lookup error counts as unhealthy.
func (m *InstanceManager) HealthCheck(
	ctx context.Context,
	instance v1alpha1.VMInstance,
) bool {
	if vmStatus, err := m.proxmoxClient.GetVMStatus(ctx, instance.VMID); err == nil {
		return vmStatus.State == "running"
	}
	return false
}
// ScaleTo reconciles the scale set toward the desired number of replicas.
//
// Scale up creates (desired - current) instances from template and
// returns them prepended to the existing ones (preserving the original
// ordering). Scale down deletes the trailing (current - desired)
// instances; deletion is best-effort, and an instance whose deletion
// fails is kept in the returned list so it is not orphaned from
// tracking and a later reconcile can retry.
//
// A negative desiredReplicas is rejected with an error (previously it
// would panic when slicing currentInstances).
func (m *InstanceManager) ScaleTo(
	ctx context.Context,
	template v1alpha1.ProxmoxVMParameters,
	currentInstances []v1alpha1.VMInstance,
	desiredReplicas int,
) ([]v1alpha1.VMInstance, error) {
	if desiredReplicas < 0 {
		return nil, errors.Errorf("desired replicas must be non-negative, got %d", desiredReplicas)
	}

	currentCount := len(currentInstances)
	if desiredReplicas == currentCount {
		// Nothing to reconcile.
		return currentInstances, nil
	}

	if desiredReplicas > currentCount {
		// Scale up: create the missing instances.
		toCreate := desiredReplicas - currentCount
		newInstances := make([]v1alpha1.VMInstance, 0, desiredReplicas)
		for i := 0; i < toCreate; i++ {
			// UnixNano plus the loop index keeps names unique within this batch.
			instanceName := fmt.Sprintf("%s-%d", template.Name, time.Now().UnixNano()+int64(i))
			instance, err := m.CreateInstance(ctx, template, instanceName)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to create instance %d", i+1)
			}
			newInstances = append(newInstances, *instance)
		}
		return append(newInstances, currentInstances...), nil
	}

	// Scale down: delete the trailing instances beyond the desired count.
	kept := make([]v1alpha1.VMInstance, 0, currentCount)
	kept = append(kept, currentInstances[:desiredReplicas]...)
	for _, instance := range currentInstances[desiredReplicas:] {
		if err := m.DeleteInstance(ctx, instance.VMID); err != nil {
			// Best-effort: log and keep the instance tracked rather than
			// silently dropping a VM that is still alive in Proxmox.
			fmt.Printf("Failed to delete instance %d: %v\n", instance.VMID, err)
			kept = append(kept, instance)
		}
	}
	return kept, nil
}