- Introduced `Aggregator.sol` for Chainlink-compatible oracle functionality, including round-based updates and access control.
- Added `OracleWithCCIP.sol` to extend Aggregator with CCIP cross-chain messaging capabilities.
- Created `.gitmodules` to include OpenZeppelin contracts as a submodule.
- Developed a comprehensive deployment guide in `NEXT_STEPS_COMPLETE_GUIDE.md` for Phase 2 and smart-contract deployment.
- Implemented Vite configuration for the orchestration portal, supporting both Vue and React frameworks.
- Added server-side logic for the Multi-Cloud Orchestration Portal, including API endpoints for environment management and monitoring.
- Created scripts for resource import and usage validation across non-US regions.
- Added tests for CCIP error handling and integration to ensure robust functionality.
- Included various new files and directories for the orchestration portal and deployment scripts.
174 lines
4.7 KiB
HCL
# Kubernetes Module for Azure AKS
# Creates an AKS cluster with multiple node pools.
# Variables are defined in variables.tf

# Azure AD integration (optional).
# Exposes the current client context (tenant_id, object_id) — consumed
# below by the Key Vault access policy for the cluster's managed identity.
data "azurerm_client_config" "current" {}
# AKS Cluster
resource "azurerm_kubernetes_cluster" "main" {
  name                = var.cluster_name
  location            = var.location
  resource_group_name = var.resource_group_name
  dns_prefix          = var.cluster_name

  # Use the version provided by the root module (kept in terraform/variables.tf)
  kubernetes_version = var.kubernetes_version

  # Network configuration: Azure CNI with Azure network policy.
  # service_cidr / dns_service_ip are the in-cluster service ranges and
  # must not overlap the node subnet (var.node_subnet_id).
  network_profile {
    network_plugin    = "azure"
    network_policy    = "azure"
    load_balancer_sku = "standard"
    service_cidr      = "10.1.0.0/16"
    dns_service_ip    = "10.1.0.10"
  }

  # Default (system) node pool — hosts cluster system workloads.
  default_node_pool {
    name                = "system"
    node_count          = var.node_count["system"]
    vm_size             = var.vm_size["system"]
    vnet_subnet_id      = var.node_subnet_id
    enable_auto_scaling = false
    os_disk_size_gb     = 128
    type                = "VirtualMachineScaleSets"
    # No explicit zone pinning, to avoid SKU/zone constraints.

    node_labels = {
      pool = "system"
    }

    tags = merge(var.tags, {
      Pool = "system"
    })
  }

  # System-assigned managed identity; its principal_id is granted
  # Key Vault access by azurerm_key_vault_access_policy.aks below.
  identity {
    type = "SystemAssigned"
  }

  # Azure Monitor (Container Insights) wired to the workspace defined in this module.
  oms_agent {
    log_analytics_workspace_id = azurerm_log_analytics_workspace.main.id
  }

  # Azure Policy (temporarily disabled to pass cluster creation).
  azure_policy_enabled = false

  # Key Vault integration (disable during initial cluster creation; enable later if needed).

  tags = merge(var.tags, {
    Purpose = "Kubernetes-Cluster"
  })
}
|
# Log Analytics Workspace — backs the cluster's oms_agent (Container Insights).
resource "azurerm_log_analytics_workspace" "main" {
  name                = "${var.cluster_name}-logs"
  location            = var.location
  resource_group_name = var.resource_group_name
  sku                 = "PerGB2018"
  # Longer log retention in production, shorter elsewhere to save cost.
  retention_in_days = var.environment == "prod" ? 90 : 30

  tags = merge(var.tags, {
    Purpose = "Logging"
  })
}
# Node pool for validators.
# Created only when the root module requests at least one validator node.
resource "azurerm_kubernetes_cluster_node_pool" "validators" {
  count = var.node_count["validators"] > 0 ? 1 : 0

  name                  = "validators"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
  node_count            = var.node_count["validators"]
  vm_size               = var.vm_size["validators"]
  vnet_subnet_id        = var.node_subnet_id
  os_disk_size_gb       = 512
  enable_auto_scaling   = false
  # No explicit zone pinning, to avoid SKU/zone constraints.

  node_labels = {
    pool = "validators"
    role = "validator"
  }

  # Taint keeps general workloads off validator nodes; validator pods
  # must tolerate role=validator:NoSchedule to land here.
  node_taints = [
    "role=validator:NoSchedule"
  ]

  tags = merge(var.tags, {
    Pool = "validators"
    Role = "validator"
  })
}
# Node pool for sentries.
# Created only when the root module requests at least one sentry node.
# NOTE(review): unlike the validators pool, this pool is intentionally
# untainted — confirm general workloads are allowed to schedule here.
resource "azurerm_kubernetes_cluster_node_pool" "sentries" {
  count = var.node_count["sentries"] > 0 ? 1 : 0

  name                  = "sentries"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
  node_count            = var.node_count["sentries"]
  vm_size               = var.vm_size["sentries"]
  vnet_subnet_id        = var.node_subnet_id
  os_disk_size_gb       = 256
  enable_auto_scaling   = false
  # No explicit zone pinning, to avoid SKU/zone constraints.

  node_labels = {
    pool = "sentries"
    role = "sentry"
  }

  tags = merge(var.tags, {
    Pool = "sentries"
    Role = "sentry"
  })
}
# Node pool for RPC nodes.
# Created only when the root module requests at least one RPC node.
# Untainted like the sentries pool; only validators are taint-isolated.
resource "azurerm_kubernetes_cluster_node_pool" "rpc" {
  count = var.node_count["rpc"] > 0 ? 1 : 0

  name                  = "rpc"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
  node_count            = var.node_count["rpc"]
  vm_size               = var.vm_size["rpc"]
  vnet_subnet_id        = var.node_subnet_id
  os_disk_size_gb       = 256
  enable_auto_scaling   = false
  # No explicit zone pinning, to avoid SKU/zone constraints.

  node_labels = {
    pool = "rpc"
    role = "rpc"
  }

  tags = merge(var.tags, {
    Pool = "rpc"
    Role = "rpc"
  })
}
# Key Vault access policy for the AKS managed identity (access-policy model only).
# NOTE: if the vault uses RBAC (enhanced Key Vault module), use role
# assignments instead — hence this resource is skipped in production.
resource "azurerm_key_vault_access_policy" "aks" {
  count = var.environment == "prod" ? 0 : 1 # Skip if using RBAC in production

  key_vault_id = var.key_vault_id
  tenant_id    = data.azurerm_client_config.current.tenant_id
  # principal_id of the cluster's SystemAssigned identity defined above.
  object_id = azurerm_kubernetes_cluster.main.identity[0].principal_id

  # Read-only access: the cluster can fetch and enumerate, never write.
  secret_permissions = [
    "Get",
    "List"
  ]

  key_permissions = [
    "Get",
    "List"
  ]
}
# Outputs are defined in outputs.tf