Files
smom-dbis-138/terraform/multi-cloud/modules/gcp/main.tf
defiQUG 1fb7266469 Add Oracle Aggregator and CCIP Integration
- Introduced Aggregator.sol for Chainlink-compatible oracle functionality, including round-based updates and access control.
- Added OracleWithCCIP.sol to extend Aggregator with CCIP cross-chain messaging capabilities.
- Created .gitmodules to include OpenZeppelin contracts as a submodule.
- Developed a comprehensive deployment guide in NEXT_STEPS_COMPLETE_GUIDE.md for Phase 2 and smart contract deployment.
- Implemented Vite configuration for the orchestration portal, supporting both Vue and React frameworks.
- Added server-side logic for the Multi-Cloud Orchestration Portal, including API endpoints for environment management and monitoring.
- Created scripts for resource import and usage validation across non-US regions.
- Added tests for CCIP error handling and integration to ensure robust functionality.
- Included various new files and directories for the orchestration portal and deployment scripts.
2025-12-12 14:57:48 -08:00

368 lines
8.8 KiB
HCL

# Google Cloud Platform Infrastructure Module
# Creates GKE cluster, networking, and supporting resources for GCP environments
# Derived configuration shared by every resource in this module.
# All optional lookups go through try() so a missing key falls back to a
# default instead of failing the plan.
locals {
  # Full environment configuration object supplied by the caller.
  env = var.environment_config
  # Extract GCP-specific config ({} when the environment has no gcp section).
  gcp_config = try(local.env.gcp, {})
  # Extract infrastructure config and its kubernetes/networking sub-sections.
  infra      = try(local.env.infrastructure, {})
  k8s_config = try(local.infra.kubernetes, {})
  net_config = try(local.infra.networking, {})
  # Naming prefix used by every resource name below.
  # NOTE(review): local.env.name is read without try(), so a config object
  # lacking "name" fails here — confirm that is intended.
  name_prefix = "${local.env.name}-${var.environment}"
  # Node pool definitions keyed by pool name (system/validators/rpc below).
  node_pools = try(local.k8s_config.node_pools, {})
  # Project and region, with module-variable fallbacks.
  project_id = try(local.gcp_config.project_id, var.gcp_project_id)
  region     = try(local.env.region, var.gcp_default_region)
}
# VPC Network
# Custom-mode VPC: auto_create_subnetworks is disabled and all subnets are
# declared explicitly below.
resource "google_compute_network" "main" {
  name                    = "${local.name_prefix}-vpc"
  auto_create_subnetworks = false
  project                 = local.project_id
  # BUG FIX: google_compute_network accepts neither "tags" nor "labels";
  # the previous `tags = var.tags` argument was invalid and has been removed.
}
# Subnets
# Kubernetes cluster subnet. The shared (cloud-agnostic) config may name this
# subnet "gke", "eks", or "aks" depending on which cloud's schema produced it,
# so all three names are accepted here.
resource "google_compute_subnetwork" "gke" {
  for_each = {
    for idx, subnet in try(local.net_config.subnets, []) : subnet.name => subnet
    if subnet.name == "gke" || subnet.name == "eks" || subnet.name == "aks"
  }
  name                     = "${local.name_prefix}-subnet-${each.key}"
  ip_cidr_range            = each.value.cidr
  region                   = try(each.value.region, local.region)
  network                  = google_compute_network.main.id
  project                  = local.project_id
  # Lets private (no external IP) nodes reach Google APIs.
  private_ip_google_access = true
  # Secondary ranges carved out of the primary CIDR for VPC-native GKE.
  # NOTE(review): cidrsubnet(..., 8, n) adds 8 bits to the primary prefix —
  # a /24 primary would yield unusable /32 secondaries. Confirm the configured
  # subnet CIDRs are wide enough (e.g. /16) for this to make sense.
  secondary_ip_range {
    range_name    = "${local.name_prefix}-pods"
    ip_cidr_range = cidrsubnet(each.value.cidr, 8, 1)
  }
  secondary_ip_range {
    range_name    = "${local.name_prefix}-services"
    ip_cidr_range = cidrsubnet(each.value.cidr, 8, 2)
  }
}
# Subnet dedicated to validator workloads.
resource "google_compute_subnetwork" "validators" {
  # Pick out only the subnet entry named "validators" from the shared config.
  for_each = {
    for s in try(local.net_config.subnets, []) : s.name => s
    if s.name == "validators"
  }

  name                     = "${local.name_prefix}-subnet-validators"
  project                  = local.project_id
  network                  = google_compute_network.main.id
  region                   = try(each.value.region, local.region)
  ip_cidr_range            = each.value.cidr
  # Lets private (no external IP) instances reach Google APIs.
  private_ip_google_access = true
}
# Subnet dedicated to RPC-serving workloads.
resource "google_compute_subnetwork" "rpc" {
  # Pick out only the subnet entry named "rpc" from the shared config.
  for_each = {
    for s in try(local.net_config.subnets, []) : s.name => s
    if s.name == "rpc"
  }

  name                     = "${local.name_prefix}-subnet-rpc"
  project                  = local.project_id
  network                  = google_compute_network.main.id
  region                   = try(each.value.region, local.region)
  ip_cidr_range            = each.value.cidr
  # Lets private (no external IP) instances reach Google APIs.
  private_ip_google_access = true
}
# Firewall Rules
# Allow all intra-VPC traffic (ICMP + any TCP/UDP port) between the
# configured subnets.
resource "google_compute_firewall" "allow_internal" {
  name    = "${local.name_prefix}-allow-internal"
  network = google_compute_network.main.name
  project = local.project_id

  allow {
    protocol = "icmp"
  }
  allow {
    protocol = "tcp"
    ports    = ["0-65535"]
  }
  allow {
    protocol = "udp"
    ports    = ["0-65535"]
  }

  # BUG FIX: google_compute_network.ipv4_range is only populated for legacy
  # networks and is null on a custom-mode VPC (auto_create_subnetworks=false),
  # so the old reference broke the plan. Use the CIDRs of the subnets this
  # module actually configures instead.
  source_ranges = [for s in try(local.net_config.subnets, []) : s.cidr]

  # BUG FIX: google_compute_firewall has no "tags" argument (only
  # source_tags/target_tags); the invalid `tags = var.tags` was removed.
}
# Allow peer-to-peer traffic to validator nodes on port 30303.
resource "google_compute_firewall" "allow_validators_p2p" {
  name    = "${local.name_prefix}-allow-validators-p2p"
  network = google_compute_network.main.name
  project = local.project_id

  allow {
    protocol = "tcp"
    ports    = ["30303"]
  }
  # FIX: Ethereum-style devp2p node discovery also uses UDP on 30303; TCP
  # alone blocks discovery. NOTE(review): confirm the deployed client actually
  # uses UDP discovery before relying on this.
  allow {
    protocol = "udp"
    ports    = ["30303"]
  }

  # BUG FIX: google_compute_network.ipv4_range is null on custom-mode VPCs;
  # derive the internal ranges from the configured subnet CIDRs instead.
  source_ranges = [for s in try(local.net_config.subnets, []) : s.cidr]

  # Applies only to instances carrying the "validators" network tag.
  target_tags = ["validators"]

  # BUG FIX: google_compute_firewall has no "tags" argument; the invalid
  # `tags = var.tags` was removed.
}
# Allow JSON-RPC (8545) and WebSocket (8546) traffic to RPC nodes.
resource "google_compute_firewall" "allow_rpc" {
  name    = "${local.name_prefix}-allow-rpc"
  network = google_compute_network.main.name
  project = local.project_id

  allow {
    protocol = "tcp"
    ports    = ["8545", "8546"]
  }

  # SECURITY NOTE(review): this exposes the RPC ports to the entire internet.
  # If these endpoints are not meant to be public, restrict source_ranges to
  # a load balancer or allow-list of CIDRs.
  source_ranges = ["0.0.0.0/0"]

  # Applies only to instances carrying the "rpc" network tag.
  target_tags = ["rpc"]

  # BUG FIX: google_compute_firewall has no "tags" argument; the invalid
  # `tags = var.tags` was removed.
}
# GKE Cluster
# Regional, private, VPC-native cluster; workloads run on the separately
# managed node pools defined below.
resource "google_container_cluster" "main" {
  name     = "${local.name_prefix}-gke"
  location = local.region
  project  = local.project_id

  # We can't create a cluster with no node pool defined, but we want to only use
  # separately managed node pools. So we create the smallest possible default
  # node pool and immediately delete it.
  remove_default_node_pool = true
  initial_node_count       = 1

  network = google_compute_network.main.name
  # NOTE(review): assumes exactly one matching kubernetes subnet exists; this
  # expression errors at plan time if the config defines none.
  subnetwork = [for s in google_compute_subnetwork.gke : s.name][0]

  # BUG FIX: the gke subnet creates pods/services secondary ranges that the
  # cluster never referenced. A VPC-native (alias-IP) cluster must name its
  # secondary ranges explicitly, and private clusters require alias IPs.
  ip_allocation_policy {
    cluster_secondary_range_name  = "${local.name_prefix}-pods"
    services_secondary_range_name = "${local.name_prefix}-services"
  }

  # Enable Workload Identity
  workload_identity_config {
    workload_pool = "${local.project_id}.svc.id.goog"
  }

  # Enable private cluster (nodes get no external IPs; control-plane endpoint
  # stays publicly reachable).
  private_cluster_config {
    enable_private_nodes    = true
    enable_private_endpoint = false
    master_ipv4_cidr_block  = "172.16.0.0/28"
  }

  # Enable logging and monitoring
  logging_service    = "logging.googleapis.com/kubernetes"
  monitoring_service = "monitoring.googleapis.com/kubernetes"

  # Network policy
  network_policy {
    enabled = true
  }

  # Release channel
  release_channel {
    channel = "REGULAR"
  }

  # Maintenance window
  maintenance_policy {
    daily_maintenance_window {
      start_time = "03:00"
    }
  }

  # BUG FIX: google_container_cluster has no "tags" argument; clusters carry
  # labels via resource_labels. NOTE(review): GCP label keys/values have
  # syntax limits (lowercase, 63 chars) — confirm var.tags complies.
  resource_labels = var.tags

  depends_on = [
    google_project_service.container,
    google_project_service.compute,
  ]
}
# Delete default node pool
# NOTE(review): this resource does not delete anything — the cluster's
# remove_default_node_pool = true already removes the built-in default pool.
# What this actually does is create an ADDITIONAL, separate node pool named
# "<prefix>-default-pool" with zero nodes. It appears redundant and
# misleading; consider removing this resource entirely (verify nothing else
# in the repo references it first).
resource "google_container_node_pool" "default" {
  name       = "${local.name_prefix}-default-pool"
  location   = local.region
  cluster    = google_container_cluster.main.name
  project    = local.project_id
  node_count = 0 # Immediately scale to 0 to effectively delete it
  management {
    auto_repair  = true
    auto_upgrade = true
  }
}
# Node Pools
# System-workload pool; created only when the config asks for a positive count.
resource "google_container_node_pool" "system" {
  count = try(local.node_pools.system.count, 0) > 0 ? 1 : 0

  name     = "system"
  location = local.region
  cluster  = google_container_cluster.main.name
  project  = local.project_id

  # BUG FIX: setting `node_count` on a pool that also has an autoscaling block
  # fights the autoscaler and produces a perpetual plan diff once it resizes
  # the pool; the provider documents `initial_node_count` for autoscaled pools.
  initial_node_count = try(local.node_pools.system.count, 1)

  node_config {
    machine_type    = try(local.node_pools.system.machine_type, "e2-medium")
    disk_size_gb    = 100
    disk_type       = "pd-standard"
    service_account = google_service_account.gke_nodes.email
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform",
    ]
    labels = {
      pool = "system"
      role = "system"
    }
    # Network tag so firewall rules can target these nodes.
    tags = ["system"]
  }

  management {
    auto_repair  = true
    auto_upgrade = true
  }

  # Scale between 1 and twice the configured count.
  autoscaling {
    min_node_count = 1
    max_node_count = try(local.node_pools.system.count, 1) * 2
  }
}
# Validator pool; tainted so only workloads tolerating role=validator land here.
resource "google_container_node_pool" "validators" {
  count = try(local.node_pools.validators.count, 0) > 0 ? 1 : 0

  name     = "validators"
  location = local.region
  cluster  = google_container_cluster.main.name
  project  = local.project_id

  # BUG FIX: setting `node_count` on a pool that also has an autoscaling block
  # fights the autoscaler and produces a perpetual plan diff once it resizes
  # the pool; the provider documents `initial_node_count` for autoscaled pools.
  initial_node_count = try(local.node_pools.validators.count, 1)

  node_config {
    machine_type    = try(local.node_pools.validators.machine_type, "e2-medium")
    disk_size_gb    = 512
    disk_type       = "pd-ssd"
    service_account = google_service_account.gke_nodes.email
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform",
    ]
    labels = {
      pool = "validators"
      role = "validator"
    }
    # Keep general workloads off validator nodes.
    taint {
      key    = "role"
      value  = "validator"
      effect = "NO_SCHEDULE"
    }
    # Network tag matched by the allow_validators_p2p firewall rule.
    tags = ["validators"]
  }

  management {
    auto_repair  = true
    auto_upgrade = true
  }

  # Scale between 1 and twice the configured count.
  autoscaling {
    min_node_count = 1
    max_node_count = try(local.node_pools.validators.count, 1) * 2
  }
}
# RPC-serving pool; created only when the config asks for a positive count.
resource "google_container_node_pool" "rpc" {
  count = try(local.node_pools.rpc.count, 0) > 0 ? 1 : 0

  name     = "rpc"
  location = local.region
  cluster  = google_container_cluster.main.name
  project  = local.project_id

  # BUG FIX: setting `node_count` on a pool that also has an autoscaling block
  # fights the autoscaler and produces a perpetual plan diff once it resizes
  # the pool; the provider documents `initial_node_count` for autoscaled pools.
  initial_node_count = try(local.node_pools.rpc.count, 1)

  node_config {
    machine_type    = try(local.node_pools.rpc.machine_type, "e2-medium")
    disk_size_gb    = 256
    disk_type       = "pd-ssd"
    service_account = google_service_account.gke_nodes.email
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform",
    ]
    labels = {
      pool = "rpc"
      role = "rpc"
    }
    # Network tag matched by the allow_rpc firewall rule.
    tags = ["rpc"]
  }

  management {
    auto_repair  = true
    auto_upgrade = true
  }

  # Scale between 1 and twice the configured count.
  autoscaling {
    min_node_count = 1
    max_node_count = try(local.node_pools.rpc.count, 1) * 2
  }
}
# Service Account for GKE nodes
# Dedicated identity attached to every node pool above (via node_config).
# NOTE(review): service account IDs must be 6-30 characters; a long
# name_prefix could push "${local.name_prefix}-gke-nodes" over the limit —
# confirm against the environments that use this module.
resource "google_service_account" "gke_nodes" {
  account_id   = "${local.name_prefix}-gke-nodes"
  display_name = "GKE Nodes Service Account"
  project      = local.project_id
}
# Grant the node service account the minimal role Google recommends for
# GKE node identities.
resource "google_project_iam_member" "gke_nodes" {
  member  = "serviceAccount:${google_service_account.gke_nodes.email}"
  role    = "roles/container.nodeServiceAccount"
  project = local.project_id
}
# Enable required APIs
# GKE API; left enabled on destroy so other infrastructure in the project
# keeps working.
resource "google_project_service" "container" {
  service            = "container.googleapis.com"
  project            = local.project_id
  disable_on_destroy = false
}
# Compute Engine API; left enabled on destroy so other infrastructure in the
# project keeps working.
resource "google_project_service" "compute" {
  service            = "compute.googleapis.com"
  project            = local.project_id
  disable_on_destroy = false
}