I am trying to associate specific Virtual Machine NIC IPs with Load Balancer backend pools using the azurerm_network_interface_backend_address_pool_association resource. Since I need to map only specific VM NICs to each backend pool, I have created a mapping in vm_nic_to_lb_bpool_map.
Below is my module folder structure, followed by the complete code.
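Roughly (the exact file names inside each module are assumed from the module sources in main.tf and from the error output below):

.
├── main.tf
├── variables.tf
├── terraform.tfvars
├── loadbalancer/
│   ├── main.tf
│   └── variables.tf
└── virtualmachine/
    ├── main.tf
    ├── output.tf
    └── variables.tf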
loadbalancer module
resource "azurerm_lb" "lb" {
for_each = var.loadbalancers.tiers
name = each.value.lb_name
location = "eastus"
resource_group_name = var.existing_resource_group
sku = "Standard"
frontend_ip_configuration {
name = "fip-${each.value.fip_name}"
subnet_id = each.value.subnet_id
private_ip_address_allocation = each.value.private_ip_type
}
}
resource "azurerm_lb_probe" "probe" {
depends_on = [azurerm_lb.lb]
for_each = {
for lb in flatten([
for lb_name, lb in var.loadbalancers.tiers : [
for probe_name, probe in lb.lb_probes : {
lb_name_value = lb.lb_name
lb_name = lb_name
probe_name = probe_name
protocol_value = probe.protocol
port_value = probe.port
}
]
]
) : "${lb.lb_name}-${lb.probe_name}" => lb
}
loadbalancer_id = azurerm_lb.lb[each.value.lb_name].id
name = each.value.probe_name
protocol = each.value.protocol_value
port = each.value.port_value
}
resource "azurerm_lb_backend_address_pool" "bepool" {
depends_on = [azurerm_lb.lb]
for_each = var.loadbalancers.tiers
name = each.value.backend_address_pool_name
loadbalancer_id = azurerm_lb.lb[each.key].id
}
resource "azurerm_lb_rule" "rule" {
depends_on = [azurerm_lb.lb]
for_each = {
for lb in flatten([
for lb_name, lb in var.loadbalancers.tiers : [
for lb_rule, rule in lb.lb_rules : {
lb_name = lb_name
lb_rule = lb_rule
frontend_port = rule.frontend_port
protocol = rule.protocol
backend_port = rule.backend_port
enable_floating_ip = rule.enable_floating_ip
frontend_ip_config_name = rule.frontend_ip_config_name
probe_name = rule.probe_name
}
]
]
) : "${lb.lb_name}-${lb.lb_rule}" => lb
}
loadbalancer_id = azurerm_lb.lb[each.value.lb_name].id
name = each.value.lb_rule
protocol = each.value.protocol
frontend_port = each.value.frontend_port
backend_port = each.value.backend_port
frontend_ip_configuration_name = azurerm_lb.lb[each.value.lb_name].frontend_ip_configuration[0].name
enable_floating_ip = each.value.enable_floating_ip
backend_address_pool_ids = [azurerm_lb_backend_address_pool.bepool[each.value.lb_name].id]
probe_id = azurerm_lb_probe.probe["${each.value.lb_name}-${each.value.probe_name}"].id
}
resource "azurerm_network_interface_backend_address_pool_association" "nic_lb_association" {
for_each = var.vm_nic_to_lb_bpool_map
network_interface_id = var.virtual_machine_nic_keys[each.key].id
ip_configuration_name = var.virtual_machine_nic_keys[each.key].ip_configuration[0].name
backend_address_pool_id = azurerm_lb_backend_address_pool.bepool[each.value].id
}
loadbalancer variables
variable "loadbalancers" {
}
variable "existing_resource_group" {
type = string
}
variable "virtual_machine_nic_keys" {
}
variable "vm_nic_to_lb_bpool_map" {
}
Virtual machine module
data "azurerm_resource_group" "rg" {
name = var.existing_resource_group
}
resource "azurerm_network_interface" "nic" {
for_each = {
for vm in flatten([
for vm_name, vm in var.virtual_machines.nodes : [
for nic_name, nic in vm.networks : {
vm_number = vm.vm_num,
vm_name = vm_name,
nic_name = nic_name,
subnet_value = nic.subnet
nic_name_value = nic.nic_name
}
]
]
) : "${vm.vm_name}-${vm.nic_name}" => vm
}
name = "${var.vm_prefix}-${each.value.nic_name_value}-nic"
location = "eastus"
resource_group_name = var.existing_resource_group
ip_configuration {
name = "${var.vm_prefix}-${each.value.nic_name_value}-ipconfig"
subnet_id = each.value.subnet_value
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_linux_virtual_machine" "vm" {
depends_on = [azurerm_network_interface.nic]
for_each = var.virtual_machines.nodes
name = "${var.vm_prefix}-${each.value.vm_name}-${each.value.vm_num}"
admin_username = "plutoadmin"
admin_password = "pluto@1234522"
disable_password_authentication = false
location = "eastus"
resource_group_name = var.existing_resource_group
network_interface_ids = [for nic_key, nic in azurerm_network_interface.nic : nic.id if startswith(nic_key, "${each.key}-")]
size = "Standard_B2ms"
os_disk {
name = "${var.vm_prefix}-${each.value.vm_name}-${each.value.vm_num}-OSdisk"
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "RedHat"
offer = "RHEL"
sku = "82gen2"
version = "latest"
}
}
Virtual Machine output
output "vm_nics_ids" {
value = [for nic_key, nic in azurerm_network_interface.nic : nic.id]
}
output "virtual_machine_nic_keys" {
value = merge([
for vm_name, vm in var.virtual_machines.nodes :
{
for nic_name, nic in vm.networks :
"${vm_name}-${nic_name}" => azurerm_network_interface.nic["${vm_name}-${nic_name}-nic"].id
}
]...)
}
virtual machine variables
variable "vm_prefix" {
type = string
default = "pluto"
}
variable "virtual_machines" {
}
variable "existing_resource_group" {
type = string
}
main.tf
module "virtualmachine" {
source = "./virtualmachine"
existing_resource_group = var.existing_resource_group
virtual_machines = var.virtual_machines
vm_prefix = var.vm_prefix
}
module "loadbalancer" {
depends_on = [module.virtualmachine]
source = "./loadbalancer"
existing_resource_group = var.existing_resource_group
loadbalancers = var.loadbalancers
vm_nic_to_lb_bpool_map = var.vm_nic_to_lb_bpool_map
virtual_machine_nic_keys = module.virtualmachine.virtual_machine_nic_keys
}
tfvars
existing_resource_group = "pluto"
existing_infra_rg = "pluto-infra"
virtual_machines = {
nodes = {
app1_node1 = {
"vm_name" = "app"
"vm_num" = "1"
networks = {
nic1 = {
"nic_name" = "app-1"
"subnet" = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
},
}
},
service1_node1 = {
"vm_name" = "service"
"vm_num" = "1"
networks = {
nic1 = {
"nic_name" = "service-1"
"subnet" = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
},
}
},
db1_node1 = {
"vm_name" = "db"
"vm_num" = "1"
networks = {
nic1 = {
"nic_name" = "db-1-1"
"subnet" = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
},
nic2 = {
"nic_name" = "db-1-2"
"subnet" = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/db-subnet"
}
}
},
db2_node2 = {
"vm_name" = "db"
"vm_num" = "2"
networks = {
nic1 = {
"nic_name" = "db-2-1"
"subnet" = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
},
nic2 = {
"nic_name" = "db-2-2"
"subnet" = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/db-subnet"
},
}
},
}
}
loadbalancers = {
tiers = {
app-lb = {
lb_name = "app-loadbalancer"
fip_name = "app-fip"
subnet_id = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
private_ip_type = "Dynamic"
backend_address_pool_name = "app-bpool"
lb_probes = {
tomcat-probe = {
protocol = "Tcp"
port = "8080"
}
}
lb_rules = {
tomcat-rule = {
frontend_port = "7070"
protocol = "Tcp"
backend_port = "8080"
enable_floating_ip = true
frontend_ip_config_name = "app-loadbalancer"
probe_name = "tomcat-probe"
}
}
},
db-lb = {
lb_name = "db-loadbalancer"
fip_name = "db-fip"
subnet_id = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
private_ip_type = "Dynamic"
backend_address_pool_name = "db-bpool"
lb_probes = {
db-probe = {
protocol = "Tcp"
port = "8080"
}
}
lb_rules = {
db-rule = {
frontend_port = "7070"
protocol = "Tcp"
backend_port = "8080"
enable_floating_ip = true
frontend_ip_config_name = "db-loadbalancer"
probe_name = "db-probe"
}
}
}
}
}
vm_nic_to_lb_bpool_map = {
"app1_node1-nic1" = "app_lb"
"db1_node1-nic1" = "db_lb"
"db2_node2-nic2" = "db_lb"
}
variables
variable "resource_group_location" {
type = string
default = "eastus"
}
variable "vm_prefix" {
type = string
default = "pluto"
}
variable "virtual_machines" {
}
variable "existing_resource_group" {
type = string
}
variable "loadbalancers" {
}
variable "vm_nic_to_lb_bpool_map" {
}
ERROR :
│ Error: Invalid index
│
│ on virtualmachine\output.tf line 11, in output "virtual_machine_nic_keys":
│ 11: "${vm_name}-${nic_name}" => azurerm_network_interface.nic["${vm_name}-${nic_name}-nic"].id
│ ├────────────────
│ │ azurerm_network_interface.nic is object with 6 attributes
│
│ The given key does not identify an element in this collection value.
Could someone help me find the root cause of this error, or show how to write the output in the virtual machine module so that it outputs the NIC keys that match vm_nic_to_lb_bpool_map?
Expecting the below association:
  "app1_node1-nic1" = "app_lb"
  "db1_node1-nic1"  = "db_lb"
  "db2_node2-nic2"  = "db_lb"
The non-module version of the code works as expected:
resource "azurerm_network_interface_backend_address_pool_association" "nic_lb_association" {
for_each = local.vm_to_lb_map
network_interface_id = azurerm_network_interface.nic-poc[each.key].id
ip_configuration_name = azurerm_network_interface.nic-poc[each.key].ip_configuration[0].name
backend_address_pool_id = azurerm_lb_backend_address_pool.bepool[each.value].id
}

You are getting this error because of how you defined the output variable virtual_machine_nic_keys in your virtualmachine module. The error means that the collection azurerm_network_interface.nic does not contain the key you are using to index it; in other words, the keys you build in the output do not match the keys of the azurerm_network_interface.nic map.

To see why, look at the relevant portion of your configuration. The for_each on azurerm_network_interface.nic keys its instances as "${vm.vm_name}-${vm.nic_name}", which produces keys such as "app1_node1-nic1". The output, however, looks up "${vm_name}-${nic_name}-nic", i.e. "app1_node1-nic1-nic". Because of the extra "-nic" suffix, none of the constructed keys exist in the collection, which is exactly what the "Invalid index" error reports. Double-check that the keys you construct in the output match exactly how the azurerm_network_interface.nic instances are keyed, and drop the "-nic" suffix.
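There is one more mismatch worth fixing while you are at it (visible in the posted tfvars, not yet reported by the error): the values in vm_nic_to_lb_bpool_map are "app_lb" and "db_lb", but azurerm_lb_backend_address_pool.bepool uses for_each = var.loadbalancers.tiers, so its keys are the tier names "app-lb" and "db-lb". The lookup azurerm_lb_backend_address_pool.bepool[each.value] would therefore also fail. A sketch of the adjusted map:

vm_nic_to_lb_bpool_map = {
  "app1_node1-nic1" = "app-lb" # value must be a key of loadbalancers.tiers, i.e. a key of the bepool resource
  "db1_node1-nic1"  = "db-lb"
  "db2_node2-nic2"  = "db-lb"
}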