From 69339fcd2a756daabc33c7308386ea3473ea8688 Mon Sep 17 00:00:00 2001
From: Alexander Held
Date: Tue, 12 Aug 2025 21:21:58 +0200
Subject: [PATCH 1/4] =?UTF-8?q?feat:=20=E2=9C=A8=20add=20support=20for=20m?=
 =?UTF-8?q?ultiple=20worker=5Fnode=20types=20and=20labels?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md             |  50 +++++++++++++++++
 network.tf            |   6 +-
 outputs.tf            |   8 ++-
 server.tf             | 126 ++++++++++++++++++++++++++++--------
 talos.tf              |  15 +----
 talos_patch_worker.tf |  12 +++-
 variables.tf          |  46 ++++++++++++++-
 7 files changed, 216 insertions(+), 47 deletions(-)

diff --git a/README.md b/README.md
index 6044f16..7e14f8e 100644
--- a/README.md
+++ b/README.md
@@ -204,6 +204,56 @@ module "talos" {
 }
 ```
 
+### Mixed Worker Node Types
+
+For more advanced use cases, you can define different types of worker nodes with individual configurations using the `worker_nodes` variable:
+
+```hcl
+module "talos" {
+  source  = "hcloud-talos/talos/hcloud"
+  version = ""
+
+  talos_version      = "v1.10.3"
+  kubernetes_version = "1.30.3"
+
+  hcloud_token            = "your-hcloud-token"
+  firewall_use_current_ip = true
+
+  cluster_name    = "mixed-cluster"
+  datacenter_name = "fsn1-dc14"
+
+  control_plane_count       = 1
+  control_plane_server_type = "cx22"
+
+  # Define different worker node types
+  worker_nodes = [
+    # Standard x86 workers
+    {
+      type = "cx22"
+      labels = {
+        "node.kubernetes.io/instance-type" = "cx22"
+      }
+    },
+    # ARM workers for specific workloads
+    {
+      type = "cax22"
+      labels = {
+        "node.kubernetes.io/arch" = "arm64"
+        "affinity.example.com"    = "example"
+      }
+    }
+  ]
+}
+```
+
+> [!NOTE]
+> The `worker_nodes` variable allows you to:
+> - Mix different server types (x86 and ARM)
+> - Add custom labels to nodes
+> - Control the count of each node type independently
+>
+> The legacy `worker_count` and `worker_server_type` variables are still supported for backward compatibility but are deprecated in favor of `worker_nodes`.
+
 You need to pipe the outputs of the module:
 
 ```hcl
diff --git a/network.tf b/network.tf
index ee57b9e..0950758 100644
--- a/network.tf
+++ b/network.tf
@@ -84,7 +84,7 @@ resource "hcloud_primary_ip" "control_plane_ipv6" {
 }
 
 resource "hcloud_primary_ip" "worker_ipv4" {
-  count      = var.worker_count
+  count      = local.total_worker_count
   name       = "${local.cluster_prefix}worker-${count.index + 1}-ipv4"
   datacenter = data.hcloud_datacenter.this.name
   type       = "ipv4"
@@ -97,7 +97,7 @@ resource "hcloud_primary_ip" "worker_ipv4" {
 }
 
 resource "hcloud_primary_ip" "worker_ipv6" {
-  count      = var.enable_ipv6 ? var.worker_count > 0 ? var.worker_count : 1 : 0
+  count      = var.enable_ipv6 ? local.total_worker_count > 0 ? local.total_worker_count : 1 : 0
  name       = "${local.cluster_prefix}worker-${count.index + 1}-ipv6"
   datacenter = data.hcloud_datacenter.this.name
   type       = "ipv6"
@@ -139,6 +139,6 @@ locals {
     for index in range(var.control_plane_count > 0 ? var.control_plane_count : 1) : cidrhost(hcloud_network_subnet.nodes.ip_range, index + 101)
   ]
   worker_private_ipv4_list = [
-    for index in range(var.worker_count > 0 ? var.worker_count : 1) : cidrhost(hcloud_network_subnet.nodes.ip_range, index + 201)
+    for index in range(local.total_worker_count > 0 ? local.total_worker_count : 1) : cidrhost(hcloud_network_subnet.nodes.ip_range, index + 201)
   ]
 }
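Aside, not part of the patch: `cidrhost()` is what pins each node to a deterministic private address, so switching the count source from `var.worker_count` to `local.total_worker_count` keeps existing assignments stable. A minimal sketch, assuming an example subnet of `10.0.1.0/24` (the module reads the real range from `hcloud_network_subnet.nodes.ip_range`):

```hcl
# Sketch only: deterministic private IPs for three workers.
locals {
  example_subnet = "10.0.1.0/24"

  # Control planes start at host offset 101, workers at 201:
  example_worker_ips = [
    for index in range(3) : cidrhost(local.example_subnet, index + 201)
  ]
  # => ["10.0.1.201", "10.0.1.202", "10.0.1.203"]
}
```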
diff --git a/outputs.tf b/outputs.tf
index 65c0050..c527ecd 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -10,6 +10,7 @@ output "kubeconfig" {
 
 output "talos_client_configuration" {
   value = data.talos_client_configuration.this
+  sensitive = true
 }
 
 output "talos_machine_configurations_control_plane" {
@@ -40,8 +41,9 @@ output "hetzner_network_id" {
 
 output "talos_worker_ids" {
   description = "Server IDs of the hetzner talos workers machines"
-  value = {
-    for id, server in hcloud_server.workers : id => server.id
-  }
+  value = merge(
+    { for id, server in hcloud_server.workers_new : id => server.id},
+    { for id, server in hcloud_server.workers_legacy : id => server.id}
+  )
 }
 
diff --git a/server.tf b/server.tf
index 7b0bcbe..261a8df 100644
--- a/server.tf
+++ b/server.tf
@@ -19,14 +19,55 @@ locals {
     (var.disable_arm ? null : data.hcloud_image.arm[0].id) : // Use ARM image if not disabled
     (var.disable_x86 ? null : data.hcloud_image.x86[0].id)   // Use x86 image if not disabled
   )
-  worker_image_id = (
-    var.worker_count > 0 ? # Only calculate if workers exist
-    (
-      substr(var.worker_server_type, 0, 3) == "cax" ?
-      (var.disable_arm ? null : data.hcloud_image.arm[0].id) : // Use ARM image if not disabled
-      (var.disable_x86 ? null : data.hcloud_image.x86[0].id)   // Use x86 image if not disabled
-    ) : null # No workers, no image needed
-  )
+
+  # Calculate total worker count from both old and new variables
+  legacy_worker_count = var.worker_count
+  new_worker_count = length(var.worker_nodes)
+  total_worker_count = local.legacy_worker_count + local.new_worker_count
+
+  # Generate worker node configurations from both old and new variables
+  legacy_workers = var.worker_count > 0 ? [
+    for i in range(var.worker_count) : {
+      index = i
+      name = "${local.cluster_prefix}worker-${i + 1}"
+      server_type = var.worker_server_type
+      image_id = (
+        substr(var.worker_server_type, 0, 3) == "cax" ?
+        (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
+        (var.disable_x86 ? null : data.hcloud_image.x86[0].id)
+      )
+      ipv4_public = local.worker_public_ipv4_list[i]
+      ipv6_public = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
+      ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
+      ipv4_private = local.worker_private_ipv4_list[i]
+      labels = {}
+      node_group_index = 0
+      node_in_group_index = i
+    }
+  ] : []
+
+
+  new_workers = [
+    for i, worker in var.worker_nodes : {
+      index = local.legacy_worker_count + i
+      name = "${local.cluster_prefix}worker-${local.legacy_worker_count + i + 1}"
+      server_type = worker.type
+      image_id = (
+        substr(worker.type, 0, 3) == "cax" ?
+        (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
+        (var.disable_x86 ? null : data.hcloud_image.x86[0].id)
+      )
+      ipv4_public = local.worker_public_ipv4_list[local.legacy_worker_count + i]
+      ipv6_public = var.enable_ipv6 ? local.worker_public_ipv6_list[local.legacy_worker_count + i] : null
+      ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[local.legacy_worker_count + i] : null
+      ipv4_private = local.worker_private_ipv4_list[local.legacy_worker_count + i]
+      labels = worker.labels
+    }
+  ]
+
+  # Combine legacy and new workers
+  workers = concat(local.legacy_workers, local.new_workers)
+
   control_planes = [
     for i in range(var.control_plane_count) : {
       index = i
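Aside, not part of the patch: because `concat()` puts the legacy list first, legacy nodes keep their low indices, and therefore their names and `cidrhost()`-derived private IPs, when `worker_nodes` entries are added. A sketch with assumed names, for `worker_count = 1` plus two `worker_nodes` entries:

```hcl
# Sketch only: legacy workers occupy the low indices, new workers follow.
locals {
  sketch_legacy = [
    { index = 0, name = "demo-worker-1", server_type = "cx22" },
  ]
  sketch_new = [
    { index = 1, name = "demo-worker-2", server_type = "cx22" },
    { index = 2, name = "demo-worker-3", server_type = "cax22" },
  ]
  sketch_workers = concat(local.sketch_legacy, local.sketch_new)
  # local.sketch_workers[0] is always the legacy node, so adding new
  # worker types never shifts an existing node's name or private IP.
}
```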
@@ -37,16 +78,6 @@ locals {
       ipv4_private = local.control_plane_private_ipv4_list[i]
     }
   ]
-  workers = [
-    for i in range(var.worker_count) : {
-      index = i
-      name = "${local.cluster_prefix}worker-${i + 1}"
-      ipv4_public = local.worker_public_ipv4_list[i],
-      ipv6_public = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
-      ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
-      ipv4_private = local.worker_private_ipv4_list[i]
-    }
-  ]
 }
 
 resource "tls_private_key" "ssh_key" {
@@ -107,21 +138,70 @@ resource "hcloud_server" "control_planes" {
   }
 }
 
-resource "hcloud_server" "workers" {
-  for_each = { for worker in local.workers : worker.name => worker }
+resource "hcloud_server" "workers_legacy" {
+  for_each = { for worker in local.legacy_workers : worker.name => worker }
   datacenter = data.hcloud_datacenter.this.name
   name = each.value.name
-  image = local.worker_image_id
-  server_type = var.worker_server_type
+  image = each.value.image_id
+  server_type = each.value.server_type
   user_data = data.talos_machine_configuration.worker[each.value.name].machine_configuration
   ssh_keys = [hcloud_ssh_key.this.id]
   placement_group_id = hcloud_placement_group.worker.id
 
-  labels = {
+  labels = merge({
     "cluster" = var.cluster_name,
     "role" = "worker"
+    "server_type" = each.value.server_type
+  }, each.value.labels)
+
+  firewall_ids = [
+    hcloud_firewall.this.id
+  ]
+
+  public_net {
+    ipv4_enabled = true
+    ipv4 = hcloud_primary_ip.worker_ipv4[each.value.index].id
+    ipv6_enabled = var.enable_ipv6
+    ipv6 = var.enable_ipv6 ? hcloud_primary_ip.worker_ipv6[each.value.index].id : null
   }
+
+  network {
+    network_id = hcloud_network_subnet.nodes.network_id
+    ip = each.value.ipv4_private
+    alias_ips = [] # fix for https://github.com/hetznercloud/terraform-provider-hcloud/issues/650
+  }
+
+  depends_on = [
+    hcloud_network_subnet.nodes,
+    data.talos_machine_configuration.worker
+  ]
+
+  lifecycle {
+    ignore_changes = [
+      user_data,
+      image
+    ]
+  }
+}
+
+
+
+resource "hcloud_server" "workers_new" {
+  for_each = { for worker in local.new_workers : worker.name => worker }
+  datacenter = data.hcloud_datacenter.this.name
+  name = each.value.name
+  image = each.value.image_id
+  server_type = each.value.server_type
+  user_data = data.talos_machine_configuration.worker[each.value.name].machine_configuration
+  ssh_keys = [hcloud_ssh_key.this.id]
+  placement_group_id = hcloud_placement_group.worker.id
+
+  labels = merge({
+    "cluster" = var.cluster_name,
+    "role" = "worker"
+    "server_type" = each.value.server_type
+  }, each.value.labels)
+
   firewall_ids = [
     hcloud_firewall.this.id
   ]
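Aside, not part of the patch: `merge()` is last-wins, so a key in `each.value.labels` overrides the built-in labels. A `worker_nodes` entry can therefore deliberately (or accidentally) replace `role`, `cluster`, or `server_type`:

```hcl
# Sketch only: later merge() arguments win on key collisions.
locals {
  sketch_labels = merge(
    {
      "cluster"     = "demo"
      "role"        = "worker"
      "server_type" = "cax22"
    },
    { "role" = "batch" } # stands in for each.value.labels
  )
  # => { cluster = "demo", role = "batch", server_type = "cax22" }
}
```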
diff --git a/talos.tf b/talos.tf
index 063466d..2aa21ff 100644
--- a/talos.tf
+++ b/talos.tf
@@ -103,20 +103,7 @@ data "talos_machine_configuration" "dummy_control_plane" {
   examples = false
 }
 
-# Dummy configuration generated when worker_count is 0 for debugging purposes
-# tflint-ignore: terraform_unused_declarations
-data "talos_machine_configuration" "dummy_worker" {
-  count              = var.worker_count == 0 ? 1 : 0
-  talos_version      = var.talos_version
-  cluster_name       = var.cluster_name
-  cluster_endpoint   = local.cluster_endpoint_url_internal # Uses dummy endpoint when count is 0
-  kubernetes_version = var.kubernetes_version
-  machine_type       = "worker"
-  machine_secrets    = talos_machine_secrets.this.machine_secrets
-  config_patches     = concat([yamlencode(local.worker_yaml["dummy-worker-0"])], var.talos_worker_extra_config_patches) # Use dummy yaml + extra patches
-  docs               = false
-  examples           = false
-}
+
 
 resource "talos_machine_bootstrap" "this" {
   count = var.control_plane_count > 0 ? 1 : 0
diff --git a/talos_patch_worker.tf b/talos_patch_worker.tf
index 712247a..ccac25b 100644
--- a/talos_patch_worker.tf
+++ b/talos_patch_worker.tf
@@ -1,15 +1,21 @@
 locals {
   # Define a dummy worker entry for when count is 0
-  dummy_workers = var.worker_count == 0 ? [{
+  dummy_workers = local.total_worker_count == 0 ? [{
     index = 0
     name = "dummy-worker-0"
+    server_type = "cx11"
+    image_id = null
     ipv4_public = "0.0.0.0" # Fallback
     ipv6_public = null # Fallback
     ipv6_public_subnet = null # Fallback
     ipv4_private = cidrhost(local.node_ipv4_cidr, 200) # Use a predictable dummy private IP
+    labels = {}
+    node_group_index = 0
+    node_in_group_index = 0
   }] : []
 
-  # Combine real and dummy workers
+  # Combine real and dummy workers - always include dummy when no workers exist
+# merged_workers = local.total_worker_count == 0 ? local.dummy_workers : local.workers
   merged_workers = concat(local.workers, local.dummy_workers)
 
   # Generate YAML for all (real or dummy) workers
@@ -70,6 +76,7 @@ locals {
         "time.cloudflare.com"
       ]
     }
+    nodeLabels = worker.labels
     registries = var.registries
   }
   cluster = {
@@ -88,4 +95,5 @@ locals {
       }
     }
   }
+  value = ""
 }
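Aside, not part of the patch: with `nodeLabels` added here, the same `worker.labels` map now feeds two systems, Hetzner Cloud server labels in server.tf and Kubernetes node labels via the Talos machine config, so keys should be valid in both. A sketch of the two destinations:

```hcl
# Sketch only: the two places a worker_nodes label map ends up.
locals {
  sketch_labels = { "node.kubernetes.io/arch" = "arm64" }

  # 1. Hetzner Cloud server labels (server.tf):
  sketch_hcloud_labels = merge({ "role" = "worker" }, local.sketch_labels)

  # 2. Kubernetes node labels, set at registration through the Talos
  #    machine config (this file):
  sketch_machine_patch = {
    machine = { nodeLabels = local.sketch_labels }
  }
}
```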
diff --git a/variables.tf b/variables.tf
index d57580a..8dd7deb 100644
--- a/variables.tf
+++ b/variables.tf
@@ -218,7 +218,7 @@ variable "control_plane_server_type" {
 variable "worker_count" {
   type        = number
   default     = 0
-  description = "The number of worker nodes to create. Maximum 99."
+  description = "DEPRECATED: Use worker_nodes instead. The number of worker nodes to create. Maximum 99."
   validation {
     condition     = var.worker_count <= 99
     error_message = "The number of worker nodes must be less than 100."
@@ -229,7 +229,7 @@ variable "worker_server_type" {
   type        = string
   default     = "cx11"
   description = <<EOF
-    The server type to use for the worker nodes.
+    DEPRECATED: Use worker_nodes instead. The server type to use for the worker nodes.
   EOF
 }
@@ -240,6 +240,44 @@ variable "worker_server_type" {
   }
 }
 
+variable "worker_nodes" {
+  type = list(object({
+    type   = string
+    labels = optional(map(string), {})
+  }))
+  default     = []
+  description = <<EOF
+    List of worker node definitions. Each entry creates one worker node with
+    the given server type and optional node labels.
+  EOF
+  validation {
+    condition = alltrue([
+      for worker in var.worker_nodes : can(regex("^(cx|cpx|cax|ccx)", worker.type))
+    ])
+    error_message = "Invalid worker server type in worker_nodes."
+  }
+  validation {
+    condition = length(var.worker_nodes) <= 99
+    error_message = "Total number of worker nodes must be less than 100."
+  }
+}
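Aside, not part of the patch: migrating an existing cluster off the deprecated inputs is mostly mechanical. The sketch below assumes a cluster that currently runs `worker_count = 2` with `worker_server_type = "cx22"`; names and private IPs come out identical, but the servers move to a different Terraform resource address, so expect a replacement plan unless the state is moved by hand.

```hcl
# Sketch only: equivalent worker fleet, expressed with the new variable.
module "talos" {
  source = "hcloud-talos/talos/hcloud"
  # ... all other inputs unchanged ...

  worker_count = 0 # was 2, with worker_server_type = "cx22"
  worker_nodes = [
    { type = "cx22" },
    { type = "cx22" },
  ]
}
```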
Date: Tue, 12 Aug 2025 23:42:05 +0200
Subject: [PATCH 2/4] =?UTF-8?q?style:=20=F0=9F=8E=A8=20apply=20terraform?=
 =?UTF-8?q?=20fmt?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 outputs.tf            |  6 +++---
 server.tf             | 42 +++++++++++++++++++++---------------------
 talos_patch_worker.tf | 22 +++++++++++-----------
 variables.tf          |  2 +-
 4 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/outputs.tf b/outputs.tf
index c527ecd..b8c1178 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -9,7 +9,7 @@ output "kubeconfig" {
 }
 
 output "talos_client_configuration" {
-  value = data.talos_client_configuration.this
+  value     = data.talos_client_configuration.this
   sensitive = true
 }
 
@@ -42,8 +42,8 @@ output "hetzner_network_id" {
 output "talos_worker_ids" {
   description = "Server IDs of the hetzner talos workers machines"
   value = merge(
-    { for id, server in hcloud_server.workers_new : id => server.id},
-    { for id, server in hcloud_server.workers_legacy : id => server.id}
+    { for id, server in hcloud_server.workers_new : id => server.id },
+    { for id, server in hcloud_server.workers_legacy : id => server.id }
   )
 }
 
diff --git a/server.tf b/server.tf
index 261a8df..0fad620 100644
--- a/server.tf
+++ b/server.tf
@@ -22,36 +22,36 @@ locals {
 
   # Calculate total worker count from both old and new variables
   legacy_worker_count = var.worker_count
-  new_worker_count = length(var.worker_nodes)
-  total_worker_count = local.legacy_worker_count + local.new_worker_count
-
+  new_worker_count    = length(var.worker_nodes)
+  total_worker_count  = local.legacy_worker_count + local.new_worker_count
+
   # Generate worker node configurations from both old and new variables
   legacy_workers = var.worker_count > 0 ? [
     for i in range(var.worker_count) : {
-      index = i
-      name = "${local.cluster_prefix}worker-${i + 1}"
-      server_type = var.worker_server_type
+      index       = i
+      name        = "${local.cluster_prefix}worker-${i + 1}"
+      server_type = var.worker_server_type
       image_id = (
         substr(var.worker_server_type, 0, 3) == "cax" ?
         (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
         (var.disable_x86 ? null : data.hcloud_image.x86[0].id)
       )
-      ipv4_public = local.worker_public_ipv4_list[i]
-      ipv6_public = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
-      ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
-      ipv4_private = local.worker_private_ipv4_list[i]
-      labels = {}
-      node_group_index = 0
+      ipv4_public         = local.worker_public_ipv4_list[i]
+      ipv6_public         = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
+      ipv6_public_subnet  = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
+      ipv4_private        = local.worker_private_ipv4_list[i]
+      labels              = {}
+      node_group_index    = 0
       node_in_group_index = i
     }
   ] : []
-
+
 
   new_workers = [
     for i, worker in var.worker_nodes : {
-      index = local.legacy_worker_count + i
-      name = "${local.cluster_prefix}worker-${local.legacy_worker_count + i + 1}"
-      server_type = worker.type
+      index       = local.legacy_worker_count + i
+      name        = "${local.cluster_prefix}worker-${local.legacy_worker_count + i + 1}"
+      server_type = worker.type
       image_id = (
         substr(worker.type, 0, 3) == "cax" ?
         (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
@@ -64,7 +64,7 @@ locals {
       labels = worker.labels
     }
   ]
-
+
   # Combine legacy and new workers
   workers = concat(local.legacy_workers, local.new_workers)
 
@@ -149,8 +149,8 @@ resource "hcloud_server" "workers_legacy" {
   placement_group_id = hcloud_placement_group.worker.id
 
   labels = merge({
-    "cluster" = var.cluster_name,
-    "role" = "worker"
+    "cluster"     = var.cluster_name,
+    "role"        = "worker"
     "server_type" = each.value.server_type
   }, each.value.labels)
 
@@ -197,8 +197,8 @@ resource "hcloud_server" "workers_new" {
   placement_group_id = hcloud_placement_group.worker.id
 
   labels = merge({
-    "cluster" = var.cluster_name,
-    "role" = "worker"
+    "cluster"     = var.cluster_name,
+    "role"        = "worker"
     "server_type" = each.value.server_type
   }, each.value.labels)
 
diff --git a/talos_patch_worker.tf b/talos_patch_worker.tf
index ccac25b..00cc06c 100644
--- a/talos_patch_worker.tf
+++ b/talos_patch_worker.tf
@@ -1,21 +1,21 @@
 locals {
   # Define a dummy worker entry for when count is 0
   dummy_workers = local.total_worker_count == 0 ? [{
-    index = 0
-    name = "dummy-worker-0"
-    server_type = "cx11"
-    image_id = null
-    ipv4_public = "0.0.0.0" # Fallback
-    ipv6_public = null # Fallback
-    ipv6_public_subnet = null # Fallback
-    ipv4_private = cidrhost(local.node_ipv4_cidr, 200) # Use a predictable dummy private IP
-    labels = {}
-    node_group_index = 0
+    index               = 0
+    name                = "dummy-worker-0"
+    server_type         = "cx11"
+    image_id            = null
+    ipv4_public         = "0.0.0.0"                            # Fallback
+    ipv6_public         = null                                 # Fallback
+    ipv6_public_subnet  = null                                 # Fallback
+    ipv4_private        = cidrhost(local.node_ipv4_cidr, 200)  # Use a predictable dummy private IP
+    labels              = {}
+    node_group_index    = 0
     node_in_group_index = 0
   }] : []
 
   # Combine real and dummy workers - always include dummy when no workers exist
-# merged_workers = local.total_worker_count == 0 ? local.dummy_workers : local.workers
+  # merged_workers = local.total_worker_count == 0 ? local.dummy_workers : local.workers
   merged_workers = concat(local.workers, local.dummy_workers)
 
   # Generate YAML for all (real or dummy) workers
diff --git a/variables.tf b/variables.tf
index 8dd7deb..b1f101a 100644
--- a/variables.tf
+++ b/variables.tf
@@ -281,7 +281,7 @@ variable "worker_nodes" {
     error_message = "Invalid worker server type in worker_nodes."
   }
   validation {
-    condition = length(var.worker_nodes) <= 99
+    condition     = length(var.worker_nodes) <= 99
     error_message = "Total number of worker nodes must be less than 100."
   }
 }

From 83789d650db40701bc5d301459ac96b7b97fa141 Mon Sep 17 00:00:00 2001
From: Marcel Richter
Date: Sun, 31 Aug 2025 14:38:03 +0200
Subject: [PATCH 3/4] feat: add Kubernetes taints support for worker nodes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Add taints field to worker_nodes variable for workload isolation
- Implement registerWithTaints in kubelet configuration per Talos best practices
- Update README with taint configuration examples
- Add taints to both legacy and new worker configurations
- Remove unused debug variable in talos_patch_worker.tf

Based on Talos discussion #9895, taints are applied at node registration
using kubelet.registerWithTaints to comply with NodeRestriction admission.

🤖 Generated with Claude Code

Co-Authored-By: Claude
---
 README.md             | 10 +++++++++-
 server.tf             |  2 ++
 talos_patch_worker.tf | 44 ++++++++++++++++++++++++++++--------------
 variables.tf          | 13 +++++++++++++
 4 files changed, 53 insertions(+), 16 deletions(-)
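Aside, not part of the patch: for the cax22 example from the README in this series, the kubelet fragment that ends up being `yamlencode()`d into the worker machine configuration corresponds to roughly this structure (the node CIDR here is assumed; the module uses `local.node_ipv4_cidr`):

```hcl
# Sketch only: kubelet section for a worker carrying one taint.
locals {
  sketch_kubelet = {
    extraArgs = {
      "cloud-provider"             = "external"
      "rotate-server-certificates" = true
    }
    nodeIP = {
      validSubnets = ["10.0.0.0/16"] # assumed; taken from local.node_ipv4_cidr
    }
    extraConfig = {
      registerWithTaints = [
        { key = "arm64-only", value = "true", effect = "NoSchedule" },
      ]
    }
  }
}
```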
diff --git a/README.md b/README.md
index 7e14f8e..11fdeee 100644
--- a/README.md
+++ b/README.md
@@ -234,13 +234,20 @@ module "talos" {
         "node.kubernetes.io/instance-type" = "cx22"
       }
     },
-    # ARM workers for specific workloads
+    # ARM workers for specific workloads with taints
     {
       type = "cax22"
       labels = {
         "node.kubernetes.io/arch" = "arm64"
         "affinity.example.com"    = "example"
       }
+      taints = [
+        {
+          key    = "arm64-only"
+          value  = "true"
+          effect = "NoSchedule"
+        }
+      ]
     }
   ]
 }
@@ -250,6 +257,7 @@ module "talos" {
 > The `worker_nodes` variable allows you to:
 > - Mix different server types (x86 and ARM)
 > - Add custom labels to nodes
+> - Apply taints for workload isolation
 > - Control the count of each node type independently
 >
 > The legacy `worker_count` and `worker_server_type` variables are still supported for backward compatibility but are deprecated in favor of `worker_nodes`.
diff --git a/server.tf b/server.tf
index 0fad620..84fbd18 100644
--- a/server.tf
+++ b/server.tf
@@ -41,6 +41,7 @@ locals {
       ipv6_public_subnet  = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
       ipv4_private        = local.worker_private_ipv4_list[i]
       labels              = {}
+      taints              = []
       node_group_index    = 0
       node_in_group_index = i
     }
@@ -62,6 +63,7 @@ locals {
       ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[local.legacy_worker_count + i] : null
       ipv4_private       = local.worker_private_ipv4_list[local.legacy_worker_count + i]
       labels             = worker.labels
+      taints             = worker.taints
     }
   ]
 
diff --git a/talos_patch_worker.tf b/talos_patch_worker.tf
index 00cc06c..4441f47 100644
--- a/talos_patch_worker.tf
+++ b/talos_patch_worker.tf
@@ -10,6 +10,7 @@ locals {
     ipv6_public_subnet  = null                                 # Fallback
     ipv4_private        = cidrhost(local.node_ipv4_cidr, 200)  # Use a predictable dummy private IP
     labels              = {}
+    taints              = []
     node_group_index    = 0
     node_in_group_index = 0
   }] : []
@@ -29,20 +30,34 @@ locals {
       ]
     }
     certSANs = local.cert_SANs
-    kubelet = {
-      extraArgs = merge(
-        {
-          "cloud-provider" = "external"
-          "rotate-server-certificates" = true
-        },
-        var.kubelet_extra_args
-      )
-      nodeIP = {
-        validSubnets = [
-          local.node_ipv4_cidr
-        ]
-      }
-    }
+    kubelet = merge(
+      {
+        extraArgs = merge(
+          {
+            "cloud-provider" = "external"
+            "rotate-server-certificates" = true
+          },
+          var.kubelet_extra_args
+        )
+        nodeIP = {
+          validSubnets = [
+            local.node_ipv4_cidr
+          ]
+        }
+      },
+      # Add registerWithTaints if taints are defined
+      length(worker.taints) > 0 ? {
+        extraConfig = {
+          registerWithTaints = [
+            for taint in worker.taints : {
+              key    = taint.key
+              value  = taint.value
+              effect = taint.effect
+            }
+          ]
+        }
+      } : {}
+    )
     network = {
       extraHostEntries = local.extra_host_entries
       kubespan = {
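Aside, not part of the patch: the hunk above relies on a common HCL idiom, merging a conditionally-empty map, so that untainted workers get no `extraConfig` key at all rather than an empty or null one:

```hcl
# Sketch only: merge() with an empty map adds a key only when needed.
locals {
  sketch_taints = [] # no taints on this worker
  sketch_kubelet = merge(
    { nodeIP = { validSubnets = ["10.0.0.0/16"] } }, # assumed subnet
    length(local.sketch_taints) > 0 ? { extraConfig = { registerWithTaints = local.sketch_taints } } : {}
  )
  # => only the nodeIP key; extraConfig is absent entirely.
}
```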
@@ -95,5 +110,4 @@ locals {
       }
     }
   }
-  value = ""
 }
diff --git a/variables.tf b/variables.tf
index b1f101a..358e42e 100644
--- a/variables.tf
+++ b/variables.tf
@@ -248,6 +248,11 @@ variable "worker_nodes" {
   type = list(object({
     type   = string
     labels = optional(map(string), {})
+    taints = optional(list(object({
+      key    = string
+      value  = string
+      effect = string
+    })), [])
   }))
   default     = []
   description = <<EOF

Date: Mon, 1 Sep 2025 23:08:11 +0200
Subject: [PATCH 4/4] fix: keep the hcloud_server resource name for backwards
 compatibility
---
 outputs.tf | 2 +-
 server.tf  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/outputs.tf b/outputs.tf
index b8c1178..875f435 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -43,7 +43,7 @@ output "talos_worker_ids" {
   description = "Server IDs of the hetzner talos workers machines"
   value = merge(
     { for id, server in hcloud_server.workers_new : id => server.id },
-    { for id, server in hcloud_server.workers_legacy : id => server.id }
+    { for id, server in hcloud_server.workers : id => server.id }
   )
 }
 
diff --git a/server.tf b/server.tf
index 84fbd18..7cc375b 100644
--- a/server.tf
+++ b/server.tf
@@ -140,7 +140,7 @@ resource "hcloud_server" "control_planes" {
   }
 }
 
-resource "hcloud_server" "workers_legacy" {
+resource "hcloud_server" "workers" {
   for_each           = { for worker in local.legacy_workers : worker.name => worker }
   datacenter         = data.hcloud_datacenter.this.name
   name               = each.value.name
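Aside, not part of the patch: keeping the `hcloud_server.workers` address is what spares existing users a state migration. Had the `workers_legacy` name stayed, every consumer would have needed `terraform state mv`, or, since the rename happened inside the module, a `moved` block shipped with it, roughly:

```hcl
# Sketch only: the state move that patch 4 makes unnecessary.
moved {
  from = hcloud_server.workers
  to   = hcloud_server.workers_legacy
}
```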