Commit d53d1fc

Merge branch 'hcloud-talos:main' into main
2 parents 641823b + 88b4017

8 files changed, 276 additions and 70 deletions

.github/workflows/checkov.yml

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ jobs:
          download_external_modules: true

      - name: Upload SARIF file
-       uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0
+       uses: github/codeql-action/upload-sarif@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1

        # Results are generated only on a success or failure
        # this is required since GitHub by default won't run the next step

README.md

Lines changed: 58 additions & 0 deletions
@@ -204,6 +204,64 @@ module "talos" {
}
```

+### Mixed Worker Node Types
+
+For more advanced use cases, you can define different types of worker nodes with individual configurations using the `worker_nodes` variable:
+
+```hcl
+module "talos" {
+  source  = "hcloud-talos/talos/hcloud"
+  version = "<latest-version>"
+
+  talos_version      = "v1.10.3"
+  kubernetes_version = "1.30.3"
+
+  hcloud_token            = "your-hcloud-token"
+  firewall_use_current_ip = true
+
+  cluster_name    = "mixed-cluster"
+  datacenter_name = "fsn1-dc14"
+
+  control_plane_count       = 1
+  control_plane_server_type = "cx22"
+
+  # Define different worker node types
+  worker_nodes = [
+    # Standard x86 workers
+    {
+      type = "cx22"
+      labels = {
+        "node.kubernetes.io/instance-type" = "cx22"
+      }
+    },
+    # ARM workers for specific workloads with taints
+    {
+      type = "cax22"
+      labels = {
+        "node.kubernetes.io/arch" = "arm64"
+        "affinity.example.com"    = "example"
+      }
+      taints = [
+        {
+          key    = "arm64-only"
+          value  = "true"
+          effect = "NoSchedule"
+        }
+      ]
+    }
+  ]
+}
+```
+
+> [!NOTE]
+> The `worker_nodes` variable allows you to:
+> - Mix different server types (x86 and ARM)
+> - Add custom labels to nodes
+> - Apply taints for workload isolation
+> - Control the count of each node type independently
+>
+> The legacy `worker_count` and `worker_server_type` variables are still supported for backward compatibility but are deprecated in favor of `worker_nodes`.
+
You need to pipe the outputs of the module:

```hcl
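For reference, each `worker_nodes` entry pairs a Hetzner server type with optional labels and taints. The module's actual schema lives in its `variables.tf`; the following is only a hedged sketch of how such a variable could be declared, and the optional defaults shown are assumptions rather than the published interface:

```hcl
# Hypothetical sketch of a worker_nodes declaration; attribute names follow the
# README example above, but defaults and optional() handling are assumptions.
variable "worker_nodes" {
  description = "Per-node worker definitions: server type plus optional labels and taints."
  type = list(object({
    type   = string
    labels = optional(map(string), {})
    taints = optional(list(object({
      key    = string
      value  = string
      effect = string
    })), [])
  }))
  default = []
}
```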

network.tf

Lines changed: 3 additions & 3 deletions
@@ -84,7 +84,7 @@ resource "hcloud_primary_ip" "control_plane_ipv6" {
}

resource "hcloud_primary_ip" "worker_ipv4" {
-  count      = var.worker_count
+  count      = local.total_worker_count
  name       = "${local.cluster_prefix}worker-${count.index + 1}-ipv4"
  datacenter = data.hcloud_datacenter.this.name
  type       = "ipv4"
@@ -97,7 +97,7 @@ resource "hcloud_primary_ip" "worker_ipv4" {
}

resource "hcloud_primary_ip" "worker_ipv6" {
-  count      = var.enable_ipv6 ? var.worker_count > 0 ? var.worker_count : 1 : 0
+  count      = var.enable_ipv6 ? local.total_worker_count > 0 ? local.total_worker_count : 1 : 0
  name       = "${local.cluster_prefix}worker-${count.index + 1}-ipv6"
  datacenter = data.hcloud_datacenter.this.name
  type       = "ipv6"
@@ -139,6 +139,6 @@ locals {
    for index in range(var.control_plane_count > 0 ? var.control_plane_count : 1) : cidrhost(hcloud_network_subnet.nodes.ip_range, index + 101)
  ]
  worker_private_ipv4_list = [
-    for index in range(var.worker_count > 0 ? var.worker_count : 1) : cidrhost(hcloud_network_subnet.nodes.ip_range, index + 201)
+    for index in range(local.total_worker_count > 0 ? local.total_worker_count : 1) : cidrhost(hcloud_network_subnet.nodes.ip_range, index + 201)
  ]
}
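A quick aside on the address arithmetic above: `cidrhost(prefix, hostnum)` returns the hostnum-th address inside the given prefix, which is why control planes start at offset 101 and workers at offset 201. A minimal sketch, assuming an example nodes subnet of `10.0.1.0/24` (not necessarily the module's default):

```hcl
# Illustration of the cidrhost() offsets used above; 10.0.1.0/24 is an assumed subnet.
locals {
  example_subnet = "10.0.1.0/24"

  first_control_plane_ip = cidrhost(local.example_subnet, 101) # => "10.0.1.101"
  first_worker_ip        = cidrhost(local.example_subnet, 201) # => "10.0.1.201"
}
```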

outputs.tf

Lines changed: 6 additions & 4 deletions
@@ -9,7 +9,8 @@ output "kubeconfig" {
}

output "talos_client_configuration" {
-  value = data.talos_client_configuration.this
+  value     = data.talos_client_configuration.this
+  sensitive = true
}

output "talos_machine_configurations_control_plane" {
@@ -40,8 +41,9 @@ output "hetzner_network_id" {

output "talos_worker_ids" {
  description = "Server IDs of the hetzner talos workers machines"
-  value = {
-    for id, server in hcloud_server.workers : id => server.id
-  }
+  value = merge(
+    { for id, server in hcloud_server.workers_new : id => server.id },
+    { for id, server in hcloud_server.workers : id => server.id }
+  )
}
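Because `talos_worker_ids` now merges the maps from both `hcloud_server.workers_new` and `hcloud_server.workers`, callers still see a single name-to-ID map. A hypothetical consumer snippet, assuming the module is instantiated as `module "talos"` as in the README example:

```hcl
# Hypothetical wrapper output; "module.talos" refers to the README's module block.
output "all_worker_server_ids" {
  description = "Hetzner server IDs of all Talos workers, legacy and worker_nodes-based."
  value       = values(module.talos.talos_worker_ids)
}
```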

server.tf

Lines changed: 106 additions & 24 deletions
@@ -19,14 +19,57 @@ locals {
    (var.disable_arm ? null : data.hcloud_image.arm[0].id) : // Use ARM image if not disabled
    (var.disable_x86 ? null : data.hcloud_image.x86[0].id)   // Use x86 image if not disabled
  )
-  worker_image_id = (
-    var.worker_count > 0 ? # Only calculate if workers exist
-    (
-      substr(var.worker_server_type, 0, 3) == "cax" ?
-      (var.disable_arm ? null : data.hcloud_image.arm[0].id) : // Use ARM image if not disabled
-      (var.disable_x86 ? null : data.hcloud_image.x86[0].id)   // Use x86 image if not disabled
-    ) : null # No workers, no image needed
-  )
+
+  # Calculate total worker count from both old and new variables
+  legacy_worker_count = var.worker_count
+  new_worker_count    = length(var.worker_nodes)
+  total_worker_count  = local.legacy_worker_count + local.new_worker_count
+
+  # Generate worker node configurations from both old and new variables
+  legacy_workers = var.worker_count > 0 ? [
+    for i in range(var.worker_count) : {
+      index       = i
+      name        = "${local.cluster_prefix}worker-${i + 1}"
+      server_type = var.worker_server_type
+      image_id = (
+        substr(var.worker_server_type, 0, 3) == "cax" ?
+        (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
+        (var.disable_x86 ? null : data.hcloud_image.x86[0].id)
+      )
+      ipv4_public         = local.worker_public_ipv4_list[i]
+      ipv6_public         = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
+      ipv6_public_subnet  = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
+      ipv4_private        = local.worker_private_ipv4_list[i]
+      labels              = {}
+      taints              = []
+      node_group_index    = 0
+      node_in_group_index = i
+    }
+  ] : []
+
+  new_workers = [
+    for i, worker in var.worker_nodes : {
+      index       = local.legacy_worker_count + i
+      name        = "${local.cluster_prefix}worker-${local.legacy_worker_count + i + 1}"
+      server_type = worker.type
+      image_id = (
+        substr(worker.type, 0, 3) == "cax" ?
+        (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
+        (var.disable_x86 ? null : data.hcloud_image.x86[0].id)
+      )
+      ipv4_public        = local.worker_public_ipv4_list[local.legacy_worker_count + i]
+      ipv6_public        = var.enable_ipv6 ? local.worker_public_ipv6_list[local.legacy_worker_count + i] : null
+      ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[local.legacy_worker_count + i] : null
+      ipv4_private       = local.worker_private_ipv4_list[local.legacy_worker_count + i]
+      labels             = worker.labels
+      taints             = worker.taints
+    }
+  ]
+
+  # Combine legacy and new workers
+  workers = concat(local.legacy_workers, local.new_workers)
+
  control_planes = [
    for i in range(var.control_plane_count) : {
      index = i
@@ -37,16 +80,6 @@ locals {
      ipv4_private = local.control_plane_private_ipv4_list[i]
    }
  ]
-  workers = [
-    for i in range(var.worker_count) : {
-      index              = i
-      name               = "${local.cluster_prefix}worker-${i + 1}"
-      ipv4_public        = local.worker_public_ipv4_list[i],
-      ipv6_public        = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
-      ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
-      ipv4_private       = local.worker_private_ipv4_list[i]
-    }
-  ]
}

resource "tls_private_key" "ssh_key" {
@@ -108,20 +141,69 @@ resource "hcloud_server" "control_planes" {
}

resource "hcloud_server" "workers" {
-  for_each           = { for worker in local.workers : worker.name => worker }
+  for_each           = { for worker in local.legacy_workers : worker.name => worker }
  datacenter         = data.hcloud_datacenter.this.name
  name               = each.value.name
-  image              = local.worker_image_id
-  server_type        = var.worker_server_type
+  image              = each.value.image_id
+  server_type        = each.value.server_type
  user_data          = data.talos_machine_configuration.worker[each.value.name].machine_configuration
  ssh_keys           = [hcloud_ssh_key.this.id]
  placement_group_id = hcloud_placement_group.worker.id

-  labels = {
-    "cluster" = var.cluster_name,
-    "role"    = "worker"
+  labels = merge({
+    "cluster"     = var.cluster_name,
+    "role"        = "worker"
+    "server_type" = each.value.server_type
+  }, each.value.labels)
+
+  firewall_ids = [
+    hcloud_firewall.this.id
+  ]
+
+  public_net {
+    ipv4_enabled = true
+    ipv4         = hcloud_primary_ip.worker_ipv4[each.value.index].id
+    ipv6_enabled = var.enable_ipv6
+    ipv6         = var.enable_ipv6 ? hcloud_primary_ip.worker_ipv6[each.value.index].id : null
  }

+  network {
+    network_id = hcloud_network_subnet.nodes.network_id
+    ip         = each.value.ipv4_private
+    alias_ips  = [] # fix for https://github.com/hetznercloud/terraform-provider-hcloud/issues/650
+  }
+
+  depends_on = [
+    hcloud_network_subnet.nodes,
+    data.talos_machine_configuration.worker
+  ]
+
+  lifecycle {
+    ignore_changes = [
+      user_data,
+      image
+    ]
+  }
+}
+
+resource "hcloud_server" "workers_new" {
+  for_each           = { for worker in local.new_workers : worker.name => worker }
+  datacenter         = data.hcloud_datacenter.this.name
+  name               = each.value.name
+  image              = each.value.image_id
+  server_type        = each.value.server_type
+  user_data          = data.talos_machine_configuration.worker[each.value.name].machine_configuration
+  ssh_keys           = [hcloud_ssh_key.this.id]
+  placement_group_id = hcloud_placement_group.worker.id
+
+  labels = merge({
+    "cluster"     = var.cluster_name,
+    "role"        = "worker"
+    "server_type" = each.value.server_type
+  }, each.value.labels)
+
  firewall_ids = [
    hcloud_firewall.this.id
  ]
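One behavior implied by the `labels = merge({...}, each.value.labels)` pattern above: `merge` lets later arguments win, so a label supplied via `worker_nodes` overrides a module default with the same key. A minimal sketch with made-up values:

```hcl
# Illustrative only: later merge() arguments take precedence, as with the worker labels above.
locals {
  default_labels = {
    "cluster" = "mixed-cluster"
    "role"    = "worker"
  }
  user_labels = {
    "role"                             = "arm-worker" # overrides the default "worker"
    "node.kubernetes.io/instance-type" = "cax22"
  }
  effective_labels = merge(local.default_labels, local.user_labels)
  # => { "cluster" = "mixed-cluster", "role" = "arm-worker", "node.kubernetes.io/instance-type" = "cax22" }
}
```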

talos.tf

Lines changed: 1 addition & 14 deletions
@@ -103,20 +103,7 @@ data "talos_machine_configuration" "dummy_control_plane" {
  examples = false
}

-# Dummy configuration generated when worker_count is 0 for debugging purposes
-# tflint-ignore: terraform_unused_declarations
-data "talos_machine_configuration" "dummy_worker" {
-  count              = var.worker_count == 0 ? 1 : 0
-  talos_version      = var.talos_version
-  cluster_name       = var.cluster_name
-  cluster_endpoint   = local.cluster_endpoint_url_internal # Uses dummy endpoint when count is 0
-  kubernetes_version = var.kubernetes_version
-  machine_type       = "worker"
-  machine_secrets    = talos_machine_secrets.this.machine_secrets
-  config_patches     = concat([yamlencode(local.worker_yaml["dummy-worker-0"])], var.talos_worker_extra_config_patches) # Use dummy yaml + extra patches
-  docs               = false
-  examples           = false
-}
+

resource "talos_machine_bootstrap" "this" {
  count = var.control_plane_count > 0 ? 1 : 0

talos_patch_worker.tf

Lines changed: 44 additions & 22 deletions
@@ -1,15 +1,22 @@
locals {
  # Define a dummy worker entry for when count is 0
-  dummy_workers = var.worker_count == 0 ? [{
-    index              = 0
-    name               = "dummy-worker-0"
-    ipv4_public        = "0.0.0.0"                           # Fallback
-    ipv6_public        = null                                # Fallback
-    ipv6_public_subnet = null                                # Fallback
-    ipv4_private       = cidrhost(local.node_ipv4_cidr, 200) # Use a predictable dummy private IP
+  dummy_workers = local.total_worker_count == 0 ? [{
+    index               = 0
+    name                = "dummy-worker-0"
+    server_type         = "cx11"
+    image_id            = null
+    ipv4_public         = "0.0.0.0"                           # Fallback
+    ipv6_public         = null                                # Fallback
+    ipv6_public_subnet  = null                                # Fallback
+    ipv4_private        = cidrhost(local.node_ipv4_cidr, 200) # Use a predictable dummy private IP
+    labels              = {}
+    taints              = []
+    node_group_index    = 0
+    node_in_group_index = 0
  }] : []

-  # Combine real and dummy workers
+  # Combine real and dummy workers - always include dummy when no workers exist
+  # merged_workers = local.total_worker_count == 0 ? local.dummy_workers : local.workers
  merged_workers = concat(local.workers, local.dummy_workers)

  # Generate YAML for all (real or dummy) workers
@@ -23,20 +30,34 @@ locals {
        ]
      }
      certSANs = local.cert_SANs
-      kubelet = {
-        extraArgs = merge(
-          {
-            "cloud-provider"             = "external"
-            "rotate-server-certificates" = true
-          },
-          var.kubelet_extra_args
-        )
-        nodeIP = {
-          validSubnets = [
-            local.node_ipv4_cidr
-          ]
-        }
-      }
+      kubelet = merge(
+        {
+          extraArgs = merge(
+            {
+              "cloud-provider"             = "external"
+              "rotate-server-certificates" = true
+            },
+            var.kubelet_extra_args
+          )
+          nodeIP = {
+            validSubnets = [
+              local.node_ipv4_cidr
+            ]
+          }
+        },
+        # Add registerWithTaints if taints are defined
+        length(worker.taints) > 0 ? {
+          extraConfig = {
+            registerWithTaints = [
+              for taint in worker.taints : {
+                key    = taint.key
+                value  = taint.value
+                effect = taint.effect
+              }
+            ]
+          }
+        } : {}
+      )
      network = {
        extraHostEntries = local.extra_host_entries
        kubespan = {
@@ -70,6 +91,7 @@ locals {
          "time.cloudflare.com"
        ]
      }
+      nodeLabels = worker.labels
      registries = var.registries
    }
    cluster = {
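To make the conditional merge above concrete: for a worker carrying the single `arm64-only` taint from the README example (and with `kubelet_extra_args` left empty), the merged `kubelet` object would evaluate to roughly the following before `yamlencode` turns it into a Talos patch. This is a sketch; the subnet value is an assumption:

```hcl
# Sketch of the evaluated kubelet object for one tainted worker; 10.0.1.0/24 is
# an assumed node_ipv4_cidr, and kubelet_extra_args is assumed empty.
locals {
  example_kubelet = {
    extraArgs = {
      "cloud-provider"             = "external"
      "rotate-server-certificates" = true
    }
    nodeIP = {
      validSubnets = ["10.0.1.0/24"]
    }
    extraConfig = {
      registerWithTaints = [
        { key = "arm64-only", value = "true", effect = "NoSchedule" }
      ]
    }
  }
}
```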
