
Commit 59434e9

style: 🎨 apply terraform fmt
1 parent 121928b commit 59434e9
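
The diff below touches only whitespace: argument alignment and trailing-whitespace cleanup across four .tf files. A change like this is typically produced by running Terraform's built-in formatter from the module root. The exact invocation isn't recorded in the commit, so the following is only a sketch of the assumed command:

# Assumed invocation (not recorded in the commit); formats every *.tf file in the current directory.
terraform fmt

# Or, to also format any nested modules:
terraform fmt -recursive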

4 files changed, +36 -36 lines changed

outputs.tf

Lines changed: 3 additions & 3 deletions
@@ -9,7 +9,7 @@ output "kubeconfig" {
 }

 output "talos_client_configuration" {
-  value = data.talos_client_configuration.this
+  value     = data.talos_client_configuration.this
   sensitive = true
 }

@@ -42,8 +42,8 @@ output "hetzner_network_id" {
 output "talos_worker_ids" {
   description = "Server IDs of the hetzner talos workers machines"
   value = merge(
-    { for id, server in hcloud_server.workers_new : id => server.id},
-    { for id, server in hcloud_server.workers_legacy : id => server.id}
+    { for id, server in hcloud_server.workers_new : id => server.id },
+    { for id, server in hcloud_server.workers_legacy : id => server.id }
   )
 }

server.tf

Lines changed: 21 additions & 21 deletions
@@ -22,36 +22,36 @@ locals {

   # Calculate total worker count from both old and new variables
   legacy_worker_count = var.worker_count
-  new_worker_count = length(var.worker_nodes)
-  total_worker_count = local.legacy_worker_count + local.new_worker_count
-
+  new_worker_count    = length(var.worker_nodes)
+  total_worker_count  = local.legacy_worker_count + local.new_worker_count
+
   # Generate worker node configurations from both old and new variables
   legacy_workers = var.worker_count > 0 ? [
     for i in range(var.worker_count) : {
-      index = i
-      name = "${local.cluster_prefix}worker-${i + 1}"
-      server_type = var.worker_server_type
+      index       = i
+      name        = "${local.cluster_prefix}worker-${i + 1}"
+      server_type = var.worker_server_type
       image_id = (
         substr(var.worker_server_type, 0, 3) == "cax" ?
         (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
         (var.disable_x86 ? null : data.hcloud_image.x86[0].id)
       )
-      ipv4_public = local.worker_public_ipv4_list[i]
-      ipv6_public = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
-      ipv6_public_subnet = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
-      ipv4_private = local.worker_private_ipv4_list[i]
-      labels = {}
-      node_group_index = 0
+      ipv4_public         = local.worker_public_ipv4_list[i]
+      ipv6_public         = var.enable_ipv6 ? local.worker_public_ipv6_list[i] : null
+      ipv6_public_subnet  = var.enable_ipv6 ? local.worker_public_ipv6_subnet_list[i] : null
+      ipv4_private        = local.worker_private_ipv4_list[i]
+      labels              = {}
+      node_group_index    = 0
       node_in_group_index = i
     }
   ] : []
-
+

   new_workers = [
     for i, worker in var.worker_nodes : {
-      index = local.legacy_worker_count + i
-      name = "${local.cluster_prefix}worker-${local.legacy_worker_count + i + 1}"
-      server_type = worker.type
+      index       = local.legacy_worker_count + i
+      name        = "${local.cluster_prefix}worker-${local.legacy_worker_count + i + 1}"
+      server_type = worker.type
       image_id = (
         substr(worker.type, 0, 3) == "cax" ?
         (var.disable_arm ? null : data.hcloud_image.arm[0].id) :
@@ -64,7 +64,7 @@ locals {
       labels = worker.labels
     }
   ]
-
+
   # Combine legacy and new workers
   workers = concat(local.legacy_workers, local.new_workers)

@@ -149,8 +149,8 @@ resource "hcloud_server" "workers_legacy" {
   placement_group_id = hcloud_placement_group.worker.id

   labels = merge({
-    "cluster" = var.cluster_name,
-    "role" = "worker"
+    "cluster"     = var.cluster_name,
+    "role"        = "worker"
     "server_type" = each.value.server_type
   }, each.value.labels)

@@ -197,8 +197,8 @@ resource "hcloud_server" "workers_new" {
   placement_group_id = hcloud_placement_group.worker.id

   labels = merge({
-    "cluster" = var.cluster_name,
-    "role" = "worker"
+    "cluster"     = var.cluster_name,
+    "role"        = "worker"
     "server_type" = each.value.server_type
   }, each.value.labels)

talos_patch_worker.tf

Lines changed: 11 additions & 11 deletions
@@ -1,21 +1,21 @@
 locals {
   # Define a dummy worker entry for when count is 0
   dummy_workers = local.total_worker_count == 0 ? [{
-    index = 0
-    name = "dummy-worker-0"
-    server_type = "cx11"
-    image_id = null
-    ipv4_public = "0.0.0.0" # Fallback
-    ipv6_public = null # Fallback
-    ipv6_public_subnet = null # Fallback
-    ipv4_private = cidrhost(local.node_ipv4_cidr, 200) # Use a predictable dummy private IP
-    labels = {}
-    node_group_index = 0
+    index               = 0
+    name                = "dummy-worker-0"
+    server_type         = "cx11"
+    image_id            = null
+    ipv4_public         = "0.0.0.0" # Fallback
+    ipv6_public         = null # Fallback
+    ipv6_public_subnet  = null # Fallback
+    ipv4_private        = cidrhost(local.node_ipv4_cidr, 200) # Use a predictable dummy private IP
+    labels              = {}
+    node_group_index    = 0
     node_in_group_index = 0
   }] : []

   # Combine real and dummy workers - always include dummy when no workers exist
-  # merged_workers = local.total_worker_count == 0 ? local.dummy_workers : local.workers
+  # merged_workers = local.total_worker_count == 0 ? local.dummy_workers : local.workers
   merged_workers = concat(local.workers, local.dummy_workers)

   # Generate YAML for all (real or dummy) workers

variables.tf

Lines changed: 1 addition & 1 deletion
@@ -281,7 +281,7 @@ variable "worker_nodes" {
     error_message = "Invalid worker server type in worker_nodes."
   }
   validation {
-    condition = length(var.worker_nodes) <= 99
+    condition     = length(var.worker_nodes) <= 99
     error_message = "Total number of worker nodes must be less than 100."
   }
 }
