From fba1d7c276bdd38592af66ecaee79e653189e5ef Mon Sep 17 00:00:00 2001
From: Naman1997
Date: Sun, 8 Sep 2024 15:01:28 +1000
Subject: [PATCH 1/2] Upgrade provider and remove old doc section

---
 README.md               |  18 ---
 docs/Wireguard_Setup.md | 256 ----------------------------------------
 main.tf                 |   2 +-
 modules/domain/main.tf  |   2 +-
 4 files changed, 2 insertions(+), 276 deletions(-)
 delete mode 100644 docs/Wireguard_Setup.md

diff --git a/README.md b/README.md
index 67efbb8..a5dad2d 100644
--- a/README.md
+++ b/README.md
@@ -50,24 +50,6 @@ terraform plan
 terraform apply --auto-approve
 ```
 
-## Using HAProxy as a Load Balancer for an Ingress
-
-Since HAProxy is load-balancing ports 80 and 443 (of the worker nodes), we can deploy the nginx ingress controller so that it uses those ports as its external load-balancer IP.
-
-```
-kubectl label ns ingress-nginx pod-security.kubernetes.io/enforce=privileged
-# Update the IP address in the controller yaml
-vim ./nginx-example/nginx-controller.yaml
-helm install ingress-nginx ingress-nginx/ingress-nginx -n ingress-nginx --values ./nginx-example/nginx-controller.yaml --create-namespace
-kubectl create deployment nginx --image=nginx --replicas=5
-kubectl expose deploy nginx --port 80
-# Edit this config to point to your domain
-vim ./nginx-example/ingress.yaml.example
-mv ./nginx-example/ingress.yaml.example ./nginx-example/ingress.yaml
-kubectl create -f ./nginx-example/ingress.yaml
-curl -k https://192.168.0.101
-```
-
 ## Expose your cluster to the internet (Optional)
 
 It is possible to expose your cluster to the internet over a small VPS even if both the VPS IP and your public IP are dynamic. This works by setting up dynamic DNS for both your internal network and the VPS using something like DuckDNS.
diff --git a/docs/Wireguard_Setup.md b/docs/Wireguard_Setup.md
deleted file mode 100644
index 88fe925..0000000
--- a/docs/Wireguard_Setup.md
+++ /dev/null
@@ -1,256 +0,0 @@
-# Exposing private cluster over public internet
-
-This guide assumes that you're using a VM in OCI - this is not required, and you can follow this guide with any other cloud provider with slight changes.
-It also assumes that the WireGuard client and server are running Ubuntu - swap in the installation commands for your distro if needed.
-
-
-## Create the VM in OCI with a .pem file for ssh access
-
-Make sure you're able to ssh into the VM. Then run the following commands:
-
-```
-sudo apt-get update -y && sudo apt-get upgrade -y
-sudo apt-get dist-upgrade -y
-curl -fsSL https://get.docker.com -o get-docker.sh
-sh get-docker.sh
-sudo apt-get install docker-compose wireguard -y
-sudo reboot
-```
-
-## Setting up DuckDNS
-
-This step is only needed if you don't own a domain already.
-Create an account on [DuckDNS](https://www.duckdns.org/). After logging in, you'll see your token and your list of subdomains.
-Create a new subdomain and keep its value handy for the following command.
-
-```
-mkdir duckdns
-cd duckdns
-# Using DuckDNS for a subdomain - update subdomains list and token value
-sudo docker run -d \
-  --name=duckdns \
-  -e PUID=1000 \
-  -e PGID=1000 \
-  -e TZ=Asia/Kolkata \
-  -e SUBDOMAINS=example.duckdns.org \
-  -e TOKEN=token_value \
-  -e LOG_FILE=true \
-  -v `pwd`:/config `#optional` \
-  --restart unless-stopped \
-  lscr.io/linuxserver/duckdns:latest
-
-# Go back to the home dir
-cd ..
-```
-
-## Setting up Nginx Proxy Manager
-
-Open only ports 80 and 443 in the VM subnet settings.
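-
-Note that on OCI the subnet security list is not the only firewall: the stock Ubuntu images there also ship host-level iptables rules that reject most inbound traffic. A minimal sketch of opening the web ports on the VM itself (assuming the default OCI Ubuntu rules and the iptables-persistent package; adapt to your setup):
-
-```
-# Allow HTTP/HTTPS through the host firewall
-sudo iptables -I INPUT -p tcp --dport 80 -j ACCEPT
-sudo iptables -I INPUT -p tcp --dport 443 -j ACCEPT
-# Persist the rules across reboots (assumes iptables-persistent is installed)
-sudo netfilter-persistent save
-```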
-
-Port 8080 is for managing the proxy manager (given the port mapping in this config). By default it ships with very weak credentials, so we'll forward the port over SSH later and configure it securely.
-```
-mkdir nginx-proxy-manager
-cd nginx-proxy-manager
-vim docker-compose.yml
-```
-
-Paste the following into the file. Update the usernames and passwords as needed.
-```
-version: "3"
-services:
-  app:
-    image: 'jc21/nginx-proxy-manager:latest'
-    restart: unless-stopped
-    ports:
-      # These ports are in format host:container
-      - '80:80' # Public HTTP Port
-      - '443:443' # Public HTTPS Port
-      - '8080:81' # Admin Web Port
-      # Add any other Stream port you want to expose
-      # - '21:21' # FTP
-    environment:
-      DB_MYSQL_HOST: "db"
-      DB_MYSQL_PORT: 3306
-      DB_MYSQL_USER: "username"
-      DB_MYSQL_PASSWORD: "password"
-      DB_MYSQL_NAME: "username"
-      # Uncomment this if IPv6 is not enabled on your host
-      DISABLE_IPV6: 'true'
-    volumes:
-      - ./data:/data
-      - ./letsencrypt:/etc/letsencrypt
-    depends_on:
-      - db
-
-  db:
-    image: 'jc21/mariadb-aria:latest'
-    restart: unless-stopped
-    environment:
-      MYSQL_ROOT_PASSWORD: 'password'
-      MYSQL_DATABASE: 'username'
-      MYSQL_USER: 'username'
-      MYSQL_PASSWORD: 'password'
-    volumes:
-      - ./data/mysql:/var/lib/mysql
-```
-
-Deploy Nginx Proxy Manager:
-```
-sudo docker-compose up -d
-```
-If you need to re-create this compose stack after the WireGuard connection is up, make sure to delete the data and letsencrypt dirs first.
-
-
-SSH in using the -L flag to port-forward 8080:
-```
-ssh -L 8080:127.0.0.1:8080 ubuntu@IP
-```
-On your local machine, go to http://localhost:8080/login
-Update the username and password from admin@example.com/changeme to something more secure.
-
-
-
-## Setting up WireGuard
-
-Many ISPs and cloud setups put you behind CGNAT, which blocks the inbound connections WireGuard needs.
-Follow the steps mentioned in this [repo](https://github.com/mochman/Bypass_CGNAT.git) to get around this.
-
-In the following instructions:
-
-Client = Raspberry Pi/VM running HAProxy
-
-Server = VPS in the cloud
-
-Assuming that you're using OCI as your cloud provider, you may have to follow the instructions below to fix some issues with the script.
-
-- Modify the Endpoint on the client side to use the DuckDNS subdomain
-- Fix the public key on the client side - this will require regenerating the wg keys for both client and server, as the script seems to mess up the client's public key
-
-```
-# On both client and server
-wg genkey | tee privatekey | wg pubkey > publickey
-```
-
-- Copy the private key from the 'privatekey' file and update it in this file:
-
-```
-sudo vim /etc/wireguard/wg0.conf
-```
-- Copy the client's public key into the server's config, and vice versa
-
-On the client, run the script mentioned below and select option '2) Reload Wireguard Service'. It will ask some questions regarding the config - just press enter from that point onwards to select the defaults.
-
-```
-# Only on the client
-./Oracle_Installer.sh
-sudo systemctl restart wg-quick@wg0.service
-```
-
-- Make sure the server and client are able to ping each other
-- One way to check this is to use wg show
-
-```
-sudo wg show
-```
-- If you see anything in the 'transfer' section, the VPN is working!
-
-If you're still having trouble, refer to the client and server configs shown below.
-
-```
-# Server
-[Interface]
-Address = 10.1.0.1/24
-SaveConfig = true
-PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o br-e9809ca86b25 -j MASQUERADE;
-PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o br-e9809ca86b25 -j MASQUERADE;
-ListenPort = 55108
-PrivateKey = server_private_key
-
-[Peer]
-PublicKey = client_public_key
-AllowedIPs = 10.1.0.2/32
-```
-
-```
-# Client
-[Interface]
-PrivateKey = client_private_key
-Address = 10.1.0.2/24
-
-
-[Peer]
-PublicKey = server_public_key
-AllowedIPs = 0.0.0.0/0
-Endpoint = example.duckdns.org:55108
-PersistentKeepalive = 25
-```
-
-## Re-using the same subdomain by using multi-path ingresses
-
-Let's say that you have one subdomain "example.duckdns.org" and you want to host a bunch of websites under it, with paths something like this:
-
-- example.duckdns.org/wordpress
-- example.duckdns.org/blog
-- example.duckdns.org/docs
-- example.duckdns.org/grafana
-
-To make this work properly, you need to rewrite the ingress path before the request reaches the service endpoint in Kubernetes. This is done with a rewrite-target annotation on your ingress; the exact annotation depends on which ingress controller your cluster is running.
-
-If you're using the nginx ingress controller as shown in the main readme file of this repo, add the following annotation to your ingress resources.
-
-```
-nginx.ingress.kubernetes.io/rewrite-target: /$2
-```
-
-This rewrites the request path for any ingress path with the following syntax:
-
-```
-- path: /something(/|$)(.*)
-```
-
-Read [this](https://github.com/kubernetes/ingress-nginx/blob/main/docs/examples/rewrite/README.md) document to learn more about the nginx ingress controller's rewrite-target annotation.
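-
-For example, with the annotation and path syntax above, the second capture group becomes the upstream path, so the prefix is stripped before the request hits the service (the requests below are illustrative, not from the original doc):
-
-```
-# path: /something(/|$)(.*)  with  rewrite-target: /$2
-# https://example.duckdns.org/something/        -> backend receives /
-# https://example.duckdns.org/something/foo/bar -> backend receives /foo/bar
-```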
-
-### Example Ingress
-
-```
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  annotations:
-    meta.helm.sh/release-name: longhorn
-    meta.helm.sh/release-namespace: longhorn-system
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
-  generation: 2
-  labels:
-    app: longhorn-ingress
-    app.kubernetes.io/instance: longhorn
-    app.kubernetes.io/managed-by: Helm
-    app.kubernetes.io/name: longhorn
-    app.kubernetes.io/version: v1.4.0
-    helm.sh/chart: longhorn-1.4.0
-  name: longhorn-ingress
-  namespace: longhorn-system
-spec:
-  ingressClassName: nginx
-  rules:
-  - host: example.duckdns.org
-    http:
-      paths:
-      - backend:
-          service:
-            name: longhorn-frontend
-            port:
-              number: 80
-        path: /longhorn(/|$)(.*)
-        pathType: ImplementationSpecific
-status:
-  loadBalancer:
-    ingress:
-    - ip: 192.168.0.101
-```
-
-Now Longhorn will be accessible at `https://example.duckdns.org/longhorn/`
-
-## Notes
-
-Make sure that your IP is not being leaked by checking your subdomain on https://ipleak.net/
\ No newline at end of file
diff --git a/main.tf b/main.tf
index 6aa5995..7c3c5f4 100644
--- a/main.tf
+++ b/main.tf
@@ -7,7 +7,7 @@ terraform {
     }
     proxmox = {
       source  = "bpg/proxmox"
-      version = "0.57.1"
+      version = "0.63.0"
     }
   }
 }
diff --git a/modules/domain/main.tf b/modules/domain/main.tf
index 057e873..bb6e142 100644
--- a/modules/domain/main.tf
+++ b/modules/domain/main.tf
@@ -2,7 +2,7 @@ terraform {
   required_providers {
     proxmox = {
       source  = "bpg/proxmox"
-      version = "0.57.1"
+      version = "0.63.0"
     }
   }
 }

From 9ca35784723d3edaa0ad1bc5d699a91252026b7c Mon Sep 17 00:00:00 2001
From: Naman1997
Date: Wed, 25 Sep 2024 15:06:50 +1000
Subject: [PATCH 2/2] Upgrade provider and auto-create a VM for lb

---
 README.md                  |   9 ++-
 main.tf                    |  32 ++++++---
 modules/domain/main.tf     |   2 +-
 modules/proxy/main.tf      | 139 +++++++++++++++++++++++++++++++++++++
 modules/proxy/output.tf    |   3 +
 modules/proxy/variables.tf |  18 +++++
 terraform.tfvars.example   |   5 -
 variables.tf               |  16 -----
 8 files changed, 187 insertions(+), 37 deletions(-)
 create mode 100644 modules/proxy/main.tf
 create mode 100644 modules/proxy/output.tf
 create mode 100644 modules/proxy/variables.tf

diff --git a/README.md b/README.md
index a5dad2d..cf0fdad 100644
--- a/README.md
+++ b/README.md
@@ -22,14 +22,13 @@ Automated talos cluster with system extensions
 
 Docker is mandatory on the `Client` as this project builds a custom talos image with system extensions using the [imager](https://github.com/siderolabs/talos/pkgs/container/installer) docker image on the `Client` itself.
 
-## Create an HA Proxy Server
+## Options for creating an HA Proxy Server
 
-You can use the [no-lb](https://github.com/Naman1997/simple-talos-cluster/tree/no-lb) branch in case you do not want to use an external load-balancer. This branch uses the 1st master node that gets created as the cluster endpoint.
-
-I've installed `haproxy` on my Raspberry Pi. You can choose to do the same in an LXC container or a VM.
+The `main` branch will automatically create a VM for a load balancer with 2 CPUs and 2 GiB of memory on your Proxmox node.
 
-You need to have passwordless SSH access to a user (from the Client node) in this node which has the permissions to modify the file `/etc/haproxy/haproxy.cfg` and permissions to run `sudo systemctl restart haproxy`. An example is covered in this [doc](docs/HA_Proxy.md).
+You can use the [no-lb](https://github.com/Naman1997/simple-talos-cluster/tree/no-lb) branch in case you do not want to use an external load-balancer. This branch uses the 1st master node that gets created as the cluster endpoint.
+
+Another option is to use the [manual-lb](https://github.com/Naman1997/simple-talos-cluster/tree/manual-lb) branch in case you wish to create an external load balancer manually.
 
 ## Create the terraform.tfvars file
 
diff --git a/main.tf b/main.tf
index 7c3c5f4..b5ae608 100644
--- a/main.tf
+++ b/main.tf
@@ -7,7 +7,7 @@ terraform {
     }
     proxmox = {
       source  = "bpg/proxmox"
-      version = "0.63.0"
+      version = "0.65.0"
     }
   }
 }
@@ -24,6 +24,7 @@ data "external" "versions" {
 }
 
 locals {
+  ha_proxy_user       = "ubuntu"
   qemu_ga_version     = data.external.versions.result["qemu_ga_version"]
   amd_ucode_version   = data.external.versions.result["amd_ucode_version"]
   intel_ucode_version = data.external.versions.result["intel_ucode_version"]
@@ -159,10 +160,19 @@ module "worker_domain" {
   scan_interface = var.INTERFACE_TO_SCAN
 }
 
+module "proxy" {
+  source         = "./modules/proxy"
+  ha_proxy_user  = local.ha_proxy_user
+  DEFAULT_BRIDGE = var.DEFAULT_BRIDGE
+  TARGET_NODE    = var.TARGET_NODE
+  ssh_key        = join("", [var.SSH_KEY, ".pub"])
+}
+
 resource "local_file" "haproxy_config" {
   depends_on = [
     module.master_domain.node,
-    module.worker_domain.node
+    module.worker_domain.node,
+    module.proxy.node
   ]
 
   content = templatefile("${path.root}/templates/haproxy.tmpl",
@@ -181,17 +191,17 @@ resource "local_file" "haproxy_config" {
     destination = "/etc/haproxy/haproxy.cfg"
     connection {
       type        = "ssh"
-      host        = var.ha_proxy_server
-      user        = var.ha_proxy_user
-      private_key = file(var.ha_proxy_key)
+      host        = module.proxy.proxy_ipv4_address
+      user        = local.ha_proxy_user
+      private_key = file(var.SSH_KEY)
     }
   }
 
   provisioner "remote-exec" {
     connection {
-      host        = var.ha_proxy_server
-      user        = var.ha_proxy_user
-      private_key = file(var.ha_proxy_key)
+      host        = module.proxy.proxy_ipv4_address
+      user        = local.ha_proxy_user
+      private_key = file(var.SSH_KEY)
     }
     script = "${path.root}/scripts/haproxy.sh"
   }
@@ -200,11 +210,13 @@ resource "local_file" "talosctl_config" {
   depends_on = [
     module.master_domain.node,
-    module.worker_domain.node
+    module.worker_domain.node,
+    module.proxy.node,
+    resource.local_file.haproxy_config
   ]
 
   content = templatefile("${path.root}/templates/talosctl.tmpl",
     {
-      load_balancer      = var.ha_proxy_server,
+      load_balancer      = module.proxy.proxy_ipv4_address,
       node_map_masters   = tolist(module.master_domain.*.address),
       node_map_workers   = tolist(module.worker_domain.*.address)
       primary_controller = module.master_domain[0].address
diff --git a/modules/domain/main.tf b/modules/domain/main.tf
index bb6e142..fed0e59 100644
--- a/modules/domain/main.tf
+++ b/modules/domain/main.tf
@@ -2,7 +2,7 @@ terraform {
   required_providers {
     proxmox = {
       source  = "bpg/proxmox"
-      version = "0.63.0"
+      version = "0.65.0"
     }
   }
 }
diff --git a/modules/proxy/main.tf b/modules/proxy/main.tf
new file mode 100644
index 0000000..9dbdaae
--- /dev/null
+++ b/modules/proxy/main.tf
@@ -0,0 +1,139 @@
+terraform {
+  required_providers {
+    proxmox = {
+      source  = "bpg/proxmox"
+      version = "0.65.0"
+    }
+  }
+}
+
+data "local_file" "ssh_public_key" {
+  filename = pathexpand(var.ssh_key)
+}
+
+resource "proxmox_virtual_environment_file" "cloud_config" {
+  content_type = "snippets"
+  datastore_id = "local"
+  node_name    = var.TARGET_NODE
+
+  source_raw {
+    data = <<-EOF
+    #cloud-config
+    users:
+      - default
+      - name: ${var.ha_proxy_user}
+        groups:
+          - sudo
+        shell: /bin/bash
+        ssh_authorized_keys:
+          - ${trimspace(data.local_file.ssh_public_key.content)}
+        sudo: ALL=(ALL) NOPASSWD:ALL
+    runcmd:
+        - apt update -y && apt dist-upgrade -y
+        - apt install -y qemu-guest-agent haproxy net-tools unattended-upgrades
+        - timedatectl set-timezone America/Toronto
+        - systemctl enable qemu-guest-agent
+        - systemctl enable --now haproxy
+        - systemctl start qemu-guest-agent
+        - chown -R ${var.ha_proxy_user}:${var.ha_proxy_user} /etc/haproxy/
+        - echo "done" > /tmp/cloud-config.done
+    EOF
+
+    file_name = "cloud-config.yaml"
+  }
+}
+
+resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
+  content_type = "iso"
+  datastore_id = "local"
+  node_name    = var.TARGET_NODE
+  url          = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
+
+  lifecycle {
+    prevent_destroy = true
+  }
+}
+
+resource "proxmox_virtual_environment_vm" "node" {
+  name      = "haproxy"
+  node_name = var.TARGET_NODE
+
+  agent {
+    enabled = true
+  }
+
+  cpu {
+    cores = 2
+  }
+
+  memory {
+    dedicated = 2048
+  }
+
+  disk {
+    datastore_id = "local-lvm"
+    file_id      = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
+    interface    = "virtio0"
+    iothread     = true
+    discard      = "on"
+    size         = 20
+  }
+
+  initialization {
+    ip_config {
+      ipv4 {
+        address = "dhcp"
+      }
+    }
+
+    user_data_file_id = proxmox_virtual_environment_file.cloud_config.id
+  }
+
+  network_device {
+    bridge = var.DEFAULT_BRIDGE
+  }
+
+  provisioner "local-exec" {
+    command = <<-EOT
+      n=0
+      until [ "$n" -ge 10 ]
+      do
+        echo "Attempt number: $n"
+        ssh-keygen -R $ADDRESS
+        if [ $? -eq 0 ]; then
+          echo "Successfully removed $ADDRESS"
+          break
+        fi
+        n=$((n+1))
+        sleep $[ ( $RANDOM % 10 ) + 1 ]s
+      done
+    EOT
+    environment = {
+      ADDRESS = element(flatten(self.ipv4_addresses), 1)
+    }
+    when = destroy
+  }
+
+  provisioner "local-exec" {
+    command = <<-EOT
+      n=0
+      until [ "$n" -ge 10 ]
+      do
+        echo "Attempt number: $n"
+        ssh-keyscan -H $ADDRESS >> ~/.ssh/known_hosts
+        ssh -q -o StrictHostKeyChecking=no ${var.ha_proxy_user}@$ADDRESS exit < /dev/null
+        if [ $? -eq 0 ]; then
+          echo "Successfully added $ADDRESS"
+          break
+        fi
+        n=$((n+1))
+        sleep $[ ( $RANDOM % 10 ) + 1 ]s
+      done
+    EOT
+    environment = {
+      ADDRESS = element(flatten(self.ipv4_addresses), 1)
+    }
+    when = create
+  }
+
+}
\ No newline at end of file
diff --git a/modules/proxy/output.tf b/modules/proxy/output.tf
new file mode 100644
index 0000000..45d43aa
--- /dev/null
+++ b/modules/proxy/output.tf
@@ -0,0 +1,3 @@
+output "proxy_ipv4_address" {
+  value = proxmox_virtual_environment_vm.node.ipv4_addresses[1][0]
+}
\ No newline at end of file
diff --git a/modules/proxy/variables.tf b/modules/proxy/variables.tf
new file mode 100644
index 0000000..126fbfc
--- /dev/null
+++ b/modules/proxy/variables.tf
@@ -0,0 +1,18 @@
+variable "ha_proxy_user" {
+  description = "Username for proxy VM"
+  type        = string
+}
+
+variable "DEFAULT_BRIDGE" {
+  description = "Bridge to use when creating VMs in proxmox"
+  type        = string
+}
+
+variable "TARGET_NODE" {
+  description = "Target node name in proxmox"
+  type        = string
+}
+
+variable "ssh_key" {
+  description = "Public SSH key to be authorized"
+}
\ No newline at end of file
diff --git a/terraform.tfvars.example b/terraform.tfvars.example
index a162511..4679480 100644
--- a/terraform.tfvars.example
+++ b/terraform.tfvars.example
@@ -26,11 +26,6 @@ worker_config = {
   sockets = 1
 }
 
-# HA Proxy config
-ha_proxy_server = "192.168.0.101"
-ha_proxy_user = "wireproxy"
-ha_proxy_key = "/home/user/.ssh/id_rsa"
-
 # Leave this empty if you are not sure/have a single NIC
 # Change this to virbr0 if you're running proxmox inside a KVM VM
 # You may need to use sudo for terraform apply due to this
diff --git a/variables.tf b/variables.tf
index 58e4000..3ba5e05 100644
--- a/variables.tf
+++ b/variables.tf
@@ -87,22 +87,6 @@ variable "worker_config" {
   })
 }
 
-# HA Proxy config
-variable "ha_proxy_server" {
-  description = "IP address of server running haproxy"
-  type        = string
-}
-
-variable "ha_proxy_user" {
-  description = "User on ha_proxy_server that can modify '/etc/haproxy/haproxy.cfg' and restart haproxy.service"
-  type        = string
-}
-
-variable "ha_proxy_key" {
-  description = "SSH key used to log in ha_proxy_server"
-  type        = string
-}
-
 variable "INTERFACE_TO_SCAN" {
   description = "Interface that you wish to scan for finding the talos VMs. Leave this empty for default value."
   type        = string
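
A note on upgrading: these two commits bump the pinned `bpg/proxmox` provider from 0.57.1 to 0.63.0 and then to 0.65.0, so existing checkouts have to refresh their provider lock file before the next plan. A minimal sketch of the upgrade steps (standard Terraform CLI usage, not part of this patch):

```
terraform init -upgrade        # fetch bpg/proxmox 0.65.0 and update .terraform.lock.hcl
terraform plan                 # review the new haproxy VM and the removed ha_proxy_* variables
terraform apply --auto-approve
```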