From f9429148f198583710a4f3d3d003be2f8f4cf210 Mon Sep 17 00:00:00 2001
From: Jairus Christensen
Date: Tue, 25 Mar 2025 13:13:46 -0600
Subject: [PATCH 1/3] Initial attempt, reviewed by corentone

---
 .../kubeconfig/create-kubeconfig-secret.sh |  123 ++++
 examples/kubeconfig/go.mod                 |   93 +++
 examples/kubeconfig/go.sum                 |  249 +++++++
 examples/kubeconfig/main.go                |  313 +++++++++
 providers/kubeconfig/clustermanager.go     |  124 ++++
 providers/kubeconfig/interfaces.go         |   74 ++
 providers/kubeconfig/provider.go           |  635 ++++++++++++++++++
 providers/kubeconfig/reconciler.go         |  137 ++++
 8 files changed, 1748 insertions(+)
 create mode 100755 examples/kubeconfig/create-kubeconfig-secret.sh
 create mode 100644 examples/kubeconfig/go.mod
 create mode 100644 examples/kubeconfig/go.sum
 create mode 100644 examples/kubeconfig/main.go
 create mode 100644 providers/kubeconfig/clustermanager.go
 create mode 100644 providers/kubeconfig/interfaces.go
 create mode 100644 providers/kubeconfig/provider.go
 create mode 100644 providers/kubeconfig/reconciler.go

diff --git a/examples/kubeconfig/create-kubeconfig-secret.sh b/examples/kubeconfig/create-kubeconfig-secret.sh
new file mode 100755
index 0000000..fdaa03c
--- /dev/null
+++ b/examples/kubeconfig/create-kubeconfig-secret.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+# Script to create a kubeconfig secret for the Multicluster Failover Operator
+
+set -e
+
+# Default values
+NAMESPACE="my-operator-namespace"
+KUBECONFIG_PATH="${HOME}/.kube/config"
+KUBECONFIG_CONTEXT=""
+SECRET_NAME=""
+DRY_RUN="false"
+
+# Function to display usage information
+function show_help {
+    echo "Usage: $0 [options]"
+    echo "  -n, --name NAME        Name for the secret (will be used as cluster identifier)"
+    echo "  -s, --namespace NS     Namespace to create the secret in (default: ${NAMESPACE})"
+    echo "  -k, --kubeconfig PATH  Path to kubeconfig file (default: ${KUBECONFIG_PATH})"
+    echo "  -c, --context CONTEXT  Kubeconfig context to use (default: current-context)"
+    echo "  -d, --dry-run          Dry run, print YAML but don't apply"
+    echo "  -h, --help             Show this help message"
+    echo ""
+    echo "Example: $0 -n cluster1 -c prod-cluster -k ~/.kube/config"
+}
+
+# Parse command line options
+while [[ $# -gt 0 ]]; do
+    key="$1"
+    case $key in
+        -n|--name)
+            SECRET_NAME="$2"
+            shift 2
+            ;;
+        -s|--namespace)
+            NAMESPACE="$2"
+            shift 2
+            ;;
+        -k|--kubeconfig)
+            KUBECONFIG_PATH="$2"
+            shift 2
+            ;;
+        -c|--context)
+            KUBECONFIG_CONTEXT="$2"
+            shift 2
+            ;;
+        -d|--dry-run)
+            DRY_RUN="true"
+            shift 1
+            ;;
+        -h|--help)
+            show_help
+            exit 0
+            ;;
+        *)
+            echo "Unknown option: $1"
+            show_help
+            exit 1
+            ;;
+    esac
+done
+
+# Validate required arguments
+if [ -z "$SECRET_NAME" ]; then
+    echo "ERROR: Secret name is required (-n, --name)"
+    show_help
+    exit 1
+fi
+
+if [ ! -f "$KUBECONFIG_PATH" ]; then
+    echo "ERROR: Kubeconfig file not found at: $KUBECONFIG_PATH"
+    exit 1
+fi
+
+# Process the kubeconfig
+echo "Processing kubeconfig..."
+TEMP_KUBECONFIG=$(mktemp)
+trap "rm -f $TEMP_KUBECONFIG" EXIT
+
+if [ -n "$KUBECONFIG_CONTEXT" ]; then
+    if ! kubectl config view --raw --minify --flatten \
+        --context="$KUBECONFIG_CONTEXT" > "$TEMP_KUBECONFIG"; then
+        echo "ERROR: Failed to extract context '$KUBECONFIG_CONTEXT' from kubeconfig"
+        exit 1
+    fi
+    echo "Extracted context '$KUBECONFIG_CONTEXT' from kubeconfig"
+else
+    cp "$KUBECONFIG_PATH" "$TEMP_KUBECONFIG"
+    echo "Using entire kubeconfig file"
+fi
+
+# Encode the kubeconfig
+KUBECONFIG_B64=$(base64 < "$TEMP_KUBECONFIG" | tr -d '\n')
+
+# Create the namespace if it doesn't exist
+if [ "$DRY_RUN" != "true" ]; then
+    kubectl get namespace "$NAMESPACE" &>/dev/null || kubectl create namespace "$NAMESPACE"
+fi
+
+# Generate the secret YAML
+SECRET_YAML=$(cat <
Date: Tue, 25 Mar 2025 15:30:57 -0600
Subject: [PATCH 2/3] Super simplify - reviewed by stts and embik

---
 examples/kubeconfig/controllers/pod_lister.go | 102 +++
 examples/kubeconfig/go.mod                    |  38 +-
 examples/kubeconfig/go.sum                    |  59 --
 examples/kubeconfig/main.go                   | 311 ++--
 .../{ => scripts}/create-kubeconfig-secret.sh |   0
 pkg/manager/manager.go                        |  11 +-
 providers/kubeconfig/clustermanager.go        | 124 ---
 providers/kubeconfig/interfaces.go            |  74 --
 providers/kubeconfig/provider.go              | 826 +++++++++---------
 providers/kubeconfig/reconciler.go            | 137 ---
 10 files changed, 583 insertions(+), 1099 deletions(-)
 create mode 100644 examples/kubeconfig/controllers/pod_lister.go
 rename examples/kubeconfig/{ => scripts}/create-kubeconfig-secret.sh (100%)
 delete mode 100644 providers/kubeconfig/clustermanager.go
 delete mode 100644 providers/kubeconfig/interfaces.go
 delete mode 100644 providers/kubeconfig/reconciler.go

diff --git a/examples/kubeconfig/controllers/pod_lister.go b/examples/kubeconfig/controllers/pod_lister.go
new file mode 100644
index 0000000..fb3a1ce
--- /dev/null
+++ b/examples/kubeconfig/controllers/pod_lister.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package controllers + +import ( + "context" + "time" + + "github.com/go-logr/logr" + + corev1 "k8s.io/api/core/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" +) + +// PodWatcher is a simple controller that watches pods across multiple clusters +type PodWatcher struct { + Manager mcmanager.Manager + Log logr.Logger +} + +// NewPodWatcher creates a new PodWatcher +func NewPodWatcher(mgr mcmanager.Manager) *PodWatcher { + return &PodWatcher{ + Manager: mgr, + Log: ctrllog.Log.WithName("pod-watcher"), + } +} + +// Start implements Runnable +func (p *PodWatcher) Start(ctx context.Context) error { + // Nothing to do here - we'll handle everything in Engage + return nil +} + +// Engage implements multicluster.Aware and gets called when a new cluster is engaged +func (p *PodWatcher) Engage(ctx context.Context, clusterName string, cl cluster.Cluster) error { + log := p.Log.WithValues("cluster", clusterName) + log.Info("Engaging cluster") + + // Start a goroutine to periodically list pods + go func() { + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + // Initial list + if err := p.listPods(ctx, cl, clusterName, log); err != nil { + log.Error(err, "Failed to list pods") + } + + for { + select { + case <-ctx.Done(): + log.Info("Context done, stopping pod watcher") + return + case <-ticker.C: + if err := p.listPods(ctx, cl, clusterName, log); err != nil { + log.Error(err, "Failed to list pods") + } + } + } + }() + + return nil +} + +// listPods lists pods in the default namespace +func (p *PodWatcher) listPods(ctx context.Context, cl cluster.Cluster, clusterName string, log logr.Logger) error { + var pods corev1.PodList + if err := cl.GetClient().List(ctx, &pods, &client.ListOptions{ + Namespace: "default", + }); err != nil { + return err + } + + log.Info("Pods in default namespace", "count", len(pods.Items)) + for _, pod := range pods.Items { + log.Info("Pod", + "name", pod.Name, + "status", pod.Status.Phase) + } + + return nil +} diff --git a/examples/kubeconfig/go.mod b/examples/kubeconfig/go.mod index cc7f0b5..164e719 100644 --- a/examples/kubeconfig/go.mod +++ b/examples/kubeconfig/go.mod @@ -2,28 +2,25 @@ module github.com/christensenjairus/multicluster-runtime/examples/kubeconfig go 1.24.1 +replace sigs.k8s.io/multicluster-runtime => ../../ + require ( - k8s.io/apimachinery v0.32.3 + github.com/go-logr/logr v1.4.2 + golang.org/x/sync v0.8.0 + k8s.io/api v0.32.3 k8s.io/client-go v0.32.3 sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/multicluster-runtime v0.20.0-alpha.5 ) require ( - cel.dev/expr v0.18.0 // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect 
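
Stepping back to pod_lister.go above for a moment, before the remaining dependency changes: the cl.GetClient().List call it uses accepts other standard controller-runtime list options as well. A hedged sketch of a variant that filters by label, using only imports already present in that file; the "app: demo" label is purely illustrative:

    // listLabeledPods narrows the listing from listPods above to pods
    // carrying a given label; InNamespace and MatchingLabels are standard
    // controller-runtime client list options.
    func listLabeledPods(ctx context.Context, cl cluster.Cluster, log logr.Logger) error {
        var pods corev1.PodList
        if err := cl.GetClient().List(ctx, &pods,
            client.InNamespace("default"),
            client.MatchingLabels{"app": "demo"}, // hypothetical label
        ); err != nil {
            return err
        }
        log.Info("Labeled pods in default namespace", "count", len(pods.Items))
        return nil
    }
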
github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -31,13 +28,10 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -49,44 +43,26 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.32.3 // indirect k8s.io/apiextensions-apiserver v0.32.1 // indirect - k8s.io/apiserver v0.32.1 // indirect - k8s.io/component-base v0.32.1 // indirect + k8s.io/apimachinery v0.32.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/examples/kubeconfig/go.sum b/examples/kubeconfig/go.sum index e4deba3..441ed9f 100644 --- a/examples/kubeconfig/go.sum +++ b/examples/kubeconfig/go.sum @@ -1,20 +1,7 @@ -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod 
h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/christensenjairus/Multicluster-Failover-Operator v0.0.0-20250325181351-19f5d74f8476 h1:0A9AbkWxyc+8/W/sj8C5b/mb1HhrKxOw3VU30SKiGDQ= -github.com/christensenjairus/Multicluster-Failover-Operator v0.0.0-20250325181351-19f5d74f8476/go.mod h1:JOvbIzfSOvKDHd9awRHWnG6gr1Sz8qi03dHW1vejsnk= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -26,17 +13,12 @@ github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= @@ -55,8 +37,6 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -69,10 +49,6 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -114,13 +90,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -134,22 +105,6 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -159,8 +114,6 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -201,12 +154,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -225,20 +172,14 @@ k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+ k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= -k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= -k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= -k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= diff --git a/examples/kubeconfig/main.go b/examples/kubeconfig/main.go index cc0c990..51a17ba 100644 --- a/examples/kubeconfig/main.go +++ b/examples/kubeconfig/main.go @@ -18,68 +18,46 @@ package main import ( "context" - "crypto/tls" "errors" "flag" "os" - "strings" "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/metrics/filters" - metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - "sigs.k8s.io/controller-runtime/pkg/webhook" - kubeconfig "sigs.k8s.io/multicluster-runtime/providers/kubeconfig" + // Import your controllers here <-------------------------------- + "sigs.k8s.io/multicluster-runtime/examples/kubeconfig/controllers" - // +kubebuilder:scaffold:imports - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + kubeconfigprovider "sigs.k8s.io/multicluster-runtime/providers/kubeconfig" ) -var ( - scheme = runtime.NewScheme() - setupLog = zap.New(zap.UseDevMode(true)) -) - -func init() { - utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - - // TODO: add your CRDs here after you import them above - // utilruntime.Must(crdv1alpha1.AddToScheme(scheme)) - // +kubebuilder:scaffold:scheme -} - func main() { - var metricsAddr string - var enableLeaderElection bool - var probeAddr string - var secureMetrics bool - var enableHTTP2 bool - var masterURL string - var tlsOpts []func(*tls.Config) - - flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ - "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, - "Enable leader election for controller manager. "+ - "Enabling this will ensure there is only one active controller manager.") - flag.BoolVar(&secureMetrics, "metrics-secure", true, - "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") - flag.BoolVar(&enableHTTP2, "enable-http2", false, - "If set, HTTP/2 will be enabled for the metrics and webhook servers") - flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. 
Overrides any value in kubeconfig.") + var namespace string + var kubeconfigLabel string + var connectionTimeout time.Duration + var cacheSyncTimeout time.Duration + var kubeconfigPath string + var providerReadyTimeout time.Duration + + flag.StringVar(&namespace, "namespace", "default", "Namespace where kubeconfig secrets are stored") + flag.StringVar(&kubeconfigLabel, "kubeconfig-label", "sigs.k8s.io/multicluster-runtime-kubeconfig", + "Label used to identify secrets containing kubeconfig data") + flag.DurationVar(&connectionTimeout, "connection-timeout", 15*time.Second, + "Timeout for connecting to a cluster") + flag.DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 60*time.Second, + "Timeout for waiting for the cache to sync") + flag.StringVar(&kubeconfigPath, "kubeconfig-path", "", + "Path to kubeconfig file for test secrets (defaults to ~/.kube/config if not set)") + flag.DurationVar(&providerReadyTimeout, "provider-ready-timeout", 120*time.Second, + "Timeout for waiting for the provider to be ready") opts := zap.Options{ Development: true, @@ -87,227 +65,82 @@ func main() { opts.BindFlags(flag.CommandLine) flag.Parse() - ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) - - // Create config with explicitly provided kubeconfig/master - // Note: controller-runtime already handles the --kubeconfig flag - var config *rest.Config - var err error - - // Get kubeconfig path from the flag that controller-runtime registers - kubeconfigPath := os.Getenv("KUBECONFIG") - for i := 1; i < len(os.Args); i++ { - if strings.HasPrefix(os.Args[i], "--kubeconfig=") { - kubeconfigPath = strings.TrimPrefix(os.Args[i], "--kubeconfig=") - break - } - if os.Args[i] == "--kubeconfig" && i+1 < len(os.Args) { - kubeconfigPath = os.Args[i+1] - break - } - } - - if masterURL != "" { - setupLog.Info("Using explicitly provided master URL", "master", masterURL) - // Use explicit master URL with kubeconfig if provided - if kubeconfigPath != "" { - setupLog.Info("Using kubeconfig file with explicit master", "kubeconfig", kubeconfigPath) - config, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfigPath) - } else { - // Just use the master URL - config = &rest.Config{ - Host: masterURL, - } - } - } else { - // Use controller-runtime's standard config handling - setupLog.Info("Using controller-runtime config handling") - if kubeconfigPath != "" { - setupLog.Info("Using kubeconfig file", "path", kubeconfigPath) - } - config, err = ctrl.GetConfig() - } - - if err != nil { - setupLog.Error(err, "unable to get kubernetes configuration") - os.Exit(1) - } - - setupLog.Info("Successfully connected to Kubernetes API", "host", config.Host) + ctrllog.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + entryLog := ctrllog.Log.WithName("entrypoint") + ctx := ctrl.SetupSignalHandler() - // if the enable-http2 flag is false (the default), http/2 should be disabled - // due to its vulnerabilities. More specifically, disabling http/2 will - // prevent from being vulnerable to the HTTP/2 Stream Cancellation and - // Rapid Reset CVEs. 
For more information see: - // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 - // - https://github.com/advisories/GHSA-4374-p667-p6c8 - disableHTTP2 := func(c *tls.Config) { - setupLog.Info("disabling http/2") - c.NextProtos = []string{"http/1.1"} - } + entryLog.Info("Starting application", "namespace", namespace, "kubeconfigLabel", kubeconfigLabel) - if !enableHTTP2 { - tlsOpts = append(tlsOpts, disableHTTP2) + // Create the kubeconfig provider with options + providerOpts := kubeconfigprovider.Options{ + Namespace: namespace, + KubeconfigLabel: kubeconfigLabel, + ConnectionTimeout: connectionTimeout, + CacheSyncTimeout: cacheSyncTimeout, + KubeconfigPath: kubeconfigPath, } - webhookServer := webhook.NewServer(webhook.Options{ - TLSOpts: tlsOpts, - }) + // Create the provider first, then the manager with the provider + entryLog.Info("Creating provider") + provider := kubeconfigprovider.New(providerOpts) - // Metrics endpoint options - metricsServerOptions := metricsserver.Options{ - BindAddress: metricsAddr, - SecureServing: secureMetrics, - TLSOpts: tlsOpts, - } + // Create the multicluster manager with the provider + entryLog.Info("Creating manager") - if secureMetrics { - metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + // Modify manager options to avoid waiting for cache sync + managerOpts := manager.Options{ + // Don't block main thread on leader election + LeaderElection: false, } - // Get the root context for the whole application - ctx := ctrl.SetupSignalHandler() - - // Create a context with cancel for graceful shutdown of background tasks - ctxWithCancel, cancelFunc := context.WithCancel(ctx) - - // Make sure to cancel all background processes when main context is done - go func() { - <-ctx.Done() - setupLog.Info("Main context cancelled, shutting down background tasks") - cancelFunc() - }() - - // Determine the namespace for kubeconfig discovery - namespace, err := getOperatorNamespace() + mgr, err := mcmanager.New(ctrl.GetConfigOrDie(), provider, managerOpts) if err != nil { - setupLog.Error(err, "unable to determine operator namespace") + entryLog.Error(err, "Unable to create manager") os.Exit(1) } - setupLog.Info("initializing multicluster support", "namespace", namespace) + // Add our controllers + entryLog.Info("Adding controllers") - // Create standard manager options - mgmtOpts := manager.Options{ - Scheme: scheme, - Metrics: metricsServerOptions, - WebhookServer: webhookServer, - HealthProbeBindAddress: probeAddr, - LeaderElection: enableLeaderElection, - LeaderElectionID: "cb9167b4.hahomelabs.com", - } - - // First, create a standard controller-runtime manager - // This is for the conventional controller approach - mgr, err := ctrl.NewManager(config, mgmtOpts) - if err != nil { - setupLog.Error(err, "unable to start manager") + // TODO: Run your controllers here <-------------------------------- + podWatcher := controllers.NewPodWatcher(mgr) + if err := mgr.Add(podWatcher); err != nil { + entryLog.Error(err, "Unable to add pod watcher") os.Exit(1) } - // Now let's set up the multicluster part - // Create a MulticlusterReconciler that will be used by controllers and the provider - mcReconciler := kubeconfig.NewMulticlusterReconciler(mgr.GetClient(), mgr.GetScheme()) - - // Create a KubeconfigClusterManager that will manage multiple clusters - clusterManager := kubeconfig.NewKubeconfigClusterManager(mgr, mcReconciler) - - // Create and configure the kubeconfig provider with the cluster manager - kubeconfigProvider := kubeconfig.New( 
- clusterManager, - kubeconfig.Options{ - Namespace: namespace, - KubeconfigLabel: "sigs.k8s.io/multicluster-runtime-kubeconfig", // TODO: change this to your desired kubeconfig label - Scheme: scheme, - ConnectionTimeout: 15 * time.Second, // TODO: change this to your operator's connection timeout - CacheSyncTimeout: 60 * time.Second, // TODO: change this to your operator's cache sync timeout - }, - ) - - // Start the provider in a background goroutine and wait for initial discovery - providerReady := make(chan struct{}) + // Start provider in a goroutine + entryLog.Info("Starting provider") go func() { - setupLog.Info("starting kubeconfig provider") - - // Set the manager first before doing anything else - kubeconfigProvider.SetManager(clusterManager) - - // Signal that we're going to start the provider - // This doesn't mean clusters are actually ready yet, just that we're starting - close(providerReady) - - // Run the provider - it will handle initial sync internally - err := kubeconfigProvider.Run(ctxWithCancel, clusterManager) - if err != nil && !errors.Is(err, context.Canceled) { - setupLog.Error(err, "Error running provider") - os.Exit(1) + err := provider.Run(ctx, mgr) + if err != nil && ctx.Err() == nil { + entryLog.Error(err, "Provider exited with error") } }() - // Wait for the provider to start - select { - case <-providerReady: - setupLog.Info("Kubeconfig provider starting...") - case <-time.After(5 * time.Second): - setupLog.Info("Timeout waiting for provider to start, continuing anyway") - } - - // Now let's add a delay to allow provider to discover and connect to clusters - setupLog.Info("Waiting for provider to discover clusters...") - time.Sleep(5 * time.Second) - - // TODO: set up your controllers for CRDs/CRs. - // Below are examples for 'FailoverGroup' and 'Failover' CRs. + // Wait for the provider to be ready with a short timeout + entryLog.Info("Waiting for provider to be ready") + readyCtx, cancel := context.WithTimeout(ctx, providerReadyTimeout) + defer cancel() - // if err := (&controller.FailoverGroupReconciler{ - // Client: mgr.GetClient(), - // Scheme: mgr.GetScheme(), - // MCReconciler: mcReconciler, - // }).SetupWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create failovergroup controller", "controller", "FailoverGroup") - // os.Exit(1) - // } - - // if err := (&controller.FailoverReconciler{ - // Client: mgr.GetClient(), - // Scheme: mgr.GetScheme(), - // MCReconciler: mcReconciler, - // }).SetupWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create failover controller", "controller", "Failover") - // os.Exit(1) - // } - - // Setup healthz/readyz checks - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up health check") - os.Exit(1) - } - if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up ready check") - os.Exit(1) + select { + case <-provider.IsReady(): + entryLog.Info("Provider is ready") + case <-readyCtx.Done(): + entryLog.Error(readyCtx.Err(), "Timeout waiting for provider to be ready, continuing anyway") } - setupLog.Info("starting manager") + // Start the manager + entryLog.Info("Starting manager") if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") + entryLog.Error(err, "Error running manager") os.Exit(1) } } -// getOperatorNamespace returns the namespace the operator is currently running in. 
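
One note on the new main() above: the ignoreCanceled helper it defines (at the end of this hunk) is not called anywhere in the code shown. A hedged sketch of the wiring it appears intended for, so that a clean shutdown triggered by the signal handler is not logged as a failure:

    // Hypothetical use of ignoreCanceled around manager shutdown;
    // mgr, ctx and entryLog are the values set up in main() above.
    if err := ignoreCanceled(mgr.Start(ctx)); err != nil {
        entryLog.Error(err, "Error running manager")
        os.Exit(1)
    }
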
-func getOperatorNamespace() (string, error) { - // Check if running in a pod - ns, found := os.LookupEnv("POD_NAMESPACE") - if found { - return ns, nil +func ignoreCanceled(err error) error { + if errors.Is(err, context.Canceled) { + return nil } - - // If not running in a pod, try to get from the service account namespace - nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err == nil { - return string(nsBytes), nil - } - - // Default to the standard operator namespace - return "my-operator-namespace", nil // TODO: change this to your operator's namespace + return err } diff --git a/examples/kubeconfig/create-kubeconfig-secret.sh b/examples/kubeconfig/scripts/create-kubeconfig-secret.sh similarity index 100% rename from examples/kubeconfig/create-kubeconfig-secret.sh rename to examples/kubeconfig/scripts/create-kubeconfig-secret.sh diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 7e4abe1..e6cb59a 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -199,14 +199,21 @@ func (m *mcManager) Add(r Runnable) (err error) { // Engage gets called when the component should start operations for the given // Cluster. ctx is cancelled when the cluster is disengaged. func (m *mcManager) Engage(ctx context.Context, name string, cl cluster.Cluster) error { - ctx, cancel := context.WithCancel(ctx) //nolint:govet // cancel is called in the error case only. + ctx, cancel := context.WithCancel(ctx) + + // Create a goroutine that will clean up the cancel function when the parent context is done + go func() { + <-ctx.Done() + cancel() + }() + for _, r := range m.mcRunnables { if err := r.Engage(ctx, name, cl); err != nil { cancel() return fmt.Errorf("failed to engage cluster %q: %w", name, err) } } - return nil //nolint:govet // cancel is called in the error case only. + return nil } func (m *mcManager) GetManager(ctx context.Context, clusterName string) (manager.Manager, error) { diff --git a/providers/kubeconfig/clustermanager.go b/providers/kubeconfig/clustermanager.go deleted file mode 100644 index bc22518..0000000 --- a/providers/kubeconfig/clustermanager.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeconfig - -import ( - "context" - "fmt" - "strings" - "sync" - - "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -var setupLog = log.Log.WithName("kubeconfig-provider") - -// KubeconfigClusterManager is an implementation of the KubeClusterManager interface that manages -// multiple Kubernetes clusters using kubeconfig. -type KubeconfigClusterManager struct { - manager.Manager - clusters map[string]cluster.Cluster - reconciler ClusterReconciler - mu sync.RWMutex -} - -// Ensure KubeconfigClusterManager implements the KubeClusterManager interface -var _ KubeClusterManager = &KubeconfigClusterManager{} - -// NewKubeconfigClusterManager creates a new KubeconfigClusterManager. 
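
A word on the pkg/manager/manager.go change above, before the rest of this deleted file: the added goroutine guarantees the derived context's resources are released once the parent context ends, not only on the engage-error path that the old //nolint comments covered. The pattern in isolation, as a minimal sketch with a hypothetical parent context:

    // Derive a child context and tie its cleanup to the parent's lifetime.
    ctx, cancel := context.WithCancel(parent)
    go func() {
        <-ctx.Done() // fires once the parent is done (or cancel is called)
        cancel()     // idempotent; releases the child context's resources
    }()
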
-func NewKubeconfigClusterManager(mgr manager.Manager, reconciler ClusterReconciler) *KubeconfigClusterManager { - return &KubeconfigClusterManager{ - Manager: mgr, - clusters: make(map[string]cluster.Cluster), - reconciler: reconciler, - } -} - -// GetCluster returns a cluster by name. -func (a *KubeconfigClusterManager) GetCluster(ctx context.Context, name string) (cluster.Cluster, error) { - a.mu.RLock() - defer a.mu.RUnlock() - c, ok := a.clusters[name] - if !ok { - return nil, fmt.Errorf("cluster %s not found", name) - } - return c, nil -} - -// Engage adds a cluster to the manager. -func (a *KubeconfigClusterManager) Engage(ctx context.Context, name string, c cluster.Cluster) error { - a.mu.Lock() - defer a.mu.Unlock() - a.clusters[name] = c - a.reconciler.RegisterCluster(name, c) - return nil -} - -// Disengage removes a cluster from the manager. -func (a *KubeconfigClusterManager) Disengage(ctx context.Context, name string) error { - a.mu.Lock() - defer a.mu.Unlock() - a.reconciler.UnregisterCluster(name) - delete(a.clusters, name) - return nil -} - -// ListClusters returns a list of all registered clusters -func (a *KubeconfigClusterManager) ListClusters() map[string]cluster.Cluster { - a.mu.RLock() - defer a.mu.RUnlock() - - if a.clusters == nil { - return map[string]cluster.Cluster{} - } - - // Return a copy of the map to prevent concurrent access - clusters := make(map[string]cluster.Cluster, len(a.clusters)) - for name, cl := range a.clusters { - clusters[name] = cl - } - return clusters -} - -// ListClustersWithLog returns a list of all registered clusters and logs them -func (a *KubeconfigClusterManager) ListClustersWithLog() map[string]cluster.Cluster { - a.mu.RLock() - defer a.mu.RUnlock() - - if a.clusters == nil { - setupLog.Info("No clusters registered") - return map[string]cluster.Cluster{} - } - - // Log all clusters - clusterNames := make([]string, 0, len(a.clusters)) - for name := range a.clusters { - clusterNames = append(clusterNames, name) - } - setupLog.Info("Current registered clusters", - "totalClusters", len(a.clusters), - "clusterNames", strings.Join(clusterNames, ", ")) - - // Return a copy of the map to prevent concurrent access - clusters := make(map[string]cluster.Cluster, len(a.clusters)) - for name, cl := range a.clusters { - clusters[name] = cl - } - return clusters -} diff --git a/providers/kubeconfig/interfaces.go b/providers/kubeconfig/interfaces.go deleted file mode 100644 index 62e6124..0000000 --- a/providers/kubeconfig/interfaces.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package kubeconfig provides a Kubernetes cluster provider that watches secrets -// containing kubeconfig data and creates controller-runtime clusters for each. 
-package kubeconfig - -import ( - "context" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// ClusterReconciler defines the operations required from a reconciler for cluster operations -type ClusterReconciler interface { - // RegisterCluster registers a new cluster with the reconciler - RegisterCluster(name string, cl cluster.Cluster) - - // UnregisterCluster removes a cluster from the reconciler - UnregisterCluster(name string) - - // GetCluster returns a cluster for the given name - GetCluster(ctx context.Context, name string) (cluster.Cluster, error) - - // ListClusters returns a list of all registered clusters - ListClusters() map[string]cluster.Cluster - - // ListClustersWithLog returns a list of all registered clusters and logs them - ListClustersWithLog() map[string]cluster.Cluster -} - -// KubeClusterManager defines an interface for managing multiple clusters via kubeconfig -type KubeClusterManager interface { - manager.Manager - - // GetCluster returns a cluster for the given name - GetCluster(ctx context.Context, name string) (cluster.Cluster, error) - - // Engage registers a new cluster with the manager - Engage(ctx context.Context, name string, cl cluster.Cluster) error - - // Disengage removes a cluster from the manager - Disengage(ctx context.Context, name string) error - - // ListClusters returns a list of all registered clusters - ListClusters() map[string]cluster.Cluster -} - -// Provider defines an interface for a kubecofnig-based cluster provider -type Provider interface { - // Get returns a cluster by name - Get(ctx context.Context, name string) (cluster.Cluster, error) - - // Run starts the provider - Run(ctx context.Context, mgr KubeClusterManager) error - - // IndexField indexes a field on all clusters - IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error -} diff --git a/providers/kubeconfig/provider.go b/providers/kubeconfig/provider.go index 78264d7..13f509e 100644 --- a/providers/kubeconfig/provider.go +++ b/providers/kubeconfig/provider.go @@ -20,19 +20,18 @@ package kubeconfig import ( "context" - stderrors "errors" + "crypto/sha256" + "encoding/hex" "fmt" - "strings" + "os" + "path/filepath" "sync" "time" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -41,7 +40,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" + + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + "sigs.k8s.io/multicluster-runtime/pkg/multicluster" ) const ( @@ -52,11 +53,37 @@ const ( DefaultKubeconfigSecretKey = "kubeconfig" ) -// index defines a field indexer -type index struct { - object client.Object - field string - extractValue client.IndexerFunc +var _ multicluster.Provider = &Provider{} + +// New creates a new Kubeconfig Provider. 
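
Before the constructor body just below, a hedged usage sketch of New, mirroring the wiring in examples/kubeconfig/main.go above (the package is imported there as kubeconfigprovider; all values are illustrative, and omitted fields fall back to the defaults set inside New):

    provider := kubeconfig.New(kubeconfig.Options{
        Namespace:       "default",
        KubeconfigLabel: "sigs.k8s.io/multicluster-runtime-kubeconfig",
        // KubeconfigKey, ConnectionTimeout, CacheSyncTimeout and
        // KubeconfigPath default to "kubeconfig", 10s, 30s and ~/.kube/config.
    })
    mgr, err := mcmanager.New(ctrl.GetConfigOrDie(), provider, manager.Options{})
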
+func New(opts Options, clusterOpts ...cluster.Option) *Provider { + // Set defaults + if opts.KubeconfigLabel == "" { + opts.KubeconfigLabel = DefaultKubeconfigSecretLabel + } + if opts.KubeconfigKey == "" { + opts.KubeconfigKey = DefaultKubeconfigSecretKey + } + if opts.ConnectionTimeout == 0 { + opts.ConnectionTimeout = 10 * time.Second + } + if opts.CacheSyncTimeout == 0 { + opts.CacheSyncTimeout = 30 * time.Second + } + if opts.KubeconfigPath == "" { + opts.KubeconfigPath = filepath.Join(os.Getenv("HOME"), ".kube", "config") + } + + return &Provider{ + opts: opts, + log: log.Log.WithName("kubeconfig-provider"), + client: nil, // Will be set in Run + clusters: map[string]cluster.Cluster{}, + cancelFns: map[string]context.CancelFunc{}, + seenHashes: map[string]string{}, + readySignal: make(chan struct{}), + clusterOpts: clusterOpts, + } } // Options are the options for the Kubeconfig Provider. @@ -70,64 +97,46 @@ type Options struct { // Key in the secret data that contains the kubeconfig KubeconfigKey string - // Scheme is the scheme to use for the cluster. If not provided, a new one will be created. - Scheme *runtime.Scheme - // ConnectionTimeout is the timeout for connecting to a cluster ConnectionTimeout time.Duration // CacheSyncTimeout is the timeout for waiting for the cache to sync CacheSyncTimeout time.Duration -} -// KubeconfigProvider is a cluster provider that watches for secrets containing kubeconfig data -// and engages clusters based on those kubeconfig. -type KubeconfigProvider struct { - opts Options - log logr.Logger - client client.Client - Client client.Client // For controller-runtime Reconciler interface - lock sync.RWMutex - manager KubeClusterManager - clusters map[string]cluster.Cluster - cancelFns map[string]context.CancelFunc - indexers []index - seenHashes map[string]string // tracks resource versions + // KubeconfigPath is the path to the kubeconfig file to use for development/testing + // If not set, will use the default ~/.kube/config + KubeconfigPath string } -// Ensure KubeconfigProvider implements the Provider interface -var _ Provider = &KubeconfigProvider{} +type index struct { + object client.Object + field string + extractValue client.IndexerFunc +} -// New creates a new Kubeconfig Provider. -func New(mgr KubeClusterManager, opts Options) *KubeconfigProvider { - // Set defaults - if opts.KubeconfigLabel == "" { - opts.KubeconfigLabel = DefaultKubeconfigSecretLabel - } - if opts.KubeconfigKey == "" { - opts.KubeconfigKey = DefaultKubeconfigSecretKey - } - if opts.ConnectionTimeout == 0 { - opts.ConnectionTimeout = 10 * time.Second - } - if opts.CacheSyncTimeout == 0 { - opts.CacheSyncTimeout = 30 * time.Second - } +// Provider is a cluster provider that watches for secrets containing kubeconfig data +// and engages clusters based on those kubeconfigs. 
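
One clarification before the struct below: its seenHashes field keeps the old "tracks resource versions" comment, but the sha256 and hex imports added at the top of this file indicate it stores content hashes of the kubeconfig data, so re-syncs of an unchanged secret can be skipped. A minimal sketch of that fingerprint, assuming a helper of roughly this shape exists elsewhere in the file:

    // hashKubeconfig fingerprints a secret's kubeconfig bytes; comparing
    // fingerprints avoids tearing down and recreating an unchanged cluster.
    func hashKubeconfig(data []byte) string {
        sum := sha256.Sum256(data)
        return hex.EncodeToString(sum[:])
    }
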
+type Provider struct { + opts Options + log logr.Logger + client client.Client + lock sync.RWMutex + clusters map[string]cluster.Cluster + cancelFns map[string]context.CancelFunc + indexers []index + seenHashes map[string]string // tracks resource versions + readySignal chan struct{} // Signal when provider is ready to start + readyOnce sync.Once // Ensure we only signal once + clusterOpts []cluster.Option // Options to apply to all clusters +} - return &KubeconfigProvider{ - opts: opts, - log: log.Log.WithName("kubeconfig-provider"), - client: mgr.GetClient(), - Client: mgr.GetClient(), // Set both client fields - clusters: map[string]cluster.Cluster{}, - cancelFns: map[string]context.CancelFunc{}, - seenHashes: map[string]string{}, - } +// IsReady returns a channel that will be closed when the provider is ready to start +func (p *Provider) IsReady() <-chan struct{} { + return p.readySignal } // Get returns the cluster with the given name, if it is known. -// It implements the Provider interface. -func (p *KubeconfigProvider) Get(_ context.Context, clusterName string) (cluster.Cluster, error) { +func (p *Provider) Get(ctx context.Context, clusterName string) (cluster.Cluster, error) { p.lock.RLock() defer p.lock.RUnlock() @@ -139,266 +148,378 @@ func (p *KubeconfigProvider) Get(_ context.Context, clusterName string) (cluster } // Run starts the provider and blocks, watching for kubeconfig secrets. -// It implements the Provider interface. -func (p *KubeconfigProvider) Run(ctx context.Context, mgr KubeClusterManager) error { - p.log.Info("starting kubeconfig provider", "namespace", p.opts.Namespace, "label", p.opts.KubeconfigLabel) - - // Set the manager - p.SetManager(mgr) - - // Wait for the controller-runtime cache to be ready before using it - if mgr != nil && mgr.GetCache() != nil { - p.log.Info("Waiting for controller-runtime cache to be ready") - if !mgr.GetCache().WaitForCacheSync(ctx) { - return fmt.Errorf("timed out waiting for cache to sync") +func (p *Provider) Run(ctx context.Context, mgr mcmanager.Manager) error { + p.log.Info("Starting kubeconfig provider", "namespace", p.opts.Namespace, "label", p.opts.KubeconfigLabel) + + // If client isn't set yet, get it from the manager + if p.client == nil && mgr != nil { + p.log.Info("Setting client from manager") + p.client = mgr.GetLocalManager().GetClient() + if p.client == nil { + p.log.Error(nil, "Failed to get client from manager, will use direct API access") } - p.log.Info("Controller-runtime cache is synced") - } else { - p.log.Info("No manager or cache available, skipping cache sync") } - // Do initial sync using direct API call (not cached client) - if err := p.syncSecretsInternal(ctx); err != nil { - p.log.Error(err, "initial secret sync failed", "error", err.Error()) - // Continue anyway - don't exit on sync failure - } else { - p.log.Info("Initial secret sync successful") - } - - // Create a Kubernetes clientset for watching - var config *rest.Config - var err error - - // First, try to get the config from controller-runtime - config, err = rest.InClusterConfig() + // Set up Kubernetes config for API operations - we'll use this for direct API access + // This bypasses the controller-runtime cache which might not be ready yet + config, err := rest.InClusterConfig() if err != nil { - p.log.Info("not running in-cluster, using kubeconfig for local development") - + p.log.Info("Not running in-cluster, using kubeconfig for local development") // Look for kubeconfig in default locations rules := 
clientcmd.NewDefaultClientConfigLoadingRules() - rules.DefaultClientConfig = &clientcmd.DefaultClientConfig - clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}) config, err = clientConfig.ClientConfig() if err != nil { + p.log.Error(err, "Failed to create config") + // Signal readiness anyway to avoid blocking the application + p.readyOnce.Do(func() { + p.log.Info("Signaling that KubeconfigProvider is ready to start (no config available)") + close(p.readySignal) + }) return fmt.Errorf("failed to create config: %w", err) } } - p.log.Info("successfully connected to kubernetes api", "host", config.Host) - + // Create clientset for API operations - this bypasses controller-runtime clientset, err := kubernetes.NewForConfig(config) if err != nil { + p.log.Error(err, "Failed to create clientset") + // Signal readiness anyway to avoid blocking the application + p.readyOnce.Do(func() { + p.log.Info("Signaling that KubeconfigProvider is ready to start (no clientset available)") + close(p.readySignal) + }) return fmt.Errorf("failed to create clientset: %w", err) } - // Set up label selector for our kubeconfig label - labelSelector := fmt.Sprintf("%s=true", p.opts.KubeconfigLabel) - p.log.Info("watching for kubeconfig secrets", "selector", labelSelector) - - // Watch for secret changes - go p.watchSecrets(ctx, clientset, labelSelector) + // Skip waiting for controller-runtime cache - this creates circular dependency + // Instead, use direct clientset for all operations + p.log.Info("Using direct API access instead of controller-runtime cache") - <-ctx.Done() - p.log.Info("Provider context cancelled, shutting down gracefully") - return context.Canceled -} + // Create a test secret to verify functionality - for development and testing + if err := p.createTestSecretIfMissing(ctx, clientset); err != nil { + p.log.Error(err, "Failed to create test secret - this is expected in production") + } -// Reconcile implements the controller-runtime Reconciler interface -func (p *KubeconfigProvider) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := p.log.WithValues("secret", req.NamespacedName) - secret := &corev1.Secret{} - - if err := p.Client.Get(ctx, req.NamespacedName, secret); err != nil { - if errors.IsNotFound(err) { - log.Info("secret not found, handling deletion") - // Secret was deleted, handle deletion - p.handleSecretDelete(&corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: req.Name, - Namespace: req.Namespace, - }, - }) - return reconcile.Result{}, nil + // Do initial sync of secrets - this is the key step for processing clusters + secretsFound, err := p.syncSecretsFromClientset(ctx, clientset) + if err != nil { + p.log.Error(err, "Initial secret sync failed") + // Signal readiness anyway to avoid blocking the application + p.readyOnce.Do(func() { + p.log.Info("Signaling that KubeconfigProvider is ready to start (despite sync failure)") + close(p.readySignal) + }) + } else { + p.log.Info("Initial secret sync successful", "secretsFound", secretsFound) + if secretsFound == 0 { + p.log.Info("No secrets found with label", + "label", p.opts.KubeconfigLabel, + "namespace", p.opts.Namespace, + "note", "This is normal if you haven't created any kubeconfig secrets yet") } - log.Error(err, "failed to get secret") - return reconcile.Result{}, fmt.Errorf("failed to get secret: %w", err) + + // Only signal readiness after we've processed all secrets + p.readyOnce.Do(func() { + p.log.Info("Signaling that 
KubeconfigProvider is ready to start (all secrets processed)") + close(p.readySignal) + }) } - log.Info("processing secret") - // Handle secret update/creation - p.handleSecretUpsert(ctx, secret) + // Set up label selector for watching secrets + labelSelector := fmt.Sprintf("%s=true", p.opts.KubeconfigLabel) + p.log.Info("Watching for kubeconfig secrets", "selector", labelSelector, "namespace", p.opts.Namespace) - return reconcile.Result{}, nil -} + // Watch for secret changes in a goroutine to avoid blocking + go func() { + for { + // Check if parent context is done + if ctx.Err() != nil { + return + } -// IndexField indexes a field on all clusters, existing and future. -// It implements the Provider interface. -func (p *KubeconfigProvider) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - p.lock.Lock() - defer p.lock.Unlock() + err := p.watchSecrets(ctx, clientset, labelSelector, mgr) - // Save for future clusters - p.indexers = append(p.indexers, index{ - object: obj, - field: field, - extractValue: extractValue, - }) + // Check again for context cancellation after watch returns + if ctx.Err() != nil { + return + } - // Apply to existing clusters - for name, cl := range p.clusters { - if err := cl.GetCache().IndexField(ctx, obj, field, extractValue); err != nil { - return fmt.Errorf("failed to index field %q on cluster %q: %w", field, name, err) + p.log.Error(err, "Error watching secrets, restarting watch after delay") + time.Sleep(5 * time.Second) } - } + }() - return nil + // Block until context is done + <-ctx.Done() + p.log.Info("Context cancelled, exiting provider") + return ctx.Err() } -// Engage creates, starts and registers a new cluster with the manager -func (p *KubeconfigProvider) Engage(ctx context.Context, clusterName string, config *rest.Config) error { - log := p.log.WithValues("cluster", clusterName) - log.Info("Creating new controller-runtime cluster") - - // Add timeout to the config - config.Timeout = p.opts.ConnectionTimeout +// syncSecretsFromClientset fetches secrets directly using the clientset +func (p *Provider) syncSecretsFromClientset(ctx context.Context, clientset kubernetes.Interface) (int, error) { + p.log.Info("Listing secrets with label", "label", p.opts.KubeconfigLabel, "namespace", p.opts.Namespace) - // Create a new cluster - cl, err := cluster.New(config, func(o *cluster.Options) { - o.Scheme = p.opts.Scheme - // Set a longer cache sync timeout - o.Cache.SyncPeriod = &p.opts.CacheSyncTimeout + secrets, err := clientset.CoreV1().Secrets(p.opts.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=true", p.opts.KubeconfigLabel), }) + if err != nil { - return fmt.Errorf("failed to create cluster: %w", err) + return 0, fmt.Errorf("failed to list secrets: %w", err) } - // Create a new context for this cluster - clusterCtx, cancel := context.WithCancel(ctx) + p.log.Info("Found secrets with label", "count", len(secrets.Items)) - // Start the cluster in a goroutine - go func() { - if err := cl.Start(clusterCtx); err != nil { - log.Error(err, "Failed to start cluster") + for i := range secrets.Items { + secret := &secrets.Items[i] + p.log.Info("Processing secret", "name", secret.Name) + if err := p.handleSecret(ctx, secret, nil); err != nil { + p.log.Error(err, "Failed to handle secret", "name", secret.Name) + // Continue with other secrets } - }() + } - // Wait for cache to sync with a timeout - syncCtx, syncCancel := context.WithTimeout(ctx, p.opts.CacheSyncTimeout) - defer 
syncCancel() + return len(secrets.Items), nil +} - log.Info("Waiting for cluster cache to sync", "timeout", p.opts.CacheSyncTimeout) - if !cl.GetCache().WaitForCacheSync(syncCtx) { - cancel() // Clean up if sync fails - return fmt.Errorf("timed out waiting for cache to sync for cluster %s", clusterName) +// createTestSecretIfMissing creates a test secret for development and testing +func (p *Provider) createTestSecretIfMissing(ctx context.Context, clientset kubernetes.Interface) error { + // Only create test secrets in the default namespace + if p.opts.Namespace != "default" { + return nil } - log.Info("Cluster cache successfully synced") - // Register the cluster with the manager if available - p.lock.RLock() - manager := p.manager - p.lock.RUnlock() + // Check if test secret already exists + _, err := clientset.CoreV1().Secrets(p.opts.Namespace).Get(ctx, "test-kubeconfig", metav1.GetOptions{}) + if err == nil { + // Secret already exists + return nil + } - if manager != nil { - if err := manager.Engage(ctx, clusterName, cl); err != nil { - cancel() // Clean up if registration fails - return fmt.Errorf("failed to register cluster: %w", err) - } - } else { - log.Info("No manager available, skipping registration with manager") + // Get current kubeconfig for test purposes + p.log.Info("Using kubeconfig path", "path", p.opts.KubeconfigPath) + + kubeconfigData, err := os.ReadFile(p.opts.KubeconfigPath) + if err != nil { + return fmt.Errorf("failed to read kubeconfig from %s: %w", p.opts.KubeconfigPath, err) } - // Register the cluster in our internal state - p.lock.Lock() - p.clusters[clusterName] = cl - p.cancelFns[clusterName] = cancel - p.lock.Unlock() + // Create test secret + testSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-kubeconfig", + Labels: map[string]string{ + p.opts.KubeconfigLabel: "true", + }, + }, + Data: map[string][]byte{ + p.opts.KubeconfigKey: kubeconfigData, + }, + } - // Apply any pending indexers - for _, idx := range p.indexers { - if err := cl.GetCache().IndexField(ctx, idx.object, idx.field, idx.extractValue); err != nil { - return fmt.Errorf("failed to index field %q on cluster %q: %w", idx.field, clusterName, err) - } + _, err = clientset.CoreV1().Secrets(p.opts.Namespace).Create(ctx, testSecret, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create test secret: %w", err) } - log.Info("Successfully engaged cluster") + p.log.Info("Created test kubeconfig secret for development") return nil } -// handleSecretUpsert handles the addition or update of a kubeconfig secret -func (p *KubeconfigProvider) handleSecretUpsert(ctx context.Context, secret *corev1.Secret) { - log := p.log.WithValues("secret", types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}) - log.Info("Processing kubeconfig secret") +// watchSecrets sets up a watch for Secret resources with the given label selector +func (p *Provider) watchSecrets(ctx context.Context, clientset kubernetes.Interface, labelSelector string, mgr mcmanager.Manager) error { + watcher, err := clientset.CoreV1().Secrets(p.opts.Namespace).Watch(ctx, metav1.ListOptions{ + LabelSelector: labelSelector, + }) + if err != nil { + return fmt.Errorf("failed to watch secrets: %w", err) + } + defer watcher.Stop() + + p.log.Info("Started watching for kubeconfig secrets") + + for { + select { + case <-ctx.Done(): + p.log.Info("Context cancelled, stopping watch") + return ctx.Err() + case event, ok := <-watcher.ResultChan(): + if !ok { + p.log.Info("Watch channel closed, 
restarting watch") + // Recreate the watcher + newWatcher, err := clientset.CoreV1().Secrets(p.opts.Namespace).Watch(ctx, metav1.ListOptions{ + LabelSelector: labelSelector, + }) + if err != nil { + p.log.Error(err, "Failed to restart watch, waiting before retry") + time.Sleep(5 * time.Second) + continue + } + watcher = newWatcher + continue + } + + // Process the event + switch event.Type { + case watch.Added, watch.Modified: + secret, ok := event.Object.(*corev1.Secret) + if !ok { + p.log.Info("Unexpected object type", "type", fmt.Sprintf("%T", event.Object)) + continue + } + p.log.Info("Processing secret event", "name", secret.Name, "event", event.Type) + if err := p.handleSecret(ctx, secret, mgr); err != nil { + p.log.Error(err, "Failed to handle secret", "name", secret.Name) + } + case watch.Deleted: + secret, ok := event.Object.(*corev1.Secret) + if !ok { + p.log.Info("Unexpected object type", "type", fmt.Sprintf("%T", event.Object)) + continue + } + p.log.Info("Secret deleted", "name", secret.Name) + p.handleSecretDelete(secret) + case watch.Error: + p.log.Error(fmt.Errorf("watch error"), "Error event received") + } + } + } +} + +// handleSecret processes a secret containing kubeconfig data +func (p *Provider) handleSecret(ctx context.Context, secret *corev1.Secret, mgr mcmanager.Manager) error { + if secret == nil { + return fmt.Errorf("received nil secret") + } + // Extract name to use as cluster name clusterName := secret.Name + log := p.log.WithValues("cluster", clusterName, "secret", fmt.Sprintf("%s/%s", secret.Namespace, secret.Name)) - // Check if we already have this cluster + // Check if this secret has kubeconfig data + kubeconfigData, ok := secret.Data[p.opts.KubeconfigKey] + if !ok { + log.Info("Secret does not contain kubeconfig data", "key", p.opts.KubeconfigKey) + return nil + } + + // Hash the kubeconfig to detect changes + dataHash := hashBytes(kubeconfigData) + + // Check if we've seen this version before p.lock.RLock() - _, exists := p.clusters[clusterName] - existingCancelFn := p.cancelFns[clusterName] + existingHash, exists := p.seenHashes[clusterName] p.lock.RUnlock() - // Get kubeconfig from secret - kubeconfigData, ok := secret.Data[p.opts.KubeconfigKey] - if !ok || len(kubeconfigData) == 0 { - log.Error(nil, "Kubeconfig key not found or empty", "key", p.opts.KubeconfigKey) - return + if exists && existingHash == dataHash { + log.Info("Kubeconfig unchanged, skipping") + return nil } - log.Info("Found kubeconfig data in secret", "dataSize", len(kubeconfigData)) - - // Parse kubeconfig and create REST config + // Parse the kubeconfig restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfigData) if err != nil { - log.Error(err, "Failed to parse kubeconfig") - return + return fmt.Errorf("failed to parse kubeconfig: %w", err) } - // Test connection to API server - log.Info("Testing connection to API server", "host", restConfig.Host) - clientset, err := kubernetes.NewForConfig(restConfig) - if err != nil { - log.Error(err, "Failed to create clientset from config") - return + // Set reasonable defaults for the client + restConfig.Timeout = p.opts.ConnectionTimeout + + // Check if we already have this cluster + p.lock.RLock() + _, clusterExists := p.clusters[clusterName] + p.lock.RUnlock() + + // If the cluster already exists, remove it first + if clusterExists { + log.Info("Cluster already exists, updating it") + if err := p.removeCluster(clusterName); err != nil { + return fmt.Errorf("failed to remove existing cluster: %w", err) + } } - // Attempt to list 
nodes as a basic connectivity test - _, err = clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{Limit: 1}) + // Create a new cluster with the provided options + log.Info("Creating new cluster from kubeconfig") + cl, err := cluster.New(restConfig, p.clusterOpts...) if err != nil { - log.Error(err, "Failed to connect to Kubernetes API server", "host", restConfig.Host) - return + return fmt.Errorf("failed to create cluster: %w", err) + } + + // Apply any field indexers + for _, idx := range p.indexers { + if err := cl.GetCache().IndexField(ctx, idx.object, idx.field, idx.extractValue); err != nil { + return fmt.Errorf("failed to index field %q: %w", idx.field, err) + } + } + + // Create a context that will be canceled when this cluster is removed + clusterCtx, cancel := context.WithCancel(ctx) + + // Start the cluster + go func() { + if err := cl.Start(clusterCtx); err != nil { + log.Error(err, "Failed to start cluster") + } + }() + + // Wait for cache to sync + log.Info("Waiting for cluster cache to sync", "timeout", p.opts.CacheSyncTimeout) + syncCtx, syncCancel := context.WithTimeout(ctx, p.opts.CacheSyncTimeout) + defer syncCancel() + + if !cl.GetCache().WaitForCacheSync(syncCtx) { + cancel() // Cancel the cluster context + return fmt.Errorf("timeout waiting for cache to sync") } - log.Info("Successfully connected to API server", "host", restConfig.Host) - // If cluster exists and it's an update, we need to stop the old one - if exists { - log.Info("Updating existing cluster") - if existingCancelFn != nil { - existingCancelFn() + // Store the cluster + p.lock.Lock() + p.clusters[clusterName] = cl + p.cancelFns[clusterName] = cancel + p.seenHashes[clusterName] = dataHash + p.lock.Unlock() + + log.Info("Successfully added cluster") + + // Engage the manager if provided + if mgr != nil { + if err := mgr.Engage(clusterCtx, clusterName, cl); err != nil { + log.Error(err, "Failed to engage manager, removing cluster") + p.lock.Lock() + delete(p.clusters, clusterName) + delete(p.cancelFns, clusterName) + delete(p.seenHashes, clusterName) + p.lock.Unlock() + cancel() // Cancel the cluster context + return fmt.Errorf("failed to engage manager: %w", err) } - p.lock.Lock() - delete(p.clusters, clusterName) - delete(p.cancelFns, clusterName) - p.lock.Unlock() + log.Info("Successfully engaged manager") } - // Create and start cluster - if err := p.Engage(ctx, clusterName, restConfig); err != nil { - log.Error(err, "Failed to engage cluster") + return nil +} + +// handleSecretDelete handles the deletion of a secret +func (p *Provider) handleSecretDelete(secret *corev1.Secret) { + if secret == nil { return } - // Log current cluster count - p.lock.RLock() - clusterCount := len(p.clusters) - p.lock.RUnlock() - log.Info("Currently managing clusters", "count", clusterCount) + clusterName := secret.Name + log := p.log.WithValues("cluster", clusterName) + + log.Info("Handling deleted secret") + + // Remove the cluster + if err := p.removeCluster(clusterName); err != nil { + log.Error(err, "Failed to remove cluster") + } } -// Disengage stops and removes a cluster from the provider -func (p *KubeconfigProvider) Disengage(ctx context.Context, clusterName string) error { +// removeCluster removes a cluster by name +func (p *Provider) removeCluster(clusterName string) error { log := p.log.WithValues("cluster", clusterName) - log.Info("Disengaging cluster") + log.Info("Removing cluster") // Find the cluster and cancel function p.lock.RLock() @@ -414,222 +535,61 @@ func (p *KubeconfigProvider) Disengage(ctx 
context.Context, clusterName string) p.lock.RUnlock() return fmt.Errorf("cancel function for cluster %s not found", clusterName) } - - // Get manager reference while holding the read lock - mgr := p.manager p.lock.RUnlock() - // Disengage from manager if available - if mgr != nil { - if err := mgr.Disengage(ctx, clusterName); err != nil { - log.Error(err, "Failed to disengage from manager") - // Continue with cleanup even if manager disengage fails - } - } - - // Stop the cluster + // Cancel the context to trigger cleanup for this cluster cancelFn() + log.Info("Cancelled cluster context") // Clean up our maps p.lock.Lock() delete(p.clusters, clusterName) delete(p.cancelFns, clusterName) + delete(p.seenHashes, clusterName) p.lock.Unlock() - log.Info("Successfully disengaged cluster") + log.Info("Successfully removed cluster") return nil } -// handleSecretDelete handles the deletion of a kubeconfig secret -func (p *KubeconfigProvider) handleSecretDelete(secret *corev1.Secret) { - log := p.log.WithValues("secret", types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}) - log.Info("Handling kubeconfig secret deletion") - - clusterName := secret.Name - - // Use Disengage to handle cleanup - if err := p.Disengage(context.Background(), clusterName); err != nil { - if !strings.Contains(err.Error(), "not found") { - log.Error(err, "Failed to disengage cluster") - } - return - } - - // Log current cluster count - p.lock.RLock() - clusterCount := len(p.clusters) - p.lock.RUnlock() - log.Info("Currently managing clusters", "count", clusterCount) -} - -// watchSecrets sets up a watch for secret changes -func (p *KubeconfigProvider) watchSecrets(ctx context.Context, clientset *kubernetes.Clientset, labelSelector string) { - p.log.Info("Starting watcher for secrets with selector", "selector", labelSelector) - - for { - select { - case <-ctx.Done(): - p.log.Info("Secret watcher context cancelled, shutting down") - return - default: - // Continue with watch - } - - watcher, err := clientset.CoreV1().Secrets(p.opts.Namespace).Watch(ctx, metav1.ListOptions{ - LabelSelector: labelSelector, - }) - - if err != nil { - // Handle context cancellation - normal shutdown - if stderrors.Is(err, context.Canceled) { - p.log.Info("Context cancelled while creating watcher, shutting down") - return - } - - p.log.Error(err, "Error creating watcher") - time.Sleep(5 * time.Second) - continue - } - - p.log.Info("Successfully created watcher") - - // Process events until the channel is closed or context is cancelled - for { - select { - case <-ctx.Done(): - p.log.Info("Context cancelled, closing watcher") - watcher.Stop() - return - case event, ok := <-watcher.ResultChan(): - if !ok { - p.log.Info("Watch channel closed, retrying") - time.Sleep(1 * time.Second) - break - } - - // Process the event - secret, ok := event.Object.(*corev1.Secret) - if !ok { - p.log.Error(nil, "Expected secret", "type", fmt.Sprintf("%T", event.Object)) - continue - } +// IndexField indexes a field on all clusters, existing and future. 
+func (p *Provider) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + p.lock.Lock() + defer p.lock.Unlock() - // Check if we've already seen this exact version of the secret - key := fmt.Sprintf("%s/%s", secret.Namespace, secret.Name) - p.lock.RLock() - seenVersion, alreadySeen := p.seenHashes[key] - p.lock.RUnlock() - - if alreadySeen && seenVersion == secret.ResourceVersion { - // We've already processed this exact version of the secret - p.log.V(1).Info("Skipping already processed secret version", - "secret", key, - "resourceVersion", secret.ResourceVersion) - continue - } + // Save for future clusters + p.indexers = append(p.indexers, index{ + object: obj, + field: field, + extractValue: extractValue, + }) - // Process the secret according to event type - switch event.Type { - case watch.Added, watch.Modified: - p.handleSecretUpsert(ctx, secret) - // Update the seen hash to avoid reprocessing - p.lock.Lock() - p.seenHashes[key] = secret.ResourceVersion - p.lock.Unlock() - case watch.Deleted: - p.handleSecretDelete(secret) - // Remove from seen hashes - p.lock.Lock() - delete(p.seenHashes, key) - p.lock.Unlock() - } - } + // Apply to existing clusters + for name, cl := range p.clusters { + if err := cl.GetCache().IndexField(ctx, obj, field, extractValue); err != nil { + return fmt.Errorf("failed to index field %q on cluster %q: %w", field, name, err) } } -} -// SyncSecrets provides a public method to manually trigger a sync of all kubeconfig secrets -// This is exposed for testing or forced refreshes, but normal operation uses the internal -// sync during Run() initialization -func (p *KubeconfigProvider) SyncSecrets(ctx context.Context) error { - return p.syncSecretsInternal(ctx) + return nil } -// syncSecretsInternal lists all matching secrets and processes them -// This is now a private implementation method used by both Run and the public SyncSecrets -func (p *KubeconfigProvider) syncSecretsInternal(ctx context.Context) error { - // Create a direct Kubernetes clientset instead of using the cached client - config, err := rest.InClusterConfig() - if err != nil { - // Not in cluster, try using default kubeconfig - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - configOverrides := &clientcmd.ConfigOverrides{} - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - config, err = kubeConfig.ClientConfig() - if err != nil { - return fmt.Errorf("failed to create config: %w", err) - } - } - - // Create a clientset for direct API access - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return fmt.Errorf("failed to create clientset: %w", err) - } - - // Use the clientset to list secrets directly (bypassing cache) - secretList, err := clientset.CoreV1().Secrets(p.opts.Namespace).List(ctx, metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=true", p.opts.KubeconfigLabel), - }) - if err != nil { - return fmt.Errorf("failed to list secrets: %w", err) - } - - p.log.Info("Found secrets with kubeconfig label", "count", len(secretList.Items)) - - // Process existing secrets - currentKeys := make(map[string]bool) - for i := range secretList.Items { - secret := &secretList.Items[i] - key := fmt.Sprintf("%s/%s", secret.Namespace, secret.Name) - currentKeys[key] = true - - // Check if this is a new or updated secret - if hash, exists := p.seenHashes[key]; !exists || hash != secret.ResourceVersion { - p.handleSecretUpsert(ctx, secret) - p.seenHashes[key] = 
secret.ResourceVersion - } - } - - // Check for deleted secrets +// ListClusters returns a list of all discovered clusters. +func (p *Provider) ListClusters() map[string]cluster.Cluster { p.lock.RLock() - for name := range p.clusters { - key := fmt.Sprintf("%s/%s", p.opts.Namespace, name) - if _, exists := currentKeys[key]; !exists { - // This secret has been deleted - p.lock.RUnlock() - p.handleSecretDelete(&corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: p.opts.Namespace, - }, - }) - p.lock.RLock() - // Remove from seen hashes - delete(p.seenHashes, key) - } - } - p.lock.RUnlock() + defer p.lock.RUnlock() - return nil + // Return a copy of the map to avoid race conditions + result := make(map[string]cluster.Cluster, len(p.clusters)) + for k, v := range p.clusters { + result[k] = v + } + return result } -// SetManager explicitly sets the manager for the provider -// This should be called before any other operations -func (p *KubeconfigProvider) SetManager(mgr KubeClusterManager) { - p.lock.Lock() - defer p.lock.Unlock() - - p.manager = mgr - p.log.Info("Manager explicitly set for provider") +// hashBytes returns a hex-encoded SHA256 hash of the given bytes +func hashBytes(data []byte) string { + h := sha256.New() + h.Write(data) + return hex.EncodeToString(h.Sum(nil)) } diff --git a/providers/kubeconfig/reconciler.go b/providers/kubeconfig/reconciler.go deleted file mode 100644 index b69c2da..0000000 --- a/providers/kubeconfig/reconciler.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubeconfig - -import ( - "context" - "fmt" - "strings" - "sync" - - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" -) - -// MulticlusterReconciler handles reconciliations across multiple clusters -type MulticlusterReconciler struct { - Client client.Client - Scheme *runtime.Scheme - lock sync.RWMutex - clusters map[string]cluster.Cluster -} - -// Ensure MulticlusterReconciler implements ClusterReconciler -var _ ClusterReconciler = &MulticlusterReconciler{} - -// NewMulticlusterReconciler creates a new MulticlusterReconciler -func NewMulticlusterReconciler(c client.Client, scheme *runtime.Scheme) *MulticlusterReconciler { - return &MulticlusterReconciler{ - Client: c, - Scheme: scheme, - clusters: make(map[string]cluster.Cluster), - } -} - -// RegisterCluster registers a new cluster with the reconciler -func (r *MulticlusterReconciler) RegisterCluster(name string, cl cluster.Cluster) { - r.lock.Lock() - defer r.lock.Unlock() - - setupLog.Info("Registering cluster with reconciler", "name", name) - - if r.clusters == nil { - r.clusters = make(map[string]cluster.Cluster) - } - - // Store the cluster - r.clusters[name] = cl - setupLog.Info("Registered cluster with reconciler", "name", name, "totalClusters", len(r.clusters)) - - // Log all clusters after registration - clusterNames := make([]string, 0, len(r.clusters)) - for name := range r.clusters { - clusterNames = append(clusterNames, name) - } - setupLog.Info("Current clusters after registration", - "totalClusters", len(r.clusters), - "clusterNames", strings.Join(clusterNames, ", ")) -} - -// UnregisterCluster removes a cluster from the reconciler -func (r *MulticlusterReconciler) UnregisterCluster(name string) { - r.lock.Lock() - defer r.lock.Unlock() - - delete(r.clusters, name) - setupLog.Info("Unregistered cluster", "name", name) -} - -// GetCluster returns a cluster for the given name -func (r *MulticlusterReconciler) GetCluster(ctx context.Context, name string) (cluster.Cluster, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if cl, ok := r.clusters[name]; ok { - return cl, nil - } - return nil, fmt.Errorf("cluster %s not found", name) -} - -// ListClusters returns a list of all registered clusters -func (r *MulticlusterReconciler) ListClusters() map[string]cluster.Cluster { - r.lock.RLock() - defer r.lock.RUnlock() - - if r.clusters == nil { - return map[string]cluster.Cluster{} - } - - // Return a copy of the map to prevent concurrent access - clusters := make(map[string]cluster.Cluster, len(r.clusters)) - for name, cl := range r.clusters { - clusters[name] = cl - } - return clusters -} - -// ListClustersWithLog returns a list of all registered clusters and logs them -func (r *MulticlusterReconciler) ListClustersWithLog() map[string]cluster.Cluster { - r.lock.RLock() - defer r.lock.RUnlock() - - if r.clusters == nil { - setupLog.Info("No clusters registered") - return map[string]cluster.Cluster{} - } - - // Log all clusters - clusterNames := make([]string, 0, len(r.clusters)) - for name := range r.clusters { - clusterNames = append(clusterNames, name) - } - setupLog.Info("Current registered clusters", - "totalClusters", len(r.clusters), - "clusterNames", strings.Join(clusterNames, ", ")) - - // Return a copy of the map to prevent concurrent access - clusters := make(map[string]cluster.Cluster, len(r.clusters)) - for name, cl := range r.clusters { - clusters[name] = cl - } - return clusters -} From 
b21016e86c5d7f735a848def3081d54438216364 Mon Sep 17 00:00:00 2001
From: Jairus Christensen
Date: Thu, 27 Mar 2025 12:30:39 -0600
Subject: [PATCH 3/3] Simplify provider and main.go further, making them look
 like other providers. Remove unnecessary aspects like isReady, hashing, etc.
 Add script to create kubeconfig secrets using remote service accounts.

---
 examples/kubeconfig/README.md                 | 113 +++++
 examples/kubeconfig/go.mod                    |  69 ---
 examples/kubeconfig/go.sum                    | 190 --------
 examples/kubeconfig/main.go                   |  58 +--
 .../scripts/create-kubeconfig-secret.sh       | 136 +++---
 pkg/manager/manager.go                        |  11 +-
 providers/kubeconfig/provider.go              | 405 ++++--------
 7 files changed, 294 insertions(+), 688 deletions(-)
 create mode 100644 examples/kubeconfig/README.md
 delete mode 100644 examples/kubeconfig/go.mod
 delete mode 100644 examples/kubeconfig/go.sum

diff --git a/examples/kubeconfig/README.md b/examples/kubeconfig/README.md
new file mode 100644
index 0000000..f90a2b2
--- /dev/null
+++ b/examples/kubeconfig/README.md
@@ -0,0 +1,113 @@
+# Kubeconfig Provider Example
+
+This example demonstrates how to use the kubeconfig provider to manage multiple Kubernetes clusters using kubeconfig secrets.
+
+## Overview
+
+The kubeconfig provider allows you to:
+1. Discover and connect to multiple Kubernetes clusters using kubeconfig secrets
+2. Run controllers that can operate across all discovered clusters
+3. Manage cluster access through RBAC rules and service accounts
+
+## Directory Structure
+
+```
+examples/kubeconfig/
+├── controllers/   # Example controller that simply lists pods
+│   └── pod_lister.go
+├── scripts/       # Utility scripts
+│   └── create-kubeconfig-secret.sh
+└── main.go        # Example operator implementation
+```
+
+## Usage
+
+### 1. Setting Up Cluster Access
+
+Before creating a kubeconfig secret, ensure that:
+1. The remote cluster has a service account with the necessary RBAC permissions for your operator
+2. The service account exists in the namespace where you want to create the kubeconfig secret
+
+Use the `create-kubeconfig-secret.sh` script to create a kubeconfig secret for each cluster you want to manage:
+
+```bash
+./scripts/create-kubeconfig-secret.sh \
+  --name cluster1 \
+  -n default \
+  -c prod-cluster \
+  -a my-service-account
+```
+
+The script will:
+- Use the specified service account from the remote cluster
+- Generate a kubeconfig using the service account's token
+- Store the kubeconfig in a secret in your local cluster
+
+Command-line options:
+- `-c, --context`: Kubeconfig context to use (required)
+- `--name`: Name for the secret (defaults to context name)
+- `-n, --namespace`: Namespace to create the secret in (default: "default")
+- `-a, --service-account`: Service account name to use from the remote cluster (default: "default")
+
+### 2. Customizing RBAC Rules
+
+The service account in the remote cluster must have the necessary RBAC permissions for your operator to function. Edit the RBAC templates in the `rbac/` directory to define the permissions your operator needs:
+
+```yaml
+# rbac/clusterrole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: ${SECRET_NAME}-role
+rules:
+# Add permissions for your operator <--------------------------------
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["list", "get", "watch"] # watch is needed for controllers that observe resources
+```
+
+Important RBAC considerations:
+- Use the `watch` verb if your controller needs to observe resource changes
+- Use `list` and `get` for reading resources
+- Use `create`, `update`, `patch`, and `delete` for modifying resources
+- Consider using a `Role` instead of a `ClusterRole` if you only need namespace-scoped permissions
+
+### 3. Implementing Your Operator
+
+Add your controllers to `main.go`:
+
+```go
+import (
+	// Import your controllers here <--------------------------------
+	"sigs.k8s.io/multicluster-runtime/examples/kubeconfig/controllers"
+)
+
+func main() {
+	//...
+
+	// Run your controllers here <--------------------------------
+	podWatcher := controllers.NewPodWatcher(mgr)
+	if err := mgr.Add(podWatcher); err != nil {
+		entryLog.Error(err, "Unable to add pod watcher")
+		os.Exit(1)
+	}
+}
+```
+
+Your controllers can then use the manager to access any cluster and view the resources that the RBAC permissions allow.
+
+## How It Works
+
+1. The kubeconfig provider watches for secrets with a specific label in a namespace
+2. When a new secret is found, it:
+   - Extracts the kubeconfig data
+   - Creates a new controller-runtime cluster
+   - Makes the cluster available to your controllers
+3. Your controllers can access any cluster through the manager
+4. RBAC rules ensure your operator has the necessary permissions in each cluster
+
+## Labels and Configuration
+
+The provider uses the following label and secret key by default:
+- Label: `sigs.k8s.io/multicluster-runtime-kubeconfig: "true"`
+- Secret data key: `kubeconfig`
+
+You can customize these in the provider options when creating it.
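+
+For example, this is how the provider options are built in this example's `main.go` (the values shown are the flag defaults; a minimal sketch of the relevant snippet, not the full program):
+
+```go
+providerOpts := kubeconfigprovider.Options{
+	Namespace:             "default",                                     // namespace watched for kubeconfig secrets
+	KubeconfigSecretLabel: "sigs.k8s.io/multicluster-runtime-kubeconfig", // label that marks kubeconfig secrets
+	KubeconfigSecretKey:   "kubeconfig",                                  // data key holding the kubeconfig
+}
+```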
\ No newline at end of file diff --git a/examples/kubeconfig/go.mod b/examples/kubeconfig/go.mod deleted file mode 100644 index 164e719..0000000 --- a/examples/kubeconfig/go.mod +++ /dev/null @@ -1,69 +0,0 @@ -module github.com/christensenjairus/multicluster-runtime/examples/kubeconfig - -go 1.24.1 - -replace sigs.k8s.io/multicluster-runtime => ../../ - -require ( - github.com/go-logr/logr v1.4.2 - golang.org/x/sync v0.8.0 - k8s.io/api v0.32.3 - k8s.io/client-go v0.32.3 - sigs.k8s.io/controller-runtime v0.20.4 - sigs.k8s.io/multicluster-runtime v0.20.0-alpha.5 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.23.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/x448/float16 v0.8.4 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.32.1 // indirect - k8s.io/apimachinery v0.32.3 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect -) diff --git a/examples/kubeconfig/go.sum b/examples/kubeconfig/go.sum deleted file mode 100644 index 441ed9f..0000000 --- a/examples/kubeconfig/go.sum +++ /dev/null @@ -1,190 +0,0 @@ -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= -github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= -github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= -github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= 
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= -github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= -github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= 
-google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= -k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/examples/kubeconfig/main.go b/examples/kubeconfig/main.go index 51a17ba..b9a8ae4 100644 --- a/examples/kubeconfig/main.go +++ b/examples/kubeconfig/main.go @@ -17,14 +17,12 @@ limitations under the License. package main import ( - "context" - "errors" "flag" "os" - "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
+ "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" @@ -41,24 +39,13 @@ import ( func main() { var namespace string - var kubeconfigLabel string - var connectionTimeout time.Duration - var cacheSyncTimeout time.Duration - var kubeconfigPath string - var providerReadyTimeout time.Duration + var kubeconfigSecretLabel string + var kubeconfigSecretKey string flag.StringVar(&namespace, "namespace", "default", "Namespace where kubeconfig secrets are stored") - flag.StringVar(&kubeconfigLabel, "kubeconfig-label", "sigs.k8s.io/multicluster-runtime-kubeconfig", + flag.StringVar(&kubeconfigSecretLabel, "kubeconfig-label", "sigs.k8s.io/multicluster-runtime-kubeconfig", "Label used to identify secrets containing kubeconfig data") - flag.DurationVar(&connectionTimeout, "connection-timeout", 15*time.Second, - "Timeout for connecting to a cluster") - flag.DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 60*time.Second, - "Timeout for waiting for the cache to sync") - flag.StringVar(&kubeconfigPath, "kubeconfig-path", "", - "Path to kubeconfig file for test secrets (defaults to ~/.kube/config if not set)") - flag.DurationVar(&providerReadyTimeout, "provider-ready-timeout", 120*time.Second, - "Timeout for waiting for the provider to be ready") - + flag.StringVar(&kubeconfigSecretKey, "kubeconfig-key", "kubeconfig", "Key in the secret data that contains the kubeconfig") opts := zap.Options{ Development: true, } @@ -69,15 +56,13 @@ func main() { entryLog := ctrllog.Log.WithName("entrypoint") ctx := ctrl.SetupSignalHandler() - entryLog.Info("Starting application", "namespace", namespace, "kubeconfigLabel", kubeconfigLabel) + entryLog.Info("Starting application", "namespace", namespace, "kubeconfigSecretLabel", kubeconfigSecretLabel) // Create the kubeconfig provider with options providerOpts := kubeconfigprovider.Options{ - Namespace: namespace, - KubeconfigLabel: kubeconfigLabel, - ConnectionTimeout: connectionTimeout, - CacheSyncTimeout: cacheSyncTimeout, - KubeconfigPath: kubeconfigPath, + Namespace: namespace, + KubeconfigSecretLabel: kubeconfigSecretLabel, + KubeconfigSecretKey: kubeconfigSecretKey, } // Create the provider first, then the manager with the provider @@ -90,7 +75,9 @@ func main() { // Modify manager options to avoid waiting for cache sync managerOpts := manager.Options{ // Don't block main thread on leader election - LeaderElection: false, + LeaderElection: false, + // Add the scheme + Scheme: scheme.Scheme, } mgr, err := mcmanager.New(ctrl.GetConfigOrDie(), provider, managerOpts) @@ -102,7 +89,7 @@ func main() { // Add our controllers entryLog.Info("Adding controllers") - // TODO: Run your controllers here <-------------------------------- + // Run your controllers here <-------------------------------- podWatcher := controllers.NewPodWatcher(mgr) if err := mgr.Add(podWatcher); err != nil { entryLog.Error(err, "Unable to add pod watcher") @@ -118,18 +105,6 @@ func main() { } }() - // Wait for the provider to be ready with a short timeout - entryLog.Info("Waiting for provider to be ready") - readyCtx, cancel := context.WithTimeout(ctx, providerReadyTimeout) - defer cancel() - - select { - case <-provider.IsReady(): - entryLog.Info("Provider is ready") - case <-readyCtx.Done(): - entryLog.Error(readyCtx.Err(), "Timeout waiting for provider to be ready, continuing anyway") - } - // Start the manager entryLog.Info("Starting manager") if err := mgr.Start(ctx); err != nil { @@ -137,10 +112,3 @@ func main() { os.Exit(1) } } 
- -func ignoreCanceled(err error) error { - if errors.Is(err, context.Canceled) { - return nil - } - return err -} diff --git a/examples/kubeconfig/scripts/create-kubeconfig-secret.sh b/examples/kubeconfig/scripts/create-kubeconfig-secret.sh index fdaa03c..6d13102 100755 --- a/examples/kubeconfig/scripts/create-kubeconfig-secret.sh +++ b/examples/kubeconfig/scripts/create-kubeconfig-secret.sh @@ -1,52 +1,46 @@ #!/bin/bash -# Script to create a kubeconfig secret for the Multicluster Failover Operator +# Script to create a kubeconfig secret for the pod lister controller set -e # Default values -NAMESPACE="my-operator-namespace" -KUBECONFIG_PATH="${HOME}/.kube/config" +NAMESPACE="default" +SERVICE_ACCOUNT="default" KUBECONFIG_CONTEXT="" SECRET_NAME="" -DRY_RUN="false" # Function to display usage information function show_help { echo "Usage: $0 [options]" - echo " -n, --name NAME Name for the secret (will be used as cluster identifier)" - echo " -s, --namespace NS Namespace to create the secret in (default: ${NAMESPACE})" - echo " -k, --kubeconfig PATH Path to kubeconfig file (default: ${KUBECONFIG_PATH})" - echo " -c, --context CONTEXT Kubeconfig context to use (default: current-context)" - echo " -d, --dry-run Dry run, print YAML but don't apply" - echo " -h, --help Show this help message" + echo " -c, --context CONTEXT Kubeconfig context to use (required)" + echo " --name NAME Name for the secret (defaults to context name)" + echo " -n, --namespace NS Namespace to create the secret in (default: ${NAMESPACE})" + echo " -a, --service-account SA Service account name to use (default: ${SERVICE_ACCOUNT})" + echo " -h, --help Show this help message" echo "" - echo "Example: $0 -n cluster1 -c prod-cluster -k ~/.kube/config" + echo "Example: $0 -c prod-cluster" } # Parse command line options while [[ $# -gt 0 ]]; do key="$1" case $key in - -n|--name) + --name) SECRET_NAME="$2" shift 2 ;; - -s|--namespace) + -n|--namespace) NAMESPACE="$2" shift 2 ;; - -k|--kubeconfig) - KUBECONFIG_PATH="$2" - shift 2 - ;; -c|--context) KUBECONFIG_CONTEXT="$2" shift 2 ;; - -d|--dry-run) - DRY_RUN="true" - shift 1 + -a|--service-account) + SERVICE_ACCOUNT="$2" + shift 2 ;; -h|--help) show_help @@ -61,43 +55,81 @@ while [[ $# -gt 0 ]]; do done # Validate required arguments -if [ -z "$SECRET_NAME" ]; then - echo "ERROR: Secret name is required (-n, --name)" +if [ -z "$KUBECONFIG_CONTEXT" ]; then + echo "ERROR: Kubeconfig context is required (-c, --context)" show_help exit 1 fi -if [ ! -f "$KUBECONFIG_PATH" ]; then - echo "ERROR: Kubeconfig file not found at: $KUBECONFIG_PATH" +# Set secret name to context if not specified +if [ -z "$SECRET_NAME" ]; then + SECRET_NAME="$KUBECONFIG_CONTEXT" +fi + +# Get the cluster CA certificate from the remote cluster +CLUSTER_CA=$(kubectl --context=${KUBECONFIG_CONTEXT} config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}') +if [ -z "$CLUSTER_CA" ]; then + echo "ERROR: Could not get cluster CA certificate" exit 1 fi -# Process the kubeconfig -echo "Processing kubeconfig..." -TEMP_KUBECONFIG=$(mktemp) -trap "rm -f $TEMP_KUBECONFIG" EXIT - -if [ -n "$KUBECONFIG_CONTEXT" ]; then - kubectl config view --raw --minify --flatten --context="$KUBECONFIG_CONTEXT" > "$TEMP_KUBECONFIG" - if [ $? 
-ne 0 ]; then - echo "ERROR: Failed to extract context '$KUBECONFIG_CONTEXT' from kubeconfig" - exit 1 - fi - echo "Extracted context '$KUBECONFIG_CONTEXT' from kubeconfig" -else - cp "$KUBECONFIG_PATH" "$TEMP_KUBECONFIG" - echo "Using entire kubeconfig file" +# Get the cluster server URL from the remote cluster +CLUSTER_SERVER=$(kubectl --context=${KUBECONFIG_CONTEXT} config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.server}') +if [ -z "$CLUSTER_SERVER" ]; then + echo "ERROR: Could not get cluster server URL" + exit 1 +fi + +# Get the service account token from the remote cluster +SA_TOKEN=$(kubectl --context=${KUBECONFIG_CONTEXT} -n ${NAMESPACE} create token ${SERVICE_ACCOUNT} --duration=8760h) +if [ -z "$SA_TOKEN" ]; then + echo "ERROR: Could not create service account token" + exit 1 fi -# Encode the kubeconfig -KUBECONFIG_B64=$(base64 < "$TEMP_KUBECONFIG" | tr -d '\n') +# Create a new kubeconfig using the service account token +NEW_KUBECONFIG=$(cat < "$TEMP_KUBECONFIG" -# Create the namespace if it doesn't exist -if [ "$DRY_RUN" != "true" ]; then - kubectl get namespace "$NAMESPACE" &>/dev/null || kubectl create namespace "$NAMESPACE" +# Verify the kubeconfig works +echo "Verifying kubeconfig..." +if ! kubectl --kubeconfig="$TEMP_KUBECONFIG" get pods -A &>/dev/null; then + rm "$TEMP_KUBECONFIG" + echo "ERROR: Failed to verify kubeconfig - unable to list pods." + echo "- Ensure that the service account '${NAMESPACE}/${SERVICE_ACCOUNT}' on cluster '${KUBECONFIG_CONTEXT}' has the necessary permissions to list pods." + echo "- You may specify a namespace using the -n flag." + echo "- You may specify a service account using the -a flag." + exit 1 fi +echo "Kubeconfig verified successfully!" -# Generate the secret YAML +# Encode the verified kubeconfig +KUBECONFIG_B64=$(cat "$TEMP_KUBECONFIG" | base64 -w0) +rm "$TEMP_KUBECONFIG" + +# Generate and apply the secret SECRET_YAML=$(cat <