diff --git a/examples/kubeconfig/README.md b/examples/kubeconfig/README.md
new file mode 100644
index 0000000..f90a2b2
--- /dev/null
+++ b/examples/kubeconfig/README.md
@@ -0,0 +1,113 @@
+# Kubeconfig Provider Example
+
+This example demonstrates how to use the kubeconfig provider to manage multiple Kubernetes clusters using kubeconfig secrets.
+
+## Overview
+
+The kubeconfig provider allows you to:
+1. Discover and connect to multiple Kubernetes clusters using kubeconfig secrets
+2. Run controllers that can operate across all discovered clusters
+3. Manage cluster access through RBAC rules and service accounts
+
+## Directory Structure
+
+```
+examples/kubeconfig/
+├── controllers/          # Example controller that simply lists pods
+│   └── pod_lister.go
+├── scripts/              # Utility scripts
+│   └── create-kubeconfig-secret.sh
+└── main.go               # Example operator implementation
+```
+
+## Usage
+
+### 1. Setting Up Cluster Access
+
+Before creating a kubeconfig secret, ensure that:
+1. The remote cluster has a service account with the necessary RBAC permissions for your operator
+2. The service account exists in the namespace where you want to create the kubeconfig secret
+
+Use the `create-kubeconfig-secret.sh` script to create a kubeconfig secret for each cluster you want to manage:
+
+```bash
+./scripts/create-kubeconfig-secret.sh \
+  --name cluster1 \
+  -n default \
+  -c prod-cluster \
+  -a my-service-account
+```
+
+The script will:
+- Use the specified service account from the remote cluster
+- Generate a kubeconfig using the service account's token
+- Store the kubeconfig in a secret in your local cluster
+
+Command-line options:
+- `-c, --context`: Kubeconfig context to use (required)
+- `--name`: Name for the secret (defaults to context name)
+- `-n, --namespace`: Namespace to create the secret in (default: "default")
+- `-a, --service-account`: Service account name to use from the remote cluster (default: "default")
+
+### 2. Customizing RBAC Rules
+
+The service account in the remote cluster must have the necessary RBAC permissions for your operator to function. Edit the RBAC templates in the `rbac/` directory to define the permissions your operator needs:
+
+```yaml
+# rbac/clusterrole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: ${SECRET_NAME}-role
+rules:
+# Add permissions for your operator <--------------------------------
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["list", "get", "watch"] # watch is needed for controllers that observe resources
+```
+
+Important RBAC considerations:
+- Use the `watch` verb if your controller needs to observe resource changes
+- Use `list` and `get` for reading resources
+- Use `create`, `update`, `patch`, `delete` for modifying resources
+- Consider using a `Role` instead of a `ClusterRole` if you only need namespace-scoped permissions
+
+### 3. Implementing Your Operator
+
+Add your controllers to `main.go`:
+
+```go
+import (
+	// Import your controllers here <--------------------------------
+	"sigs.k8s.io/multicluster-runtime/examples/kubeconfig/controllers"
+)
+
+func main() {
+	//...
+
+	// Run your controllers here <--------------------------------
+	podWatcher := controllers.NewPodWatcher(mgr)
+	if err := mgr.Add(podWatcher); err != nil {
+		entryLog.Error(err, "Unable to add pod watcher")
+		os.Exit(1)
+	}
+}
+```
+
+Your controllers can then use the manager to access any discovered cluster and read the resources that the RBAC permissions allow.
+
+## How It Works
+
+1. 
The kubeconfig provider watches for secrets with a specific label in a namespace +2. When a new secret is found, it: + - Extracts the kubeconfig data + - Creates a new controller-runtime cluster + - Makes the cluster available to your controllers +3. Your controllers can access any cluster through the manager +4. RBAC rules ensure your operator has the necessary permissions in each cluster + +## Labels and Configuration + +The provider uses the following labels and keys by default: +- Label: `sigs.k8s.io/multicluster-runtime-kubeconfig: "true"` +- Secret data key: `kubeconfig` + +You can customize these in the provider options when creating it. \ No newline at end of file diff --git a/examples/kubeconfig/controllers/pod_lister.go b/examples/kubeconfig/controllers/pod_lister.go new file mode 100644 index 0000000..fb3a1ce --- /dev/null +++ b/examples/kubeconfig/controllers/pod_lister.go @@ -0,0 +1,102 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "time" + + "github.com/go-logr/logr" + + corev1 "k8s.io/api/core/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" +) + +// PodWatcher is a simple controller that watches pods across multiple clusters +type PodWatcher struct { + Manager mcmanager.Manager + Log logr.Logger +} + +// NewPodWatcher creates a new PodWatcher +func NewPodWatcher(mgr mcmanager.Manager) *PodWatcher { + return &PodWatcher{ + Manager: mgr, + Log: ctrllog.Log.WithName("pod-watcher"), + } +} + +// Start implements Runnable +func (p *PodWatcher) Start(ctx context.Context) error { + // Nothing to do here - we'll handle everything in Engage + return nil +} + +// Engage implements multicluster.Aware and gets called when a new cluster is engaged +func (p *PodWatcher) Engage(ctx context.Context, clusterName string, cl cluster.Cluster) error { + log := p.Log.WithValues("cluster", clusterName) + log.Info("Engaging cluster") + + // Start a goroutine to periodically list pods + go func() { + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + // Initial list + if err := p.listPods(ctx, cl, clusterName, log); err != nil { + log.Error(err, "Failed to list pods") + } + + for { + select { + case <-ctx.Done(): + log.Info("Context done, stopping pod watcher") + return + case <-ticker.C: + if err := p.listPods(ctx, cl, clusterName, log); err != nil { + log.Error(err, "Failed to list pods") + } + } + } + }() + + return nil +} + +// listPods lists pods in the default namespace +func (p *PodWatcher) listPods(ctx context.Context, cl cluster.Cluster, clusterName string, log logr.Logger) error { + var pods corev1.PodList + if err := cl.GetClient().List(ctx, &pods, &client.ListOptions{ + Namespace: "default", + }); err != nil { + return err + } + + log.Info("Pods in default namespace", "count", len(pods.Items)) + for _, pod := range pods.Items { + 
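// Log each pod's name and phase; a real controller would reconcile these pods instead of just logging them.
+		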
log.Info("Pod", + "name", pod.Name, + "status", pod.Status.Phase) + } + + return nil +} diff --git a/examples/kubeconfig/main.go b/examples/kubeconfig/main.go new file mode 100644 index 0000000..b9a8ae4 --- /dev/null +++ b/examples/kubeconfig/main.go @@ -0,0 +1,114 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth" + + ctrl "sigs.k8s.io/controller-runtime" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + + // Import your controllers here <-------------------------------- + "sigs.k8s.io/multicluster-runtime/examples/kubeconfig/controllers" + + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + kubeconfigprovider "sigs.k8s.io/multicluster-runtime/providers/kubeconfig" +) + +func main() { + var namespace string + var kubeconfigSecretLabel string + var kubeconfigSecretKey string + + flag.StringVar(&namespace, "namespace", "default", "Namespace where kubeconfig secrets are stored") + flag.StringVar(&kubeconfigSecretLabel, "kubeconfig-label", "sigs.k8s.io/multicluster-runtime-kubeconfig", + "Label used to identify secrets containing kubeconfig data") + flag.StringVar(&kubeconfigSecretKey, "kubeconfig-key", "kubeconfig", "Key in the secret data that contains the kubeconfig") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrllog.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + entryLog := ctrllog.Log.WithName("entrypoint") + ctx := ctrl.SetupSignalHandler() + + entryLog.Info("Starting application", "namespace", namespace, "kubeconfigSecretLabel", kubeconfigSecretLabel) + + // Create the kubeconfig provider with options + providerOpts := kubeconfigprovider.Options{ + Namespace: namespace, + KubeconfigSecretLabel: kubeconfigSecretLabel, + KubeconfigSecretKey: kubeconfigSecretKey, + } + + // Create the provider first, then the manager with the provider + entryLog.Info("Creating provider") + provider := kubeconfigprovider.New(providerOpts) + + // Create the multicluster manager with the provider + entryLog.Info("Creating manager") + + // Modify manager options to avoid waiting for cache sync + managerOpts := manager.Options{ + // Don't block main thread on leader election + LeaderElection: false, + // Add the scheme + Scheme: scheme.Scheme, + } + + mgr, err := mcmanager.New(ctrl.GetConfigOrDie(), provider, managerOpts) + if err != nil { + entryLog.Error(err, "Unable to create manager") + os.Exit(1) + } + + // Add our controllers + entryLog.Info("Adding controllers") + + // Run your controllers here <-------------------------------- + podWatcher := controllers.NewPodWatcher(mgr) + if err := mgr.Add(podWatcher); err != nil { + entryLog.Error(err, 
"Unable to add pod watcher") + os.Exit(1) + } + + // Start provider in a goroutine + entryLog.Info("Starting provider") + go func() { + err := provider.Run(ctx, mgr) + if err != nil && ctx.Err() == nil { + entryLog.Error(err, "Provider exited with error") + } + }() + + // Start the manager + entryLog.Info("Starting manager") + if err := mgr.Start(ctx); err != nil { + entryLog.Error(err, "Error running manager") + os.Exit(1) + } +} diff --git a/examples/kubeconfig/scripts/create-kubeconfig-secret.sh b/examples/kubeconfig/scripts/create-kubeconfig-secret.sh new file mode 100755 index 0000000..6d13102 --- /dev/null +++ b/examples/kubeconfig/scripts/create-kubeconfig-secret.sh @@ -0,0 +1,151 @@ +#!/bin/bash + +# Script to create a kubeconfig secret for the pod lister controller + +set -e + +# Default values +NAMESPACE="default" +SERVICE_ACCOUNT="default" +KUBECONFIG_CONTEXT="" +SECRET_NAME="" + +# Function to display usage information +function show_help { + echo "Usage: $0 [options]" + echo " -c, --context CONTEXT Kubeconfig context to use (required)" + echo " --name NAME Name for the secret (defaults to context name)" + echo " -n, --namespace NS Namespace to create the secret in (default: ${NAMESPACE})" + echo " -a, --service-account SA Service account name to use (default: ${SERVICE_ACCOUNT})" + echo " -h, --help Show this help message" + echo "" + echo "Example: $0 -c prod-cluster" +} + +# Parse command line options +while [[ $# -gt 0 ]]; do + key="$1" + case $key in + --name) + SECRET_NAME="$2" + shift 2 + ;; + -n|--namespace) + NAMESPACE="$2" + shift 2 + ;; + -c|--context) + KUBECONFIG_CONTEXT="$2" + shift 2 + ;; + -a|--service-account) + SERVICE_ACCOUNT="$2" + shift 2 + ;; + -h|--help) + show_help + exit 0 + ;; + *) + echo "Unknown option: $1" + show_help + exit 1 + ;; + esac +done + +# Validate required arguments +if [ -z "$KUBECONFIG_CONTEXT" ]; then + echo "ERROR: Kubeconfig context is required (-c, --context)" + show_help + exit 1 +fi + +# Set secret name to context if not specified +if [ -z "$SECRET_NAME" ]; then + SECRET_NAME="$KUBECONFIG_CONTEXT" +fi + +# Get the cluster CA certificate from the remote cluster +CLUSTER_CA=$(kubectl --context=${KUBECONFIG_CONTEXT} config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}') +if [ -z "$CLUSTER_CA" ]; then + echo "ERROR: Could not get cluster CA certificate" + exit 1 +fi + +# Get the cluster server URL from the remote cluster +CLUSTER_SERVER=$(kubectl --context=${KUBECONFIG_CONTEXT} config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.server}') +if [ -z "$CLUSTER_SERVER" ]; then + echo "ERROR: Could not get cluster server URL" + exit 1 +fi + +# Get the service account token from the remote cluster +SA_TOKEN=$(kubectl --context=${KUBECONFIG_CONTEXT} -n ${NAMESPACE} create token ${SERVICE_ACCOUNT} --duration=8760h) +if [ -z "$SA_TOKEN" ]; then + echo "ERROR: Could not create service account token" + exit 1 +fi + +# Create a new kubeconfig using the service account token +NEW_KUBECONFIG=$(cat < "$TEMP_KUBECONFIG" + +# Verify the kubeconfig works +echo "Verifying kubeconfig..." +if ! kubectl --kubeconfig="$TEMP_KUBECONFIG" get pods -A &>/dev/null; then + rm "$TEMP_KUBECONFIG" + echo "ERROR: Failed to verify kubeconfig - unable to list pods." + echo "- Ensure that the service account '${NAMESPACE}/${SERVICE_ACCOUNT}' on cluster '${KUBECONFIG_CONTEXT}' has the necessary permissions to list pods." + echo "- You may specify a namespace using the -n flag." 
+ echo "- You may specify a service account using the -a flag." + exit 1 +fi +echo "Kubeconfig verified successfully!" + +# Encode the verified kubeconfig +KUBECONFIG_B64=$(cat "$TEMP_KUBECONFIG" | base64 -w0) +rm "$TEMP_KUBECONFIG" + +# Generate and apply the secret +SECRET_YAML=$(cat <