name: Smoke Test

on:
  pull_request:
    branches: [main, 'release-*']
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: read

env:
  CLUSTER_NAME: capi-quickstart
  KIND_CLUSTER_NAME: capi-operator-smoke-test
  KUBERNETES_VERSION: v1.33.0
  CONTROLLER_IMG: cluster-api-operator
  TAG: smoke-test

jobs:
  smoke-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'

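      # Note: kubectl, yq, helm, and clusterctl are all fetched at their
      # latest released versions, so tool versions can drift between runs;
      # pin specific releases here if reproducibility matters.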
      - name: Install tools
        run: |
          # kubectl
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl && sudo mv kubectl /usr/local/bin/

          # yq
          wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O yq
          chmod +x yq && sudo mv yq /usr/local/bin/

          # helm
          curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash

          # clusterctl
          curl -L https://github.com/kubernetes-sigs/cluster-api/releases/latest/download/clusterctl-linux-amd64 -o clusterctl
          chmod +x clusterctl && sudo mv clusterctl /usr/local/bin/

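      # make docker-build produces an architecture-suffixed image
      # (<img>-amd64:<tag>); retag it to the plain name that the Helm
      # chart values below reference.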
      - name: Build Docker image
        run: |
          make docker-build
          docker tag ${CONTROLLER_IMG}-amd64:${TAG} ${CONTROLLER_IMG}:${TAG}

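      # HELM_CHART_TAG is scraped from make's variable database (make -p)
      # rather than hard-coded, so this step tracks whatever tag the
      # Makefile computes for the packaged charts.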
      - name: Build charts
        run: |
          make release-chart
          echo "HELM_CHART_TAG=$(make -s -f Makefile -p | grep '^HELM_CHART_TAG :=' | cut -d' ' -f3)" >> "$GITHUB_ENV"

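      # The host Docker socket is mounted into the kind node so the CAPD
      # provider can manage workload-cluster containers as siblings; the
      # containerd mirror patch routes docker.io pulls through
      # mirror.gcr.io to dodge Docker Hub rate limits on shared runners.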
      - name: Create kind cluster
        run: |
          chmod +x ./hack/ensure-kind.sh
          ./hack/ensure-kind.sh

          cat <<EOF > /tmp/kind-config.yaml
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          networking:
            ipFamily: ipv4
          nodes:
          - role: control-plane
            extraMounts:
              - hostPath: /var/run/docker.sock
                containerPath: /var/run/docker.sock
          containerdConfigPatches:
          - |-
            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
              endpoint = ["https://mirror.gcr.io", "https://registry-1.docker.io"]
          EOF

          kind create cluster --name ${KIND_CLUSTER_NAME} --config /tmp/kind-config.yaml --wait 5m
          kind load docker-image ${CONTROLLER_IMG}:${TAG} --name ${KIND_CLUSTER_NAME}

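      # cert-manager is a hard prerequisite: the operator (and the
      # providers it installs) rely on it to issue certificates for their
      # admission webhooks.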
      - name: Install cert-manager
        run: |
          helm repo add jetstack https://charts.jetstack.io
          helm repo update
          helm install cert-manager jetstack/cert-manager \
            --namespace cert-manager \
            --create-namespace \
            --set installCRDs=true \
            --wait \
            --timeout 5m

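      # The chart is pointed at the image built earlier in this job;
      # pullPolicy=IfNotPresent makes the kubelet use the copy loaded
      # into kind instead of trying to pull it from a registry.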
      - name: Install Cluster API Operator
        run: |
          CHART_PACKAGE="out/package/cluster-api-operator-${HELM_CHART_TAG}.tgz"
          helm install capi-operator "$CHART_PACKAGE" \
            --create-namespace \
            -n capi-operator-system \
            --set image.manager.repository=${CONTROLLER_IMG} \
            --set image.manager.tag=${TAG} \
            --set image.manager.pullPolicy=IfNotPresent \
            --wait \
            --timeout 90s

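      # The providers chart declares which core, bootstrap, control-plane,
      # and infrastructure providers the operator should install, plus the
      # feature gates each manager runs with. ClusterTopology must be on
      # everywhere for the ClusterClass-based workload cluster created
      # later in this job.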
      - name: Deploy providers
        run: |
          cat <<EOF > /tmp/providers-values.yaml
          core:
            cluster-api:
              namespace: capi-system
          bootstrap:
            kubeadm:
              namespace: capi-kubeadm-bootstrap-system
          controlPlane:
            kubeadm:
              namespace: capi-kubeadm-control-plane-system
          infrastructure:
            docker:
              namespace: capd-system
          manager:
            featureGates:
              core:
                ClusterTopology: true
                ClusterResourceSet: true
                MachinePool: true
              kubeadm:
                ClusterTopology: true
                MachinePool: true
              docker:
                ClusterTopology: true
          EOF

          PROVIDERS_CHART_PACKAGE="out/package/cluster-api-operator-providers-${HELM_CHART_TAG}.tgz"
          helm install capi-providers "$PROVIDERS_CHART_PACKAGE" -f /tmp/providers-values.yaml --wait

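      # Ready on the provider CRs means the operator fetched and applied
      # the provider manifests; Available on the deployments means the
      # controllers themselves came up.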
      - name: Wait for providers
        run: |
          kubectl wait --for=condition=Ready --timeout=300s -n capi-system coreprovider/cluster-api
          kubectl wait --for=condition=Ready --timeout=300s -n capi-kubeadm-bootstrap-system bootstrapprovider/kubeadm
          kubectl wait --for=condition=Ready --timeout=300s -n capi-kubeadm-control-plane-system controlplaneprovider/kubeadm
          kubectl wait --for=condition=Ready --timeout=300s -n capd-system infrastructureprovider/docker

          kubectl wait --for=condition=Available --timeout=300s -n capi-system deployment/capi-controller-manager
          kubectl wait --for=condition=Available --timeout=300s -n capi-kubeadm-bootstrap-system deployment/capi-kubeadm-bootstrap-controller-manager
          kubectl wait --for=condition=Available --timeout=300s -n capi-kubeadm-control-plane-system deployment/capi-kubeadm-control-plane-controller-manager
          kubectl wait --for=condition=Available --timeout=300s -n capd-system deployment/capd-controller-manager

      - name: Verify providers
        run: |
          kubectl get coreprovider,bootstrapprovider,controlplaneprovider,infrastructureprovider -A
          kubectl get pods -A | grep -E "(capi-|capd-)"

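      # The development flavor renders a ClusterClass-based topology,
      # which is why the ClusterTopology gate was enabled for every
      # provider above.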
      - name: Create workload cluster
        run: |
          clusterctl generate cluster ${CLUSTER_NAME} \
            --infrastructure docker:v1.10.0 \
            --flavor development \
            --kubernetes-version ${KUBERNETES_VERSION} \
            --control-plane-machine-count=1 \
            --worker-machine-count=2 \
            > capi-quickstart.yaml

          kubectl apply -f capi-quickstart.yaml

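      # Cluster API writes the workload cluster's kubeconfig to the
      # <cluster>-kubeconfig secret; exporting KUBECONFIG via GITHUB_ENV
      # points every subsequent kubectl invocation at the workload cluster.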
      - name: Get workload cluster kubeconfig
        run: |
          timeout 300s bash -c "until kubectl get secret ${CLUSTER_NAME}-kubeconfig -n default &>/dev/null; do sleep 2; done"
          clusterctl get kubeconfig ${CLUSTER_NAME} --namespace default > ${CLUSTER_NAME}.kubeconfig
          echo "KUBECONFIG=$(pwd)/${CLUSTER_NAME}.kubeconfig" >> "$GITHUB_ENV"

      - name: Wait for workload cluster API server
        run: |
          timeout 300s bash -c "until kubectl cluster-info &>/dev/null; do sleep 5; done"

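      # Nodes report NotReady until a CNI is running. The calico.yaml
      # manifest installs Calico directly into kube-system (the
      # tigera-operator/calico-system namespaces only exist with the
      # operator-based install), so that is where the readiness waits look.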
      - name: Install CNI
        run: |
          kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l k8s-app=calico-node || true
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l k8s-app=calico-kube-controllers || true

      - name: Wait for nodes
        run: |
          kubectl wait --for=condition=Ready --timeout=300s nodes --all
          kubectl get nodes -o wide

      - name: Verify cluster
        run: |
          kubectl get po -A
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l k8s-app=kube-proxy
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-apiserver
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-controller-manager
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-scheduler

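      # On failure, dump state from both sides: operator/provider logs and
      # Cluster API resources from the management cluster, plus node and
      # pod status from the workload cluster if its kubeconfig was already
      # written.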
      - name: Collect logs on failure
        if: failure()
        run: |
          echo "=== Recent Events ==="
          kubectl get events -A --sort-by='.lastTimestamp' | tail -50

          echo -e "\n=== Provider Logs ==="
          kubectl logs -n capi-operator-system deployment/capi-operator-cluster-api-operator --tail=50 || true
          kubectl logs -n capi-system deployment/capi-controller-manager --tail=50 || true
          kubectl logs -n capd-system deployment/capd-controller-manager --tail=50 || true

          echo -e "\n=== Cluster Resources ==="
          kubectl get cluster,dockercluster,kubeadmcontrolplane,machine,dockermachine -A -o wide || true

          echo -e "\n=== Failed Pods ==="
          kubectl get pods -A | grep -v Running | grep -v Completed || true

          if [ -f "${CLUSTER_NAME}.kubeconfig" ]; then
            export KUBECONFIG="$(pwd)/${CLUSTER_NAME}.kubeconfig"
            echo -e "\n=== Workload Cluster Status ==="
            kubectl get nodes -o wide || true
            kubectl get pods -A --field-selector=status.phase!=Running,status.phase!=Succeeded || true
          fi

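      # Assumption: CAPD node containers follow kind's container
      # conventions, so deleting the workload "cluster" by name also
      # removes its containers; || true keeps cleanup from failing when a
      # cluster never came up.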
      - name: Cleanup
        if: always()
        run: |
          kind delete cluster --name ${CLUSTER_NAME} || true
          kind delete cluster --name ${KIND_CLUSTER_NAME} || true