2 changes: 1 addition & 1 deletion Makefile
@@ -176,7 +176,7 @@ SKIP_CLEANUP ?= false
SKIP_CREATE_MGMT_CLUSTER ?= false

.PHONY: test-e2e-run
-test-e2e-run: $(ENVSUBST) $(KUBECTL) $(GINKGO) e2e-image ## Run the end-to-end tests
+test-e2e-run: $(ENVSUBST) $(KUBECTL) $(GINKGO) ## Run the end-to-end tests
	$(ENVSUBST) < $(E2E_CONF_FILE) > $(E2E_CONF_FILE_ENVSUBST) && \
	time $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) -poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) \
		--tags=e2e --focus="$(GINKGO_FOCUS)" -skip="$(GINKGO_SKIP)" --nodes=$(GINKGO_NODES) --no-color=$(GINKGO_NOCOLOR) \
2 changes: 1 addition & 1 deletion config/default/manager_image_patch.yaml
@@ -8,5 +8,5 @@ spec:
    spec:
      containers:
      # Change the value of image field below to your controller image URL
-      - image: gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e
+      - image: ${CONTROLLER_IMAGE}
Contributor:
Do we need to do this? The tests are running, I believe?

It's not a blocker for ok-to-test, but it will likely be a blocker for merging. If we do have to do this, I suggest splitting it out into its own commit (or PR?), and a comment would help our future selves understand why.

Contributor Author:

This is needed for the GKE cluster, since we have to push the image to GCR. Unlike the existing kind clusters, where we can load the image into the cluster directly, for GKE we have to upload it to a registry.
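
For context, the operational difference the author describes can be sketched as follows (a minimal sketch; the commands mirror the image names used in this PR):

# With a kind bootstrap cluster, a locally built image can be side-loaded
# directly into the cluster nodes:
kind load docker-image gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e

# GKE has no equivalent side-loading mechanism, so the image must be pushed
# to a registry the nodes can pull from (here, the project's own GCR):
docker tag gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e \
  "gcr.io/${GCP_PROJECT}/cluster-api-gcp-controller:${TEST_NAME}"
docker push "gcr.io/${GCP_PROJECT}/cluster-api-gcp-controller:${TEST_NAME}"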

        name: manager
27 changes: 27 additions & 0 deletions scripts/ci-e2e.sh
@@ -101,6 +101,12 @@ EOF
  export IMAGE_ID="projects/${GCP_PROJECT}/global/images/${image_id}"
}

+init_controller_image() {
+  export CONTROLLER_IMAGE="gcr.io/${GCP_PROJECT}/cluster-api-gcp-controller:${TEST_NAME}"
+  echo "Tagging and pushing controller image to ${CONTROLLER_IMAGE}"
+  docker tag gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e "${CONTROLLER_IMAGE}"
+  docker push "${CONTROLLER_IMAGE}"
+}

# initialize a router and cloud NAT
init_networks() {
@@ -127,6 +133,14 @@ init_networks() {
    --nat-all-subnet-ip-ranges --auto-allocate-nat-external-ips
}

+# create a GKE cluster to be used as a bootstrap cluster
+create_gke_bootstrap_cluster() {
+  gcloud container clusters create "${TEST_NAME}-gke-bootstrap" --project "$GCP_PROJECT" \
Contributor:

OK, this is nice, because we can develop Workload Identity support and then hopefully add the same thing to kind. (Workload Identity is Google's name for automatic authentication to GCP without needing a ServiceAccount key in a Secret; historically it used a proxy, but now it can use identity federation via IAM workloadPools.)

But ... it could probably be a separate commit or PR. This one is less of a blocker though, IMO, because it's additive. (See the sketch after this function for what Workload Identity support might look like.)

--region "$GCP_REGION" --num-nodes 1 --machine-type e2-medium --release-channel regular \
--network "${GCP_NETWORK_NAME}" --quiet
export GKE_BOOTSTRAP_KUBECONFIG="${ARTIFACTS}/gke_bootstrap_kubeconfig"
KUBECONFIG="${GKE_BOOTSTRAP_KUBECONFIG}" gcloud container clusters get-credentials "${TEST_NAME}-gke-bootstrap" --region "${GCP_REGION}" --project "${GCP_PROJECT}"
}
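
Following up on the Workload Identity comment above, a rough sketch of what it might look like here. This is hypothetical and not part of the PR; the --workload-pool flag and IAM binding command are real gcloud syntax, but the GCP service account, namespace, and Kubernetes ServiceAccount names are placeholders:

# Hypothetical: create the bootstrap cluster with a workload pool enabled.
gcloud container clusters create "${TEST_NAME}-gke-bootstrap" \
  --workload-pool="${GCP_PROJECT}.svc.id.goog" \
  --region "$GCP_REGION" --num-nodes 1 --quiet

# Hypothetical: let the controller's Kubernetes ServiceAccount impersonate a
# GCP service account, removing the need for a ServiceAccount key in a Secret.
gcloud iam service-accounts add-iam-policy-binding \
  "capg-controller@${GCP_PROJECT}.iam.gserviceaccount.com" \
  --role roles/iam.workloadIdentityUser \
  --member "serviceAccount:${GCP_PROJECT}.svc.id.goog[capg-system/capg-controller-manager]"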

cleanup() {
  # Force a cleanup of cluster api created resources using gcloud commands
@@ -152,6 +166,9 @@ cleanup() {
    | awk '{print "gcloud compute firewall-rules delete --project '"$GCP_PROJECT"' --quiet " $1 "\n"}' \
    | bash) || true

+  gcloud container clusters delete "${TEST_NAME}-gke-bootstrap" --project "$GCP_PROJECT" \
+    --region "$GCP_REGION" --quiet || true

  # cleanup the networks
  gcloud compute routers nats delete "${TEST_NAME}-mynat" --project="${GCP_PROJECT}" \
    --router-region="${GCP_REGION}" --router="${TEST_NAME}-myrouter" --quiet || true
@@ -275,6 +292,16 @@ EOF
  init_networks
fi

+# Initialize the GKE bootstrap cluster
+if [[ -n "${SKIP_INIT_GKE_BOOTSTRAP:-}" ]]; then
+  echo "Skipping GKE bootstrap cluster initialization..."
+else
+  create_gke_bootstrap_cluster
Contributor:

I think we don't have permission to create GKE clusters in prow (and we probably don't want to take a hard dependency on GKE in OSS testing). I suggest this should be a different script, or that the default behavior should not change (e.g. only create a GKE cluster if TEST_MANAGEMENT_CLUSTER=gke, or something like that; see the sketch after this block).

Contributor Author:

Sounds good; if we end up using the GKE cluster, I can separate that out.

+fi
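
The gating the reviewer suggests could look like this (hypothetical; the TEST_MANAGEMENT_CLUSTER variable and its default are illustrative, not part of this PR):

# Hypothetical: keep the current kind-based default, and only create a GKE
# bootstrap cluster when the caller explicitly opts in.
if [[ "${TEST_MANAGEMENT_CLUSTER:-kind}" == "gke" ]]; then
  create_gke_bootstrap_cluster
fi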

+make e2e-image
+init_controller_image

make test-e2e
test_status="${?}"
echo TESTSTATUS
2 changes: 1 addition & 1 deletion test/e2e/config/gcp-ci.yaml
@@ -8,7 +8,7 @@ managementClusterName: capg-e2e

images:
  # Use local dev images built source tree;
-  - name: gcr.io/k8s-staging-cluster-api-gcp/cluster-api-gcp-controller:e2e
+  - name: ${CONTROLLER_IMAGE}
    loadBehavior: mustLoad

providers:
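For readers tracing the mechanism: the ${CONTROLLER_IMAGE} placeholder above is expanded by envsubst when the test-e2e-run Makefile target renders this config (see the Makefile diff at the top of this PR). A minimal sketch, with an illustrative value and output path:

export CONTROLLER_IMAGE="gcr.io/my-project/cluster-api-gcp-controller:my-test"
envsubst < test/e2e/config/gcp-ci.yaml > /tmp/gcp-ci.envsubst.yaml
# the images entry now names the concrete image instead of the placeholder
grep -n "cluster-api-gcp-controller" /tmp/gcp-ci.envsubst.yaml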
@@ -0,0 +1,173 @@
---
apiVersion: cluster.x-k8s.io/v1beta1
Contributor:

Not something to be addressed in this PR (i.e. this problem already exists), but we should consider using kustomize or patching a base config in code, so we can more easily understand what is different in each scenario (see the sketch after this template).

kind: Cluster
metadata:
  name: "${CLUSTER_NAME}"
  labels:
    cni: "${CLUSTER_NAME}-crs-cni"
    ccm: "${CLUSTER_NAME}-crs-ccm"
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ["192.168.0.0/16"]
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: GCPCluster
    name: "${CLUSTER_NAME}"
  controlPlaneRef:
    kind: KubeadmControlPlane
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    name: "${CLUSTER_NAME}-control-plane"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: GCPCluster
metadata:
  name: "${CLUSTER_NAME}"
spec:
  project: "${GCP_PROJECT}"
  region: "${GCP_REGION}"
  network:
    name: "${GCP_NETWORK_NAME}"
    subnets:
      - name: control-plane-subnet
        cidrBlock: "10.0.0.0/17"
        purpose: PRIVATE
        region: us-east4
  loadBalancer:
    loadBalancerType: InternalExternal
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: "${CLUSTER_NAME}-control-plane"
spec:
  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
  machineTemplate:
    infrastructureRef:
      kind: GCPMachineTemplate
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      name: "${CLUSTER_NAME}-control-plane"
  kubeadmConfigSpec:
    initConfiguration:
      nodeRegistration:
        name: '{{ ds.meta_data.local_hostname.split(".")[0] }}'
        kubeletExtraArgs:
          cloud-provider: external
    clusterConfiguration:
      apiServer:
        timeoutForControlPlane: 20m
      controllerManager:
        extraArgs:
          cloud-provider: external
          allocate-node-cidrs: "false"
      kubernetesVersion: "${KUBERNETES_VERSION}"
    files:
      - content: |
          [Global]
          project-id = "${GCP_PROJECT}"
          network-name = "${GCP_NETWORK_NAME}"
          multizone = true
        owner: root:root
        path: /etc/kubernetes/cloud.config
        permissions: "0744"
    joinConfiguration:
      nodeRegistration:
        name: '{{ ds.meta_data.local_hostname.split(".")[0] }}'
        kubeletExtraArgs:
          cloud-provider: external
  version: "${KUBERNETES_VERSION}"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: GCPMachineTemplate
metadata:
  name: "${CLUSTER_NAME}-control-plane"
spec:
  template:
    spec:
      instanceType: "${GCP_CONTROL_PLANE_MACHINE_TYPE}"
      image: "${IMAGE_ID}"
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: "${CLUSTER_NAME}-md-0"
spec:
  clusterName: "${CLUSTER_NAME}"
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels:
  template:
    spec:
      clusterName: "${CLUSTER_NAME}"
      version: "${KUBERNETES_VERSION}"
      bootstrap:
        configRef:
          name: "${CLUSTER_NAME}-md-0"
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
      infrastructureRef:
        name: "${CLUSTER_NAME}-md-0"
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: GCPMachineTemplate
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: GCPMachineTemplate
metadata:
  name: "${CLUSTER_NAME}-md-0"
spec:
  template:
    spec:
      instanceType: "${GCP_NODE_MACHINE_TYPE}"
      image: "${IMAGE_ID}"
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: "${CLUSTER_NAME}-md-0"
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          name: '{{ ds.meta_data.local_hostname.split(".")[0] }}'
          kubeletExtraArgs:
            cloud-provider: external
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: "${CLUSTER_NAME}-crs-cni"
data: ${CNI_RESOURCES}
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: "${CLUSTER_NAME}-crs-cni"
spec:
  strategy: ApplyOnce
  clusterSelector:
    matchLabels:
      cni: "${CLUSTER_NAME}-crs-cni"
  resources:
    - name: "${CLUSTER_NAME}-crs-cni"
      kind: ConfigMap
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: "${CLUSTER_NAME}-crs-ccm"
data: ${CCM_RESOURCES}
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: "${CLUSTER_NAME}-crs-ccm"
spec:
  strategy: ApplyOnce
  clusterSelector:
    matchLabels:
      ccm: "${CLUSTER_NAME}-crs-ccm"
  resources:
    - name: "${CLUSTER_NAME}-crs-ccm"
      kind: ConfigMap
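
To illustrate the kustomize suggestion from the comment at the top of this file (a hypothetical layout; the directory, file, and patch names are invented):

# Hypothetical: one shared base template plus a small overlay per e2e
# scenario, so each scenario only states the fields that differ from the base.
mkdir -p templates/base templates/overlays/internal-lb
cat > templates/overlays/internal-lb/kustomization.yaml <<'EOF'
resources:
  - ../../base
patches:
  - path: gcpcluster-internal-lb.yaml   # only the fields that differ
    target:
      kind: GCPCluster
EOF
kubectl kustomize templates/overlays/internal-lb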
@@ -8,6 +8,7 @@ metadata:
ccm: "${CLUSTER_NAME}-crs-ccm"
spec:
clusterNetwork:
apiServerPort: 6443
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
@@ -33,8 +34,10 @@ spec:
        cidrBlock: "10.0.0.0/17"
        purpose: PRIVATE
        region: us-east4
+  controlPlaneEndpoint:
+    port: 6443
  loadBalancer:
-    loadBalancerType: InternalExternal
+    loadBalancerType: Internal
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane