
Commit e81b7d4

Merge pull request #3285 from SataQiu/clean-master-20200703
🌱 Remove use of 'master' word from our codebase
2 parents 4cc7ebf + bafbf6e commit e81b7d4

10 files changed: +16 -16 lines changed


bootstrap/kubeadm/main.go

Lines changed: 1 addition & 1 deletion

@@ -84,7 +84,7 @@ func InitFlags(fs *pflag.FlagSet) {
 		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)")
 
 	fs.DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 10*time.Second,
-		"Duration that the acting master will retry refreshing leadership before giving up (duration string)")
+		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)")
 
 	fs.DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 2*time.Second,
 		"Duration the LeaderElector clients should wait between tries of actions (duration string)")

controllers/machinehealthcheck_targets.go

Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@ const (
 	// Event types
 
 	// EventSkippedControlPlane is emitted in case an unhealthy node (or a machine
-	// associated with the node) has the `master` role
+	// associated with the node) has the `control-plane` role
 	// Deprecated: no longer in use
 	EventSkippedControlPlane string = "SkippedControlPlane"
 	// EventMachineDeletionFailed is emitted in case remediation of a machine

controlplane/kubeadm/internal/workload_cluster.go

Lines changed: 4 additions & 4 deletions

@@ -44,9 +44,9 @@ import (
 )
 
 const (
-	kubeProxyKey        = "kube-proxy"
-	kubeadmConfigKey    = "kubeadm-config"
-	labelNodeRoleMaster = "node-role.kubernetes.io/master"
+	kubeProxyKey              = "kube-proxy"
+	kubeadmConfigKey          = "kubeadm-config"
+	labelNodeRoleControlPlane = "node-role.kubernetes.io/master"
 )
 
 var (
@@ -89,7 +89,7 @@ type Workload struct {
 func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
 	nodes := &corev1.NodeList{}
 	labels := map[string]string{
-		labelNodeRoleMaster: "",
+		labelNodeRoleControlPlane: "",
 	}
 
 	if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
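Note that only the Go identifier is renamed here; the label value `node-role.kubernetes.io/master` itself is unchanged by this commit. A minimal, self-contained sketch of how such a label-based control-plane node lookup works with a controller-runtime client follows; the standalone `main`, the local constant, and the client setup are illustrative assumptions, not the repository's code.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// Same label value as before the rename; only the Go identifier changed.
const labelNodeRoleControlPlane = "node-role.kubernetes.io/master"

func main() {
	// Build a client from the ambient kubeconfig; the default scheme already knows corev1.
	c, err := ctrlclient.New(ctrl.GetConfigOrDie(), ctrlclient.Options{})
	if err != nil {
		panic(err)
	}

	// List only the nodes that carry the control-plane role label.
	nodes := &corev1.NodeList{}
	if err := c.List(context.Background(), nodes,
		ctrlclient.MatchingLabels{labelNodeRoleControlPlane: ""}); err != nil {
		panic(err)
	}

	for _, n := range nodes.Items {
		fmt.Println("control plane node:", n.Name)
	}
}
```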

controlplane/kubeadm/internal/workload_cluster_etcd_test.go

Lines changed: 1 addition & 1 deletion

@@ -183,7 +183,7 @@ func TestRemoveEtcdMemberForMachine(t *testing.T) {
 			Name:      "cp1",
 			Namespace: "cp1",
 			Labels: map[string]string{
-				labelNodeRoleMaster: "",
+				labelNodeRoleControlPlane: "",
 			},
 		},
 	}

controlplane/kubeadm/internal/workload_cluster_test.go

Lines changed: 2 additions & 2 deletions

@@ -483,7 +483,7 @@ func TestClusterStatus(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "node1",
 				Labels: map[string]string{
-					labelNodeRoleMaster: "",
+					labelNodeRoleControlPlane: "",
 				},
 			},
 			Status: corev1.NodeStatus{
@@ -497,7 +497,7 @@
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "node2",
 				Labels: map[string]string{
-					labelNodeRoleMaster: "",
+					labelNodeRoleControlPlane: "",
 				},
 			},
 			Status: corev1.NodeStatus{

controlplane/kubeadm/main.go

Lines changed: 1 addition & 1 deletion

@@ -82,7 +82,7 @@ func InitFlags(fs *pflag.FlagSet) {
 		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)")
 
 	fs.DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 10*time.Second,
-		"Duration that the acting master will retry refreshing leadership before giving up (duration string)")
+		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)")
 
 	fs.DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 2*time.Second,
 		"Duration the LeaderElector clients should wait between tries of actions (duration string)")

docs/book/src/user/concepts.md

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ The Bootstrap Provider is responsible for:
 
 1. Generating the cluster certificates, if not otherwise specified
 1. Initializing the control plane, and gating the creation of other nodes until it is complete
-1. Joining master and worker nodes to the cluster
+1. Joining control plane and worker nodes to the cluster
 
 ### Control plane
 
docs/book/src/user/quick-start.md

Lines changed: 1 addition & 1 deletion

@@ -486,7 +486,7 @@ export NODE_OS="ubuntu_18_04"
 export SSH_KEY="my-ssh"
 export POD_CIDR="172.25.0.0/16"
 export SERVICE_CIDR="172.26.0.0/16"
-export MASTER_NODE_TYPE="t1.small"
+export CONTROLPLANE_NODE_TYPE="t1.small"
 export WORKER_NODE_TYPE="t1.small"
 ```
 

main.go

Lines changed: 1 addition & 1 deletion

@@ -90,7 +90,7 @@ func InitFlags(fs *pflag.FlagSet) {
 		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)")
 
 	fs.DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 10*time.Second,
-		"Duration that the acting master will retry refreshing leadership before giving up (duration string)")
+		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)")
 
 	fs.DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 2*time.Second,
 		"Duration the LeaderElector clients should wait between tries of actions (duration string)")

test/framework/cluster_proxy.go

Lines changed: 3 additions & 3 deletions

@@ -183,7 +183,7 @@ func (p *clusterProxy) GetWorkloadCluster(ctx context.Context, namespace, name s
 	// gets the kubeconfig from the cluster
 	config := p.getKubeconfig(ctx, namespace, name)
 
-	// if we are on mac and the cluster is a DockerCluster, it is required to fix the master address
+	// if we are on mac and the cluster is a DockerCluster, it is required to fix the control plane address
 	// by using localhost:load-balancer-host-port instead of the address used in the docker network.
 	if goruntime.GOOS == "darwin" && p.isDockerCluster(ctx, namespace, name) {
 		p.fixConfig(ctx, name, config)
@@ -226,12 +226,12 @@ func (p *clusterProxy) fixConfig(ctx context.Context, name string, config *api.C
 	port, err := findLoadBalancerPort(ctx, name)
 	Expect(err).ToNot(HaveOccurred(), "Failed to get load balancer port")
 
-	masterURL := &url.URL{
+	controlPlaneURL := &url.URL{
 		Scheme: "https",
 		Host:   "127.0.0.1:" + port,
 	}
 	currentCluster := config.Contexts[config.CurrentContext].Cluster
-	config.Clusters[currentCluster].Server = masterURL.String()
+	config.Clusters[currentCluster].Server = controlPlaneURL.String()
 }
 
 func findLoadBalancerPort(ctx context.Context, name string) (string, error) {
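Below is a minimal sketch (assumptions, not the PR's code) of the kubeconfig rewrite that `fixConfig` performs: point the current cluster's server at 127.0.0.1 plus the load balancer's published port, using client-go's `clientcmd/api` types. The standalone helper signature and the sample kubeconfig values are hypothetical; the real method resolves the port via `findLoadBalancerPort`.

```go
package main

import (
	"fmt"
	"net/url"

	"k8s.io/client-go/tools/clientcmd/api"
)

// fixServerAddress rewrites the current cluster's server URL so it goes through
// localhost and the load balancer's host port (hypothetical helper; the port is
// supplied by the caller instead of being looked up from Docker).
func fixServerAddress(config *api.Config, port string) {
	controlPlaneURL := &url.URL{
		Scheme: "https",
		Host:   "127.0.0.1:" + port,
	}
	currentCluster := config.Contexts[config.CurrentContext].Cluster
	config.Clusters[currentCluster].Server = controlPlaneURL.String()
}

func main() {
	// Hypothetical kubeconfig with a single cluster/context, for illustration only.
	cfg := api.NewConfig()
	cfg.Clusters["docker-test"] = &api.Cluster{Server: "https://172.18.0.3:6443"}
	cfg.Contexts["docker-test"] = &api.Context{Cluster: "docker-test"}
	cfg.CurrentContext = "docker-test"

	fixServerAddress(cfg, "32771")
	fmt.Println(cfg.Clusters["docker-test"].Server) // https://127.0.0.1:32771
}
```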
