Merge pull request #982 from lzhecheng/0.7-fix-capz-tests
Cherry-pick #607, #627 and #841 to branch release-0.7
k8s-ci-robot authored Jan 10, 2022
2 parents d85eedd + adaa8b5 commit 7c38771
Showing 14 changed files with 370 additions and 288 deletions.
4 changes: 4 additions & 0 deletions pkg/consts/consts.go
@@ -83,6 +83,10 @@ const (

// NodeLabelRole specifies the role of a node
NodeLabelRole = "kubernetes.io/role"
// MasterNodeRoleLabel is the master node role label for a node
MasterNodeRoleLabel = "node-role.kubernetes.io/master"
// ControlPlaneNodeRoleLabel is the control-plane node role label for a node
ControlPlaneNodeRoleLabel = "node-role.kubernetes.io/control-plane"

// NicFailedState is the failed state of a nic
NicFailedState = "Failed"
20 changes: 13 additions & 7 deletions pkg/provider/azure_standard.go
@@ -148,14 +148,20 @@ func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string,
return lbNamePrefix
}

// isMasterNode returns true if the node has a master role label.
// The master role is determined by looking for:
// * a kubernetes.io/role="master" label
func isMasterNode(node *v1.Node) bool {
// isControlPlaneNode returns true if the node has a control-plane role label.
// The control-plane role is determined by looking for:
// * a node-role.kubernetes.io/control-plane or node-role.kubernetes.io/master="" label
func isControlPlaneNode(node *v1.Node) bool {
if _, ok := node.Labels[consts.ControlPlaneNodeRoleLabel]; ok {
return true
}
// include master role labels for k8s < 1.19
if _, ok := node.Labels[consts.MasterNodeRoleLabel]; ok {
return true
}
if val, ok := node.Labels[consts.NodeLabelRole]; ok && val == "master" {
return true
}

return false
}

@@ -682,7 +688,7 @@ func (as *availabilitySet) getAgentPoolAvailabilitySets(vms []compute.VirtualMac
agentPoolAvailabilitySets = &[]string{}
for nx := range nodes {
nodeName := (*nodes[nx]).Name
if isMasterNode(nodes[nx]) {
if isControlPlaneNode(nodes[nx]) {
continue
}
asID, ok := vmNameToAvailabilitySetID[nodeName]
@@ -936,7 +942,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
hostUpdates := make([]func() error, 0, len(nodes))
for _, node := range nodes {
localNodeName := node.Name
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
continue
}
30 changes: 24 additions & 6 deletions pkg/provider/azure_standard_test.go
@@ -51,27 +51,45 @@ const (
primary = "primary"
)

func TestIsMasterNode(t *testing.T) {
if isMasterNode(&v1.Node{}) {
func TestIsControlPlaneNode(t *testing.T) {
if isControlPlaneNode(&v1.Node{}) {
t.Errorf("Empty node should not be master!")
}
if isMasterNode(&v1.Node{
if isControlPlaneNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
consts.NodeLabelRole: "worker",
},
},
}) {
t.Errorf("Node labelled 'worker' should not be master!")
t.Errorf("Node labelled 'worker' should not be control plane!")
}
if !isMasterNode(&v1.Node{
if !isControlPlaneNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
consts.NodeLabelRole: "master",
},
},
}) {
t.Errorf("Node should be master!")
t.Errorf("Node with kubernetes.io/role: \"master\" label should be control plane!")
}
if !isControlPlaneNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
consts.MasterNodeRoleLabel: "",
},
},
}) {
t.Errorf("Node with node-role.kubernetes.io/master: \"\" label should be control plane!")
}
if !isControlPlaneNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
consts.ControlPlaneNodeRoleLabel: "",
},
},
}) {
t.Errorf("Node with node-role.kubernetes.io/control-plane: \"\" label should be control plane!")
}
}

12 changes: 7 additions & 5 deletions pkg/provider/azure_vmss.go
@@ -740,7 +740,7 @@ func (ss *ScaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu
func (ss *ScaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
agentPoolScaleSets := &[]string{}
for nx := range nodes {
if isMasterNode(nodes[nx]) {
if isControlPlaneNode(nodes[nx]) {
continue
}

@@ -1094,7 +1094,7 @@ func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
// multiple standard load balancers and the basic load balancer doesn't
if ss.useStandardLoadBalancer() && !ss.EnableMultipleStandardLoadBalancers {
for _, node := range nodes {
if ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
if ss.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
continue
}

@@ -1241,7 +1241,7 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
for _, node := range nodes {
localNodeName := node.Name

if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isControlPlaneNode(node) {
klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
continue
}
@@ -1535,8 +1535,10 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,

nodeResourceGroup, nodeVMSS, nodeInstanceID, nodeVMSSVM, err := ss.ensureBackendPoolDeletedFromNode(nodeName, backendPoolID)
if err != nil {
klog.Errorf("EnsureBackendPoolDeleted(%s): backendPoolID(%s) - failed with error %v", getServiceName(service), backendPoolID, err)
allErrs = append(allErrs, err)
if !errors.Is(err, ErrorNotVmssInstance) { // Do nothing for the VMAS nodes.
klog.Errorf("EnsureBackendPoolDeleted(%s): backendPoolID(%s) - failed with error %v", getServiceName(service), backendPoolID, err)
allErrs = append(allErrs, err)
}
continue
}

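The `EnsureBackendPoolDeleted` fix above relies on Go's sentinel-error matching: `errors.Is` unwraps the error chain, so an `ErrorNotVmssInstance` that has been wrapped with context still matches, and VMAS nodes can be skipped without logging spurious errors. A minimal sketch of that pattern, with illustrative names rather than the provider's actual definitions:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotVmssInstance is an illustrative sentinel, analogous to the
// provider's ErrorNotVmssInstance (whose real definition lives elsewhere
// in the codebase).
var errNotVmssInstance = errors.New("not a vmss instance")

// ensureDeletedFromNode stands in for a per-node cleanup call that wraps
// the sentinel with context before returning it.
func ensureDeletedFromNode(nodeName string) error {
	return fmt.Errorf("node %q: %w", nodeName, errNotVmssInstance)
}

func main() {
	err := ensureDeletedFromNode("vmas-node-0")
	// errors.Is walks the %w chain, so the wrapped sentinel still matches
	// and the caller can skip VMAS nodes instead of recording an error.
	if errors.Is(err, errNotVmssInstance) {
		fmt.Println("skipping VMAS node")
	}
}
```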
2 changes: 2 additions & 0 deletions site/content/en/topics/tagging-resources.md
@@ -21,6 +21,8 @@ the controller manager would parse this configuration and tag the shared resourc

The non-shared resource (public IP) could be tagged by setting `tags` in `azure.json` or the service annotation `service.beta.kubernetes.io/azure-pip-tags`. The format of the two is similar, and the tags in the annotation would be considered first when there are conflicts between the configuration file and the annotation.

> The annotation `service.beta.kubernetes.io/azure-pip-tags` only works for managed public IPs. For BYO public IPs, the cloud provider would not apply any tags to them.

When the configuration (file or annotation) is updated, the old tags would be updated if there are conflicts. For example, after updating `{"tags": "a=b,c=d"}` to `{"tags": "a=c,e=f"}`, the new tags would be `a=c,c=d,e=f`.
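
The merge behaviour can be pictured with a short sketch; the helpers below are illustrative only, assuming the simple `key=value,...` format from the example above rather than the provider's actual parsing code:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// parseTags turns "a=b,c=d" into a map, per the format described above.
func parseTags(s string) map[string]string {
	tags := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		if kv := strings.SplitN(pair, "=", 2); len(kv) == 2 {
			tags[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
		}
	}
	return tags
}

// mergeTags overlays the updated tags on the old ones: changed keys win,
// keys absent from the update are kept.
func mergeTags(old, updated map[string]string) map[string]string {
	merged := map[string]string{}
	for k, v := range old {
		merged[k] = v
	}
	for k, v := range updated {
		merged[k] = v
	}
	return merged
}

func main() {
	merged := mergeTags(parseTags("a=b,c=d"), parseTags("a=c,e=f"))
	keys := make([]string, 0, len(merged))
	for k := range merged {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("%s=%s\n", k, merged[k]) // prints a=c, c=d, e=f
	}
}
```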

## Integrating with system tags
8 changes: 7 additions & 1 deletion tests/e2e/e2e_test.go
@@ -37,6 +37,7 @@ import (

const (
reportDirEnv = "CCM_JUNIT_REPORT_DIR"
artifactsDirEnv = "ARTIFACTS"
defaultReportDir = "_report/"
)

@@ -56,7 +57,12 @@ func TestAzureTest(t *testing.T) {
RegisterFailHandler(Fail)
reportDir := os.Getenv(reportDirEnv)
if reportDir == "" {
reportDir = defaultReportDir
artifactsDir := os.Getenv(artifactsDirEnv)
if artifactsDir != "" {
reportDir = artifactsDir
} else {
reportDir = defaultReportDir
}
}
var r []Reporter
if reportDir != "" {