diff --git a/pkg/log/logger.go b/pkg/log/logger.go
new file mode 100644
index 0000000000..8afd9f42fd
--- /dev/null
+++ b/pkg/log/logger.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package log
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/go-logr/logr"
+	"k8s.io/klog/v2"
+)
+
+// FromContextOrBackground returns a logger from the context if it exists, otherwise it returns a background logger.
+// Current implementation uses klog as the background logger.
+func FromContextOrBackground(ctx context.Context) logr.Logger {
+	return klog.FromContext(ctx)
+}
+
+// NewContext returns a new context with the provided logger.
+func NewContext(ctx context.Context, logger logr.Logger) context.Context {
+	return klog.NewContext(ctx, logger)
+}
+
+// Background returns the background logger.
+// Current implementation uses klog as the background logger.
+func Background() logr.Logger {
+	return klog.Background()
+}
+
+// Noop returns a logger that discards all log messages.
+func Noop() logr.Logger {
+	return logr.Discard()
+}
+
+// ValueAsMap converts a value to a map[string]any.
+// It returns a map with an "error" key if the conversion fails.
+// NOTE:
+//
+// It should ONLY be used when the default klog formatter fails to serialize the value in JSON format.
+// Protobuf messages implement the `String()` method, whose output is hard to read. Use this helper to bypass it.
+func ValueAsMap(value any) map[string]any {
+	v, err := json.Marshal(value)
+	if err != nil {
+		return map[string]any{"error": fmt.Sprintf("%s", err)}
+	}
+	var rv map[string]any
+	if err := json.Unmarshal(v, &rv); err != nil {
+		return map[string]any{"error": fmt.Sprintf("%s", err)}
+	}
+
+	return rv
+}
diff --git a/pkg/provider/azure_loadbalancer.go b/pkg/provider/azure_loadbalancer.go
index b4b39041e2..79e014e88d 100644
--- a/pkg/provider/azure_loadbalancer.go
+++ b/pkg/provider/azure_loadbalancer.go
@@ -44,6 +44,7 @@ import (
 	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/log"
 	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
 	"sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer"
 	"sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil"
@@ -85,27 +86,29 @@ func (az *Cloud) existsPip(clusterName string, service *v1.Service) bool {
 // Implementations must treat the *v1.Service parameter as read-only and not modify it.
 // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager.
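A note on the `ValueAsMap` escape hatch added above: klog's key/value serializer prefers a value's `String()` method, so types with terse stringers — generated protobuf messages in particular — log as opaque blobs, and the round trip through JSON sidesteps the stringer. A self-contained sketch of the effect (the `spec` type and its stringer are invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/klog/v2"
)

// spec stands in for a generated protobuf message: its String()
// method hides the fields behind a terse summary.
type spec struct {
	Name  string `json:"name"`
	Ports []int  `json:"ports"`
}

func (spec) String() string { return "spec{...}" }

// valueAsMap mirrors pkg/log.ValueAsMap: marshal to JSON and back so
// the formatter sees a plain map instead of a fmt.Stringer.
func valueAsMap(value any) map[string]any {
	v, err := json.Marshal(value)
	if err != nil {
		return map[string]any{"error": fmt.Sprintf("%s", err)}
	}
	var rv map[string]any
	if err := json.Unmarshal(v, &rv); err != nil {
		return map[string]any{"error": fmt.Sprintf("%s", err)}
	}
	return rv
}

func main() {
	logger := klog.Background()
	s := spec{Name: "svc-a", Ports: []int{80, 443}}

	logger.Info("stringer wins", "spec", s)              // fields lost: spec="spec{...}"
	logger.Info("fields survive", "spec", valueAsMap(s)) // fields logged as a map
	klog.Flush()
}
```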
// TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service
-func (az *Cloud) GetLoadBalancer(_ context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
+func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
+	logger := log.FromContextOrBackground(ctx).WithName("GetLoadBalancer").WithValues("service", service.Name)
+	ctx = log.NewContext(ctx, logger)
+
 	existingLBs, err := az.ListLB(service)
 	if err != nil {
 		return nil, az.existsPip(clusterName, service), err
 	}
 
-	_, _, status, _, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false, &existingLBs)
+	_, _, status, _, existsLb, err := az.getServiceLoadBalancer(ctx, service, clusterName, nil, false, &existingLBs)
 	if err != nil || existsLb {
 		return status, existsLb || az.existsPip(clusterName, service), err
 	}
 
 	flippedService := flipServiceInternalAnnotation(service)
-	_, _, status, _, existsLb, err = az.getServiceLoadBalancer(flippedService, clusterName, nil, false, &existingLBs)
+	_, _, status, _, existsLb, err = az.getServiceLoadBalancer(ctx, flippedService, clusterName, nil, false, &existingLBs)
 	if err != nil || existsLb {
 		return status, existsLb || az.existsPip(clusterName, service), err
 	}
 
 	// Return exists = false only if the load balancer and the public IP are not found on Azure
-	if !existsLb && !az.existsPip(clusterName, service) {
-		serviceName := getServiceName(service)
-		klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
+	if !az.existsPip(clusterName, service) {
+		logger.V(5).Info("LoadBalancer and PublicIP not found")
 		return nil, false, nil
 	}
@@ -121,61 +124,59 @@ func getPublicIPDomainNameLabel(service *v1.Service) (string, bool) {
 }
 
 // reconcileService reconciles the LoadBalancer service. It returns LoadBalancerStatus on success.
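The shape introduced in `GetLoadBalancer` — derive a scoped logger from the incoming context, then re-wrap the context so callees inherit the scope — recurs in every converted function below. A minimal sketch of the idiom (the `outer`/`inner` names are hypothetical):

```go
package main

import (
	"context"

	"sigs.k8s.io/cloud-provider-azure/pkg/log"
)

func outer(ctx context.Context, serviceName string) {
	// Attach the operation name and resource identity exactly once.
	logger := log.FromContextOrBackground(ctx).
		WithName("outer").
		WithValues("service", serviceName)

	// Re-wrap the context so every callee inherits the same scope.
	ctx = log.NewContext(ctx, logger)

	inner(ctx)
}

func inner(ctx context.Context) {
	// No extra plumbing: the scoped logger travels with ctx, so this
	// line carries the "outer" name and the service key/value.
	log.FromContextOrBackground(ctx).V(2).Info("doing work")
}

func main() { outer(context.Background(), "svc-a") }
```

Because `FromContextOrBackground` falls back to `klog.Background()`, call sites that have not adopted contexts yet keep logging instead of going silent.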
-func (az *Cloud) reconcileService(_ context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - serviceName := getServiceName(service) - resourceBaseName := az.GetLoadBalancerName(context.TODO(), "", service) - klog.V(2).Infof("reconcileService: Start reconciling Service %q with its resource basename %q", serviceName, resourceBaseName) +func (az *Cloud) reconcileService(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { + logger := log.FromContextOrBackground(ctx) - lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */) + logger.V(2).Info("Start reconciling Service", "lb", az.GetLoadBalancerName(ctx, clusterName, service)) + + lb, err := az.reconcileLoadBalancer(ctx, clusterName, service, nodes, true /* wantLb */) if err != nil { - klog.Errorf("reconcileLoadBalancer(%s) failed: %v", serviceName, err) + logger.Error(err, "Failed to reconcile LoadBalancer") return nil, err } lbStatus, lbIPsPrimaryPIPs, fipConfigs, err := az.getServiceLoadBalancerStatus(service, lb) if err != nil { - klog.Errorf("getServiceLoadBalancerStatus(%s) failed: %v", serviceName, err) + logger.Error(err, "Failed to get LoadBalancer status") if !errors.Is(err, ErrorNotVmssInstance) { return nil, err } } - serviceIPs := lbIPsPrimaryPIPs - klog.V(2).Infof("reconcileService: reconciling security group for service %q with IPs %q, wantLb = true", serviceName, serviceIPs) - if _, err := az.reconcileSecurityGroup(clusterName, service, ptr.Deref(lb.Name, ""), serviceIPs, true /* wantLb */); err != nil { - klog.Errorf("reconcileSecurityGroup(%s) failed: %#v", serviceName, err) + if _, err := az.reconcileSecurityGroup(ctx, clusterName, service, ptr.Deref(lb.Name, ""), lbIPsPrimaryPIPs, true /* wantLb */); err != nil { + logger.Error(err, "Failed to reconcile SecurityGroup") return nil, err } for _, fipConfig := range fipConfigs { if err := az.reconcilePrivateLinkService(clusterName, service, fipConfig, true /* wantPLS */); err != nil { - klog.Errorf("reconcilePrivateLinkService(%s) failed: %#v", serviceName, err) + logger.Error(err, "Failed to reconcile PrivateLinkService") return nil, err } } updateService := updateServiceLoadBalancerIPs(service, lbIPsPrimaryPIPs) flippedService := flipServiceInternalAnnotation(updateService) - if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil { - klog.Errorf("reconcileLoadBalancer(%s) failed: %#v", serviceName, err) + if _, err := az.reconcileLoadBalancer(ctx, clusterName, flippedService, nil, false /* wantLb */); err != nil { + logger.Error(err, "Failed to reconcile flipped LoadBalancer") return nil, err } // lb is not reused here because the ETAG may be changed in above operations, hence reconcilePublicIP() would get lb again from cache. 
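Each conversion in `reconcileService` follows one mechanical rule: the identifier that used to be interpolated into every `klog.Errorf` message moves into the logger's scope once, the error becomes a first-class argument, and the message becomes a grep-friendly constant. A condensed before/after sketch (`doReconcile` is a stand-in):

```go
package main

import (
	"context"
	"errors"

	"sigs.k8s.io/cloud-provider-azure/pkg/log"
)

func doReconcile(context.Context) error { return errors.New("boom") }

func reconcileSketch(ctx context.Context, serviceName string) error {
	// Before:
	//   klog.Errorf("reconcileLoadBalancer(%s) failed: %v", serviceName, err)
	// After: state the subject once, keep the message constant.
	logger := log.FromContextOrBackground(ctx).WithValues("service", serviceName)
	if err := doReconcile(ctx); err != nil {
		logger.Error(err, "Failed to reconcile LoadBalancer")
		return err
	}
	return nil
}

func main() { _ = reconcileSketch(context.Background(), "svc-a") }
```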
- klog.V(2).Infof("reconcileService: reconciling pip") + logger.V(2).Info("Reconciling PublicIPs") if _, err := az.reconcilePublicIPs(clusterName, updateService, pointer.StringDeref(lb.Name, ""), true /* wantLb */); err != nil { - klog.Errorf("reconcilePublicIP(%s) failed: %#v", serviceName, err) + logger.Error(err, "Failed to reconcile PublicIPs") return nil, err } lbName := strings.ToLower(pointer.StringDeref(lb.Name, "")) - key := strings.ToLower(serviceName) + key := strings.ToLower(getServiceName(service)) if az.useMultipleStandardLoadBalancers() && isLocalService(service) { az.localServiceNameToServiceInfoMap.Store(key, newServiceInfo(getServiceIPFamily(service), lbName)) // There are chances that the endpointslice changes after EnsureHostsInPool, so // need to check endpointslice for a second time. if err := az.checkAndApplyLocalServiceBackendPoolUpdates(*lb, service); err != nil { - klog.Errorf("failed to checkAndApplyLocalServiceBackendPoolUpdates: %v", err) + logger.Error(err, "Failed to checkAndApplyLocalServiceBackendPoolUpdates") return nil, err } } else { @@ -204,15 +205,23 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser az.serviceReconcileLock.Lock() defer az.serviceReconcileLock.Unlock() - var err error - serviceName := getServiceName(service) - mc := metrics.NewMetricContext("services", "ensure_loadbalancer", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), serviceName) - klog.V(5).InfoS("EnsureLoadBalancer Start", "service", serviceName, "cluster", clusterName, "service_spec", service) + var ( + svcName = getServiceName(service) + logger = log.FromContextOrBackground(ctx).WithName("EnsureLoadBalancer").WithValues("cluster", clusterName, "service", svcName) + mc = metrics.NewMetricContext("services", "ensure_loadbalancer", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), svcName) + isOperationSucceeded = false + err error + ) + + logger.V(5).Info("Starting", "service-spec", log.ValueAsMap(service)) - isOperationSucceeded := false defer func() { mc.ObserveOperationWithResult(isOperationSucceeded) - klog.V(5).InfoS("EnsureLoadBalancer Finish", "service", serviceName, "cluster", clusterName, "service_spec", service, "error", err) + if err != nil { + logger.V(5).Error(err, "Finished with error", "service-spec", log.ValueAsMap(service)) + } else { + logger.V(5).Info("Finished", "service-spec", log.ValueAsMap(service)) + } }() lbStatus, err := az.reconcileService(ctx, clusterName, service, nodes) @@ -251,35 +260,43 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser az.serviceReconcileLock.Lock() defer az.serviceReconcileLock.Unlock() - var err error - serviceName := getServiceName(service) - mc := metrics.NewMetricContext("services", "update_loadbalancer", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), serviceName) - klog.V(5).InfoS("UpdateLoadBalancer Start", "service", serviceName, "cluster", clusterName, "service_spec", service) - isOperationSucceeded := false + var ( + svcName = getServiceName(service) + logger = log.FromContextOrBackground(ctx).WithName("UpdateLoadBalancer").WithValues("cluster", clusterName, "service", svcName) + mc = metrics.NewMetricContext("services", "update_loadbalancer", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), svcName) + isOperationSucceeded = false + err error + ) + + logger.V(5).Info("Starting", "service-spec", log.ValueAsMap(service)) defer func() { mc.ObserveOperationWithResult(isOperationSucceeded) - klog.V(5).InfoS("UpdateLoadBalancer 
Finish", "service", serviceName, "cluster", clusterName, "service_spec", service, "error", err) + if err != nil { + logger.V(5).Error(err, "Finished with error", "service-spec", log.ValueAsMap(service)) + } else { + logger.V(5).Info("Finished", "service-spec", log.ValueAsMap(service)) + } }() // In case UpdateLoadBalancer gets stale service spec, retrieve the latest from lister - service, serviceExists, err := az.getLatestService(serviceName, true) + service, serviceExists, err := az.getLatestService(svcName, true) if err != nil { return fmt.Errorf("UpdateLoadBalancer: failed to get latest service %s: %w", service.Name, err) } if !serviceExists { isOperationSucceeded = true - klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because service is going to be deleted", serviceName) + logger.V(2).Info("Skipping because service is going to be deleted") return nil } - shouldUpdateLB, err := az.shouldUpdateLoadBalancer(clusterName, service, nodes) + shouldUpdateLB, err := az.shouldUpdateLoadBalancer(ctx, clusterName, service, nodes) if err != nil { return err } if !shouldUpdateLB { isOperationSucceeded = true - klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because it is either being deleted or does not exist anymore", service.Name) + logger.V(2).Info("Skipping because it is either being deleted or does not exist anymore") return nil } @@ -300,41 +317,48 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser // doesn't exist even if some part of it is still laying around. // Implementations must treat the *v1.Service parameter as read-only and not modify it. // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager -func (az *Cloud) EnsureLoadBalancerDeleted(_ context.Context, clusterName string, service *v1.Service) error { +func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { // Serialize service reconcile process az.serviceReconcileLock.Lock() defer az.serviceReconcileLock.Unlock() - var err error - serviceName := getServiceName(service) - mc := metrics.NewMetricContext("services", "ensure_loadbalancer_deleted", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), serviceName) - klog.V(5).InfoS("EnsureLoadBalancerDeleted Start", "service", serviceName, "cluster", clusterName, "service_spec", service) - isOperationSucceeded := false + var ( + svcName = getServiceName(service) + logger = log.FromContextOrBackground(ctx).WithName("EnsureLoadBalancerDeleted").WithValues("cluster", clusterName, "service", svcName) + mc = metrics.NewMetricContext("services", "ensure_loadbalancer_deleted", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), svcName) + isOperationSucceeded = false + err error + ) + ctx = log.NewContext(ctx, logger) + + logger.V(5).Info("Starting", "service-spec", log.ValueAsMap(service)) defer func() { mc.ObserveOperationWithResult(isOperationSucceeded) - klog.V(5).InfoS("EnsureLoadBalancerDeleted Finish", "service", serviceName, "cluster", clusterName, "service_spec", service, "error", err) + if err != nil { + logger.Error(err, "Finished with error", "service-spec", log.ValueAsMap(service)) + } else { + logger.V(5).Info("Finished", "service-spec", log.ValueAsMap(service)) + } }() - lb, _, _, lbIPsPrimaryPIPs, _, err := az.getServiceLoadBalancer(service, clusterName, nil, false, &[]network.LoadBalancer{}) + lb, _, _, lbIPsPrimaryPIPs, _, err := az.getServiceLoadBalancer(ctx, service, clusterName, nil, false, &[]network.LoadBalancer{}) if 
err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) { return err } - serviceIPsToCleanup := lbIPsPrimaryPIPs - klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IPs %q, wantLb = false", serviceName, serviceIPsToCleanup) - _, err = az.reconcileSecurityGroup(clusterName, service, ptr.Deref(lb.Name, ""), serviceIPsToCleanup, false /* wantLb */) + _, err = az.reconcileSecurityGroup(ctx, clusterName, service, ptr.Deref(lb.Name, ""), lbIPsPrimaryPIPs, false /* wantLb */) if err != nil { return err } - _, err = az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */) + _, err = az.reconcileLoadBalancer(ctx, clusterName, service, nil, false /* wantLb */) if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) { return err } // check flipped service also flippedService := flipServiceInternalAnnotation(service) - if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil { + if _, err := az.reconcileLoadBalancer(ctx, clusterName, flippedService, nil, false /* wantLb */); err != nil { return err } @@ -343,11 +367,10 @@ func (az *Cloud) EnsureLoadBalancerDeleted(_ context.Context, clusterName string } if az.useMultipleStandardLoadBalancers() && isLocalService(service) { - key := strings.ToLower(serviceName) + key := strings.ToLower(svcName) az.localServiceNameToServiceInfoMap.Delete(key) } - klog.V(2).Infof("Delete service (%s): FINISH", serviceName) isOperationSucceeded = true return nil @@ -609,12 +632,15 @@ func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vm // with added metadata (such as name, location) and existsLB set to FALSE. // By default - cluster default LB is returned. func (az *Cloud) getServiceLoadBalancer( + ctx context.Context, service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool, existingLBs *[]network.LoadBalancer, ) (lb *network.LoadBalancer, refreshedLBs *[]network.LoadBalancer, status *v1.LoadBalancerStatus, lbIPsPrimaryPIPs []string, exists bool, err error) { + logger := log.FromContextOrBackground(ctx) + isInternal := requiresInternalLoadBalancer(service) var defaultLB *network.LoadBalancer primaryVMSetName := az.VMSet.GetPrimaryVMSetName() @@ -653,7 +679,7 @@ func (az *Cloud) getServiceLoadBalancer( // service is not on this load balancer continue } - klog.V(4).Infof("getServiceLoadBalancer(%s, %s, %v): current lb IPs: %q", service.Name, clusterName, wantLb, lbIPsPrimaryPIPs) + logger.V(4).Info(fmt.Sprintf("getServiceLoadBalancer(%s, %s, %v): current lb IPs: %q", service.Name, clusterName, wantLb, lbIPsPrimaryPIPs)) // select another load balancer instead of returning // the current one if the change is needed @@ -669,7 +695,7 @@ func (az *Cloud) getServiceLoadBalancer( } deletedLBName, err = az.removeFrontendIPConfigurationFromLoadBalancer(&existingLB, existingLBs, fipConfigs, clusterName, service) if err != nil { - klog.Errorf("getServiceLoadBalancer(%s, %s, %v): failed to remove frontend IP configurations %q from load balancer: %v", service.Name, clusterName, wantLb, fipConfigNames, err) + logger.Error(err, fmt.Sprintf("getServiceLoadBalancer(%s, %s, %v): failed to remove frontend IP configurations %q from load balancer", service.Name, clusterName, wantLb, fipConfigNames)) return nil, nil, nil, nil, false, err } if deletedLBName != "" { @@ -694,7 +720,7 @@ func (az *Cloud) getServiceLoadBalancer( if deletedLBName == "" { newLBs, err := az.cleanupLocalServiceBackendPool(service, nodes, existingLBs, clusterName) if 
err != nil { - klog.Errorf("getServiceLoadBalancer(%s, %s, %v): failed to cleanup backend pool for local service: %s", service.Name, clusterName, wantLb, err.Error()) + logger.Error(err, fmt.Sprintf("getServiceLoadBalancer(%s, %s, %v): failed to cleanup backend pool for local service", service.Name, clusterName, wantLb)) return nil, nil, nil, nil, false, err } existingLBs = newLBs @@ -1610,7 +1636,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( // This also reconciles the Service's Ports with the LoadBalancer config. // This entails adding rules/probes for expected Ports and removing stale rules/ports. // nodes only used if wantLb is true -func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) { +func (az *Cloud) reconcileLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) { isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service) serviceName := getServiceName(service) klog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb) @@ -1636,7 +1662,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, return nil, err } - lb, newLBs, lbStatus, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb, existingLBs) + lb, newLBs, lbStatus, _, _, err := az.getServiceLoadBalancer(ctx, service, clusterName, nodes, wantLb, existingLBs) if err != nil { klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err) return nil, err @@ -2803,18 +2829,16 @@ func (az *Cloud) getExpectedHAModeLoadBalancingRuleProperties( // This reconciles the Network Security Group similar to how the LB is reconciled. // This entails adding required, missing SecurityRules and removing stale rules. func (az *Cloud) reconcileSecurityGroup( + ctx context.Context, clusterName string, service *v1.Service, lbName string, lbIPs []string, wantLb bool, ) (*network.SecurityGroup, error) { - logger := klog.Background().WithName("reconcileSecurityGroup"). - WithValues("cluster", clusterName). - WithValues("service", getServiceName(service)). + logger := log.FromContextOrBackground(ctx).WithName("reconcileSecurityGroup"). WithValues("load-balancer", lbName). WithValues("delete-lb", !wantLb) logger.V(2).Info("Starting") - - ctx := klog.NewContext(context.Background(), logger) + ctx = log.NewContext(ctx, logger) if wantLb && len(lbIPs) == 0 { return nil, fmt.Errorf("no load balancer IP for setting up security rules for service %s", service.Name) @@ -2837,7 +2861,7 @@ func (az *Cloud) reconcileSecurityGroup( // When deleting LB, we don't need to validate the annotation opts = append(opts, loadbalancer.SkipAnnotationValidation()) } - accessControl, err = loadbalancer.NewAccessControl(service, &sg, opts...) + accessControl, err = loadbalancer.NewAccessControl(logger, service, &sg, opts...) 
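Stepping back: `EnsureLoadBalancer`, `UpdateLoadBalancer`, and `EnsureLoadBalancerDeleted` now share the same prologue/epilogue — declare the logger, metric context, and result flag together, then let a single deferred closure record the outcome and emit exactly one Finished line. A condensed sketch of that shape (the metric plumbing is stubbed out with a hypothetical `observeResult`):

```go
package main

import (
	"context"
	"errors"

	"sigs.k8s.io/cloud-provider-azure/pkg/log"
)

// observeResult stands in for metrics.MetricContext.ObserveOperationWithResult.
func observeResult(succeeded bool) { _ = succeeded }

func work(context.Context) error { return errors.New("boom") }

func ensureSketch(ctx context.Context, svcName string) error {
	var (
		logger               = log.FromContextOrBackground(ctx).WithName("EnsureSketch").WithValues("service", svcName)
		isOperationSucceeded = false
		err                  error
	)

	logger.V(5).Info("Starting")
	defer func() {
		// Runs on every exit path: record the metric outcome and emit
		// exactly one Finished / Finished-with-error line.
		observeResult(isOperationSucceeded)
		if err != nil {
			logger.Error(err, "Finished with error")
		} else {
			logger.V(5).Info("Finished")
		}
	}()

	// Assign with =, not :=, so the deferred closure sees the final value.
	if err = work(ctx); err != nil {
		return err
	}
	isOperationSucceeded = true
	return nil
}

func main() { _ = ensureSketch(context.Background(), "svc-a") }
```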
if err != nil { logger.Error(err, "Failed to parse access control configuration for service") return nil, err @@ -2938,25 +2962,25 @@ func (az *Cloud) reconcileSecurityGroup( if updated { logger.V(2).Info("Preparing to update security group") - logger.V(10).Info("CreateOrUpdateSecurityGroup begin") + logger.V(5).Info("CreateOrUpdateSecurityGroup begin") err := az.CreateOrUpdateSecurityGroup(*rv) if err != nil { logger.Error(err, "Failed to update security group") return nil, err } - logger.V(10).Info("CreateOrUpdateSecurityGroup end") + logger.V(5).Info("CreateOrUpdateSecurityGroup end") _ = az.nsgCache.Delete(pointer.StringDeref(rv.Name, "")) } return rv, nil } -func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (bool, error) { +func (az *Cloud) shouldUpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (bool, error) { existingManagedLBs, err := az.ListManagedLBs(service, nodes, clusterName) if err != nil { return false, fmt.Errorf("shouldUpdateLoadBalancer: failed to list managed load balancers: %w", err) } - _, _, _, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nodes, false, existingManagedLBs) + _, _, _, _, existsLb, _ := az.getServiceLoadBalancer(ctx, service, clusterName, nodes, false, existingManagedLBs) return existsLb && service.ObjectMeta.DeletionTimestamp == nil && service.Spec.Type == v1.ServiceTypeLoadBalancer, nil } diff --git a/pkg/provider/azure_loadbalancer_accesscontrol.go b/pkg/provider/azure_loadbalancer_accesscontrol.go index 107859221e..a0445864a0 100644 --- a/pkg/provider/azure_loadbalancer_accesscontrol.go +++ b/pkg/provider/azure_loadbalancer_accesscontrol.go @@ -21,13 +21,12 @@ import ( "fmt" "net/netip" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/klog/v2" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil" ) @@ -60,34 +59,34 @@ func (az *Cloud) listSharedIPPortMapping( ingressIPs []netip.Addr, ) (map[network.SecurityRuleProtocol][]int32, error) { var ( - logger = klog.FromContext(ctx).WithName("listSharedIPPortMapping") + logger = log.FromContextOrBackground(ctx).WithName("listSharedIPPortMapping") rv = make(map[network.SecurityRuleProtocol][]int32) ) var services []*v1.Service { var err error - logger.Info("Listing all services") + logger.V(5).Info("Listing all services") services, err = az.serviceLister.List(labels.Everything()) if err != nil { logger.Error(err, "Failed to list all services") return nil, fmt.Errorf("list all services: %w", err) } - logger.Info("Listed all services", "num-all-services", len(services)) + logger.V(5).Info("Listed all services", "num-all-services", len(services)) // Filter services by ingress IPs or backend node pool IPs (when disable floating IP) if consts.IsK8sServiceDisableLoadBalancerFloatingIP(svc) { - logger.Info("Filter service by disableFloatingIP") + logger.V(5).Info("Filter service by disableFloatingIP") services = filterServicesByDisableFloatingIP(services) } else { - logger.Info("Filter service by external IPs") + logger.V(5).Info("Filter service by external IPs") services = filterServicesByIngressIPs(services, ingressIPs) } } - logger.Info("Filtered services", 
"num-filtered-services", len(services)) + logger.V(5).Info("Filtered services", "num-filtered-services", len(services)) for _, s := range services { - logger.V(4).Info("iterating service", "service", s.Name, "namespace", s.Namespace) + logger.V(5).Info("Iterating service", "service", s.Name, "namespace", s.Namespace) if svc.Namespace == s.Namespace && svc.Name == s.Name { // skip the service itself continue @@ -103,7 +102,7 @@ func (az *Cloud) listSharedIPPortMapping( } } - logger.V(4).Info("retain port mapping", "port-mapping", rv) + logger.V(5).Info("Retain port mapping", "port-mapping", rv) return rv, nil } diff --git a/pkg/provider/azure_loadbalancer_accesscontrol_test.go b/pkg/provider/azure_loadbalancer_accesscontrol_test.go index 397e408fd6..3f1ed4c728 100644 --- a/pkg/provider/azure_loadbalancer_accesscontrol_test.go +++ b/pkg/provider/azure_loadbalancer_accesscontrol_test.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup" @@ -53,6 +54,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { fx = fixture.NewFixture() k8sFx = fx.Kubernetes() azureFx = fx.Azure() + ctx = log.NewContext(context.Background(), log.Noop()) ) t.Run("internal Load Balancer", func(t *testing.T) { @@ -86,7 +88,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - sg, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + sg, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) testutil.ExpectEqualInJSON(t, azureFx.SecurityGroup().Build(), sg) }) @@ -173,7 +175,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -259,7 +261,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) }) @@ -334,7 +336,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) }) @@ -416,7 +418,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). 
Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -491,7 +493,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -571,7 +573,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -669,7 +671,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -749,7 +751,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -842,13 +844,12 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) }) t.Run("skip - when rules are up-to-date", func(t *testing.T) { - t.Run("with `service.beta.kubernetes.io/azure-additional-public-ips` specified", func(t *testing.T) { var ( ctrl = gomock.NewController(t) @@ -910,7 +911,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -972,7 +973,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -1038,7 +1039,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -1122,7 +1123,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). 
Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -1189,7 +1190,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -1268,7 +1269,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -1365,7 +1366,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) }) @@ -1549,7 +1550,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -1680,7 +1681,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -1881,7 +1882,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) @@ -2163,7 +2164,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) } @@ -2447,7 +2448,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.NoError(t, err) }) } @@ -2636,7 +2637,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). 
Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), false) // deleting + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), false) // deleting assert.NoError(t, err) }) @@ -2820,7 +2821,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), false) // deleting + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), false) // deleting assert.NoError(t, err) }) @@ -2943,7 +2944,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { Return(loadBalancer, &retry.Error{HTTPStatusCode: http.StatusNotFound}). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, nil, false) // deleting + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, nil, false) // deleting assert.NoError(t, err) }) @@ -2972,7 +2973,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { Return(securityGroup, nil). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.Error(t, err) assert.ErrorIs(t, err, loadbalancer.ErrSetBothLoadBalancerSourceRangesAndAllowedIPRanges) }) @@ -2997,7 +2998,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { Return(securityGroup, expectedErr). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.Error(t, err) assert.ErrorIs(t, err, expectedErr.RawError) }) @@ -3027,7 +3028,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { Return(loadBalancer, expectedErr). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.Error(t, err) assert.ErrorIs(t, err, expectedErr.RawError) }) @@ -3070,7 +3071,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.Error(t, err) assert.ErrorIs(t, err, expectedErr.RawError) }) @@ -3105,7 +3106,7 @@ func TestCloud_reconcileSecurityGroup(t *testing.T) { ). 
Times(1) - _, err := az.reconcileSecurityGroup(ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) + _, err := az.reconcileSecurityGroup(ctx, ClusterName, &svc, *loadBalancer.Name, azureFx.LoadBalancer().Addresses(), EnsureLB) assert.Error(t, err) }) }) diff --git a/pkg/provider/azure_loadbalancer_test.go b/pkg/provider/azure_loadbalancer_test.go index cff7b18843..17811d1ff3 100644 --- a/pkg/provider/azure_loadbalancer_test.go +++ b/pkg/provider/azure_loadbalancer_test.go @@ -788,7 +788,7 @@ func TestEnsureLoadBalancerDeleted(t *testing.T) { mockLBsClient.EXPECT().List(gomock.Any(), az.Config.ResourceGroup).Return(expectedLBs, nil).MaxTimes(2) az.LoadBalancerClient = mockLBsClient assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc) - result, rerr := az.LoadBalancerClient.List(context.Background(), az.Config.ResourceGroup) + result, rerr := az.LoadBalancerClient.List(context.TODO(), az.Config.ResourceGroup) assert.Nil(t, rerr, "TestCase[%d]: %s", i, c.desc) assert.Equal(t, 0, len(result), "TestCase[%d]: %s", i, c.desc) } @@ -1798,7 +1798,7 @@ func TestGetServiceLoadBalancerMultiSLB(t *testing.T) { tc.service.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal } - lb, lbs, _, _, _, err := cloud.getServiceLoadBalancer(&tc.service, testClusterName, + lb, lbs, _, _, _, err := cloud.getServiceLoadBalancer(context.TODO(), &tc.service, testClusterName, []*v1.Node{}, true, &tc.existingLBs) assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedLB, lb) @@ -1943,7 +1943,7 @@ func TestGetServiceLoadBalancerCommon(t *testing.T) { } az.LoadBalancerSku = test.sku service := test.service - lb, _, status, _, exists, err := az.getServiceLoadBalancer(&service, testClusterName, + lb, _, status, _, exists, err := az.getServiceLoadBalancer(context.TODO(), &service, testClusterName, clusterResources.nodes, test.wantLB, &[]network.LoadBalancer{}) assert.Equal(t, test.expectedLB, lb) assert.Equal(t, test.expectedStatus, status) @@ -1976,7 +1976,7 @@ func TestGetServiceLoadBalancerWithExtendedLocation(t *testing.T) { mockLBsClient.EXPECT().List(gomock.Any(), "rg").Return(nil, nil) az.LoadBalancerClient = mockLBsClient - lb, _, status, _, exists, err := az.getServiceLoadBalancer(&service, testClusterName, + lb, _, status, _, exists, err := az.getServiceLoadBalancer(context.TODO(), &service, testClusterName, clusterResources.nodes, false, &[]network.LoadBalancer{}) assert.Equal(t, expectedLB, lb, "GetServiceLoadBalancer shall return a default LB with expected location.") assert.Nil(t, status, "GetServiceLoadBalancer: Status should be nil for default LB.") @@ -2001,7 +2001,7 @@ func TestGetServiceLoadBalancerWithExtendedLocation(t *testing.T) { mockLBsClient.EXPECT().List(gomock.Any(), "rg").Return(nil, nil) az.LoadBalancerClient = mockLBsClient - lb, _, status, _, exists, err = az.getServiceLoadBalancer(&service, testClusterName, + lb, _, status, _, exists, err = az.getServiceLoadBalancer(context.TODO(), &service, testClusterName, clusterResources.nodes, true, &[]network.LoadBalancer{}) assert.Equal(t, expectedLB, lb, "GetServiceLoadBalancer shall return a new LB with expected location.") assert.Nil(t, status, "GetServiceLoadBalancer: Status should be nil for new LB.") @@ -3904,7 +3904,7 @@ func TestReconcileLoadBalancerCommon(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, rerr := 
az.reconcileLoadBalancer("testCluster", &service, clusterResources.nodes, test.wantLb) + lb, rerr := az.reconcileLoadBalancer(context.TODO(), "testCluster", &service, clusterResources.nodes, test.wantLb) assert.Equal(t, test.expectedError, rerr) if test.expectedError == nil { @@ -5387,7 +5387,7 @@ func TestShouldUpdateLoadBalancer(t *testing.T) { mockVMSet.EXPECT().GetPrimaryVMSetName().Return(az.Config.PrimaryAvailabilitySetName).MaxTimes(3) az.VMSet = mockVMSet - shouldUpdateLoadBalancer, err := az.shouldUpdateLoadBalancer(testClusterName, &service, existingNodes) + shouldUpdateLoadBalancer, err := az.shouldUpdateLoadBalancer(context.TODO(), testClusterName, &service, existingNodes) assert.NoError(t, err) assert.Equal(t, test.expectedOutput, shouldUpdateLoadBalancer) }) @@ -6073,7 +6073,7 @@ func TestCleanOrphanedLoadBalancerLBInUseByVMSS(t *testing.T) { t.Run("cleanOrphanedLoadBalancer should retry deleting lb when meeting LoadBalancerInUseByVirtualMachineScaleSet", func(t *testing.T) { cloud := GetTestCloud(ctrl) - vmss, err := newScaleSet(context.Background(), cloud) + vmss, err := newScaleSet(context.TODO(), cloud) assert.NoError(t, err) cloud.VMSet = vmss cloud.LoadBalancerSku = consts.LoadBalancerSkuStandard @@ -6098,7 +6098,7 @@ func TestCleanOrphanedLoadBalancerLBInUseByVMSS(t *testing.T) { t.Run("cleanupOrphanedLoadBalancer should not call delete api if the lb does not exist", func(t *testing.T) { cloud := GetTestCloud(ctrl) - vmss, err := newScaleSet(context.Background(), cloud) + vmss, err := newScaleSet(context.TODO(), cloud) assert.NoError(t, err) cloud.VMSet = vmss cloud.LoadBalancerSku = consts.LoadBalancerSkuStandard diff --git a/pkg/provider/azure_test.go b/pkg/provider/azure_test.go index c7b3e95b04..6ccbfff835 100644 --- a/pkg/provider/azure_test.go +++ b/pkg/provider/azure_test.go @@ -132,7 +132,7 @@ func TestAddPort(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) // ensure we got a frontend ip configuration @@ -917,7 +917,7 @@ func TestReconcileLoadBalancerAddServiceOnInternalSubnet(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) // ensure we got 2 frontend ip configurations @@ -947,7 +947,7 @@ func TestReconcileLoadBalancerAddServicesOnMultipleSubnets(t *testing.T) { mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() // svc1 is using LB without "-internal" suffix - lb, err := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc1, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error reconciling svc1: %q", err) } @@ -964,7 +964,7 @@ func 
TestReconcileLoadBalancerAddServicesOnMultipleSubnets(t *testing.T) { setMockLBsDualStack(az, ctrl, &expectedLBs, "service", 1, 2, true) // svc2 is using LB with "-internal" suffix - lb, err = az.reconcileLoadBalancer(testClusterName, &svc2, clusterResources.nodes, true /* wantLb */) + lb, err = az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc2, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error reconciling svc2: %q", err) } @@ -1000,7 +1000,7 @@ func TestReconcileLoadBalancerEditServiceSubnet(t *testing.T) { mockPLSClient := az.PrivateLinkServiceClient.(*mockprivatelinkserviceclient.MockInterface) mockPLSClient.EXPECT().List(gomock.Any(), az.Config.ResourceGroup).Return(expectedPLS, nil).MinTimes(1).MaxTimes(1) - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error reconciling initial svc: %q", err) } @@ -1013,7 +1013,7 @@ func TestReconcileLoadBalancerEditServiceSubnet(t *testing.T) { expectedLBs = make([]network.LoadBalancer, 0) setMockLBsDualStack(az, ctrl, &expectedLBs, "service", 1, 1, true) - lb, err = az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err = az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error reconciling edits to svc: %q", err) } @@ -1045,7 +1045,7 @@ func TestReconcileLoadBalancerNodeHealth(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) // ensure we got a frontend ip configuration @@ -1074,7 +1074,7 @@ func TestReconcileLoadBalancerRemoveService(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - _, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + _, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) expectedLBs[0].FrontendIPConfigurations = &[]network.FrontendIPConfiguration{} @@ -1085,7 +1085,7 @@ func TestReconcileLoadBalancerRemoveService(t *testing.T) { mockLBsClient.EXPECT().List(gomock.Any(), az.ResourceGroup).Return(expectedLBs, nil).MaxTimes(3) mockLBsClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, false /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, false /* wantLb */) assert.Nil(t, err) // ensure we abandoned the frontend ip configuration @@ -1114,7 +1114,7 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, 
err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) validateLoadBalancer(t, lb, svc) @@ -1128,7 +1128,7 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) mockLBsClient.EXPECT().List(gomock.Any(), az.ResourceGroup).Return(expectedLBs, nil).MaxTimes(3) mockLBsClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - lb, err = az.reconcileLoadBalancer(testClusterName, &svcUpdated, clusterResources.nodes, false /* wantLb*/) + lb, err = az.reconcileLoadBalancer(context.TODO(), testClusterName, &svcUpdated, clusterResources.nodes, false /* wantLb*/) assert.Nil(t, err) // ensure we abandoned the frontend ip configuration @@ -1155,13 +1155,13 @@ func TestReconcileLoadBalancerRemovesPort(t *testing.T) { expectedLBs := make([]network.LoadBalancer, 0) setMockLBsDualStack(az, ctrl, &expectedLBs, "service", 1, 1, false) svc := getTestServiceDualStack("service1", v1.ProtocolTCP, nil, 80, 443) - _, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + _, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) expectedLBs = make([]network.LoadBalancer, 0) setMockLBsDualStack(az, ctrl, &expectedLBs, "service", 1, 1, false) svcUpdated := getTestServiceDualStack("service1", v1.ProtocolTCP, nil, 80) - lb, err := az.reconcileLoadBalancer(testClusterName, &svcUpdated, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svcUpdated, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) validateLoadBalancer(t, lb, svcUpdated) @@ -1192,12 +1192,12 @@ func TestReconcileLoadBalancerMultipleServices(t *testing.T) { mockPLSClient := az.PrivateLinkServiceClient.(*mockprivatelinkserviceclient.MockInterface) mockPLSClient.EXPECT().List(gomock.Any(), az.Config.ResourceGroup).Return(expectedPLS, nil).MinTimes(1).MaxTimes(1) - _, err := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true /* wantLb */) + _, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc1, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) setMockLBsDualStack(az, ctrl, &expectedLBs, "service", 1, 2, false) - updatedLoadBalancer, err := az.reconcileLoadBalancer(testClusterName, &svc2, clusterResources.nodes, true /* wantLb */) + updatedLoadBalancer, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc2, clusterResources.nodes, true /* wantLb */) assert.Nil(t, err) validateLoadBalancer(t, updatedLoadBalancer, svc1, svc2) @@ -1252,7 +1252,7 @@ func TestServiceDefaultsToNoSessionPersistence(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error reconciling svc1: %q", err) } @@ -1313,7 +1313,7 @@ func TestServiceRespectsNoSessionAffinity(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error reconciling svc1: %q", err) } @@ -1376,7 +1376,7 @@ func TestServiceRespectsClientIPSessionAffinity(t *testing.T) { }).AnyTimes() mockLBBackendPool.EXPECT().EnsureHostsInPool(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) + lb, err := az.reconcileLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error reconciling svc1: %q", err) } diff --git a/pkg/provider/loadbalancer/accesscontrol.go b/pkg/provider/loadbalancer/accesscontrol.go index 4aba4c6488..3896970036 100644 --- a/pkg/provider/loadbalancer/accesscontrol.go +++ b/pkg/provider/loadbalancer/accesscontrol.go @@ -22,8 +22,8 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + "github.com/go-logr/logr" v1 "k8s.io/api/core/v1" - "k8s.io/klog/v2" "k8s.io/utils/ptr" "sigs.k8s.io/cloud-provider-azure/pkg/consts" @@ -39,7 +39,7 @@ var ( ) type AccessControl struct { - logger klog.Logger + logger logr.Logger svc *v1.Service sgHelper *securitygroup.RuleHelper @@ -67,18 +67,15 @@ func SkipAnnotationValidation() AccessControlOption { } } -func NewAccessControl(svc *v1.Service, sg *network.SecurityGroup, opts ...AccessControlOption) (*AccessControl, error) { - logger := klog.Background(). - WithName("LoadBalancer.AccessControl"). - WithValues("service-name", svc.Name). 
- WithValues("security-group-name", ptr.To(sg.Name)) +func NewAccessControl(logger logr.Logger, svc *v1.Service, sg *network.SecurityGroup, opts ...AccessControlOption) (*AccessControl, error) { + logger = logger.WithName("AccessControl").WithValues("security-group", ptr.To(sg.Name)) options := defaultAccessControlOptions for _, opt := range opts { opt(&options) } - sgHelper, err := securitygroup.NewSecurityGroupHelper(sg) + sgHelper, err := securitygroup.NewSecurityGroupHelper(logger, sg) if err != nil { logger.Error(err, "Failed to initialize RuleHelper") return nil, err @@ -101,7 +98,7 @@ func NewAccessControl(svc *v1.Service, sg *network.SecurityGroup, opts ...Access return nil, err } if len(sourceRanges) > 0 && len(allowedIPRanges) > 0 { - logger.Error(err, "Forbidden configuration") + logger.Error(ErrSetBothLoadBalancerSourceRangesAndAllowedIPRanges, "Forbidden configuration") return nil, ErrSetBothLoadBalancerSourceRangesAndAllowedIPRanges } diff --git a/pkg/provider/loadbalancer/accesscontrol_test.go b/pkg/provider/loadbalancer/accesscontrol_test.go index 19767ee0d7..1ddc8e98d2 100644 --- a/pkg/provider/loadbalancer/accesscontrol_test.go +++ b/pkg/provider/loadbalancer/accesscontrol_test.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/internal/testutil" "sigs.k8s.io/cloud-provider-azure/internal/testutil/fixture" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup" @@ -119,7 +120,7 @@ func TestAccessControl_IsAllowFromInternet(t *testing.T) { for i := range tests { tt := tests[i] sg := azureFx.SecurityGroup().WithRules(azureFx.NoiseSecurityRules()).Build() - ac, err := NewAccessControl(&tt.svc, &sg) + ac, err := NewAccessControl(log.Noop(), &tt.svc, &sg) assert.NoError(t, err) actual := ac.IsAllowFromInternet() assert.Equal(t, tt.expectedOutput, actual, "[%s] expecting IsAllowFromInternet returns %v", tt.name, tt.expectedOutput) @@ -139,7 +140,7 @@ func TestNewAccessControl(t *testing.T) { WithAllowedIPRanges("10.0.0.1/32"). 
Build() - _, err := NewAccessControl(&svc, &sg) + _, err := NewAccessControl(log.Noop(), &svc, &sg) assert.ErrorIs(t, err, ErrSetBothLoadBalancerSourceRangesAndAllowedIPRanges) }) } @@ -192,7 +193,7 @@ func TestAccessControl_DenyAllExceptSourceRanges(t *testing.T) { for i := range tests { tt := tests[i] sg := azureFx.SecurityGroup().Build() - ac, err := NewAccessControl(&tt.svc, &sg) + ac, err := NewAccessControl(log.Noop(), &tt.svc, &sg) assert.NoError(t, err) actual := ac.DenyAllExceptSourceRanges() assert.Equal(t, tt.expectedOutput, actual, "[%s] expecting DenyAllExceptSourceRanges returns %v", tt.name, tt.expectedOutput) @@ -395,7 +396,7 @@ func TestAccessControl_AllowedRanges(t *testing.T) { for i := range tests { tt := tests[i] sg := azureFx.SecurityGroup().Build() - ac, err := NewAccessControl(&tt.svc, &sg) + ac, err := NewAccessControl(log.Noop(), &tt.svc, &sg) assert.NoError(t, err) var ( ipv4 = ac.AllowedIPv4Ranges() @@ -436,7 +437,7 @@ func TestAccessControl_PatchSecurityGroup(t *testing.T) { var ( sg = azureFx.SecurityGroup().WithRules(originalRules).Build() - ac, err = NewAccessControl(&svc, &sg) + ac, err = NewAccessControl(log.Noop(), &svc, &sg) ) assert.NoError(t, err) @@ -1086,7 +1087,7 @@ func TestAccessControl_CleanSecurityGroup(t *testing.T) { var ( sg = azureFx.SecurityGroup().Build() svc = fx.Kubernetes().Service().Build() - ac, err = NewAccessControl(&svc, &sg) + ac, err = NewAccessControl(log.Noop(), &svc, &sg) ) assert.NoError(t, err) @@ -1133,7 +1134,7 @@ func TestAccessControl_CleanSecurityGroup(t *testing.T) { netip.MustParseAddr("192.168.0.2"), } svc = fx.Kubernetes().Service().Build() - ac, err = NewAccessControl(&svc, &sg) + ac, err = NewAccessControl(log.Noop(), &svc, &sg) ) assert.NoError(t, err) @@ -1193,7 +1194,7 @@ func TestAccessControl_CleanSecurityGroup(t *testing.T) { netip.MustParseAddr("192.168.0.2"), } svc = fx.Kubernetes().Service().Build() - ac, err = NewAccessControl(&svc, &sg) + ac, err = NewAccessControl(log.Noop(), &svc, &sg) ) assert.NoError(t, err) @@ -1308,7 +1309,7 @@ func TestAccessControl_CleanSecurityGroup(t *testing.T) { netip.MustParseAddr("192.168.0.2"), } svc = fx.Kubernetes().Service().Build() - ac, err = NewAccessControl(&svc, &sg) + ac, err = NewAccessControl(log.Noop(), &svc, &sg) ) assert.NoError(t, err) @@ -1397,7 +1398,7 @@ func TestAccessControl_CleanSecurityGroup(t *testing.T) { netip.MustParseAddr("192.168.0.2"), } svc = fx.Kubernetes().Service().Build() - ac, err = NewAccessControl(&svc, &sg) + ac, err = NewAccessControl(log.Noop(), &svc, &sg) ) assert.NoError(t, err) diff --git a/pkg/provider/loadbalancer/securitygroup/securitygroup.go b/pkg/provider/loadbalancer/securitygroup/securitygroup.go index b5d01b38dd..789b922171 100644 --- a/pkg/provider/loadbalancer/securitygroup/securitygroup.go +++ b/pkg/provider/loadbalancer/securitygroup/securitygroup.go @@ -25,7 +25,7 @@ import ( "strconv" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - "k8s.io/klog/v2" + "github.com/go-logr/logr" "k8s.io/utils/ptr" "sigs.k8s.io/cloud-provider-azure/pkg/consts" @@ -59,7 +59,7 @@ var ( // RuleHelper manages security rules within a security group. 
type RuleHelper struct { - logger klog.Logger + logger logr.Logger sg *network.SecurityGroup snapshot []byte @@ -69,7 +69,7 @@ type RuleHelper struct { priorities map[int32]string } -func NewSecurityGroupHelper(sg *network.SecurityGroup) (*RuleHelper, error) { +func NewSecurityGroupHelper(logger logr.Logger, sg *network.SecurityGroup) (*RuleHelper, error) { if sg == nil || sg.Name == nil || sg.SecurityGroupPropertiesFormat == nil || @@ -79,7 +79,6 @@ func NewSecurityGroupHelper(sg *network.SecurityGroup) (*RuleHelper, error) { return nil, ErrInvalidSecurityGroup } var ( - logger = klog.Background().WithName("RuleHelper").WithValues("security-group-name", ptr.To(sg.Name)) rules = make(map[string]*network.SecurityRule, len(*sg.SecurityGroupPropertiesFormat.SecurityRules)) priorities = make(map[int32]string, len(*sg.SecurityGroupPropertiesFormat.SecurityRules)) ) @@ -92,7 +91,7 @@ func NewSecurityGroupHelper(sg *network.SecurityGroup) (*RuleHelper, error) { snapshot := makeSecurityGroupSnapshot(sg) return &RuleHelper{ - logger: logger, + logger: logger.WithName("RuleHelper"), sg: sg, rules: rules, diff --git a/pkg/provider/loadbalancer/securitygroup/securitygroup_test.go b/pkg/provider/loadbalancer/securitygroup/securitygroup_test.go index f3e21e129b..3da61f130b 100644 --- a/pkg/provider/loadbalancer/securitygroup/securitygroup_test.go +++ b/pkg/provider/loadbalancer/securitygroup/securitygroup_test.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/internal/testutil" "sigs.k8s.io/cloud-provider-azure/internal/testutil/fixture" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" . "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup" //nolint:revive @@ -34,7 +35,7 @@ import ( func ExpectNewSecurityGroupHelper(t *testing.T, sg *network.SecurityGroup) *RuleHelper { t.Helper() - helper, err := NewSecurityGroupHelper(sg) + helper, err := NewSecurityGroupHelper(log.Noop(), sg) if err != nil { assert.NoError(t, err) } @@ -43,28 +44,28 @@ func ExpectNewSecurityGroupHelper(t *testing.T, sg *network.SecurityGroup) *Rule func TestNewSecurityGroupHelper(t *testing.T) { { - _, err := NewSecurityGroupHelper(nil) + _, err := NewSecurityGroupHelper(log.Noop(), nil) assert.ErrorIs(t, err, ErrInvalidSecurityGroup) } { - _, err := NewSecurityGroupHelper(&network.SecurityGroup{}) + _, err := NewSecurityGroupHelper(log.Noop(), &network.SecurityGroup{}) assert.ErrorIs(t, err, ErrInvalidSecurityGroup) } { - _, err := NewSecurityGroupHelper(&network.SecurityGroup{ + _, err := NewSecurityGroupHelper(log.Noop(), &network.SecurityGroup{ Name: ptr.To("nsg"), }) assert.ErrorIs(t, err, ErrInvalidSecurityGroup) } { - _, err := NewSecurityGroupHelper(&network.SecurityGroup{ + _, err := NewSecurityGroupHelper(log.Noop(), &network.SecurityGroup{ Name: ptr.To("nsg"), SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{}, }) assert.ErrorIs(t, err, ErrInvalidSecurityGroup) } { - helper, err := NewSecurityGroupHelper(&network.SecurityGroup{ + helper, err := NewSecurityGroupHelper(log.Noop(), &network.SecurityGroup{ Name: ptr.To("nsg"), SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{ SecurityRules: &[]network.SecurityRule{},
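The truncated test above pins down the constructor's contract: `NewSecurityGroupHelper` now takes the caller's logger as its first argument (tests pass `log.Noop()` to stay silent), and it still validates its input before doing anything else — a group missing its name, properties, or rules slice yields `ErrInvalidSecurityGroup`. A sketch covering both points:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network"
	"k8s.io/utils/ptr"

	"sigs.k8s.io/cloud-provider-azure/pkg/log"
	"sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup"
)

func main() {
	// Incomplete groups are rejected up front.
	for _, sg := range []*network.SecurityGroup{
		nil,
		{},
		{Name: ptr.To("nsg")},
		{Name: ptr.To("nsg"), SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{}},
	} {
		_, err := securitygroup.NewSecurityGroupHelper(log.Noop(), sg)
		fmt.Println(err) // ErrInvalidSecurityGroup in each case
	}

	// A fully-formed group (name + properties + rules slice) succeeds;
	// the injected logger is scoped with WithName("RuleHelper") internally.
	_, err := securitygroup.NewSecurityGroupHelper(log.Noop(), &network.SecurityGroup{
		Name: ptr.To("nsg"),
		SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{
			SecurityRules: &[]network.SecurityRule{},
		},
	})
	fmt.Println(err) // <nil>
}
```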